diff --git a/docs/Recording file format.md b/docs/Recording file format.md index b01eaf4..875c638 100644 --- a/docs/Recording file format.md +++ b/docs/Recording file format.md @@ -23,7 +23,7 @@ Formats with only minor version incremented are backward compatible. VmaReplay application supports all older versions. Current version is: - 1,3 + 1,4 # Configuration @@ -204,6 +204,11 @@ No parameters. - pool : pointer +**vmaResizeAllocation** (min format version: 1.4) + +- allocation : pointer +- newSize : uint64 + # Data types **bool** @@ -228,7 +233,7 @@ It should not contain end-of-line characters - results are then undefined. # Example file Vulkan Memory Allocator,Calls recording - 1,3 + 1,4 Config,Begin PhysicalDevice,apiVersion,4198477 PhysicalDevice,driverVersion,8388653 @@ -284,4 +289,4 @@ It should not contain end-of-line characters - results are then undefined. 12552,0.695,0,vmaDestroyImage,000001D85B8B1620 12552,0.695,0,vmaDestroyBuffer,000001D85B8B16C0 12552,0.695,0,vmaDestroyBuffer,000001D85B8B1A80 - 12552,0.695,0,vmaDestroyAllocator \ No newline at end of file + 12552,0.695,0,vmaDestroyAllocator diff --git a/docs/html/globals.html b/docs/html/globals.html index e3c81b7..baa9589 100644 --- a/docs/html/globals.html +++ b/docs/html/globals.html @@ -316,7 +316,7 @@ $(function() { : vk_mem_alloc.h
  • VmaPoolCreateFlagBits -: vk_mem_alloc.h +: vk_mem_alloc.h
  • VmaPoolCreateFlags : vk_mem_alloc.h @@ -336,6 +336,9 @@ $(function() {
  • VmaRecordSettings : vk_mem_alloc.h
  • +
  • vmaResizeAllocation() +: vk_mem_alloc.h +
  • vmaSetAllocationUserData() : vk_mem_alloc.h
  • diff --git a/docs/html/globals_func.html b/docs/html/globals_func.html index bcad79b..01c7244 100644 --- a/docs/html/globals_func.html +++ b/docs/html/globals_func.html @@ -163,6 +163,9 @@ $(function() {
  • vmaMapMemory() : vk_mem_alloc.h
  • +
  • vmaResizeAllocation() +: vk_mem_alloc.h +
  • vmaSetAllocationUserData() : vk_mem_alloc.h
  • diff --git a/docs/html/search/all_10.js b/docs/html/search/all_10.js index f2abd6d..fa59846 100644 --- a/docs/html/search/all_10.js +++ b/docs/html/search/all_10.js @@ -107,6 +107,7 @@ var searchData= ['vmarecordflagbits',['VmaRecordFlagBits',['../vk__mem__alloc_8h.html#a4dd2c44642312a147a4e93373a6e64d2',1,'VmaRecordFlagBits(): vk_mem_alloc.h'],['../vk__mem__alloc_8h.html#ade20b626a6635ce1bf30ea53dea774e4',1,'VmaRecordFlagBits(): vk_mem_alloc.h']]], ['vmarecordflags',['VmaRecordFlags',['../vk__mem__alloc_8h.html#af3929a1a4547c592fc0b0e55ef452828',1,'vk_mem_alloc.h']]], ['vmarecordsettings',['VmaRecordSettings',['../struct_vma_record_settings.html',1,'VmaRecordSettings'],['../vk__mem__alloc_8h.html#a0ab61e87ff6365f1d59915eadc37a9f0',1,'VmaRecordSettings(): vk_mem_alloc.h']]], + ['vmaresizeallocation',['vmaResizeAllocation',['../vk__mem__alloc_8h.html#a0ff488958ca72b28e545880463cb8696',1,'vk_mem_alloc.h']]], ['vmasetallocationuserdata',['vmaSetAllocationUserData',['../vk__mem__alloc_8h.html#af9147d31ffc11d62fc187bde283ed14f',1,'vk_mem_alloc.h']]], ['vmasetcurrentframeindex',['vmaSetCurrentFrameIndex',['../vk__mem__alloc_8h.html#ade56bf8dc9f5a5eaddf5f119ed525236',1,'vk_mem_alloc.h']]], ['vmastatinfo',['VmaStatInfo',['../struct_vma_stat_info.html',1,'VmaStatInfo'],['../vk__mem__alloc_8h.html#a810b009a788ee8aac72a25b42ffbe31c',1,'VmaStatInfo(): vk_mem_alloc.h']]], diff --git a/docs/html/search/functions_0.js b/docs/html/search/functions_0.js index 0ab7deb..a3a5531 100644 --- a/docs/html/search/functions_0.js +++ b/docs/html/search/functions_0.js @@ -33,6 +33,7 @@ var searchData= ['vmainvalidateallocation',['vmaInvalidateAllocation',['../vk__mem__alloc_8h.html#a0d0eb0c1102268fa9a476d12ecbe4006',1,'vk_mem_alloc.h']]], ['vmamakepoolallocationslost',['vmaMakePoolAllocationsLost',['../vk__mem__alloc_8h.html#a736bd6cbda886f36c891727e73bd4024',1,'vk_mem_alloc.h']]], 
['vmamapmemory',['vmaMapMemory',['../vk__mem__alloc_8h.html#ad5bd1243512d099706de88168992f069',1,'vk_mem_alloc.h']]], + ['vmaresizeallocation',['vmaResizeAllocation',['../vk__mem__alloc_8h.html#a0ff488958ca72b28e545880463cb8696',1,'vk_mem_alloc.h']]], ['vmasetallocationuserdata',['vmaSetAllocationUserData',['../vk__mem__alloc_8h.html#af9147d31ffc11d62fc187bde283ed14f',1,'vk_mem_alloc.h']]], ['vmasetcurrentframeindex',['vmaSetCurrentFrameIndex',['../vk__mem__alloc_8h.html#ade56bf8dc9f5a5eaddf5f119ed525236',1,'vk_mem_alloc.h']]], ['vmatouchallocation',['vmaTouchAllocation',['../vk__mem__alloc_8h.html#a43d8ba9673c846f049089a5029d5c73a',1,'vk_mem_alloc.h']]], diff --git a/docs/html/vk__mem__alloc_8h.html b/docs/html/vk__mem__alloc_8h.html index 86a5da9..5d84dc1 100644 --- a/docs/html/vk__mem__alloc_8h.html +++ b/docs/html/vk__mem__alloc_8h.html @@ -310,6 +310,9 @@ Functions void vmaFreeMemory (VmaAllocator allocator, VmaAllocation allocation)  Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage(). More...
      +VkResult vmaResizeAllocation (VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize newSize) + Tries to resize an allocation in place, if there is enough free memory after it. More...
    +  void vmaGetAllocationInfo (VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)  Returns current information about specified allocation and atomically marks it as used in current frame. More...
      @@ -2364,6 +2367,50 @@ Functions

    This function fails when used on allocation made in memory type that is not HOST_VISIBLE.

    This function always fails when called for allocation that was created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag. Such allocations cannot be mapped.

    + + + +

    ◆ vmaResizeAllocation()

    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + +
    VkResult vmaResizeAllocation (VmaAllocator allocator,
    VmaAllocation allocation,
    VkDeviceSize newSize 
    )
    +
    + +

    Tries to resize an allocation in place, if there is enough free memory after it.

    +

    Tries to change allocation's size without moving or reallocating it. You can both shrink and grow allocation size. When growing, it succeeds only when the allocation belongs to a memory block with enough free space after it.

    +

    Returns VK_SUCCESS if allocation's size has been successfully changed. Returns VK_ERROR_OUT_OF_POOL_MEMORY if allocation's size could not be changed.

    +

    After successful call to this function, VmaAllocationInfo::size of this allocation changes. All other parameters stay the same: memory pool and type, alignment, offset, mapped pointer.

    + +
    diff --git a/docs/html/vk__mem__alloc_8h_source.html b/docs/html/vk__mem__alloc_8h_source.html index b5e44ad..3e42ce3 100644 --- a/docs/html/vk__mem__alloc_8h_source.html +++ b/docs/html/vk__mem__alloc_8h_source.html @@ -65,7 +65,7 @@ $(function() {
    vk_mem_alloc.h
    -Go to the documentation of this file.
    1 //
    2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    1477 /*
    1478 Define this macro to 0/1 to disable/enable support for recording functionality,
    1479 available through VmaAllocatorCreateInfo::pRecordSettings.
    1480 */
    1481 #ifndef VMA_RECORDING_ENABLED
    1482  #ifdef _WIN32
    1483  #define VMA_RECORDING_ENABLED 1
    1484  #else
    1485  #define VMA_RECORDING_ENABLED 0
    1486  #endif
    1487 #endif
    1488 
    1489 #ifndef NOMINMAX
    1490  #define NOMINMAX // For windows.h
    1491 #endif
    1492 
    1493 #include <vulkan/vulkan.h>
    1494 
    1495 #if VMA_RECORDING_ENABLED
    1496  #include <windows.h>
    1497 #endif
    1498 
    1499 #if !defined(VMA_DEDICATED_ALLOCATION)
    1500  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
    1501  #define VMA_DEDICATED_ALLOCATION 1
    1502  #else
    1503  #define VMA_DEDICATED_ALLOCATION 0
    1504  #endif
    1505 #endif
    1506 
    1516 VK_DEFINE_HANDLE(VmaAllocator)
    1517 
    1518 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    1520  VmaAllocator allocator,
    1521  uint32_t memoryType,
    1522  VkDeviceMemory memory,
    1523  VkDeviceSize size);
    1525 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    1526  VmaAllocator allocator,
    1527  uint32_t memoryType,
    1528  VkDeviceMemory memory,
    1529  VkDeviceSize size);
    1530 
    1544 
    1574 
    1577 typedef VkFlags VmaAllocatorCreateFlags;
    1578 
    1583 typedef struct VmaVulkanFunctions {
    1584  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    1585  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    1586  PFN_vkAllocateMemory vkAllocateMemory;
    1587  PFN_vkFreeMemory vkFreeMemory;
    1588  PFN_vkMapMemory vkMapMemory;
    1589  PFN_vkUnmapMemory vkUnmapMemory;
    1590  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    1591  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    1592  PFN_vkBindBufferMemory vkBindBufferMemory;
    1593  PFN_vkBindImageMemory vkBindImageMemory;
    1594  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    1595  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    1596  PFN_vkCreateBuffer vkCreateBuffer;
    1597  PFN_vkDestroyBuffer vkDestroyBuffer;
    1598  PFN_vkCreateImage vkCreateImage;
    1599  PFN_vkDestroyImage vkDestroyImage;
    1600 #if VMA_DEDICATED_ALLOCATION
    1601  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    1602  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
    1603 #endif
    1605 
    1607 typedef enum VmaRecordFlagBits {
    1614 
    1617 typedef VkFlags VmaRecordFlags;
    1618 
    1620 typedef struct VmaRecordSettings
    1621 {
    1631  const char* pFilePath;
    1633 
    1636 {
    1640 
    1641  VkPhysicalDevice physicalDevice;
    1643 
    1644  VkDevice device;
    1646 
    1649 
    1650  const VkAllocationCallbacks* pAllocationCallbacks;
    1652 
    1691  const VkDeviceSize* pHeapSizeLimit;
    1712 
    1714 VkResult vmaCreateAllocator(
    1715  const VmaAllocatorCreateInfo* pCreateInfo,
    1716  VmaAllocator* pAllocator);
    1717 
    1719 void vmaDestroyAllocator(
    1720  VmaAllocator allocator);
    1721 
    1727  VmaAllocator allocator,
    1728  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    1729 
    1735  VmaAllocator allocator,
    1736  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    1737 
    1745  VmaAllocator allocator,
    1746  uint32_t memoryTypeIndex,
    1747  VkMemoryPropertyFlags* pFlags);
    1748 
    1758  VmaAllocator allocator,
    1759  uint32_t frameIndex);
    1760 
    1763 typedef struct VmaStatInfo
    1764 {
    1766  uint32_t blockCount;
    1772  VkDeviceSize usedBytes;
    1774  VkDeviceSize unusedBytes;
    1777 } VmaStatInfo;
    1778 
    1780 typedef struct VmaStats
    1781 {
    1782  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    1783  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    1785 } VmaStats;
    1786 
    1788 void vmaCalculateStats(
    1789  VmaAllocator allocator,
    1790  VmaStats* pStats);
    1791 
    1792 #define VMA_STATS_STRING_ENABLED 1
    1793 
    1794 #if VMA_STATS_STRING_ENABLED
    1795 
    1797 
    1799 void vmaBuildStatsString(
    1800  VmaAllocator allocator,
    1801  char** ppStatsString,
    1802  VkBool32 detailedMap);
    1803 
    1804 void vmaFreeStatsString(
    1805  VmaAllocator allocator,
    1806  char* pStatsString);
    1807 
    1808 #endif // #if VMA_STATS_STRING_ENABLED
    1809 
    1818 VK_DEFINE_HANDLE(VmaPool)
    1819 
    1820 typedef enum VmaMemoryUsage
    1821 {
    1870 } VmaMemoryUsage;
    1871 
    1886 
    1941 
    1954 
    1964 
    1971 
    1975 
    1977 {
    1990  VkMemoryPropertyFlags requiredFlags;
    1995  VkMemoryPropertyFlags preferredFlags;
    2003  uint32_t memoryTypeBits;
    2016  void* pUserData;
    2018 
    2035 VkResult vmaFindMemoryTypeIndex(
    2036  VmaAllocator allocator,
    2037  uint32_t memoryTypeBits,
    2038  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2039  uint32_t* pMemoryTypeIndex);
    2040 
    2054  VmaAllocator allocator,
    2055  const VkBufferCreateInfo* pBufferCreateInfo,
    2056  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2057  uint32_t* pMemoryTypeIndex);
    2058 
    2072  VmaAllocator allocator,
    2073  const VkImageCreateInfo* pImageCreateInfo,
    2074  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2075  uint32_t* pMemoryTypeIndex);
    2076 
    2097 
    2114 
    2125 
    2131 
    2134 typedef VkFlags VmaPoolCreateFlags;
    2135 
    2138 typedef struct VmaPoolCreateInfo {
    2153  VkDeviceSize blockSize;
    2182 
    2185 typedef struct VmaPoolStats {
    2188  VkDeviceSize size;
    2191  VkDeviceSize unusedSize;
    2204  VkDeviceSize unusedRangeSizeMax;
    2207  size_t blockCount;
    2208 } VmaPoolStats;
    2209 
    2216 VkResult vmaCreatePool(
    2217  VmaAllocator allocator,
    2218  const VmaPoolCreateInfo* pCreateInfo,
    2219  VmaPool* pPool);
    2220 
    2223 void vmaDestroyPool(
    2224  VmaAllocator allocator,
    2225  VmaPool pool);
    2226 
    2233 void vmaGetPoolStats(
    2234  VmaAllocator allocator,
    2235  VmaPool pool,
    2236  VmaPoolStats* pPoolStats);
    2237 
    2245  VmaAllocator allocator,
    2246  VmaPool pool,
    2247  size_t* pLostAllocationCount);
    2248 
    2263 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
    2264 
    2289 VK_DEFINE_HANDLE(VmaAllocation)
    2290 
    2291 
    2293 typedef struct VmaAllocationInfo {
    2298  uint32_t memoryType;
    2307  VkDeviceMemory deviceMemory;
    2312  VkDeviceSize offset;
    2317  VkDeviceSize size;
    2331  void* pUserData;
    2333 
    2344 VkResult vmaAllocateMemory(
    2345  VmaAllocator allocator,
    2346  const VkMemoryRequirements* pVkMemoryRequirements,
    2347  const VmaAllocationCreateInfo* pCreateInfo,
    2348  VmaAllocation* pAllocation,
    2349  VmaAllocationInfo* pAllocationInfo);
    2350 
    2358  VmaAllocator allocator,
    2359  VkBuffer buffer,
    2360  const VmaAllocationCreateInfo* pCreateInfo,
    2361  VmaAllocation* pAllocation,
    2362  VmaAllocationInfo* pAllocationInfo);
    2363 
    2365 VkResult vmaAllocateMemoryForImage(
    2366  VmaAllocator allocator,
    2367  VkImage image,
    2368  const VmaAllocationCreateInfo* pCreateInfo,
    2369  VmaAllocation* pAllocation,
    2370  VmaAllocationInfo* pAllocationInfo);
    2371 
    2373 void vmaFreeMemory(
    2374  VmaAllocator allocator,
    2375  VmaAllocation allocation);
    2376 
    2394  VmaAllocator allocator,
    2395  VmaAllocation allocation,
    2396  VmaAllocationInfo* pAllocationInfo);
    2397 
    2412 VkBool32 vmaTouchAllocation(
    2413  VmaAllocator allocator,
    2414  VmaAllocation allocation);
    2415 
    2430  VmaAllocator allocator,
    2431  VmaAllocation allocation,
    2432  void* pUserData);
    2433 
    2445  VmaAllocator allocator,
    2446  VmaAllocation* pAllocation);
    2447 
    2482 VkResult vmaMapMemory(
    2483  VmaAllocator allocator,
    2484  VmaAllocation allocation,
    2485  void** ppData);
    2486 
    2491 void vmaUnmapMemory(
    2492  VmaAllocator allocator,
    2493  VmaAllocation allocation);
    2494 
    2507 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2508 
    2521 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2522 
    2539 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
    2540 
    2542 typedef struct VmaDefragmentationInfo {
    2547  VkDeviceSize maxBytesToMove;
    2554 
    2556 typedef struct VmaDefragmentationStats {
    2558  VkDeviceSize bytesMoved;
    2560  VkDeviceSize bytesFreed;
    2566 
    2605 VkResult vmaDefragment(
    2606  VmaAllocator allocator,
    2607  VmaAllocation* pAllocations,
    2608  size_t allocationCount,
    2609  VkBool32* pAllocationsChanged,
    2610  const VmaDefragmentationInfo *pDefragmentationInfo,
    2611  VmaDefragmentationStats* pDefragmentationStats);
    2612 
    2625 VkResult vmaBindBufferMemory(
    2626  VmaAllocator allocator,
    2627  VmaAllocation allocation,
    2628  VkBuffer buffer);
    2629 
    2642 VkResult vmaBindImageMemory(
    2643  VmaAllocator allocator,
    2644  VmaAllocation allocation,
    2645  VkImage image);
    2646 
    2673 VkResult vmaCreateBuffer(
    2674  VmaAllocator allocator,
    2675  const VkBufferCreateInfo* pBufferCreateInfo,
    2676  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2677  VkBuffer* pBuffer,
    2678  VmaAllocation* pAllocation,
    2679  VmaAllocationInfo* pAllocationInfo);
    2680 
    2692 void vmaDestroyBuffer(
    2693  VmaAllocator allocator,
    2694  VkBuffer buffer,
    2695  VmaAllocation allocation);
    2696 
    2698 VkResult vmaCreateImage(
    2699  VmaAllocator allocator,
    2700  const VkImageCreateInfo* pImageCreateInfo,
    2701  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2702  VkImage* pImage,
    2703  VmaAllocation* pAllocation,
    2704  VmaAllocationInfo* pAllocationInfo);
    2705 
    2717 void vmaDestroyImage(
    2718  VmaAllocator allocator,
    2719  VkImage image,
    2720  VmaAllocation allocation);
    2721 
    2722 #ifdef __cplusplus
    2723 }
    2724 #endif
    2725 
    2726 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    2727 
    2728 // For Visual Studio IntelliSense.
    2729 #if defined(__cplusplus) && defined(__INTELLISENSE__)
    2730 #define VMA_IMPLEMENTATION
    2731 #endif
    2732 
    2733 #ifdef VMA_IMPLEMENTATION
    2734 #undef VMA_IMPLEMENTATION
    2735 
    2736 #include <cstdint>
    2737 #include <cstdlib>
    2738 #include <cstring>
    2739 
    2740 /*******************************************************************************
    2741 CONFIGURATION SECTION
    2742 
    2743 Define some of these macros before each #include of this header or change them
    2744 here if you need other then default behavior depending on your environment.
    2745 */
    2746 
    2747 /*
    2748 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    2749 internally, like:
    2750 
    2751  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    2752 
    2753 Define to 0 if you are going to provide you own pointers to Vulkan functions via
    2754 VmaAllocatorCreateInfo::pVulkanFunctions.
    2755 */
    2756 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    2757 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    2758 #endif
    2759 
    2760 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    2761 //#define VMA_USE_STL_CONTAINERS 1
    2762 
    2763 /* Set this macro to 1 to make the library including and using STL containers:
    2764 std::pair, std::vector, std::list, std::unordered_map.
    2765 
    2766 Set it to 0 or undefined to make the library using its own implementation of
    2767 the containers.
    2768 */
    2769 #if VMA_USE_STL_CONTAINERS
    2770  #define VMA_USE_STL_VECTOR 1
    2771  #define VMA_USE_STL_UNORDERED_MAP 1
    2772  #define VMA_USE_STL_LIST 1
    2773 #endif
    2774 
    2775 #if VMA_USE_STL_VECTOR
    2776  #include <vector>
    2777 #endif
    2778 
    2779 #if VMA_USE_STL_UNORDERED_MAP
    2780  #include <unordered_map>
    2781 #endif
    2782 
    2783 #if VMA_USE_STL_LIST
    2784  #include <list>
    2785 #endif
    2786 
    2787 /*
    2788 Following headers are used in this CONFIGURATION section only, so feel free to
    2789 remove them if not needed.
    2790 */
    2791 #include <cassert> // for assert
    2792 #include <algorithm> // for min, max
    2793 #include <mutex> // for std::mutex
    2794 #include <atomic> // for std::atomic
    2795 
    2796 #ifndef VMA_NULL
    2797  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    2798  #define VMA_NULL nullptr
    2799 #endif
    2800 
    2801 #if defined(__APPLE__) || defined(__ANDROID__)
    2802 #include <cstdlib>
    2803 void *aligned_alloc(size_t alignment, size_t size)
    2804 {
    2805  // alignment must be >= sizeof(void*)
    2806  if(alignment < sizeof(void*))
    2807  {
    2808  alignment = sizeof(void*);
    2809  }
    2810 
    2811  void *pointer;
    2812  if(posix_memalign(&pointer, alignment, size) == 0)
    2813  return pointer;
    2814  return VMA_NULL;
    2815 }
    2816 #endif
    2817 
    2818 // If your compiler is not compatible with C++11 and definition of
    2819 // aligned_alloc() function is missing, uncommeting following line may help:
    2820 
    2821 //#include <malloc.h>
    2822 
    2823 // Normal assert to check for programmer's errors, especially in Debug configuration.
    2824 #ifndef VMA_ASSERT
    2825  #ifdef _DEBUG
    2826  #define VMA_ASSERT(expr) assert(expr)
    2827  #else
    2828  #define VMA_ASSERT(expr)
    2829  #endif
    2830 #endif
    2831 
    2832 // Assert that will be called very often, like inside data structures e.g. operator[].
    2833 // Making it non-empty can make program slow.
    2834 #ifndef VMA_HEAVY_ASSERT
    2835  #ifdef _DEBUG
    2836  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    2837  #else
    2838  #define VMA_HEAVY_ASSERT(expr)
    2839  #endif
    2840 #endif
    2841 
    2842 #ifndef VMA_ALIGN_OF
    2843  #define VMA_ALIGN_OF(type) (__alignof(type))
    2844 #endif
    2845 
    2846 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    2847  #if defined(_WIN32)
    2848  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    2849  #else
    2850  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    2851  #endif
    2852 #endif
    2853 
    2854 #ifndef VMA_SYSTEM_FREE
    2855  #if defined(_WIN32)
    2856  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    2857  #else
    2858  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    2859  #endif
    2860 #endif
    2861 
    2862 #ifndef VMA_MIN
    2863  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    2864 #endif
    2865 
    2866 #ifndef VMA_MAX
    2867  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    2868 #endif
    2869 
    2870 #ifndef VMA_SWAP
    2871  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    2872 #endif
    2873 
    2874 #ifndef VMA_SORT
    2875  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    2876 #endif
    2877 
    2878 #ifndef VMA_DEBUG_LOG
    2879  #define VMA_DEBUG_LOG(format, ...)
    2880  /*
    2881  #define VMA_DEBUG_LOG(format, ...) do { \
    2882  printf(format, __VA_ARGS__); \
    2883  printf("\n"); \
    2884  } while(false)
    2885  */
    2886 #endif
    2887 
    2888 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    2889 #if VMA_STATS_STRING_ENABLED
    2890  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    2891  {
    2892  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    2893  }
    2894  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    2895  {
    2896  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    2897  }
    2898  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    2899  {
    2900  snprintf(outStr, strLen, "%p", ptr);
    2901  }
    2902 #endif
    2903 
    2904 #ifndef VMA_MUTEX
    2905  class VmaMutex
    2906  {
    2907  public:
    2908  VmaMutex() { }
    2909  ~VmaMutex() { }
    2910  void Lock() { m_Mutex.lock(); }
    2911  void Unlock() { m_Mutex.unlock(); }
    2912  private:
    2913  std::mutex m_Mutex;
    2914  };
    2915  #define VMA_MUTEX VmaMutex
    2916 #endif
    2917 
    2918 /*
    2919 If providing your own implementation, you need to implement a subset of std::atomic:
    2920 
    2921 - Constructor(uint32_t desired)
    2922 - uint32_t load() const
    2923 - void store(uint32_t desired)
    2924 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    2925 */
    2926 #ifndef VMA_ATOMIC_UINT32
    2927  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    2928 #endif
    2929 
    2930 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    2931 
    2935  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    2936 #endif
    2937 
    2938 #ifndef VMA_DEBUG_ALIGNMENT
    2939 
    2943  #define VMA_DEBUG_ALIGNMENT (1)
    2944 #endif
    2945 
    2946 #ifndef VMA_DEBUG_MARGIN
    2947 
    2951  #define VMA_DEBUG_MARGIN (0)
    2952 #endif
    2953 
    2954 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    2955 
    2959  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
    2960 #endif
    2961 
    2962 #ifndef VMA_DEBUG_DETECT_CORRUPTION
    2963 
    2968  #define VMA_DEBUG_DETECT_CORRUPTION (0)
    2969 #endif
    2970 
    2971 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    2972 
    2976  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    2977 #endif
    2978 
    2979 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    2980 
    2984  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    2985 #endif
    2986 
    2987 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    2988  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
    2990 #endif
    2991 
    2992 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    2993  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
    2995 #endif
    2996 
    2997 #ifndef VMA_CLASS_NO_COPY
    2998  #define VMA_CLASS_NO_COPY(className) \
    2999  private: \
    3000  className(const className&) = delete; \
    3001  className& operator=(const className&) = delete;
    3002 #endif
    3003 
    3004 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    3005 
    3006 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
    3007 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
    3008 
    3009 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
    3010 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
    3011 
    3012 /*******************************************************************************
    3013 END OF CONFIGURATION
    3014 */
    3015 
    3016 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    3017  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    3018 
    3019 // Returns number of bits set to 1 in (v).
    3020 static inline uint32_t VmaCountBitsSet(uint32_t v)
    3021 {
    3022  uint32_t c = v - ((v >> 1) & 0x55555555);
    3023  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    3024  c = ((c >> 4) + c) & 0x0F0F0F0F;
    3025  c = ((c >> 8) + c) & 0x00FF00FF;
    3026  c = ((c >> 16) + c) & 0x0000FFFF;
    3027  return c;
    3028 }
    3029 
    3030 // Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
    3031 // Use types like uint32_t, uint64_t as T.
    3032 template <typename T>
    3033 static inline T VmaAlignUp(T val, T align)
    3034 {
    3035  return (val + align - 1) / align * align;
    3036 }
    3037 // Aligns given value down to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 8.
    3038 // Use types like uint32_t, uint64_t as T.
    3039 template <typename T>
    3040 static inline T VmaAlignDown(T val, T align)
    3041 {
    3042  return val / align * align;
    3043 }
    3044 
    3045 // Division with mathematical rounding to nearest number.
    3046 template <typename T>
    3047 static inline T VmaRoundDiv(T x, T y)
    3048 {
    3049  return (x + (y / (T)2)) / y;
    3050 }
    3051 
    3052 /*
    3053 Returns true if given number is a power of two.
    3054 T must be unsigned integer number or signed integer but always nonnegative.
    3055 For 0 returns true.
    3056 */
    3057 template <typename T>
    3058 inline bool VmaIsPow2(T x)
    3059 {
    3060  return (x & (x-1)) == 0;
    3061 }
    3062 
    3063 // Returns smallest power of 2 greater or equal to v.
    3064 static inline uint32_t VmaNextPow2(uint32_t v)
    3065 {
    3066  v--;
    3067  v |= v >> 1;
    3068  v |= v >> 2;
    3069  v |= v >> 4;
    3070  v |= v >> 8;
    3071  v |= v >> 16;
    3072  v++;
    3073  return v;
    3074 }
    3075 static inline uint64_t VmaNextPow2(uint64_t v)
    3076 {
    3077  v--;
    3078  v |= v >> 1;
    3079  v |= v >> 2;
    3080  v |= v >> 4;
    3081  v |= v >> 8;
    3082  v |= v >> 16;
    3083  v |= v >> 32;
    3084  v++;
    3085  return v;
    3086 }
    3087 
    3088 // Returns largest power of 2 less or equal to v.
    3089 static inline uint32_t VmaPrevPow2(uint32_t v)
    3090 {
    3091  v |= v >> 1;
    3092  v |= v >> 2;
    3093  v |= v >> 4;
    3094  v |= v >> 8;
    3095  v |= v >> 16;
    3096  v = v ^ (v >> 1);
    3097  return v;
    3098 }
    3099 static inline uint64_t VmaPrevPow2(uint64_t v)
    3100 {
    3101  v |= v >> 1;
    3102  v |= v >> 2;
    3103  v |= v >> 4;
    3104  v |= v >> 8;
    3105  v |= v >> 16;
    3106  v |= v >> 32;
    3107  v = v ^ (v >> 1);
    3108  return v;
    3109 }
    3110 
    3111 static inline bool VmaStrIsEmpty(const char* pStr)
    3112 {
    3113  return pStr == VMA_NULL || *pStr == '\0';
    3114 }
    3115 
    3116 static const char* VmaAlgorithmToStr(uint32_t algorithm)
    3117 {
    3118  switch(algorithm)
    3119  {
    3121  return "Linear";
    3123  return "Buddy";
    3124  case 0:
    3125  return "Default";
    3126  default:
    3127  VMA_ASSERT(0);
    3128  return "";
    3129  }
    3130 }
    3131 
    3132 #ifndef VMA_SORT
    3133 
    3134 template<typename Iterator, typename Compare>
    3135 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
    3136 {
    3137  Iterator centerValue = end; --centerValue;
    3138  Iterator insertIndex = beg;
    3139  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    3140  {
    3141  if(cmp(*memTypeIndex, *centerValue))
    3142  {
    3143  if(insertIndex != memTypeIndex)
    3144  {
    3145  VMA_SWAP(*memTypeIndex, *insertIndex);
    3146  }
    3147  ++insertIndex;
    3148  }
    3149  }
    3150  if(insertIndex != centerValue)
    3151  {
    3152  VMA_SWAP(*insertIndex, *centerValue);
    3153  }
    3154  return insertIndex;
    3155 }
    3156 
    3157 template<typename Iterator, typename Compare>
    3158 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
    3159 {
    3160  if(beg < end)
    3161  {
    3162  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
    3163  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
    3164  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    3165  }
    3166 }
    3167 
    3168 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    3169 
    3170 #endif // #ifndef VMA_SORT
    3171 
    3172 /*
    3173 Returns true if two memory blocks occupy overlapping pages.
    3174 ResourceA must be in less memory offset than ResourceB.
    3175 
    3176 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
    3177 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
    3178 */
    3179 static inline bool VmaBlocksOnSamePage(
    3180  VkDeviceSize resourceAOffset,
    3181  VkDeviceSize resourceASize,
    3182  VkDeviceSize resourceBOffset,
    3183  VkDeviceSize pageSize)
    3184 {
    3185  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    3186  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    3187  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    3188  VkDeviceSize resourceBStart = resourceBOffset;
    3189  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    3190  return resourceAEndPage == resourceBStartPage;
    3191 }
    3192 
// Kind of content stored in a suballocation. Used to decide whether two
// neighboring suballocations must be separated by bufferImageGranularity
// (see VmaIsBufferImageGranularityConflict).
enum VmaSuballocationType
{
    // Range is not in use.
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    // Resource type not known - treated conservatively as conflicting with everything.
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    // Image with unknown tiling.
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};
    3203 
    3204 /*
    3205 Returns true if given suballocation types could conflict and must respect
    3206 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
    3207 or linear image and another one is optimal image. If type is unknown, behave
    3208 conservatively.
    3209 */
    3210 static inline bool VmaIsBufferImageGranularityConflict(
    3211  VmaSuballocationType suballocType1,
    3212  VmaSuballocationType suballocType2)
    3213 {
    3214  if(suballocType1 > suballocType2)
    3215  {
    3216  VMA_SWAP(suballocType1, suballocType2);
    3217  }
    3218 
    3219  switch(suballocType1)
    3220  {
    3221  case VMA_SUBALLOCATION_TYPE_FREE:
    3222  return false;
    3223  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
    3224  return true;
    3225  case VMA_SUBALLOCATION_TYPE_BUFFER:
    3226  return
    3227  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3228  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3229  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
    3230  return
    3231  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3232  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
    3233  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3234  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
    3235  return
    3236  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3237  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
    3238  return false;
    3239  default:
    3240  VMA_ASSERT(0);
    3241  return true;
    3242  }
    3243 }
    3244 
    3245 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
    3246 {
    3247  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    3248  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3249  for(size_t i = 0; i < numberCount; ++i, ++pDst)
    3250  {
    3251  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    3252  }
    3253 }
    3254 
    3255 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
    3256 {
    3257  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    3258  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3259  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    3260  {
    3261  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
    3262  {
    3263  return false;
    3264  }
    3265  }
    3266  return true;
    3267 }
    3268 
    3269 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    // When useMutex is false no lock is taken at all (used when the caller
    // guarantees external synchronization).
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if(m_pMutex)
        {
            m_pMutex->Lock();
        }
    }

    ~VmaMutexLock()
    {
        if(m_pMutex)
        {
            m_pMutex->Unlock();
        }
    }

private:
    // Null when locking is disabled; otherwise the mutex held for this scope.
    VMA_MUTEX* m_pMutex;
};
    3294 
    3295 #if VMA_DEBUG_GLOBAL_MUTEX
    3296  static VMA_MUTEX gDebugGlobalMutex;
    3297  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    3298 #else
    3299  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    3300 #endif
    3301 
    3302 // Minimum size of a free suballocation to register it in the free suballocation collection.
    3303 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    3304 
    3305 /*
    3306 Performs binary search and returns iterator to first element that is greater or
    3307 equal to (key), according to comparison (cmp).
    3308 
    3309 Cmp should return true if first argument is less than second argument.
    3310 
    3311 Returned value is the found element, if present in the collection or place where
    3312 new element with value (key) should be inserted.
    3313 */
    3314 template <typename CmpLess, typename IterT, typename KeyT>
    3315 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
    3316 {
    3317  size_t down = 0, up = (end - beg);
    3318  while(down < up)
    3319  {
    3320  const size_t mid = (down + up) / 2;
    3321  if(cmp(*(beg+mid), key))
    3322  {
    3323  down = mid + 1;
    3324  }
    3325  else
    3326  {
    3327  up = mid;
    3328  }
    3329  }
    3330  return beg + down;
    3331 }
    3332 
    3334 // Memory allocation
    3335 
    3336 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
    3337 {
    3338  if((pAllocationCallbacks != VMA_NULL) &&
    3339  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    3340  {
    3341  return (*pAllocationCallbacks->pfnAllocation)(
    3342  pAllocationCallbacks->pUserData,
    3343  size,
    3344  alignment,
    3345  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    3346  }
    3347  else
    3348  {
    3349  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    3350  }
    3351 }
    3352 
    3353 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
    3354 {
    3355  if((pAllocationCallbacks != VMA_NULL) &&
    3356  (pAllocationCallbacks->pfnFree != VMA_NULL))
    3357  {
    3358  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    3359  }
    3360  else
    3361  {
    3362  VMA_SYSTEM_FREE(ptr);
    3363  }
    3364 }
    3365 
// Allocates raw, correctly aligned storage for one T. No constructor is run -
// pair with vma_new / placement new.
template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}
    3371 
// Allocates raw, correctly aligned storage for count objects of type T.
// No constructors are run.
// NOTE(review): sizeof(T) * count is not checked for overflow - callers are
// trusted to pass sane counts.
template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}
    3377 
    3378 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
    3379 
    3380 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    3381 
// Destroys and frees a single object created with vma_new.
// NOTE(review): unlike vma_delete_array, ptr is dereferenced without a null
// check - all call sites must pass a valid object; confirm before relying on
// null tolerance.
template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}
    3388 
    3389 template<typename T>
    3390 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    3391 {
    3392  if(ptr != VMA_NULL)
    3393  {
    3394  for(size_t i = count; i--; )
    3395  {
    3396  ptr[i].~T();
    3397  }
    3398  VmaFree(pAllocationCallbacks, ptr);
    3399  }
    3400 }
    3401 
    3402 // STL-compatible allocator.
template<typename T>
class VmaStlAllocator
{
public:
    // Callbacks used for every allocation; may be null, in which case the
    // system aligned malloc/free is used (see VmaMalloc/VmaFree).
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    // Rebinding converting constructor required by the Allocator concept.
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    // Two allocators are interchangeable iff they use the same callbacks.
    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};
    3429 
    3430 #if VMA_USE_STL_VECTOR
    3431 
    3432 #define VmaVector std::vector
    3433 
// Inserts item at given index of a std::vector (VMA_USE_STL_VECTOR variant).
template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}
    3439 
// Removes the element at given index of a std::vector (VMA_USE_STL_VECTOR variant).
template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}
    3445 
    3446 #else // #if VMA_USE_STL_VECTOR
    3447 
    3448 /* Class with interface compatible with subset of std::vector.
    3449 T must be POD because constructors and destructors are not called and memcpy is
    3450 used for these objects. */
    3451 template<typename T, typename AllocatorT>
    3452 class VmaVector
    3453 {
    3454 public:
    3455  typedef T value_type;
    3456 
    3457  VmaVector(const AllocatorT& allocator) :
    3458  m_Allocator(allocator),
    3459  m_pArray(VMA_NULL),
    3460  m_Count(0),
    3461  m_Capacity(0)
    3462  {
    3463  }
    3464 
    3465  VmaVector(size_t count, const AllocatorT& allocator) :
    3466  m_Allocator(allocator),
    3467  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    3468  m_Count(count),
    3469  m_Capacity(count)
    3470  {
    3471  }
    3472 
    3473  VmaVector(const VmaVector<T, AllocatorT>& src) :
    3474  m_Allocator(src.m_Allocator),
    3475  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    3476  m_Count(src.m_Count),
    3477  m_Capacity(src.m_Count)
    3478  {
    3479  if(m_Count != 0)
    3480  {
    3481  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    3482  }
    3483  }
    3484 
    3485  ~VmaVector()
    3486  {
    3487  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3488  }
    3489 
    3490  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    3491  {
    3492  if(&rhs != this)
    3493  {
    3494  resize(rhs.m_Count);
    3495  if(m_Count != 0)
    3496  {
    3497  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    3498  }
    3499  }
    3500  return *this;
    3501  }
    3502 
    3503  bool empty() const { return m_Count == 0; }
    3504  size_t size() const { return m_Count; }
    3505  T* data() { return m_pArray; }
    3506  const T* data() const { return m_pArray; }
    3507 
    3508  T& operator[](size_t index)
    3509  {
    3510  VMA_HEAVY_ASSERT(index < m_Count);
    3511  return m_pArray[index];
    3512  }
    3513  const T& operator[](size_t index) const
    3514  {
    3515  VMA_HEAVY_ASSERT(index < m_Count);
    3516  return m_pArray[index];
    3517  }
    3518 
    3519  T& front()
    3520  {
    3521  VMA_HEAVY_ASSERT(m_Count > 0);
    3522  return m_pArray[0];
    3523  }
    3524  const T& front() const
    3525  {
    3526  VMA_HEAVY_ASSERT(m_Count > 0);
    3527  return m_pArray[0];
    3528  }
    3529  T& back()
    3530  {
    3531  VMA_HEAVY_ASSERT(m_Count > 0);
    3532  return m_pArray[m_Count - 1];
    3533  }
    3534  const T& back() const
    3535  {
    3536  VMA_HEAVY_ASSERT(m_Count > 0);
    3537  return m_pArray[m_Count - 1];
    3538  }
    3539 
    3540  void reserve(size_t newCapacity, bool freeMemory = false)
    3541  {
    3542  newCapacity = VMA_MAX(newCapacity, m_Count);
    3543 
    3544  if((newCapacity < m_Capacity) && !freeMemory)
    3545  {
    3546  newCapacity = m_Capacity;
    3547  }
    3548 
    3549  if(newCapacity != m_Capacity)
    3550  {
    3551  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
    3552  if(m_Count != 0)
    3553  {
    3554  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    3555  }
    3556  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3557  m_Capacity = newCapacity;
    3558  m_pArray = newArray;
    3559  }
    3560  }
    3561 
    3562  void resize(size_t newCount, bool freeMemory = false)
    3563  {
    3564  size_t newCapacity = m_Capacity;
    3565  if(newCount > m_Capacity)
    3566  {
    3567  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    3568  }
    3569  else if(freeMemory)
    3570  {
    3571  newCapacity = newCount;
    3572  }
    3573 
    3574  if(newCapacity != m_Capacity)
    3575  {
    3576  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    3577  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    3578  if(elementsToCopy != 0)
    3579  {
    3580  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    3581  }
    3582  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3583  m_Capacity = newCapacity;
    3584  m_pArray = newArray;
    3585  }
    3586 
    3587  m_Count = newCount;
    3588  }
    3589 
    3590  void clear(bool freeMemory = false)
    3591  {
    3592  resize(0, freeMemory);
    3593  }
    3594 
    3595  void insert(size_t index, const T& src)
    3596  {
    3597  VMA_HEAVY_ASSERT(index <= m_Count);
    3598  const size_t oldCount = size();
    3599  resize(oldCount + 1);
    3600  if(index < oldCount)
    3601  {
    3602  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    3603  }
    3604  m_pArray[index] = src;
    3605  }
    3606 
    3607  void remove(size_t index)
    3608  {
    3609  VMA_HEAVY_ASSERT(index < m_Count);
    3610  const size_t oldCount = size();
    3611  if(index < oldCount - 1)
    3612  {
    3613  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    3614  }
    3615  resize(oldCount - 1);
    3616  }
    3617 
    3618  void push_back(const T& src)
    3619  {
    3620  const size_t newIndex = size();
    3621  resize(newIndex + 1);
    3622  m_pArray[newIndex] = src;
    3623  }
    3624 
    3625  void pop_back()
    3626  {
    3627  VMA_HEAVY_ASSERT(m_Count > 0);
    3628  resize(size() - 1);
    3629  }
    3630 
    3631  void push_front(const T& src)
    3632  {
    3633  insert(0, src);
    3634  }
    3635 
    3636  void pop_front()
    3637  {
    3638  VMA_HEAVY_ASSERT(m_Count > 0);
    3639  remove(0);
    3640  }
    3641 
    3642  typedef T* iterator;
    3643 
    3644  iterator begin() { return m_pArray; }
    3645  iterator end() { return m_pArray + m_Count; }
    3646 
    3647 private:
    3648  AllocatorT m_Allocator;
    3649  T* m_pArray;
    3650  size_t m_Count;
    3651  size_t m_Capacity;
    3652 };
    3653 
// Inserts item at given index of a VmaVector (non-STL variant).
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}
    3659 
// Removes the element at given index of a VmaVector (non-STL variant).
template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
    3665 
    3666 #endif // #if VMA_USE_STL_VECTOR
    3667 
    3668 template<typename CmpLess, typename VectorT>
    3669 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
    3670 {
    3671  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    3672  vector.data(),
    3673  vector.data() + vector.size(),
    3674  value,
    3675  CmpLess()) - vector.data();
    3676  VmaVectorInsert(vector, indexToInsert, value);
    3677  return indexToInsert;
    3678 }
    3679 
    3680 template<typename CmpLess, typename VectorT>
    3681 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
    3682 {
    3683  CmpLess comparator;
    3684  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
    3685  vector.begin(),
    3686  vector.end(),
    3687  value,
    3688  comparator);
    3689  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    3690  {
    3691  size_t indexToRemove = it - vector.begin();
    3692  VmaVectorRemove(vector, indexToRemove);
    3693  return true;
    3694  }
    3695  return false;
    3696 }
    3697 
    3698 template<typename CmpLess, typename IterT, typename KeyT>
    3699 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
    3700 {
    3701  CmpLess comparator;
    3702  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
    3703  beg, end, value, comparator);
    3704  if(it == end ||
    3705  (!comparator(*it, value) && !comparator(value, *it)))
    3706  {
    3707  return it;
    3708  }
    3709  return end;
    3710 }
    3711 
    3713 // class VmaPoolAllocator
    3714 
    3715 /*
    3716 Allocator for objects of type T using a list of arrays (pools) to speed up
    3717 allocation. Number of elements that can be allocated is not bounded because
    3718 allocator can create multiple blocks.
    3719 */
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    // Frees all blocks; invalidates every pointer previously returned by Alloc().
    void Clear();
    // Returns uninitialized storage for one T (no constructor is run here).
    T* Alloc();
    // Returns storage previously obtained from Alloc() back to its block's free list.
    void Free(T* ptr);

private:
    // Storage slot: holds either a live T or, while free, the index of the
    // next free slot in the same block.
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    // One fixed-size array of items plus the head index of its free list
    // (UINT32_MAX when the block is full).
    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};
    3750 
// Blocks are created lazily on first Alloc(); the constructor allocates nothing.
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}
    3759 
// Frees all blocks. Outstanding T objects are not destructed - callers are
// expected to have freed every item by now.
template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}
    3765 
    3766 template<typename T>
    3767 void VmaPoolAllocator<T>::Clear()
    3768 {
    3769  for(size_t i = m_ItemBlocks.size(); i--; )
    3770  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    3771  m_ItemBlocks.clear();
    3772 }
    3773 
    3774 template<typename T>
    3775 T* VmaPoolAllocator<T>::Alloc()
    3776 {
    3777  for(size_t i = m_ItemBlocks.size(); i--; )
    3778  {
    3779  ItemBlock& block = m_ItemBlocks[i];
    3780  // This block has some free items: Use first one.
    3781  if(block.FirstFreeIndex != UINT32_MAX)
    3782  {
    3783  Item* const pItem = &block.pItems[block.FirstFreeIndex];
    3784  block.FirstFreeIndex = pItem->NextFreeIndex;
    3785  return &pItem->Value;
    3786  }
    3787  }
    3788 
    3789  // No block has free item: Create new one and use it.
    3790  ItemBlock& newBlock = CreateNewBlock();
    3791  Item* const pItem = &newBlock.pItems[0];
    3792  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    3793  return &pItem->Value;
    3794 }
    3795 
    3796 template<typename T>
    3797 void VmaPoolAllocator<T>::Free(T* ptr)
    3798 {
    3799  // Search all memory blocks to find ptr.
    3800  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    3801  {
    3802  ItemBlock& block = m_ItemBlocks[i];
    3803 
    3804  // Casting to union.
    3805  Item* pItemPtr;
    3806  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
    3807 
    3808  // Check if pItemPtr is in address range of this block.
    3809  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
    3810  {
    3811  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
    3812  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
    3813  block.FirstFreeIndex = index;
    3814  return;
    3815  }
    3816  }
    3817  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
    3818 }
    3819 
    3820 template<typename T>
    3821 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
    3822 {
    3823  ItemBlock newBlock = {
    3824  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
    3825 
    3826  m_ItemBlocks.push_back(newBlock);
    3827 
    3828  // Setup singly-linked list of all free items in this block.
    3829  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
    3830  newBlock.pItems[i].NextFreeIndex = i + 1;
    3831  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    3832  return m_ItemBlocks.back();
    3833 }
    3834 
    3836 // class VmaRawList, VmaList
    3837 
    3838 #if VMA_USE_STL_LIST
    3839 
    3840 #define VmaList std::list
    3841 
    3842 #else // #if VMA_USE_STL_LIST
    3843 
// Node of VmaRawList. Kept POD so nodes can be pooled in VmaPoolAllocator
// without running constructors.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};
    3851 
    3852 // Doubly linked list.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    // Frees all nodes back to the pool and resets front/back/count.
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    // Front()/Back() are only meaningful when the list is non-empty.
    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // Parameterless versions append a node with uninitialized Value.
    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    // Pool of nodes; avoids per-node heap allocations.
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;
};
    3896 
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128), // 128 list nodes per pool block
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
    3906 
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
    // The pool allocator's own destructor releases all node storage at once.
}
    3913 
    3914 template<typename T>
    3915 void VmaRawList<T>::Clear()
    3916 {
    3917  if(IsEmpty() == false)
    3918  {
    3919  ItemType* pItem = m_pBack;
    3920  while(pItem != VMA_NULL)
    3921  {
    3922  ItemType* const pPrevItem = pItem->pPrev;
    3923  m_ItemAllocator.Free(pItem);
    3924  pItem = pPrevItem;
    3925  }
    3926  m_pFront = VMA_NULL;
    3927  m_pBack = VMA_NULL;
    3928  m_Count = 0;
    3929  }
    3930 }
    3931 
    3932 template<typename T>
    3933 VmaListItem<T>* VmaRawList<T>::PushBack()
    3934 {
    3935  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    3936  pNewItem->pNext = VMA_NULL;
    3937  if(IsEmpty())
    3938  {
    3939  pNewItem->pPrev = VMA_NULL;
    3940  m_pFront = pNewItem;
    3941  m_pBack = pNewItem;
    3942  m_Count = 1;
    3943  }
    3944  else
    3945  {
    3946  pNewItem->pPrev = m_pBack;
    3947  m_pBack->pNext = pNewItem;
    3948  m_pBack = pNewItem;
    3949  ++m_Count;
    3950  }
    3951  return pNewItem;
    3952 }
    3953 
    3954 template<typename T>
    3955 VmaListItem<T>* VmaRawList<T>::PushFront()
    3956 {
    3957  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    3958  pNewItem->pPrev = VMA_NULL;
    3959  if(IsEmpty())
    3960  {
    3961  pNewItem->pNext = VMA_NULL;
    3962  m_pFront = pNewItem;
    3963  m_pBack = pNewItem;
    3964  m_Count = 1;
    3965  }
    3966  else
    3967  {
    3968  pNewItem->pNext = m_pFront;
    3969  m_pFront->pPrev = pNewItem;
    3970  m_pFront = pNewItem;
    3971  ++m_Count;
    3972  }
    3973  return pNewItem;
    3974 }
    3975 
// Appends a copy of value and returns its node.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}
    3983 
// Prepends a copy of value and returns its node.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}
    3991 
    3992 template<typename T>
    3993 void VmaRawList<T>::PopBack()
    3994 {
    3995  VMA_HEAVY_ASSERT(m_Count > 0);
    3996  ItemType* const pBackItem = m_pBack;
    3997  ItemType* const pPrevItem = pBackItem->pPrev;
    3998  if(pPrevItem != VMA_NULL)
    3999  {
    4000  pPrevItem->pNext = VMA_NULL;
    4001  }
    4002  m_pBack = pPrevItem;
    4003  m_ItemAllocator.Free(pBackItem);
    4004  --m_Count;
    4005 }
    4006 
    4007 template<typename T>
    4008 void VmaRawList<T>::PopFront()
    4009 {
    4010  VMA_HEAVY_ASSERT(m_Count > 0);
    4011  ItemType* const pFrontItem = m_pFront;
    4012  ItemType* const pNextItem = pFrontItem->pNext;
    4013  if(pNextItem != VMA_NULL)
    4014  {
    4015  pNextItem->pPrev = VMA_NULL;
    4016  }
    4017  m_pFront = pNextItem;
    4018  m_ItemAllocator.Free(pFrontItem);
    4019  --m_Count;
    4020 }
    4021 
    4022 template<typename T>
    4023 void VmaRawList<T>::Remove(ItemType* pItem)
    4024 {
    4025  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    4026  VMA_HEAVY_ASSERT(m_Count > 0);
    4027 
    4028  if(pItem->pPrev != VMA_NULL)
    4029  {
    4030  pItem->pPrev->pNext = pItem->pNext;
    4031  }
    4032  else
    4033  {
    4034  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4035  m_pFront = pItem->pNext;
    4036  }
    4037 
    4038  if(pItem->pNext != VMA_NULL)
    4039  {
    4040  pItem->pNext->pPrev = pItem->pPrev;
    4041  }
    4042  else
    4043  {
    4044  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4045  m_pBack = pItem->pPrev;
    4046  }
    4047 
    4048  m_ItemAllocator.Free(pItem);
    4049  --m_Count;
    4050 }
    4051 
    4052 template<typename T>
    4053 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
    4054 {
    4055  if(pItem != VMA_NULL)
    4056  {
    4057  ItemType* const prevItem = pItem->pPrev;
    4058  ItemType* const newItem = m_ItemAllocator.Alloc();
    4059  newItem->pPrev = prevItem;
    4060  newItem->pNext = pItem;
    4061  pItem->pPrev = newItem;
    4062  if(prevItem != VMA_NULL)
    4063  {
    4064  prevItem->pNext = newItem;
    4065  }
    4066  else
    4067  {
    4068  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4069  m_pFront = newItem;
    4070  }
    4071  ++m_Count;
    4072  return newItem;
    4073  }
    4074  else
    4075  return PushBack();
    4076 }
    4077 
    4078 template<typename T>
    4079 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
    4080 {
    4081  if(pItem != VMA_NULL)
    4082  {
    4083  ItemType* const nextItem = pItem->pNext;
    4084  ItemType* const newItem = m_ItemAllocator.Alloc();
    4085  newItem->pNext = nextItem;
    4086  newItem->pPrev = pItem;
    4087  pItem->pNext = newItem;
    4088  if(nextItem != VMA_NULL)
    4089  {
    4090  nextItem->pPrev = newItem;
    4091  }
    4092  else
    4093  {
    4094  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4095  m_pBack = newItem;
    4096  }
    4097  ++m_Count;
    4098  return newItem;
    4099  }
    4100  else
    4101  return PushFront();
    4102 }
    4103 
// Inserts a copy of value before pItem (or at the end when pItem is null).
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}
    4111 
// Inserts a copy of value after pItem (or at the beginning when pItem is null).
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}
    4119 
/*
Doubly-linked list exposing a subset of the std::list interface on top of
VmaRawList<T>. Presumably the substitute used when VMA_USE_STL_LIST is
disabled - confirm against the #if at the top of this section.
AllocatorT is only used to reach its m_pCallbacks (VkAllocationCallbacks).
*/
template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    // Mutable bidirectional iterator. A null m_pItem represents the
    // past-the-end position; m_pList is kept so end() can be decremented.
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                // Decrementing end(): move to the last element of the list.
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        // Post-increment/decrement return the pre-modification position.
        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        // Comparisons assert that both iterators refer to the same list.
        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        // Only VmaList may create iterators bound to a concrete list/item.
        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    // Read-only counterpart of iterator; implicitly convertible from iterator.
    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                // Decrementing cend(): move to the last element of the list.
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        // Post-increment/decrement return the pre-modification position.
        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        // Comparisons assert that both iterators refer to the same list.
        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    // The allocator is used only for its m_pCallbacks member.
    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    // Removes the element at `it`; `it` is invalidated.
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    // Inserts `value` before `it`; returns an iterator to the new element.
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};
    4304 
    4305 #endif // #if VMA_USE_STL_LIST
    4306 
    4308 // class VmaMap
    4309 
    4310 // Unused in this version.
    4311 #if 0
    4312 
    4313 #if VMA_USE_STL_UNORDERED_MAP
    4314 
    4315 #define VmaPair std::pair
    4316 
    4317 #define VMA_MAP_TYPE(KeyT, ValueT) \
    4318  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    4319 
    4320 #else // #if VMA_USE_STL_UNORDERED_MAP
    4321 
// Minimal substitute for std::pair, used when VMA_USE_STL_UNORDERED_MAP is
// disabled (this is the #else branch of that #if).
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    // Value-initializes both members.
    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};
    4331 
/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    // Iterators are raw pointers into the underlying vector.
    // NOTE(review): insert/erase presumably reallocate or shift elements and
    // so invalidate iterators - confirm against VmaVector semantics.
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    // Inserts `pair`, keeping the vector sorted by key (see VmaPairFirstLess).
    void insert(const PairType& pair);
    // Returns iterator to the element with the given key, or end() if absent.
    iterator find(const KeyT& key);
    // Removes the element pointed to by `it`.
    void erase(iterator it);

private:
    // Pairs kept sorted by `first` so insert/find can binary-search.
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
    4354 
    4355 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    4356 
// Strict weak ordering for VmaPair that compares only the `first` member.
// The second overload compares a pair against a bare key, enabling key-only
// binary search over a sorted vector of pairs (see VmaMap::find).
template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};
    4369 
    4370 template<typename KeyT, typename ValueT>
    4371 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
    4372 {
    4373  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    4374  m_Vector.data(),
    4375  m_Vector.data() + m_Vector.size(),
    4376  pair,
    4377  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    4378  VmaVectorInsert(m_Vector, indexToInsert, pair);
    4379 }
    4380 
    4381 template<typename KeyT, typename ValueT>
    4382 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
    4383 {
    4384  PairType* it = VmaBinaryFindFirstNotLess(
    4385  m_Vector.data(),
    4386  m_Vector.data() + m_Vector.size(),
    4387  key,
    4388  VmaPairFirstLess<KeyT, ValueT>());
    4389  if((it != m_Vector.end()) && (it->first == key))
    4390  {
    4391  return it;
    4392  }
    4393  else
    4394  {
    4395  return m_Vector.end();
    4396  }
    4397 }
    4398 
    4399 template<typename KeyT, typename ValueT>
    4400 void VmaMap<KeyT, ValueT>::erase(iterator it)
    4401 {
    4402  VmaVectorRemove(m_Vector, it - m_Vector.begin());
    4403 }
    4404 
    4405 #endif // #if VMA_USE_STL_UNORDERED_MAP
    4406 
    4407 #endif // #if 0
    4408 
    4410 
    4411 class VmaDeviceMemoryBlock;
    4412 
// Selects the cache-control operation to perform on a range of mapped memory:
// flush or invalidate.
enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
    4414 
/*
Represents a single memory allocation made by the library.

An allocation is either a suballocation of a VmaDeviceMemoryBlock
(ALLOCATION_TYPE_BLOCK) or has its own dedicated VkDeviceMemory
(ALLOCATION_TYPE_DEDICATED); the type-specific state lives in the anonymous
union at the bottom. Objects start as ALLOCATION_TYPE_NONE and are initialized
exactly once via InitBlockAllocation(), InitLost() or
InitDedicatedAllocation().
*/
struct VmaAllocation_T
{
    VMA_CLASS_NO_COPY(VmaAllocation_T)
private:
    // Bit of m_MapCount set when the allocation was created persistently mapped.
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    enum FLAGS
    {
        // m_pUserData points to a string owned by this allocation rather than
        // an opaque user pointer (see FreeUserDataString and the destructor).
        FLAG_USER_DATA_STRING = 0x01,
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,       // Not initialized yet.
        ALLOCATION_TYPE_BLOCK,      // Suballocation of a VmaDeviceMemoryBlock.
        ALLOCATION_TYPE_DEDICATED,  // Has its own VkDeviceMemory.
    };

    // userDataString: when true, user data is treated as an owned string
    // (sets FLAG_USER_DATA_STRING; see SetUserData).
    VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_LastUseFrameIndex(currentFrameIndex),
        m_Type((uint8_t)ALLOCATION_TYPE_NONE),
        m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_MapCount(0),
        m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    {
#if VMA_STATS_STRING_ENABLED
        m_CreationFrameIndex = currentFrameIndex;
        m_BufferImageUsage = 0;
#endif
    }

    ~VmaAllocation_T()
    {
        // Aside from the persistent-map bit, the map reference counter must be zero.
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }

    // Turns an uninitialized allocation into a block suballocation.
    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    // Initializes the allocation directly in the lost state: block type with
    // no pool/block attached. Requires m_LastUseFrameIndex to already equal
    // VMA_FRAME_INDEX_LOST.
    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    // Rebinds a block suballocation to another block/offset. Defined out of line.
    void ChangeBlockAllocation(
        VmaAllocator hAllocator,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset);

    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0; // Alignment is not tracked for dedicated allocations.
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Valid only for ALLOCATION_TYPE_BLOCK.
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    // Atomic CAS on the last-use frame index; `expected` is updated on failure
    // (compare_exchange_weak semantics, may fail spuriously - call in a loop).
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Fills outInfo as if this dedicated allocation were a block containing
    // exactly one allocation and no unused ranges.
    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }

    // Map/unmap entry points split by allocation type (see m_MapCount for the
    // reference-counting scheme). Defined out of line.
    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

#if VMA_STATS_STRING_ENABLED
    uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }

    // Records buffer/image usage flags for statistics; may be set only once.
    void InitBufferImageUsage(uint32_t bufferImageUsage)
    {
        VMA_ASSERT(m_BufferImageUsage == 0);
        m_BufferImageUsage = bufferImageUsage;
    }

    void PrintParameters(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    // Opaque user pointer, or an owned string copy when FLAG_USER_DATA_STRING is set.
    void* m_pUserData;
    // Frame index of last use, updated atomically; VMA_FRAME_INDEX_LOST marks
    // a lost allocation (see InitLost/MakeLost).
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    // The active member is selected by m_Type.
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

#if VMA_STATS_STRING_ENABLED
    uint32_t m_CreationFrameIndex;
    uint32_t m_BufferImageUsage; // 0 if unknown.
#endif

    // Releases the owned user-data string (FLAG_USER_DATA_STRING). Defined out of line.
    void FreeUserDataString(VmaAllocator hAllocator);
};
    4631 
/*
Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
allocated memory block or free.
*/
struct VmaSuballocation
{
    VkDeviceSize offset;        // Offset of the region from the start of the block.
    VkDeviceSize size;          // Size of the region in bytes.
    VmaAllocation hAllocation;  // Allocation occupying this region; presumably null when free - confirm in metadata implementations.
    VmaSuballocationType type;  // VmaSuballocationType category of this region.
};
    4643 
    4644 // Comparator for offsets.
    4645 struct VmaSuballocationOffsetLess
    4646 {
    4647  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    4648  {
    4649  return lhs.offset < rhs.offset;
    4650  }
    4651 };
    4652 struct VmaSuballocationOffsetGreater
    4653 {
    4654  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    4655  {
    4656  return lhs.offset > rhs.offset;
    4657  }
    4658 };
    4659 
    4660 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
    4661 
// Cost of one additional allocation lost, as equivalent in bytes.
// Used by VmaAllocationRequest::CalcCost() to weigh making live allocations
// lost against the bytes gained: each lost allocation adds 1 MiB of cost.
static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    4664 
/*
Parameters of planned allocation inside a VmaDeviceMemoryBlock.

If canMakeOtherLost was false:
- item points to a FREE suballocation.
- itemsToMakeLostCount is 0.

If canMakeOtherLost was true:
- item points to first of sequence of suballocations, which are either FREE,
  or point to VmaAllocations that can become lost.
- itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
  the requested allocation to succeed.
*/
struct VmaAllocationRequest
{
    VkDeviceSize offset;
    VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    VmaSuballocationList::iterator item;
    size_t itemsToMakeLostCount;
    // Metadata-implementation-specific payload; meaning depends on the
    // VmaBlockMetadata subclass that produced the request.
    void* customData;

    // Heuristic cost of fulfilling this request: bytes of live allocations
    // that must be made lost, plus a fixed per-allocation penalty
    // (VMA_LOST_ALLOCATION_COST). Lower is better.
    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
    4692 
/*
Data structure used for bookkeeping of allocations and unused ranges of memory
in a single VkDeviceMemory block.

Abstract base class; concrete strategies visible in this file are
VmaBlockMetadata_Generic, VmaBlockMetadata_Linear and VmaBlockMetadata_Buddy.
*/
class VmaBlockMetadata
{
public:
    VmaBlockMetadata(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata() { }
    // Called once after construction with the full size of the memory block.
    virtual void Init(VkDeviceSize size) { m_Size = size; }

    // Validates all data structures inside this object. If not valid, returns false.
    virtual bool Validate() const = 0;
    VkDeviceSize GetSize() const { return m_Size; }
    virtual size_t GetAllocationCount() const = 0;
    virtual VkDeviceSize GetSumFreeSize() const = 0;
    virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    // Returns true if this block is empty - contains only single free suballocation.
    virtual bool IsEmpty() const = 0;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    // Shouldn't modify blockCount.
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
#endif

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy, // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* flags.
        VmaAllocationRequest* pAllocationRequest) = 0;

    // Makes lost the allocations referenced by *pAllocationRequest.
    // NOTE(review): exact return semantics not visible here - confirm in the
    // concrete implementations.
    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest) = 0;

    // Returns the number of allocations made lost.
    // NOTE(review): inferred from the name - confirm in implementations.
    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;

    virtual VkResult CheckCorruption(const void* pBlockData) = 0;

    // Makes actual allocation based on request. Request must already be checked and valid.
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation) = 0;

    // Frees suballocation assigned to given memory region.
    virtual void Free(const VmaAllocation allocation) = 0;
    virtual void FreeAtOffset(VkDeviceSize offset) = 0;

protected:
    const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }

#if VMA_STATS_STRING_ENABLED
    // Helpers for implementing PrintDetailedMap() in derived classes.
    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
        VkDeviceSize unusedBytes,
        size_t allocationCount,
        size_t unusedRangeCount) const;
    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VmaAllocation hAllocation) const;
    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VkDeviceSize size) const;
    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Size;
    const VkAllocationCallbacks* m_pAllocationCallbacks;
};
    4778 
// Validation helper: if `cond` is false, asserts with the stringified
// condition and makes the enclosing function return false. Intended for use
// inside Validate() implementations, which return bool.
#define VMA_VALIDATE(cond) do { if(!(cond)) { \
        VMA_ASSERT(0 && "Validation failed: " #cond); \
        return false; \
    } } while(false)
    4783 
/*
General-purpose block metadata: suballocations kept in a linked list
(m_Suballocations), plus a separate vector of iterators to large-enough free
suballocations, sorted by size (m_FreeSuballocationsBySize).
*/
class VmaBlockMetadata_Generic : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
public:
    VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Generic();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    // Every list entry is either an allocation or free, so subtracting the
    // free count yields the allocation count.
    virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    // Number of free suballocations in m_Suballocations.
    uint32_t m_FreeCount;
    // Total size in bytes of the free suballocations.
    VkDeviceSize m_SumFreeSize;
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    // Consistency check for m_FreeSuballocationsBySize.
    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
    4874 
    4875 /*
    4876 Allocations and their references in internal data structure look like this:
    4877 
    4878 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
    4879 
    4880  0 +-------+
    4881  | |
    4882  | |
    4883  | |
    4884  +-------+
    4885  | Alloc | 1st[m_1stNullItemsBeginCount]
    4886  +-------+
    4887  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4888  +-------+
    4889  | ... |
    4890  +-------+
    4891  | Alloc | 1st[1st.size() - 1]
    4892  +-------+
    4893  | |
    4894  | |
    4895  | |
    4896 GetSize() +-------+
    4897 
    4898 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
    4899 
    4900  0 +-------+
    4901  | Alloc | 2nd[0]
    4902  +-------+
    4903  | Alloc | 2nd[1]
    4904  +-------+
    4905  | ... |
    4906  +-------+
    4907  | Alloc | 2nd[2nd.size() - 1]
    4908  +-------+
    4909  | |
    4910  | |
    4911  | |
    4912  +-------+
    4913  | Alloc | 1st[m_1stNullItemsBeginCount]
    4914  +-------+
    4915  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4916  +-------+
    4917  | ... |
    4918  +-------+
    4919  | Alloc | 1st[1st.size() - 1]
    4920  +-------+
    4921  | |
    4922 GetSize() +-------+
    4923 
    4924 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
    4925 
    4926  0 +-------+
    4927  | |
    4928  | |
    4929  | |
    4930  +-------+
    4931  | Alloc | 1st[m_1stNullItemsBeginCount]
    4932  +-------+
    4933  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4934  +-------+
    4935  | ... |
    4936  +-------+
    4937  | Alloc | 1st[1st.size() - 1]
    4938  +-------+
    4939  | |
    4940  | |
    4941  | |
    4942  +-------+
    4943  | Alloc | 2nd[2nd.size() - 1]
    4944  +-------+
    4945  | ... |
    4946  +-------+
    4947  | Alloc | 2nd[1]
    4948  +-------+
    4949  | Alloc | 2nd[0]
    4950 GetSize() +-------+
    4951 
    4952 */
// Block metadata strategy for linear allocation: supports stack, ring-buffer
// and double-stack usage patterns (see SECOND_VECTOR_MODE below and the
// diagram in the comment above this class).
class VmaBlockMetadata_Linear : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
public:
    VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Linear();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const;
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return GetAllocationCount() == 0; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    /*
    There are two suballocation vectors, used in ping-pong way.
    The one with index m_1stVectorIndex is called 1st.
    The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    2nd can be non-empty only when 1st is not empty.
    When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    */
    typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;

    enum SECOND_VECTOR_MODE
    {
        SECOND_VECTOR_EMPTY,
        /*
        Suballocations in 2nd vector are created later than the ones in 1st, but they
        all have smaller offset.
        */
        SECOND_VECTOR_RING_BUFFER,
        /*
        Suballocations in 2nd vector are upper side of double stack.
        They all have offsets higher than those in 1st vector.
        Top of this stack means smaller offsets, but higher indices in this vector.
        */
        SECOND_VECTOR_DOUBLE_STACK,
    };

    VkDeviceSize m_SumFreeSize;
    SuballocationVectorType m_Suballocations0, m_Suballocations1;
    uint32_t m_1stVectorIndex;
    SECOND_VECTOR_MODE m_2ndVectorMode;

    // Accessors resolving the ping-pong indirection described above.
    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }

    // Number of items in 1st vector with hAllocation = null at the beginning.
    size_t m_1stNullItemsBeginCount;
    // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    size_t m_1stNullItemsMiddleCount;
    // Number of items in 2nd vector with hAllocation = null.
    size_t m_2ndNullItemsCount;

    // Whether enough null items have accumulated in 1st to warrant compaction.
    // NOTE(review): exact threshold lives in the out-of-line definition.
    bool ShouldCompact1st() const;
    // Housekeeping after a Free(): presumably drops null items and may swap
    // the 1st/2nd roles - confirm in the out-of-line definition.
    void CleanupAfterFree();
};
    5051 
    5052 /*
    5053 - GetSize() is the original size of allocated memory block.
    5054 - m_UsableSize is this size aligned down to a power of two.
    5055  All allocations and calculations happen relative to m_UsableSize.
    5056 - GetUnusableSize() is the difference between them.
  It is reported as separate, unused range, not available for allocations.
    5058 
    5059 Node at level 0 has size = m_UsableSize.
    5060 Each next level contains nodes with size 2 times smaller than current level.
    5061 m_LevelCount is the maximum number of levels to use in the current object.
    5062 */
// Block metadata implementing the buddy algorithm: a binary tree of nodes
// where each split halves the node size (see the comment above the class).
class VmaBlockMetadata_Buddy : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
public:
    VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Buddy();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const { return m_AllocationCount; }
    // Reported free size includes the tail that is unusable by the buddy algorithm.
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    // Empty when the root node is a single free node covering the whole usable size.
    virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Corruption detection is not supported by this algorithm.
    virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }

private:
    static const VkDeviceSize MIN_NODE_SIZE = 32;
    static const size_t MAX_LEVELS = 30;

    // Accumulators filled while recursively validating the node tree;
    // compared against the cached counters below in Validate().
    struct ValidationContext
    {
        size_t calculatedAllocationCount;
        size_t calculatedFreeCount;
        VkDeviceSize calculatedSumFreeSize;

        ValidationContext() :
            calculatedAllocationCount(0),
            calculatedFreeCount(0),
            calculatedSumFreeSize(0) { }
    };

    // Node of the buddy tree. Active union member depends on `type`.
    struct Node
    {
        VkDeviceSize offset;
        enum TYPE
        {
            TYPE_FREE,
            TYPE_ALLOCATION,
            TYPE_SPLIT,
            TYPE_COUNT
        } type;
        Node* parent;
        Node* buddy;

        union
        {
            // Valid for TYPE_FREE: links in the free list of this node's level.
            struct
            {
                Node* prev;
                Node* next;
            } free;
            // Valid for TYPE_ALLOCATION: the allocation occupying this node.
            struct
            {
                VmaAllocation alloc;
            } allocation;
            // Valid for TYPE_SPLIT: pointer to the left child node.
            struct
            {
                Node* leftChild;
            } split;
        };
    };

    // Size of the memory block aligned down to a power of two.
    VkDeviceSize m_UsableSize;
    uint32_t m_LevelCount;

    Node* m_Root;
    // Per-level doubly-linked list of free nodes.
    struct {
        Node* front;
        Node* back;
    } m_FreeList[MAX_LEVELS];
    // Number of nodes in the tree with type == TYPE_ALLOCATION.
    size_t m_AllocationCount;
    // Number of nodes in the tree with type == TYPE_FREE.
    size_t m_FreeCount;
    // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
    VkDeviceSize m_SumFreeSize;

    VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    void DeleteNode(Node* node);
    bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    // Node size at given level: halved at each deeper level.
    inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    // Alloc passed just for validation. Can be null.
    void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    // Adds node to the front of FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next can be undefined.
    void AddToFreeListFront(uint32_t level, Node* node);
    // Removes node from FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next stay untouched.
    void RemoveFromFreeList(uint32_t level, Node* node);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
#endif
};
    5199 
    5200 /*
    5201 Represents a single block of device memory (`VkDeviceMemory`) with all the
    5202 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
    5203 
    5204 Thread-safety: This class must be externally synchronized.
    5205 */
class VmaDeviceMemoryBlock
{
    VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
public:
    // Suballocation bookkeeping for this block; concrete type depends on the algorithm.
    VmaBlockMetadata* m_pMetadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    ~VmaDeviceMemoryBlock()
    {
        VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        VmaAllocator hAllocator,
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        uint32_t id,
        uint32_t algorithm);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    uint32_t GetId() const { return m_Id; }
    void* GetMappedData() const { return m_pMappedData; }

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    VkResult CheckCorruption(VmaAllocator hAllocator);

    // ppData can be null.
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    void Unmap(VmaAllocator hAllocator, uint32_t count);

    // Used for corruption detection around a suballocation.
    VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);

    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkBuffer hBuffer);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkImage hImage);

private:
    uint32_t m_MemoryTypeIndex;
    uint32_t m_Id;
    VkDeviceMemory m_hMemory;

    // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    // Also protects m_MapCount, m_pMappedData.
    VMA_MUTEX m_Mutex;
    uint32_t m_MapCount;
    void* m_pMappedData;
};
    5268 
    5269 struct VmaPointerLess
    5270 {
    5271  bool operator()(const void* lhs, const void* rhs) const
    5272  {
    5273  return lhs < rhs;
    5274  }
    5275 };
    5276 
    5277 class VmaDefragmentator;
    5278 
    5279 /*
    5280 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
    5281 Vulkan memory type.
    5282 
    5283 Synchronized internally with a mutex.
    5284 */
struct VmaBlockVector
{
    VMA_CLASS_NO_COPY(VmaBlockVector)
public:
    VmaBlockVector(
        VmaAllocator hAllocator,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool isCustomPool,
        bool explicitBlockSize,
        uint32_t algorithm);
    ~VmaBlockVector();

    VkResult CreateMinBlocks();

    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    uint32_t GetAlgorithm() const { return m_Algorithm; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty() const { return m_Blocks.empty(); }
    bool IsCorruptionDetectionEnabled() const;

    VkResult Allocate(
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    void Free(
        VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);
    VkResult CheckCorruption();

    // Creates the defragmentator on first use; subsequent calls reuse it.
    VmaDefragmentator* EnsureDefragmentator(
        VmaAllocator hAllocator,
        uint32_t currentFrameIndex);

    VkResult Defragment(
        VmaDefragmentationStats* pDefragmentationStats,
        VkDeviceSize& maxBytesToMove,
        uint32_t& maxAllocationsToMove);

    void DestroyDefragmentator();

private:
    friend class VmaDefragmentator;

    const VmaAllocator m_hAllocator;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_IsCustomPool;
    const bool m_ExplicitBlockSize;
    const uint32_t m_Algorithm;
    bool m_HasEmptyBlock;
    VMA_MUTEX m_Mutex;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    /* There can be at most one block that is completely empty - a
    hysteresis to avoid pessimistic case of alternating creation and destruction
    of a VkDeviceMemory. */
    VmaDefragmentator* m_pDefragmentator;
    uint32_t m_NextBlockId;

    VkDeviceSize CalcMaxBlockSize() const;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    // To be used only without CAN_MAKE_OTHER_LOST flag.
    VkResult AllocateFromBlock(
        VmaDeviceMemoryBlock* pBlock,
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        VmaAllocationCreateFlags allocFlags,
        void* pUserData,
        VmaSuballocationType suballocType,
        uint32_t strategy,
        VmaAllocation* pAllocation);

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
};
    5397 
// Implementation behind the opaque VmaPool handle: a custom pool is
// essentially one VmaBlockVector plus an identifier.
struct VmaPool_T
{
    VMA_CLASS_NO_COPY(VmaPool_T)
public:
    VmaBlockVector m_BlockVector;

    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo,
        VkDeviceSize preferredBlockSize);
    ~VmaPool_T();

    uint32_t GetId() const { return m_Id; }
    // Id can be assigned only once (asserted non-zero state is rejected).
    void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    uint32_t m_Id;
};
    5420 
// Moves allocations between blocks of a single VmaBlockVector to reduce
// fragmentation, within the given byte/count budgets.
class VmaDefragmentator
{
    VMA_CLASS_NO_COPY(VmaDefragmentator)
private:
    const VmaAllocator m_hAllocator;
    VmaBlockVector* const m_pBlockVector;
    uint32_t m_CurrentFrameIndex;
    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    // An allocation registered for defragmentation plus an optional
    // per-allocation "was moved" output flag supplied by the caller.
    struct AllocationInfo
    {
        VmaAllocation m_hAllocation;
        VkBool32* m_pChanged;

        AllocationInfo() :
            m_hAllocation(VK_NULL_HANDLE),
            m_pChanged(VMA_NULL)
        {
        }
    };

    // Orders allocations by size, largest first.
    struct AllocationInfoSizeGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
        }
    };

    // Used between AddAllocation and Defragment.
    VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

    // Per-block working state built during defragmentation.
    struct BlockInfo
    {
        VmaDeviceMemoryBlock* m_pBlock;
        bool m_HasNonMovableAllocations;
        VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

        BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
            m_pBlock(VMA_NULL),
            // Conservative default until CalcHasNonMovableAllocations() runs.
            m_HasNonMovableAllocations(true),
            m_Allocations(pAllocationCallbacks),
            m_pMappedDataForDefragmentation(VMA_NULL)
        {
        }

        // Non-movable allocations exist when the block holds more
        // allocations than were registered for defragmentation.
        void CalcHasNonMovableAllocations()
        {
            const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
            const size_t defragmentAllocCount = m_Allocations.size();
            m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
        }

        // NOTE: name contains a typo ("Descecnding"); kept as-is for source compatibility.
        void SortAllocationsBySizeDescecnding()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
        }

        VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
        void Unmap(VmaAllocator hAllocator);

    private:
        // Not null if mapped for defragmentation only, not originally mapped.
        void* m_pMappedDataForDefragmentation;
    };

    // Comparators for looking up / ordering BlockInfo by block pointer.
    struct BlockPointerLess
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlock;
        }
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
        }
    };

    // 1. Blocks with some non-movable allocations go first.
    // 2. Blocks with smaller sumFreeSize go first.
    struct BlockInfoCompareMoveDestination
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return true;
            }
            if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return false;
            }
            if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
            {
                return true;
            }
            return false;
        }
    };

    typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    BlockInfoVector m_Blocks;

    VkResult DefragmentRound(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    static bool MoveMakesSense(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset);

public:
    VmaDefragmentator(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex);

    ~VmaDefragmentator();

    VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

    // Registers an allocation as a candidate for moving. pChanged (optional)
    // receives whether the allocation was actually moved.
    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);

    VkResult Defragment(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);
};
    5550 
    5551 #if VMA_RECORDING_ENABLED
    5552 
    5553 class VmaRecorder
    5554 {
    5555 public:
    5556  VmaRecorder();
    5557  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
    5558  void WriteConfiguration(
    5559  const VkPhysicalDeviceProperties& devProps,
    5560  const VkPhysicalDeviceMemoryProperties& memProps,
    5561  bool dedicatedAllocationExtensionEnabled);
    5562  ~VmaRecorder();
    5563 
    5564  void RecordCreateAllocator(uint32_t frameIndex);
    5565  void RecordDestroyAllocator(uint32_t frameIndex);
    5566  void RecordCreatePool(uint32_t frameIndex,
    5567  const VmaPoolCreateInfo& createInfo,
    5568  VmaPool pool);
    5569  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
    5570  void RecordAllocateMemory(uint32_t frameIndex,
    5571  const VkMemoryRequirements& vkMemReq,
    5572  const VmaAllocationCreateInfo& createInfo,
    5573  VmaAllocation allocation);
    5574  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    5575  const VkMemoryRequirements& vkMemReq,
    5576  bool requiresDedicatedAllocation,
    5577  bool prefersDedicatedAllocation,
    5578  const VmaAllocationCreateInfo& createInfo,
    5579  VmaAllocation allocation);
    5580  void RecordAllocateMemoryForImage(uint32_t frameIndex,
    5581  const VkMemoryRequirements& vkMemReq,
    5582  bool requiresDedicatedAllocation,
    5583  bool prefersDedicatedAllocation,
    5584  const VmaAllocationCreateInfo& createInfo,
    5585  VmaAllocation allocation);
    5586  void RecordFreeMemory(uint32_t frameIndex,
    5587  VmaAllocation allocation);
    5588  void RecordSetAllocationUserData(uint32_t frameIndex,
    5589  VmaAllocation allocation,
    5590  const void* pUserData);
    5591  void RecordCreateLostAllocation(uint32_t frameIndex,
    5592  VmaAllocation allocation);
    5593  void RecordMapMemory(uint32_t frameIndex,
    5594  VmaAllocation allocation);
    5595  void RecordUnmapMemory(uint32_t frameIndex,
    5596  VmaAllocation allocation);
    5597  void RecordFlushAllocation(uint32_t frameIndex,
    5598  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    5599  void RecordInvalidateAllocation(uint32_t frameIndex,
    5600  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    5601  void RecordCreateBuffer(uint32_t frameIndex,
    5602  const VkBufferCreateInfo& bufCreateInfo,
    5603  const VmaAllocationCreateInfo& allocCreateInfo,
    5604  VmaAllocation allocation);
    5605  void RecordCreateImage(uint32_t frameIndex,
    5606  const VkImageCreateInfo& imageCreateInfo,
    5607  const VmaAllocationCreateInfo& allocCreateInfo,
    5608  VmaAllocation allocation);
    5609  void RecordDestroyBuffer(uint32_t frameIndex,
    5610  VmaAllocation allocation);
    5611  void RecordDestroyImage(uint32_t frameIndex,
    5612  VmaAllocation allocation);
    5613  void RecordTouchAllocation(uint32_t frameIndex,
    5614  VmaAllocation allocation);
    5615  void RecordGetAllocationInfo(uint32_t frameIndex,
    5616  VmaAllocation allocation);
    5617  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
    5618  VmaPool pool);
    5619 
    5620 private:
    5621  struct CallParams
    5622  {
    5623  uint32_t threadId;
    5624  double time;
    5625  };
    5626 
    5627  class UserDataString
    5628  {
    5629  public:
    5630  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
    5631  const char* GetString() const { return m_Str; }
    5632 
    5633  private:
    5634  char m_PtrStr[17];
    5635  const char* m_Str;
    5636  };
    5637 
    5638  bool m_UseMutex;
    5639  VmaRecordFlags m_Flags;
    5640  FILE* m_File;
    5641  VMA_MUTEX m_FileMutex;
    5642  int64_t m_Freq;
    5643  int64_t m_StartCounter;
    5644 
    5645  void GetBasicParams(CallParams& outParams);
    5646  void Flush();
    5647 };
    5648 
    5649 #endif // #if VMA_RECORDING_ENABLED
    5650 
    5651 // Main allocator object.
    5652 struct VmaAllocator_T
    5653 {
    5654  VMA_CLASS_NO_COPY(VmaAllocator_T)
    5655 public:
    5656  bool m_UseMutex;
    5657  bool m_UseKhrDedicatedAllocation;
    5658  VkDevice m_hDevice;
    5659  bool m_AllocationCallbacksSpecified;
    5660  VkAllocationCallbacks m_AllocationCallbacks;
    5661  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
    5662 
    5663  // Number of bytes free out of limit, or VK_WHOLE_SIZE if not limit for that heap.
    5664  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    5665  VMA_MUTEX m_HeapSizeLimitMutex;
    5666 
    5667  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    5668  VkPhysicalDeviceMemoryProperties m_MemProps;
    5669 
    5670  // Default pools.
    5671  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
    5672 
    5673  // Each vector is sorted by memory (handle value).
    5674  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    5675  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    5676  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
    5677 
    5678  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    5679  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    5680  ~VmaAllocator_T();
    5681 
    5682  const VkAllocationCallbacks* GetAllocationCallbacks() const
    5683  {
    5684  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    5685  }
    5686  const VmaVulkanFunctions& GetVulkanFunctions() const
    5687  {
    5688  return m_VulkanFunctions;
    5689  }
    5690 
    5691  VkDeviceSize GetBufferImageGranularity() const
    5692  {
    5693  return VMA_MAX(
    5694  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
    5695  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    5696  }
    5697 
    5698  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    5699  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
    5700 
    5701  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    5702  {
    5703  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
    5704  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    5705  }
    5706  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    5707  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    5708  {
    5709  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
    5710  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    5711  }
    5712  // Minimum alignment for all allocations in specific memory type.
    5713  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    5714  {
    5715  return IsMemoryTypeNonCoherent(memTypeIndex) ?
    5716  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
    5717  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    5718  }
    5719 
    5720  bool IsIntegratedGpu() const
    5721  {
    5722  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    5723  }
    5724 
    5725 #if VMA_RECORDING_ENABLED
    5726  VmaRecorder* GetRecorder() const { return m_pRecorder; }
    5727 #endif
    5728 
    5729  void GetBufferMemoryRequirements(
    5730  VkBuffer hBuffer,
    5731  VkMemoryRequirements& memReq,
    5732  bool& requiresDedicatedAllocation,
    5733  bool& prefersDedicatedAllocation) const;
    5734  void GetImageMemoryRequirements(
    5735  VkImage hImage,
    5736  VkMemoryRequirements& memReq,
    5737  bool& requiresDedicatedAllocation,
    5738  bool& prefersDedicatedAllocation) const;
    5739 
    5740  // Main allocation function.
    5741  VkResult AllocateMemory(
    5742  const VkMemoryRequirements& vkMemReq,
    5743  bool requiresDedicatedAllocation,
    5744  bool prefersDedicatedAllocation,
    5745  VkBuffer dedicatedBuffer,
    5746  VkImage dedicatedImage,
    5747  const VmaAllocationCreateInfo& createInfo,
    5748  VmaSuballocationType suballocType,
    5749  VmaAllocation* pAllocation);
    5750 
    5751  // Main deallocation function.
    5752  void FreeMemory(const VmaAllocation allocation);
    5753 
    5754  void CalculateStats(VmaStats* pStats);
    5755 
    5756 #if VMA_STATS_STRING_ENABLED
    5757  void PrintDetailedMap(class VmaJsonWriter& json);
    5758 #endif
    5759 
    5760  VkResult Defragment(
    5761  VmaAllocation* pAllocations,
    5762  size_t allocationCount,
    5763  VkBool32* pAllocationsChanged,
    5764  const VmaDefragmentationInfo* pDefragmentationInfo,
    5765  VmaDefragmentationStats* pDefragmentationStats);
    5766 
    5767  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    5768  bool TouchAllocation(VmaAllocation hAllocation);
    5769 
    5770  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    5771  void DestroyPool(VmaPool pool);
    5772  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
    5773 
    5774  void SetCurrentFrameIndex(uint32_t frameIndex);
    5775  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
    5776 
    5777  void MakePoolAllocationsLost(
    5778  VmaPool hPool,
    5779  size_t* pLostAllocationCount);
    5780  VkResult CheckPoolCorruption(VmaPool hPool);
    5781  VkResult CheckCorruption(uint32_t memoryTypeBits);
    5782 
    5783  void CreateLostAllocation(VmaAllocation* pAllocation);
    5784 
    5785  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    5786  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
    5787 
    5788  VkResult Map(VmaAllocation hAllocation, void** ppData);
    5789  void Unmap(VmaAllocation hAllocation);
    5790 
    5791  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
    5792  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
    5793 
    5794  void FlushOrInvalidateAllocation(
    5795  VmaAllocation hAllocation,
    5796  VkDeviceSize offset, VkDeviceSize size,
    5797  VMA_CACHE_OPERATION op);
    5798 
    5799  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
    5800 
    5801 private:
    5802  VkDeviceSize m_PreferredLargeHeapBlockSize;
    5803 
    5804  VkPhysicalDevice m_PhysicalDevice;
    5805  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
    5806 
    5807  VMA_MUTEX m_PoolsMutex;
    5808  // Protected by m_PoolsMutex. Sorted by pointer value.
    5809  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    5810  uint32_t m_NextPoolId;
    5811 
    5812  VmaVulkanFunctions m_VulkanFunctions;
    5813 
    5814 #if VMA_RECORDING_ENABLED
    5815  VmaRecorder* m_pRecorder;
    5816 #endif
    5817 
    5818  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
    5819 
    5820  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
    5821 
    5822  VkResult AllocateMemoryOfType(
    5823  VkDeviceSize size,
    5824  VkDeviceSize alignment,
    5825  bool dedicatedAllocation,
    5826  VkBuffer dedicatedBuffer,
    5827  VkImage dedicatedImage,
    5828  const VmaAllocationCreateInfo& createInfo,
    5829  uint32_t memTypeIndex,
    5830  VmaSuballocationType suballocType,
    5831  VmaAllocation* pAllocation);
    5832 
    5833  // Allocates and registers new VkDeviceMemory specifically for single allocation.
    5834  VkResult AllocateDedicatedMemory(
    5835  VkDeviceSize size,
    5836  VmaSuballocationType suballocType,
    5837  uint32_t memTypeIndex,
    5838  bool map,
    5839  bool isUserDataString,
    5840  void* pUserData,
    5841  VkBuffer dedicatedBuffer,
    5842  VkImage dedicatedImage,
    5843  VmaAllocation* pAllocation);
    5844 
    5845  // Tries to free pMemory as Dedicated Memory. Returns true if found and freed.
    5846  void FreeDedicatedMemory(VmaAllocation allocation);
    5847 };
    5848 
    5850 // Memory allocation #2 after VmaAllocator_T definition
    5851 
// Allocates raw memory using the callbacks stored in the allocator object;
// forwards to the VkAllocationCallbacks-based overload.
static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
{
    return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
}
    5856 
// Frees memory previously obtained from VmaMalloc(hAllocator, ...).
static void VmaFree(VmaAllocator hAllocator, void* ptr)
{
    VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
}
    5861 
// Allocates storage for a single object of type T (does not run its constructor).
template<typename T>
static T* VmaAllocate(VmaAllocator hAllocator)
{
    return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
}
    5867 
// Allocates storage for `count` objects of type T (does not run constructors).
template<typename T>
static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
{
    return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
}
    5873 
    5874 template<typename T>
    5875 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    5876 {
    5877  if(ptr != VMA_NULL)
    5878  {
    5879  ptr->~T();
    5880  VmaFree(hAllocator, ptr);
    5881  }
    5882 }
    5883 
    5884 template<typename T>
    5885 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    5886 {
    5887  if(ptr != VMA_NULL)
    5888  {
    5889  for(size_t i = count; i--; )
    5890  ptr[i].~T();
    5891  VmaFree(hAllocator, ptr);
    5892  }
    5893 }
    5894 
    5896 // VmaStringBuilder
    5897 
    5898 #if VMA_STATS_STRING_ENABLED
    5899 
// Helper class for incrementally building a text buffer (used e.g. for JSON stats dumps),
// allocating through the allocator's VkAllocationCallbacks.
class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    // Note: the returned buffer is not null-terminated by this class itself; use GetLength().
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    void AddPointer(const void* ptr);

private:
    // Accumulated characters; grows as text is appended.
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
    5917 
    5918 void VmaStringBuilder::Add(const char* pStr)
    5919 {
    5920  const size_t strLen = strlen(pStr);
    5921  if(strLen > 0)
    5922  {
    5923  const size_t oldCount = m_Data.size();
    5924  m_Data.resize(oldCount + strLen);
    5925  memcpy(m_Data.data() + oldCount, pStr, strLen);
    5926  }
    5927 }
    5928 
    5929 void VmaStringBuilder::AddNumber(uint32_t num)
    5930 {
    5931  char buf[11];
    5932  VmaUint32ToStr(buf, sizeof(buf), num);
    5933  Add(buf);
    5934 }
    5935 
    5936 void VmaStringBuilder::AddNumber(uint64_t num)
    5937 {
    5938  char buf[21];
    5939  VmaUint64ToStr(buf, sizeof(buf), num);
    5940  Add(buf);
    5941 }
    5942 
    5943 void VmaStringBuilder::AddPointer(const void* ptr)
    5944 {
    5945  char buf[21];
    5946  VmaPtrToStr(buf, sizeof(buf), ptr);
    5947  Add(buf);
    5948 }
    5949 
    5950 #endif // #if VMA_STATS_STRING_ENABLED
    5951 
    5953 // VmaJsonWriter
    5954 
    5955 #if VMA_STATS_STRING_ENABLED
    5956 
/*
Helper class that writes a correctly formed JSON document into a VmaStringBuilder.
Usage: nest BeginObject/EndObject and BeginArray/EndArray; inside an object,
values must alternate key (a string) and value. Misuse is caught by asserts.
*/
class VmaJsonWriter
{
    VMA_CLASS_NO_COPY(VmaJsonWriter)
public:
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    // Opens "{...}". singleLine: the whole collection is written without newlines/indentation.
    void BeginObject(bool singleLine = false);
    void EndObject();

    // Opens "[...]".
    void BeginArray(bool singleLine = false);
    void EndArray();

    // Writes a complete quoted, escaped string value in one call.
    void WriteString(const char* pStr);
    // Begin/Continue.../End compose a single string value from multiple pieces.
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Pointer(const void* ptr);
    void EndString(const char* pStr = VMA_NULL);

    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    // Indentation unit appended once per nesting level (see WriteIndent).
    static const char* const INDENT;

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    struct StackItem
    {
        COLLECTION_TYPE type;
        // Number of values written so far; in an object, even counts mark key positions.
        uint32_t valueCount;
        bool singleLineMode;
    };

    VmaStringBuilder& m_SB;
    // Stack of currently open collections (innermost at the back).
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    // True between BeginString and EndString.
    bool m_InsideString;

    // Emits the separator/indentation that precedes a new value; asserts object keys are strings.
    void BeginValue(bool isString);
    // Emits newline + per-level indentation, unless the innermost collection is single-line.
    void WriteIndent(bool oneLess = false);
};
    6005 
    6006 const char* const VmaJsonWriter::INDENT = " ";
    6007 
// Binds the writer to an output string builder; pAllocationCallbacks is used only
// for the internal collection stack.
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}
    6014 
// On destruction the document must be complete: no open string, no open object/array.
VmaJsonWriter::~VmaJsonWriter()
{
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}
    6020 
    6021 void VmaJsonWriter::BeginObject(bool singleLine)
    6022 {
    6023  VMA_ASSERT(!m_InsideString);
    6024 
    6025  BeginValue(false);
    6026  m_SB.Add('{');
    6027 
    6028  StackItem item;
    6029  item.type = COLLECTION_TYPE_OBJECT;
    6030  item.valueCount = 0;
    6031  item.singleLineMode = singleLine;
    6032  m_Stack.push_back(item);
    6033 }
    6034 
    6035 void VmaJsonWriter::EndObject()
    6036 {
    6037  VMA_ASSERT(!m_InsideString);
    6038 
    6039  WriteIndent(true);
    6040  m_SB.Add('}');
    6041 
    6042  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    6043  m_Stack.pop_back();
    6044 }
    6045 
    6046 void VmaJsonWriter::BeginArray(bool singleLine)
    6047 {
    6048  VMA_ASSERT(!m_InsideString);
    6049 
    6050  BeginValue(false);
    6051  m_SB.Add('[');
    6052 
    6053  StackItem item;
    6054  item.type = COLLECTION_TYPE_ARRAY;
    6055  item.valueCount = 0;
    6056  item.singleLineMode = singleLine;
    6057  m_Stack.push_back(item);
    6058 }
    6059 
    6060 void VmaJsonWriter::EndArray()
    6061 {
    6062  VMA_ASSERT(!m_InsideString);
    6063 
    6064  WriteIndent(true);
    6065  m_SB.Add(']');
    6066 
    6067  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    6068  m_Stack.pop_back();
    6069 }
    6070 
    6071 void VmaJsonWriter::WriteString(const char* pStr)
    6072 {
    6073  BeginString(pStr);
    6074  EndString();
    6075 }
    6076 
    6077 void VmaJsonWriter::BeginString(const char* pStr)
    6078 {
    6079  VMA_ASSERT(!m_InsideString);
    6080 
    6081  BeginValue(true);
    6082  m_SB.Add('"');
    6083  m_InsideString = true;
    6084  if(pStr != VMA_NULL && pStr[0] != '\0')
    6085  {
    6086  ContinueString(pStr);
    6087  }
    6088 }
    6089 
    6090 void VmaJsonWriter::ContinueString(const char* pStr)
    6091 {
    6092  VMA_ASSERT(m_InsideString);
    6093 
    6094  const size_t strLen = strlen(pStr);
    6095  for(size_t i = 0; i < strLen; ++i)
    6096  {
    6097  char ch = pStr[i];
    6098  if(ch == '\\')
    6099  {
    6100  m_SB.Add("\\\\");
    6101  }
    6102  else if(ch == '"')
    6103  {
    6104  m_SB.Add("\\\"");
    6105  }
    6106  else if(ch >= 32)
    6107  {
    6108  m_SB.Add(ch);
    6109  }
    6110  else switch(ch)
    6111  {
    6112  case '\b':
    6113  m_SB.Add("\\b");
    6114  break;
    6115  case '\f':
    6116  m_SB.Add("\\f");
    6117  break;
    6118  case '\n':
    6119  m_SB.Add("\\n");
    6120  break;
    6121  case '\r':
    6122  m_SB.Add("\\r");
    6123  break;
    6124  case '\t':
    6125  m_SB.Add("\\t");
    6126  break;
    6127  default:
    6128  VMA_ASSERT(0 && "Character not currently supported.");
    6129  break;
    6130  }
    6131  }
    6132 }
    6133 
// Appends a decimal number to the currently open string value.
void VmaJsonWriter::ContinueString(uint32_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
    6139 
// Appends a decimal number to the currently open string value.
void VmaJsonWriter::ContinueString(uint64_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
    6145 
// Appends a textual pointer representation to the currently open string value.
void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddPointer(ptr);
}
    6151 
    6152 void VmaJsonWriter::EndString(const char* pStr)
    6153 {
    6154  VMA_ASSERT(m_InsideString);
    6155  if(pStr != VMA_NULL && pStr[0] != '\0')
    6156  {
    6157  ContinueString(pStr);
    6158  }
    6159  m_SB.Add('"');
    6160  m_InsideString = false;
    6161 }
    6162 
// Writes a complete numeric value.
void VmaJsonWriter::WriteNumber(uint32_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
    6169 
// Writes a complete numeric value.
void VmaJsonWriter::WriteNumber(uint64_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
    6176 
    6177 void VmaJsonWriter::WriteBool(bool b)
    6178 {
    6179  VMA_ASSERT(!m_InsideString);
    6180  BeginValue(false);
    6181  m_SB.Add(b ? "true" : "false");
    6182 }
    6183 
// Writes a complete "null" value.
void VmaJsonWriter::WriteNull()
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add("null");
}
    6190 
    6191 void VmaJsonWriter::BeginValue(bool isString)
    6192 {
    6193  if(!m_Stack.empty())
    6194  {
    6195  StackItem& currItem = m_Stack.back();
    6196  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    6197  currItem.valueCount % 2 == 0)
    6198  {
    6199  VMA_ASSERT(isString);
    6200  }
    6201 
    6202  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    6203  currItem.valueCount % 2 != 0)
    6204  {
    6205  m_SB.Add(": ");
    6206  }
    6207  else if(currItem.valueCount > 0)
    6208  {
    6209  m_SB.Add(", ");
    6210  WriteIndent();
    6211  }
    6212  else
    6213  {
    6214  WriteIndent();
    6215  }
    6216  ++currItem.valueCount;
    6217  }
    6218 }
    6219 
    6220 void VmaJsonWriter::WriteIndent(bool oneLess)
    6221 {
    6222  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    6223  {
    6224  m_SB.AddNewLine();
    6225 
    6226  size_t count = m_Stack.size();
    6227  if(count > 0 && oneLess)
    6228  {
    6229  --count;
    6230  }
    6231  for(size_t i = 0; i < count; ++i)
    6232  {
    6233  m_SB.Add(INDENT);
    6234  }
    6235  }
    6236 }
    6237 
    6238 #endif // #if VMA_STATS_STRING_ENABLED
    6239 
    6241 
    6242 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
    6243 {
    6244  if(IsUserDataString())
    6245  {
    6246  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
    6247 
    6248  FreeUserDataString(hAllocator);
    6249 
    6250  if(pUserData != VMA_NULL)
    6251  {
    6252  const char* const newStrSrc = (char*)pUserData;
    6253  const size_t newStrLen = strlen(newStrSrc);
    6254  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
    6255  memcpy(newStrDst, newStrSrc, newStrLen + 1);
    6256  m_pUserData = newStrDst;
    6257  }
    6258  }
    6259  else
    6260  {
    6261  m_pUserData = pUserData;
    6262  }
    6263 }
    6264 
// Re-points this block-type allocation at a (possibly different) device memory block
// and offset, e.g. during defragmentation.
void VmaAllocation_T::ChangeBlockAllocation(
    VmaAllocator hAllocator,
    VmaDeviceMemoryBlock* block,
    VkDeviceSize offset)
{
    VMA_ASSERT(block != VMA_NULL);
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);

    // Move mapping reference counter from old block to new block.
    if(block != m_BlockAllocation.m_Block)
    {
        uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
        // A persistently mapped allocation holds one extra implicit map reference.
        if(IsPersistentMap())
            ++mapRefCount;
        m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
        block->Map(hAllocator, mapRefCount, VMA_NULL);
    }

    m_BlockAllocation.m_Block = block;
    m_BlockAllocation.m_Offset = offset;
}
    6286 
    6287 VkDeviceSize VmaAllocation_T::GetOffset() const
    6288 {
    6289  switch(m_Type)
    6290  {
    6291  case ALLOCATION_TYPE_BLOCK:
    6292  return m_BlockAllocation.m_Offset;
    6293  case ALLOCATION_TYPE_DEDICATED:
    6294  return 0;
    6295  default:
    6296  VMA_ASSERT(0);
    6297  return 0;
    6298  }
    6299 }
    6300 
    6301 VkDeviceMemory VmaAllocation_T::GetMemory() const
    6302 {
    6303  switch(m_Type)
    6304  {
    6305  case ALLOCATION_TYPE_BLOCK:
    6306  return m_BlockAllocation.m_Block->GetDeviceMemory();
    6307  case ALLOCATION_TYPE_DEDICATED:
    6308  return m_DedicatedAllocation.m_hMemory;
    6309  default:
    6310  VMA_ASSERT(0);
    6311  return VK_NULL_HANDLE;
    6312  }
    6313 }
    6314 
    6315 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    6316 {
    6317  switch(m_Type)
    6318  {
    6319  case ALLOCATION_TYPE_BLOCK:
    6320  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
    6321  case ALLOCATION_TYPE_DEDICATED:
    6322  return m_DedicatedAllocation.m_MemoryTypeIndex;
    6323  default:
    6324  VMA_ASSERT(0);
    6325  return UINT32_MAX;
    6326  }
    6327 }
    6328 
    6329 void* VmaAllocation_T::GetMappedData() const
    6330 {
    6331  switch(m_Type)
    6332  {
    6333  case ALLOCATION_TYPE_BLOCK:
    6334  if(m_MapCount != 0)
    6335  {
    6336  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
    6337  VMA_ASSERT(pBlockData != VMA_NULL);
    6338  return (char*)pBlockData + m_BlockAllocation.m_Offset;
    6339  }
    6340  else
    6341  {
    6342  return VMA_NULL;
    6343  }
    6344  break;
    6345  case ALLOCATION_TYPE_DEDICATED:
    6346  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
    6347  return m_DedicatedAllocation.m_pMappedData;
    6348  default:
    6349  VMA_ASSERT(0);
    6350  return VMA_NULL;
    6351  }
    6352 }
    6353 
    6354 bool VmaAllocation_T::CanBecomeLost() const
    6355 {
    6356  switch(m_Type)
    6357  {
    6358  case ALLOCATION_TYPE_BLOCK:
    6359  return m_BlockAllocation.m_CanBecomeLost;
    6360  case ALLOCATION_TYPE_DEDICATED:
    6361  return false;
    6362  default:
    6363  VMA_ASSERT(0);
    6364  return false;
    6365  }
    6366 }
    6367 
// Returns the custom pool this allocation belongs to. Only valid for block-type allocations.
VmaPool VmaAllocation_T::GetPool() const
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    return m_BlockAllocation.m_hPool;
}
    6373 
// Tries to mark this can-become-lost allocation as lost. Returns true on success,
// false if the allocation was used too recently (within frameInUseCount frames).
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    // Lock-free retry loop around a compare-exchange on the last-use frame index.
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            // Already lost - the caller should not be asking again.
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            // Used within the last frameInUseCount frames - may still be in flight on the GPU.
            return false;
        }
        else // Last use time earlier than current time.
        {
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
            // Compare-exchange failed: the index changed concurrently - loop and re-evaluate.
        }
    }
}
    6405 
    6406 #if VMA_STATS_STRING_ENABLED
    6407 
// Correspond to values of enum VmaSuballocationType.
// Indexed directly by the enum value (see PrintParameters / PrintDetailedMap_UnusedRange),
// so the order here must match the enum's declaration order.
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};
    6417 
// Writes this allocation's parameters as key-value pairs into an already open JSON object.
void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
{
    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);

    json.WriteString("Size");
    json.WriteNumber(m_Size);

    if(m_pUserData != VMA_NULL)
    {
        json.WriteString("UserData");
        if(IsUserDataString())
        {
            // Owned string copy - print its text.
            json.WriteString((const char*)m_pUserData);
        }
        else
        {
            // Opaque pointer - print its address.
            json.BeginString();
            json.ContinueString_Pointer(m_pUserData);
            json.EndString();
        }
    }

    json.WriteString("CreationFrameIndex");
    json.WriteNumber(m_CreationFrameIndex);

    json.WriteString("LastUseFrameIndex");
    json.WriteNumber(GetLastUseFrameIndex());

    // Only written when known (non-zero).
    if(m_BufferImageUsage != 0)
    {
        json.WriteString("Usage");
        json.WriteNumber(m_BufferImageUsage);
    }
}
    6453 
    6454 #endif
    6455 
    6456 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
    6457 {
    6458  VMA_ASSERT(IsUserDataString());
    6459  if(m_pUserData != VMA_NULL)
    6460  {
    6461  char* const oldStr = (char*)m_pUserData;
    6462  const size_t oldStrLen = strlen(oldStr);
    6463  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
    6464  m_pUserData = VMA_NULL;
    6465  }
    6466 }
    6467 
    6468 void VmaAllocation_T::BlockAllocMap()
    6469 {
    6470  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    6471 
    6472  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    6473  {
    6474  ++m_MapCount;
    6475  }
    6476  else
    6477  {
    6478  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    6479  }
    6480 }
    6481 
    6482 void VmaAllocation_T::BlockAllocUnmap()
    6483 {
    6484  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    6485 
    6486  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    6487  {
    6488  --m_MapCount;
    6489  }
    6490  else
    6491  {
    6492  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    6493  }
    6494 }
    6495 
    6496 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
    6497 {
    6498  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    6499 
    6500  if(m_MapCount != 0)
    6501  {
    6502  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    6503  {
    6504  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
    6505  *ppData = m_DedicatedAllocation.m_pMappedData;
    6506  ++m_MapCount;
    6507  return VK_SUCCESS;
    6508  }
    6509  else
    6510  {
    6511  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
    6512  return VK_ERROR_MEMORY_MAP_FAILED;
    6513  }
    6514  }
    6515  else
    6516  {
    6517  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    6518  hAllocator->m_hDevice,
    6519  m_DedicatedAllocation.m_hMemory,
    6520  0, // offset
    6521  VK_WHOLE_SIZE,
    6522  0, // flags
    6523  ppData);
    6524  if(result == VK_SUCCESS)
    6525  {
    6526  m_DedicatedAllocation.m_pMappedData = *ppData;
    6527  m_MapCount = 1;
    6528  }
    6529  return result;
    6530  }
    6531 }
    6532 
    6533 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
    6534 {
    6535  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    6536 
    6537  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    6538  {
    6539  --m_MapCount;
    6540  if(m_MapCount == 0)
    6541  {
    6542  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
    6543  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
    6544  hAllocator->m_hDevice,
    6545  m_DedicatedAllocation.m_hMemory);
    6546  }
    6547  }
    6548  else
    6549  {
    6550  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    6551  }
    6552 }
    6553 
    6554 #if VMA_STATS_STRING_ENABLED
    6555 
    6556 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
    6557 {
    6558  json.BeginObject();
    6559 
    6560  json.WriteString("Blocks");
    6561  json.WriteNumber(stat.blockCount);
    6562 
    6563  json.WriteString("Allocations");
    6564  json.WriteNumber(stat.allocationCount);
    6565 
    6566  json.WriteString("UnusedRanges");
    6567  json.WriteNumber(stat.unusedRangeCount);
    6568 
    6569  json.WriteString("UsedBytes");
    6570  json.WriteNumber(stat.usedBytes);
    6571 
    6572  json.WriteString("UnusedBytes");
    6573  json.WriteNumber(stat.unusedBytes);
    6574 
    6575  if(stat.allocationCount > 1)
    6576  {
    6577  json.WriteString("AllocationSize");
    6578  json.BeginObject(true);
    6579  json.WriteString("Min");
    6580  json.WriteNumber(stat.allocationSizeMin);
    6581  json.WriteString("Avg");
    6582  json.WriteNumber(stat.allocationSizeAvg);
    6583  json.WriteString("Max");
    6584  json.WriteNumber(stat.allocationSizeMax);
    6585  json.EndObject();
    6586  }
    6587 
    6588  if(stat.unusedRangeCount > 1)
    6589  {
    6590  json.WriteString("UnusedRangeSize");
    6591  json.BeginObject(true);
    6592  json.WriteString("Min");
    6593  json.WriteNumber(stat.unusedRangeSizeMin);
    6594  json.WriteString("Avg");
    6595  json.WriteNumber(stat.unusedRangeSizeAvg);
    6596  json.WriteString("Max");
    6597  json.WriteNumber(stat.unusedRangeSizeMax);
    6598  json.EndObject();
    6599  }
    6600 
    6601  json.EndObject();
    6602 }
    6603 
    6604 #endif // #if VMA_STATS_STRING_ENABLED
    6605 
    6606 struct VmaSuballocationItemSizeLess
    6607 {
    6608  bool operator()(
    6609  const VmaSuballocationList::iterator lhs,
    6610  const VmaSuballocationList::iterator rhs) const
    6611  {
    6612  return lhs->size < rhs->size;
    6613  }
    6614  bool operator()(
    6615  const VmaSuballocationList::iterator lhs,
    6616  VkDeviceSize rhsSize) const
    6617  {
    6618  return lhs->size < rhsSize;
    6619  }
    6620 };
    6621 
    6622 
    6624 // class VmaBlockMetadata
    6625 
// m_Size stays 0 until Init() is called with the block's actual size.
VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    m_Size(0),
    m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
{
}
    6631 
    6632 #if VMA_STATS_STRING_ENABLED
    6633 
// Opens the per-block JSON object with summary counters and starts the
// "Suballocations" array; must be balanced by PrintDetailedMap_End().
void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    VkDeviceSize unusedBytes,
    size_t allocationCount,
    size_t unusedRangeCount) const
{
    json.BeginObject();

    json.WriteString("TotalBytes");
    json.WriteNumber(GetSize());

    json.WriteString("UnusedBytes");
    json.WriteNumber(unusedBytes);

    json.WriteString("Allocations");
    json.WriteNumber((uint64_t)allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber((uint64_t)unusedRangeCount);

    json.WriteString("Suballocations");
    json.BeginArray();
}
    6656 
// Writes one occupied suballocation as a single-line JSON object inside the
// "Suballocations" array opened by PrintDetailedMap_Begin().
void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VmaAllocation hAllocation) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    // Type, Size, UserData etc. come from the allocation itself.
    hAllocation->PrintParameters(json);

    json.EndObject();
}
    6670 
// Writes one free range as a single-line JSON object inside the
// "Suballocations" array opened by PrintDetailedMap_Begin().
void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VkDeviceSize size) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);

    json.WriteString("Size");
    json.WriteNumber(size);

    json.EndObject();
}
    6688 
// Closes the "Suballocations" array and the per-block object opened by PrintDetailedMap_Begin().
void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
{
    json.EndArray();
    json.EndObject();
}
    6694 
    6695 #endif // #if VMA_STATS_STRING_ENABLED
    6696 
    6698 // class VmaBlockMetadata_Generic
    6699 
// Collections allocate via the allocator's callbacks; real state is set up in Init().
VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}
    6708 
// Members clean up themselves; nothing extra to release here.
VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
{
}
    6712 
    6713 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
    6714 {
    6715  VmaBlockMetadata::Init(size);
    6716 
    6717  m_FreeCount = 1;
    6718  m_SumFreeSize = size;
    6719 
    6720  VmaSuballocation suballoc = {};
    6721  suballoc.offset = 0;
    6722  suballoc.size = size;
    6723  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    6724  suballoc.hAllocation = VK_NULL_HANDLE;
    6725 
    6726  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    6727  m_Suballocations.push_back(suballoc);
    6728  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    6729  --suballocItem;
    6730  m_FreeSuballocationsBySize.push_back(suballocItem);
    6731 }
    6732 
// Debug self-check: walks all suballocations and verifies every internal invariant
// (contiguity, free/used bookkeeping, by-size index consistency). Returns true when
// everything is consistent; each VMA_VALIDATE fails fast otherwise.
bool VmaBlockMetadata_Generic::Validate() const
{
    VMA_VALIDATE(!m_Suballocations.empty());

    // Expected offset of new suballocation as calculated from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visited suballocation was free.
    bool prevFree = false;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        VMA_VALIDATE(subAlloc.offset == calculatedOffset);

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        VMA_VALIDATE(!prevFree || !currFree);

        // A free range has no allocation handle; an occupied one must have it.
        VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                ++freeSuballocationsToRegister;
            }

            // Margin required between allocations - every free space must be at least that large.
            VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
        }
        else
        {
            // The allocation object must agree with the list about its own placement.
            VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
            VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);

            // Margin required between allocations - previous allocation must be free.
            VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
        }

        calculatedOffset += subAlloc.size;
        prevFree = currFree;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);

    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
        // They must be sorted by size ascending.
        VMA_VALIDATE(suballocItem->size >= lastSize);

        lastSize = suballocItem->size;
    }

    // Check if totals match calculated values.
    VMA_VALIDATE(ValidateFreeSuballocationList());
    VMA_VALIDATE(calculatedOffset == GetSize());
    VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    VMA_VALIDATE(calculatedFreeCount == m_FreeCount);

    return true;
}
    6814 
    6815 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
    6816 {
    6817  if(!m_FreeSuballocationsBySize.empty())
    6818  {
    6819  return m_FreeSuballocationsBySize.back()->size;
    6820  }
    6821  else
    6822  {
    6823  return 0;
    6824  }
    6825 }
    6826 
    6827 bool VmaBlockMetadata_Generic::IsEmpty() const
    6828 {
    6829  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
    6830 }
    6831 
    6832 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    6833 {
    6834  outInfo.blockCount = 1;
    6835 
    6836  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    6837  outInfo.allocationCount = rangeCount - m_FreeCount;
    6838  outInfo.unusedRangeCount = m_FreeCount;
    6839 
    6840  outInfo.unusedBytes = m_SumFreeSize;
    6841  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
    6842 
    6843  outInfo.allocationSizeMin = UINT64_MAX;
    6844  outInfo.allocationSizeMax = 0;
    6845  outInfo.unusedRangeSizeMin = UINT64_MAX;
    6846  outInfo.unusedRangeSizeMax = 0;
    6847 
    6848  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    6849  suballocItem != m_Suballocations.cend();
    6850  ++suballocItem)
    6851  {
    6852  const VmaSuballocation& suballoc = *suballocItem;
    6853  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    6854  {
    6855  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    6856  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    6857  }
    6858  else
    6859  {
    6860  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
    6861  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
    6862  }
    6863  }
    6864 }
    6865 
    6866 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
    6867 {
    6868  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    6869 
    6870  inoutStats.size += GetSize();
    6871  inoutStats.unusedSize += m_SumFreeSize;
    6872  inoutStats.allocationCount += rangeCount - m_FreeCount;
    6873  inoutStats.unusedRangeCount += m_FreeCount;
    6874  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    6875 }
    6876 
    6877 #if VMA_STATS_STRING_ENABLED
    6878 
    6879 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
    6880 {
    6881  PrintDetailedMap_Begin(json,
    6882  m_SumFreeSize, // unusedBytes
    6883  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
    6884  m_FreeCount); // unusedRangeCount
    6885 
    6886  size_t i = 0;
    6887  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    6888  suballocItem != m_Suballocations.cend();
    6889  ++suballocItem, ++i)
    6890  {
    6891  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    6892  {
    6893  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
    6894  }
    6895  else
    6896  {
    6897  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
    6898  }
    6899  }
    6900 
    6901  PrintDetailedMap_End(json);
    6902 }
    6903 
    6904 #endif // #if VMA_STATS_STRING_ENABLED
    6905 
    6906 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
    6907  uint32_t currentFrameIndex,
    6908  uint32_t frameInUseCount,
    6909  VkDeviceSize bufferImageGranularity,
    6910  VkDeviceSize allocSize,
    6911  VkDeviceSize allocAlignment,
    6912  bool upperAddress,
    6913  VmaSuballocationType allocType,
    6914  bool canMakeOtherLost,
    6915  uint32_t strategy,
    6916  VmaAllocationRequest* pAllocationRequest)
    6917 {
    6918  VMA_ASSERT(allocSize > 0);
    6919  VMA_ASSERT(!upperAddress);
    6920  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    6921  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    6922  VMA_HEAVY_ASSERT(Validate());
    6923 
    6924  // There is not enough total free space in this block to fullfill the request: Early return.
    6925  if(canMakeOtherLost == false &&
    6926  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
    6927  {
    6928  return false;
    6929  }
    6930 
    6931  // New algorithm, efficiently searching freeSuballocationsBySize.
    6932  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    6933  if(freeSuballocCount > 0)
    6934  {
    6936  {
    6937  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
    6938  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    6939  m_FreeSuballocationsBySize.data(),
    6940  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    6941  allocSize + 2 * VMA_DEBUG_MARGIN,
    6942  VmaSuballocationItemSizeLess());
    6943  size_t index = it - m_FreeSuballocationsBySize.data();
    6944  for(; index < freeSuballocCount; ++index)
    6945  {
    6946  if(CheckAllocation(
    6947  currentFrameIndex,
    6948  frameInUseCount,
    6949  bufferImageGranularity,
    6950  allocSize,
    6951  allocAlignment,
    6952  allocType,
    6953  m_FreeSuballocationsBySize[index],
    6954  false, // canMakeOtherLost
    6955  &pAllocationRequest->offset,
    6956  &pAllocationRequest->itemsToMakeLostCount,
    6957  &pAllocationRequest->sumFreeSize,
    6958  &pAllocationRequest->sumItemSize))
    6959  {
    6960  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    6961  return true;
    6962  }
    6963  }
    6964  }
    6965  else // WORST_FIT, FIRST_FIT
    6966  {
    6967  // Search staring from biggest suballocations.
    6968  for(size_t index = freeSuballocCount; index--; )
    6969  {
    6970  if(CheckAllocation(
    6971  currentFrameIndex,
    6972  frameInUseCount,
    6973  bufferImageGranularity,
    6974  allocSize,
    6975  allocAlignment,
    6976  allocType,
    6977  m_FreeSuballocationsBySize[index],
    6978  false, // canMakeOtherLost
    6979  &pAllocationRequest->offset,
    6980  &pAllocationRequest->itemsToMakeLostCount,
    6981  &pAllocationRequest->sumFreeSize,
    6982  &pAllocationRequest->sumItemSize))
    6983  {
    6984  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    6985  return true;
    6986  }
    6987  }
    6988  }
    6989  }
    6990 
    6991  if(canMakeOtherLost)
    6992  {
    6993  // Brute-force algorithm. TODO: Come up with something better.
    6994 
    6995  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
    6996  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
    6997 
    6998  VmaAllocationRequest tmpAllocRequest = {};
    6999  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    7000  suballocIt != m_Suballocations.end();
    7001  ++suballocIt)
    7002  {
    7003  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    7004  suballocIt->hAllocation->CanBecomeLost())
    7005  {
    7006  if(CheckAllocation(
    7007  currentFrameIndex,
    7008  frameInUseCount,
    7009  bufferImageGranularity,
    7010  allocSize,
    7011  allocAlignment,
    7012  allocType,
    7013  suballocIt,
    7014  canMakeOtherLost,
    7015  &tmpAllocRequest.offset,
    7016  &tmpAllocRequest.itemsToMakeLostCount,
    7017  &tmpAllocRequest.sumFreeSize,
    7018  &tmpAllocRequest.sumItemSize))
    7019  {
    7020  tmpAllocRequest.item = suballocIt;
    7021 
    7022  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
    7024  {
    7025  *pAllocationRequest = tmpAllocRequest;
    7026  }
    7027  }
    7028  }
    7029  }
    7030 
    7031  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
    7032  {
    7033  return true;
    7034  }
    7035  }
    7036 
    7037  return false;
    7038 }
    7039 
bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    // Makes lost the `itemsToMakeLostCount` allocations counted in the request,
    // walking forward from pAllocationRequest->item and freeing each one.
    // Returns false if any allocation refuses MakeLost(); the request's
    // iterator and counter are mutated in place as progress is made.
    while(pAllocationRequest->itemsToMakeLostCount > 0)
    {
        // Skip over a free suballocation - only used ones can be made lost.
        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++pAllocationRequest->item;
        }
        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation may merge with free neighbors; continue from
            // the iterator it returns, which points at the merged free range.
            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
            --pAllocationRequest->itemsToMakeLostCount;
        }
        else
        {
            return false;
        }
    }

    // On success the request must now point at a free suballocation.
    VMA_HEAVY_ASSERT(Validate());
    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);

    return true;
}
    7071 
    7072 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    7073 {
    7074  uint32_t lostAllocationCount = 0;
    7075  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7076  it != m_Suballocations.end();
    7077  ++it)
    7078  {
    7079  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
    7080  it->hAllocation->CanBecomeLost() &&
    7081  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    7082  {
    7083  it = FreeSuballocation(it);
    7084  ++lostAllocationCount;
    7085  }
    7086  }
    7087  return lostAllocationCount;
    7088 }
    7089 
    7090 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
    7091 {
    7092  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7093  it != m_Suballocations.end();
    7094  ++it)
    7095  {
    7096  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    7097  {
    7098  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
    7099  {
    7100  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    7101  return VK_ERROR_VALIDATION_FAILED_EXT;
    7102  }
    7103  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
    7104  {
    7105  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    7106  return VK_ERROR_VALIDATION_FAILED_EXT;
    7107  }
    7108  }
    7109  }
    7110 
    7111  return VK_SUCCESS;
    7112 }
    7113 
void VmaBlockMetadata_Generic::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    // Commits a validated allocation request: the free suballocation at
    // request.item becomes a used suballocation of `allocSize` bytes at
    // request.offset. Leftover space before/after the new allocation is
    // split off into new free suballocations and re-registered.
    VMA_ASSERT(!upperAddress);
    VMA_ASSERT(request.item != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.item;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used.
    UnregisterFreeSuballocation(request.item);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;
    suballoc.hAllocation = hAllocation;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.item;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.item, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals: the consumed free range is gone, but each non-zero
    // padding range re-added above counts as a new free range.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
    {
        ++m_FreeCount;
    }
    if(paddingEnd > 0)
    {
        ++m_FreeCount;
    }
    m_SumFreeSize -= allocSize;
}
    7179 
    7180 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
    7181 {
    7182  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7183  suballocItem != m_Suballocations.end();
    7184  ++suballocItem)
    7185  {
    7186  VmaSuballocation& suballoc = *suballocItem;
    7187  if(suballoc.hAllocation == allocation)
    7188  {
    7189  FreeSuballocation(suballocItem);
    7190  VMA_HEAVY_ASSERT(Validate());
    7191  return;
    7192  }
    7193  }
    7194  VMA_ASSERT(0 && "Not found!");
    7195 }
    7196 
    7197 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
    7198 {
    7199  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7200  suballocItem != m_Suballocations.end();
    7201  ++suballocItem)
    7202  {
    7203  VmaSuballocation& suballoc = *suballocItem;
    7204  if(suballoc.offset == offset)
    7205  {
    7206  FreeSuballocation(suballocItem);
    7207  return;
    7208  }
    7209  }
    7210  VMA_ASSERT(0 && "Not found!");
    7211 }
    7212 
    7213 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
    7214 {
    7215  VkDeviceSize lastSize = 0;
    7216  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    7217  {
    7218  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    7219 
    7220  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
    7221  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    7222  VMA_VALIDATE(it->size >= lastSize);
    7223  lastSize = it->size;
    7224  }
    7225  return true;
    7226 }
    7227 
bool VmaBlockMetadata_Generic::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    // Tests whether an allocation of allocSize/allocAlignment can be placed
    // starting at `suballocItem`, respecting VMA_DEBUG_MARGIN and
    // bufferImageGranularity conflicts with neighboring suballocations.
    // On success returns true and fills *pOffset with the final offset.
    // With `canMakeOtherLost`, may span multiple suballocations, counting in
    // *itemsToMakeLostCount how many existing allocations must be made lost;
    // *pSumFreeSize / *pSumItemSize accumulate the free vs. to-be-lost bytes
    // that the caller uses for cost comparison.
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            // The starting item is used: it can only serve as a base if its
            // allocation is lost-capable and already expired.
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(GetSize() - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > GetSize())
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        // Simple path: the allocation must fit entirely inside this single
        // free suballocation.
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
    7501 
    7502 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
    7503 {
    7504  VMA_ASSERT(item != m_Suballocations.end());
    7505  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7506 
    7507  VmaSuballocationList::iterator nextItem = item;
    7508  ++nextItem;
    7509  VMA_ASSERT(nextItem != m_Suballocations.end());
    7510  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    7511 
    7512  item->size += nextItem->size;
    7513  --m_FreeCount;
    7514  m_Suballocations.erase(nextItem);
    7515 }
    7516 
VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Turns a used suballocation into a free one, coalesces it with free
    // neighbors, updates totals and the size-sorted registry. Returns an
    // iterator to the resulting (possibly merged) free suballocation.
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    // Neighbors must be unregistered from the size-sorted vector before the
    // merge changes their sizes, and the survivor re-registered afterwards.
    if(mergeWithNext)
    {
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        RegisterFreeSuballocation(prevItem);
        return prevItem;
    }
    else
    {
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
    7568 
    7569 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    7570 {
    7571  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7572  VMA_ASSERT(item->size > 0);
    7573 
    7574  // You may want to enable this validation at the beginning or at the end of
    7575  // this function, depending on what do you want to check.
    7576  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7577 
    7578  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    7579  {
    7580  if(m_FreeSuballocationsBySize.empty())
    7581  {
    7582  m_FreeSuballocationsBySize.push_back(item);
    7583  }
    7584  else
    7585  {
    7586  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    7587  }
    7588  }
    7589 
    7590  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7591 }
    7592 
    7593 
void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
{
    // Removes `item` from m_FreeSuballocationsBySize, where it must be present
    // if its size meets the registration threshold. Binary-searches by size,
    // then scans linearly through equal-size entries for the exact iterator.
    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(item->size > 0);

    // You may want to enable this validation at the beginning or at the end of
    // this function, depending on what do you want to check.
    VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());

    if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    {
        VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
            m_FreeSuballocationsBySize.data(),
            m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
            item,
            VmaSuballocationItemSizeLess());
        for(size_t index = it - m_FreeSuballocationsBySize.data();
            index < m_FreeSuballocationsBySize.size();
            ++index)
        {
            if(m_FreeSuballocationsBySize[index] == item)
            {
                VmaVectorRemove(m_FreeSuballocationsBySize, index);
                return;
            }
            // Still inside the run of equal-size entries; passing its end
            // means the item is missing from the registry.
            VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
        }
        VMA_ASSERT(0 && "Not found.");
    }

    //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
}
    7626 
////////////////////////////////////////////////////////////////////////////////
// class VmaBlockMetadata_Linear
    7629 
// Constructs empty linear metadata: no suballocations, both vectors empty,
// the 2nd vector unused, and all null-item counters at zero.
VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_SumFreeSize(0),
    // Both suballocation vectors allocate through the allocator's CPU
    // allocation callbacks.
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_1stVectorIndex(0),
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0)
{
}
    7642 
VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
{
    // Nothing to release explicitly; members clean up in their own destructors.
}
    7646 
void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
{
    // Initialize base metadata, then mark the entire block as free.
    VmaBlockMetadata::Init(size);
    m_SumFreeSize = size;
}
    7652 
bool VmaBlockMetadata_Linear::Validate() const
{
    // Full consistency check of the linear (dual-vector) metadata: verifies
    // vector/mode agreement, null-item counters, that offsets are strictly
    // increasing with VMA_DEBUG_MARGIN gaps, that each used suballocation
    // agrees with its VmaAllocation, and that m_SumFreeSize matches the sum
    // of used sizes. Returns true on success (VMA_VALIDATE returns false on
    // the first violated condition).
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    VMA_VALIDATE(!suballocations1st.empty() ||
        suballocations2nd.empty() ||
        m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);

    if(!suballocations1st.empty())
    {
        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
        VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    }
    if(!suballocations2nd.empty())
    {
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    }

    VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());

    VkDeviceSize sumUsedSize = 0;
    const size_t suballoc1stCount = suballocations1st.size();
    VkDeviceSize offset = VMA_DEBUG_MARGIN;

    // In ring-buffer mode the 2nd vector occupies the lower addresses, so
    // it is validated first and `offset` carries over into the 1st vector.
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = 0; i < suballoc2ndCount; ++i)
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    // The leading m_1stNullItemsBeginCount items of the 1st vector must all
    // be null/free placeholders.
    for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
            suballoc.hAllocation == VK_NULL_HANDLE);
    }

    size_t nullItem1stCount = m_1stNullItemsBeginCount;

    for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
        VMA_VALIDATE(suballoc.offset >= offset);
        // NOTE(review): with the loop starting at m_1stNullItemsBeginCount this
        // condition is trivially true - verify the intended invariant.
        VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);

        if(!currFree)
        {
            VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
            VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
            sumUsedSize += suballoc.size;
        }
        else
        {
            ++nullItem1stCount;
        }

        offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    }
    VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);

    // In double-stack mode the 2nd vector grows downward from the end of the
    // block, so it is iterated in reverse to keep offsets increasing.
    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = suballoc2ndCount; i--; )
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    VMA_VALIDATE(offset <= GetSize());
    VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);

    return true;
}
    7779 
    7780 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
    7781 {
    7782  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
    7783  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
    7784 }
    7785 
    7786 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
    7787 {
    7788  const VkDeviceSize size = GetSize();
    7789 
    7790  /*
    7791  We don't consider gaps inside allocation vectors with freed allocations because
    7792  they are not suitable for reuse in linear allocator. We consider only space that
    7793  is available for new allocations.
    7794  */
    7795  if(IsEmpty())
    7796  {
    7797  return size;
    7798  }
    7799 
    7800  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    7801 
    7802  switch(m_2ndVectorMode)
    7803  {
    7804  case SECOND_VECTOR_EMPTY:
    7805  /*
    7806  Available space is after end of 1st, as well as before beginning of 1st (which
    7807  whould make it a ring buffer).
    7808  */
    7809  {
    7810  const size_t suballocations1stCount = suballocations1st.size();
    7811  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
    7812  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
    7813  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
    7814  return VMA_MAX(
    7815  firstSuballoc.offset,
    7816  size - (lastSuballoc.offset + lastSuballoc.size));
    7817  }
    7818  break;
    7819 
    7820  case SECOND_VECTOR_RING_BUFFER:
    7821  /*
    7822  Available space is only between end of 2nd and beginning of 1st.
    7823  */
    7824  {
    7825  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    7826  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
    7827  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
    7828  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
    7829  }
    7830  break;
    7831 
    7832  case SECOND_VECTOR_DOUBLE_STACK:
    7833  /*
    7834  Available space is only between end of 1st and top of 2nd.
    7835  */
    7836  {
    7837  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    7838  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
    7839  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
    7840  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
    7841  }
    7842  break;
    7843 
    7844  default:
    7845  VMA_ASSERT(0);
    7846  return 0;
    7847  }
    7848 }
    7849 
    7850 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    7851 {
    7852  const VkDeviceSize size = GetSize();
    7853  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    7854  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    7855  const size_t suballoc1stCount = suballocations1st.size();
    7856  const size_t suballoc2ndCount = suballocations2nd.size();
    7857 
    7858  outInfo.blockCount = 1;
    7859  outInfo.allocationCount = (uint32_t)GetAllocationCount();
    7860  outInfo.unusedRangeCount = 0;
    7861  outInfo.usedBytes = 0;
    7862  outInfo.allocationSizeMin = UINT64_MAX;
    7863  outInfo.allocationSizeMax = 0;
    7864  outInfo.unusedRangeSizeMin = UINT64_MAX;
    7865  outInfo.unusedRangeSizeMax = 0;
    7866 
    7867  VkDeviceSize lastOffset = 0;
    7868 
    7869  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    7870  {
    7871  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    7872  size_t nextAlloc2ndIndex = 0;
    7873  while(lastOffset < freeSpace2ndTo1stEnd)
    7874  {
    7875  // Find next non-null allocation or move nextAllocIndex to the end.
    7876  while(nextAlloc2ndIndex < suballoc2ndCount &&
    7877  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    7878  {
    7879  ++nextAlloc2ndIndex;
    7880  }
    7881 
    7882  // Found non-null allocation.
    7883  if(nextAlloc2ndIndex < suballoc2ndCount)
    7884  {
    7885  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    7886 
    7887  // 1. Process free space before this allocation.
    7888  if(lastOffset < suballoc.offset)
    7889  {
    7890  // There is free space from lastOffset to suballoc.offset.
    7891  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    7892  ++outInfo.unusedRangeCount;
    7893  outInfo.unusedBytes += unusedRangeSize;
    7894  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7895  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7896  }
    7897 
    7898  // 2. Process this allocation.
    7899  // There is allocation with suballoc.offset, suballoc.size.
    7900  outInfo.usedBytes += suballoc.size;
    7901  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    7902  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    7903 
    7904  // 3. Prepare for next iteration.
    7905  lastOffset = suballoc.offset + suballoc.size;
    7906  ++nextAlloc2ndIndex;
    7907  }
    7908  // We are at the end.
    7909  else
    7910  {
    7911  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    7912  if(lastOffset < freeSpace2ndTo1stEnd)
    7913  {
    7914  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    7915  ++outInfo.unusedRangeCount;
    7916  outInfo.unusedBytes += unusedRangeSize;
    7917  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7918  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7919  }
    7920 
    7921  // End of loop.
    7922  lastOffset = freeSpace2ndTo1stEnd;
    7923  }
    7924  }
    7925  }
    7926 
    7927  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    7928  const VkDeviceSize freeSpace1stTo2ndEnd =
    7929  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    7930  while(lastOffset < freeSpace1stTo2ndEnd)
    7931  {
    7932  // Find next non-null allocation or move nextAllocIndex to the end.
    7933  while(nextAlloc1stIndex < suballoc1stCount &&
    7934  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    7935  {
    7936  ++nextAlloc1stIndex;
    7937  }
    7938 
    7939  // Found non-null allocation.
    7940  if(nextAlloc1stIndex < suballoc1stCount)
    7941  {
    7942  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    7943 
    7944  // 1. Process free space before this allocation.
    7945  if(lastOffset < suballoc.offset)
    7946  {
    7947  // There is free space from lastOffset to suballoc.offset.
    7948  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    7949  ++outInfo.unusedRangeCount;
    7950  outInfo.unusedBytes += unusedRangeSize;
    7951  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7952  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7953  }
    7954 
    7955  // 2. Process this allocation.
    7956  // There is allocation with suballoc.offset, suballoc.size.
    7957  outInfo.usedBytes += suballoc.size;
    7958  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    7959  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    7960 
    7961  // 3. Prepare for next iteration.
    7962  lastOffset = suballoc.offset + suballoc.size;
    7963  ++nextAlloc1stIndex;
    7964  }
    7965  // We are at the end.
    7966  else
    7967  {
    7968  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    7969  if(lastOffset < freeSpace1stTo2ndEnd)
    7970  {
    7971  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    7972  ++outInfo.unusedRangeCount;
    7973  outInfo.unusedBytes += unusedRangeSize;
    7974  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7975  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7976  }
    7977 
    7978  // End of loop.
    7979  lastOffset = freeSpace1stTo2ndEnd;
    7980  }
    7981  }
    7982 
    7983  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    7984  {
    7985  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    7986  while(lastOffset < size)
    7987  {
    7988  // Find next non-null allocation or move nextAllocIndex to the end.
    7989  while(nextAlloc2ndIndex != SIZE_MAX &&
    7990  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    7991  {
    7992  --nextAlloc2ndIndex;
    7993  }
    7994 
    7995  // Found non-null allocation.
    7996  if(nextAlloc2ndIndex != SIZE_MAX)
    7997  {
    7998  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    7999 
    8000  // 1. Process free space before this allocation.
    8001  if(lastOffset < suballoc.offset)
    8002  {
    8003  // There is free space from lastOffset to suballoc.offset.
    8004  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8005  ++outInfo.unusedRangeCount;
    8006  outInfo.unusedBytes += unusedRangeSize;
    8007  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8008  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8009  }
    8010 
    8011  // 2. Process this allocation.
    8012  // There is allocation with suballoc.offset, suballoc.size.
    8013  outInfo.usedBytes += suballoc.size;
    8014  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8015  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8016 
    8017  // 3. Prepare for next iteration.
    8018  lastOffset = suballoc.offset + suballoc.size;
    8019  --nextAlloc2ndIndex;
    8020  }
    8021  // We are at the end.
    8022  else
    8023  {
    8024  // There is free space from lastOffset to size.
    8025  if(lastOffset < size)
    8026  {
    8027  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8028  ++outInfo.unusedRangeCount;
    8029  outInfo.unusedBytes += unusedRangeSize;
    8030  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8031  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8032  }
    8033 
    8034  // End of loop.
    8035  lastOffset = size;
    8036  }
    8037  }
    8038  }
    8039 
    8040  outInfo.unusedBytes = size - outInfo.usedBytes;
    8041 }
    8042 
    8043 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
    8044 {
    8045  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8046  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8047  const VkDeviceSize size = GetSize();
    8048  const size_t suballoc1stCount = suballocations1st.size();
    8049  const size_t suballoc2ndCount = suballocations2nd.size();
    8050 
    8051  inoutStats.size += size;
    8052 
    8053  VkDeviceSize lastOffset = 0;
    8054 
    8055  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8056  {
    8057  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8058  size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
    8059  while(lastOffset < freeSpace2ndTo1stEnd)
    8060  {
    8061  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8062  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8063  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8064  {
    8065  ++nextAlloc2ndIndex;
    8066  }
    8067 
    8068  // Found non-null allocation.
    8069  if(nextAlloc2ndIndex < suballoc2ndCount)
    8070  {
    8071  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8072 
    8073  // 1. Process free space before this allocation.
    8074  if(lastOffset < suballoc.offset)
    8075  {
    8076  // There is free space from lastOffset to suballoc.offset.
    8077  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8078  inoutStats.unusedSize += unusedRangeSize;
    8079  ++inoutStats.unusedRangeCount;
    8080  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8081  }
    8082 
    8083  // 2. Process this allocation.
    8084  // There is allocation with suballoc.offset, suballoc.size.
    8085  ++inoutStats.allocationCount;
    8086 
    8087  // 3. Prepare for next iteration.
    8088  lastOffset = suballoc.offset + suballoc.size;
    8089  ++nextAlloc2ndIndex;
    8090  }
    8091  // We are at the end.
    8092  else
    8093  {
    8094  if(lastOffset < freeSpace2ndTo1stEnd)
    8095  {
    8096  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8097  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8098  inoutStats.unusedSize += unusedRangeSize;
    8099  ++inoutStats.unusedRangeCount;
    8100  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8101  }
    8102 
    8103  // End of loop.
    8104  lastOffset = freeSpace2ndTo1stEnd;
    8105  }
    8106  }
    8107  }
    8108 
    8109  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8110  const VkDeviceSize freeSpace1stTo2ndEnd =
    8111  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8112  while(lastOffset < freeSpace1stTo2ndEnd)
    8113  {
    8114  // Find next non-null allocation or move nextAllocIndex to the end.
    8115  while(nextAlloc1stIndex < suballoc1stCount &&
    8116  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8117  {
    8118  ++nextAlloc1stIndex;
    8119  }
    8120 
    8121  // Found non-null allocation.
    8122  if(nextAlloc1stIndex < suballoc1stCount)
    8123  {
    8124  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8125 
    8126  // 1. Process free space before this allocation.
    8127  if(lastOffset < suballoc.offset)
    8128  {
    8129  // There is free space from lastOffset to suballoc.offset.
    8130  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8131  inoutStats.unusedSize += unusedRangeSize;
    8132  ++inoutStats.unusedRangeCount;
    8133  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8134  }
    8135 
    8136  // 2. Process this allocation.
    8137  // There is allocation with suballoc.offset, suballoc.size.
    8138  ++inoutStats.allocationCount;
    8139 
    8140  // 3. Prepare for next iteration.
    8141  lastOffset = suballoc.offset + suballoc.size;
    8142  ++nextAlloc1stIndex;
    8143  }
    8144  // We are at the end.
    8145  else
    8146  {
    8147  if(lastOffset < freeSpace1stTo2ndEnd)
    8148  {
    8149  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8150  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8151  inoutStats.unusedSize += unusedRangeSize;
    8152  ++inoutStats.unusedRangeCount;
    8153  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8154  }
    8155 
    8156  // End of loop.
    8157  lastOffset = freeSpace1stTo2ndEnd;
    8158  }
    8159  }
    8160 
    8161  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8162  {
    8163  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8164  while(lastOffset < size)
    8165  {
    8166  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8167  while(nextAlloc2ndIndex != SIZE_MAX &&
    8168  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8169  {
    8170  --nextAlloc2ndIndex;
    8171  }
    8172 
    8173  // Found non-null allocation.
    8174  if(nextAlloc2ndIndex != SIZE_MAX)
    8175  {
    8176  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8177 
    8178  // 1. Process free space before this allocation.
    8179  if(lastOffset < suballoc.offset)
    8180  {
    8181  // There is free space from lastOffset to suballoc.offset.
    8182  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8183  inoutStats.unusedSize += unusedRangeSize;
    8184  ++inoutStats.unusedRangeCount;
    8185  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8186  }
    8187 
    8188  // 2. Process this allocation.
    8189  // There is allocation with suballoc.offset, suballoc.size.
    8190  ++inoutStats.allocationCount;
    8191 
    8192  // 3. Prepare for next iteration.
    8193  lastOffset = suballoc.offset + suballoc.size;
    8194  --nextAlloc2ndIndex;
    8195  }
    8196  // We are at the end.
    8197  else
    8198  {
    8199  if(lastOffset < size)
    8200  {
    8201  // There is free space from lastOffset to size.
    8202  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8203  inoutStats.unusedSize += unusedRangeSize;
    8204  ++inoutStats.unusedRangeCount;
    8205  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8206  }
    8207 
    8208  // End of loop.
    8209  lastOffset = size;
    8210  }
    8211  }
    8212  }
    8213 }
    8214 
    8215 #if VMA_STATS_STRING_ENABLED
    8216 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
    8217 {
    8218  const VkDeviceSize size = GetSize();
    8219  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8220  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8221  const size_t suballoc1stCount = suballocations1st.size();
    8222  const size_t suballoc2ndCount = suballocations2nd.size();
    8223 
    8224  // FIRST PASS
    8225 
    8226  size_t unusedRangeCount = 0;
    8227  VkDeviceSize usedBytes = 0;
    8228 
    8229  VkDeviceSize lastOffset = 0;
    8230 
    8231  size_t alloc2ndCount = 0;
    8232  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8233  {
    8234  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8235  size_t nextAlloc2ndIndex = 0;
    8236  while(lastOffset < freeSpace2ndTo1stEnd)
    8237  {
    8238  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8239  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8240  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8241  {
    8242  ++nextAlloc2ndIndex;
    8243  }
    8244 
    8245  // Found non-null allocation.
    8246  if(nextAlloc2ndIndex < suballoc2ndCount)
    8247  {
    8248  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8249 
    8250  // 1. Process free space before this allocation.
    8251  if(lastOffset < suballoc.offset)
    8252  {
    8253  // There is free space from lastOffset to suballoc.offset.
    8254  ++unusedRangeCount;
    8255  }
    8256 
    8257  // 2. Process this allocation.
    8258  // There is allocation with suballoc.offset, suballoc.size.
    8259  ++alloc2ndCount;
    8260  usedBytes += suballoc.size;
    8261 
    8262  // 3. Prepare for next iteration.
    8263  lastOffset = suballoc.offset + suballoc.size;
    8264  ++nextAlloc2ndIndex;
    8265  }
    8266  // We are at the end.
    8267  else
    8268  {
    8269  if(lastOffset < freeSpace2ndTo1stEnd)
    8270  {
    8271  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8272  ++unusedRangeCount;
    8273  }
    8274 
    8275  // End of loop.
    8276  lastOffset = freeSpace2ndTo1stEnd;
    8277  }
    8278  }
    8279  }
    8280 
    8281  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8282  size_t alloc1stCount = 0;
    8283  const VkDeviceSize freeSpace1stTo2ndEnd =
    8284  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8285  while(lastOffset < freeSpace1stTo2ndEnd)
    8286  {
    8287  // Find next non-null allocation or move nextAllocIndex to the end.
    8288  while(nextAlloc1stIndex < suballoc1stCount &&
    8289  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8290  {
    8291  ++nextAlloc1stIndex;
    8292  }
    8293 
    8294  // Found non-null allocation.
    8295  if(nextAlloc1stIndex < suballoc1stCount)
    8296  {
    8297  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8298 
    8299  // 1. Process free space before this allocation.
    8300  if(lastOffset < suballoc.offset)
    8301  {
    8302  // There is free space from lastOffset to suballoc.offset.
    8303  ++unusedRangeCount;
    8304  }
    8305 
    8306  // 2. Process this allocation.
    8307  // There is allocation with suballoc.offset, suballoc.size.
    8308  ++alloc1stCount;
    8309  usedBytes += suballoc.size;
    8310 
    8311  // 3. Prepare for next iteration.
    8312  lastOffset = suballoc.offset + suballoc.size;
    8313  ++nextAlloc1stIndex;
    8314  }
    8315  // We are at the end.
    8316  else
    8317  {
    8318  if(lastOffset < size)
    8319  {
    8320  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8321  ++unusedRangeCount;
    8322  }
    8323 
    8324  // End of loop.
    8325  lastOffset = freeSpace1stTo2ndEnd;
    8326  }
    8327  }
    8328 
    8329  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8330  {
    8331  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8332  while(lastOffset < size)
    8333  {
    8334  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8335  while(nextAlloc2ndIndex != SIZE_MAX &&
    8336  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8337  {
    8338  --nextAlloc2ndIndex;
    8339  }
    8340 
    8341  // Found non-null allocation.
    8342  if(nextAlloc2ndIndex != SIZE_MAX)
    8343  {
    8344  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8345 
    8346  // 1. Process free space before this allocation.
    8347  if(lastOffset < suballoc.offset)
    8348  {
    8349  // There is free space from lastOffset to suballoc.offset.
    8350  ++unusedRangeCount;
    8351  }
    8352 
    8353  // 2. Process this allocation.
    8354  // There is allocation with suballoc.offset, suballoc.size.
    8355  ++alloc2ndCount;
    8356  usedBytes += suballoc.size;
    8357 
    8358  // 3. Prepare for next iteration.
    8359  lastOffset = suballoc.offset + suballoc.size;
    8360  --nextAlloc2ndIndex;
    8361  }
    8362  // We are at the end.
    8363  else
    8364  {
    8365  if(lastOffset < size)
    8366  {
    8367  // There is free space from lastOffset to size.
    8368  ++unusedRangeCount;
    8369  }
    8370 
    8371  // End of loop.
    8372  lastOffset = size;
    8373  }
    8374  }
    8375  }
    8376 
    8377  const VkDeviceSize unusedBytes = size - usedBytes;
    8378  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
    8379 
    8380  // SECOND PASS
    8381  lastOffset = 0;
    8382 
    8383  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8384  {
    8385  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8386  size_t nextAlloc2ndIndex = 0;
    8387  while(lastOffset < freeSpace2ndTo1stEnd)
    8388  {
    8389  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8390  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8391  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8392  {
    8393  ++nextAlloc2ndIndex;
    8394  }
    8395 
    8396  // Found non-null allocation.
    8397  if(nextAlloc2ndIndex < suballoc2ndCount)
    8398  {
    8399  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8400 
    8401  // 1. Process free space before this allocation.
    8402  if(lastOffset < suballoc.offset)
    8403  {
    8404  // There is free space from lastOffset to suballoc.offset.
    8405  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8406  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8407  }
    8408 
    8409  // 2. Process this allocation.
    8410  // There is allocation with suballoc.offset, suballoc.size.
    8411  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8412 
    8413  // 3. Prepare for next iteration.
    8414  lastOffset = suballoc.offset + suballoc.size;
    8415  ++nextAlloc2ndIndex;
    8416  }
    8417  // We are at the end.
    8418  else
    8419  {
    8420  if(lastOffset < freeSpace2ndTo1stEnd)
    8421  {
    8422  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8423  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8424  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8425  }
    8426 
    8427  // End of loop.
    8428  lastOffset = freeSpace2ndTo1stEnd;
    8429  }
    8430  }
    8431  }
    8432 
    8433  nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8434  while(lastOffset < freeSpace1stTo2ndEnd)
    8435  {
    8436  // Find next non-null allocation or move nextAllocIndex to the end.
    8437  while(nextAlloc1stIndex < suballoc1stCount &&
    8438  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8439  {
    8440  ++nextAlloc1stIndex;
    8441  }
    8442 
    8443  // Found non-null allocation.
    8444  if(nextAlloc1stIndex < suballoc1stCount)
    8445  {
    8446  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8447 
    8448  // 1. Process free space before this allocation.
    8449  if(lastOffset < suballoc.offset)
    8450  {
    8451  // There is free space from lastOffset to suballoc.offset.
    8452  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8453  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8454  }
    8455 
    8456  // 2. Process this allocation.
    8457  // There is allocation with suballoc.offset, suballoc.size.
    8458  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8459 
    8460  // 3. Prepare for next iteration.
    8461  lastOffset = suballoc.offset + suballoc.size;
    8462  ++nextAlloc1stIndex;
    8463  }
    8464  // We are at the end.
    8465  else
    8466  {
    8467  if(lastOffset < freeSpace1stTo2ndEnd)
    8468  {
    8469  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8470  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8471  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8472  }
    8473 
    8474  // End of loop.
    8475  lastOffset = freeSpace1stTo2ndEnd;
    8476  }
    8477  }
    8478 
    8479  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8480  {
    8481  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8482  while(lastOffset < size)
    8483  {
    8484  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8485  while(nextAlloc2ndIndex != SIZE_MAX &&
    8486  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8487  {
    8488  --nextAlloc2ndIndex;
    8489  }
    8490 
    8491  // Found non-null allocation.
    8492  if(nextAlloc2ndIndex != SIZE_MAX)
    8493  {
    8494  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8495 
    8496  // 1. Process free space before this allocation.
    8497  if(lastOffset < suballoc.offset)
    8498  {
    8499  // There is free space from lastOffset to suballoc.offset.
    8500  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8501  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8502  }
    8503 
    8504  // 2. Process this allocation.
    8505  // There is allocation with suballoc.offset, suballoc.size.
    8506  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8507 
    8508  // 3. Prepare for next iteration.
    8509  lastOffset = suballoc.offset + suballoc.size;
    8510  --nextAlloc2ndIndex;
    8511  }
    8512  // We are at the end.
    8513  else
    8514  {
    8515  if(lastOffset < size)
    8516  {
    8517  // There is free space from lastOffset to size.
    8518  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8519  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8520  }
    8521 
    8522  // End of loop.
    8523  lastOffset = size;
    8524  }
    8525  }
    8526  }
    8527 
    8528  PrintDetailedMap_End(json);
    8529 }
    8530 #endif // #if VMA_STATS_STRING_ENABLED
    8531 
    8532 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
    8533  uint32_t currentFrameIndex,
    8534  uint32_t frameInUseCount,
    8535  VkDeviceSize bufferImageGranularity,
    8536  VkDeviceSize allocSize,
    8537  VkDeviceSize allocAlignment,
    8538  bool upperAddress,
    8539  VmaSuballocationType allocType,
    8540  bool canMakeOtherLost,
    8541  uint32_t strategy,
    8542  VmaAllocationRequest* pAllocationRequest)
    8543 {
    8544  VMA_ASSERT(allocSize > 0);
    8545  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    8546  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    8547  VMA_HEAVY_ASSERT(Validate());
    8548 
    8549  const VkDeviceSize size = GetSize();
    8550  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8551  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8552 
    8553  if(upperAddress)
    8554  {
    8555  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8556  {
    8557  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
    8558  return false;
    8559  }
    8560 
    8561  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
    8562  if(allocSize > size)
    8563  {
    8564  return false;
    8565  }
    8566  VkDeviceSize resultBaseOffset = size - allocSize;
    8567  if(!suballocations2nd.empty())
    8568  {
    8569  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
    8570  resultBaseOffset = lastSuballoc.offset - allocSize;
    8571  if(allocSize > lastSuballoc.offset)
    8572  {
    8573  return false;
    8574  }
    8575  }
    8576 
    8577  // Start from offset equal to end of free space.
    8578  VkDeviceSize resultOffset = resultBaseOffset;
    8579 
    8580  // Apply VMA_DEBUG_MARGIN at the end.
    8581  if(VMA_DEBUG_MARGIN > 0)
    8582  {
    8583  if(resultOffset < VMA_DEBUG_MARGIN)
    8584  {
    8585  return false;
    8586  }
    8587  resultOffset -= VMA_DEBUG_MARGIN;
    8588  }
    8589 
    8590  // Apply alignment.
    8591  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
    8592 
    8593  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
    8594  // Make bigger alignment if necessary.
    8595  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    8596  {
    8597  bool bufferImageGranularityConflict = false;
    8598  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
    8599  {
    8600  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
    8601  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    8602  {
    8603  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
    8604  {
    8605  bufferImageGranularityConflict = true;
    8606  break;
    8607  }
    8608  }
    8609  else
    8610  // Already on previous page.
    8611  break;
    8612  }
    8613  if(bufferImageGranularityConflict)
    8614  {
    8615  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
    8616  }
    8617  }
    8618 
    8619  // There is enough free space.
    8620  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
    8621  suballocations1st.back().offset + suballocations1st.back().size :
    8622  0;
    8623  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
    8624  {
    8625  // Check previous suballocations for BufferImageGranularity conflicts.
    8626  // If conflict exists, allocation cannot be made here.
    8627  if(bufferImageGranularity > 1)
    8628  {
    8629  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
    8630  {
    8631  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
    8632  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    8633  {
    8634  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
    8635  {
    8636  return false;
    8637  }
    8638  }
    8639  else
    8640  {
    8641  // Already on next page.
    8642  break;
    8643  }
    8644  }
    8645  }
    8646 
    8647  // All tests passed: Success.
    8648  pAllocationRequest->offset = resultOffset;
    8649  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
    8650  pAllocationRequest->sumItemSize = 0;
    8651  // pAllocationRequest->item unused.
    8652  pAllocationRequest->itemsToMakeLostCount = 0;
    8653  return true;
    8654  }
    8655  }
    8656  else // !upperAddress
    8657  {
    8658  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8659  {
    8660  // Try to allocate at the end of 1st vector.
    8661 
    8662  VkDeviceSize resultBaseOffset = 0;
    8663  if(!suballocations1st.empty())
    8664  {
    8665  const VmaSuballocation& lastSuballoc = suballocations1st.back();
    8666  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
    8667  }
    8668 
    8669  // Start from offset equal to beginning of free space.
    8670  VkDeviceSize resultOffset = resultBaseOffset;
    8671 
    8672  // Apply VMA_DEBUG_MARGIN at the beginning.
    8673  if(VMA_DEBUG_MARGIN > 0)
    8674  {
    8675  resultOffset += VMA_DEBUG_MARGIN;
    8676  }
    8677 
    8678  // Apply alignment.
    8679  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
    8680 
    8681  // Check previous suballocations for BufferImageGranularity conflicts.
    8682  // Make bigger alignment if necessary.
    8683  if(bufferImageGranularity > 1 && !suballocations1st.empty())
    8684  {
    8685  bool bufferImageGranularityConflict = false;
    8686  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
    8687  {
    8688  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
    8689  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    8690  {
    8691  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    8692  {
    8693  bufferImageGranularityConflict = true;
    8694  break;
    8695  }
    8696  }
    8697  else
    8698  // Already on previous page.
    8699  break;
    8700  }
    8701  if(bufferImageGranularityConflict)
    8702  {
    8703  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
    8704  }
    8705  }
    8706 
    8707  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
    8708  suballocations2nd.back().offset : size;
    8709 
    8710  // There is enough free space at the end after alignment.
    8711  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
    8712  {
    8713  // Check next suballocations for BufferImageGranularity conflicts.
    8714  // If conflict exists, allocation cannot be made here.
    8715  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8716  {
    8717  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
    8718  {
    8719  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
    8720  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    8721  {
    8722  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    8723  {
    8724  return false;
    8725  }
    8726  }
    8727  else
    8728  {
    8729  // Already on previous page.
    8730  break;
    8731  }
    8732  }
    8733  }
    8734 
    8735  // All tests passed: Success.
    8736  pAllocationRequest->offset = resultOffset;
    8737  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
    8738  pAllocationRequest->sumItemSize = 0;
    8739  // pAllocationRequest->item unused.
    8740  pAllocationRequest->itemsToMakeLostCount = 0;
    8741  return true;
    8742  }
    8743  }
    8744 
    8745  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
    8746  // beginning of 1st vector as the end of free space.
    8747  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8748  {
    8749  VMA_ASSERT(!suballocations1st.empty());
    8750 
    8751  VkDeviceSize resultBaseOffset = 0;
    8752  if(!suballocations2nd.empty())
    8753  {
    8754  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
    8755  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
    8756  }
    8757 
    8758  // Start from offset equal to beginning of free space.
    8759  VkDeviceSize resultOffset = resultBaseOffset;
    8760 
    8761  // Apply VMA_DEBUG_MARGIN at the beginning.
    8762  if(VMA_DEBUG_MARGIN > 0)
    8763  {
    8764  resultOffset += VMA_DEBUG_MARGIN;
    8765  }
    8766 
    8767  // Apply alignment.
    8768  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
    8769 
    8770  // Check previous suballocations for BufferImageGranularity conflicts.
    8771  // Make bigger alignment if necessary.
    8772  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    8773  {
    8774  bool bufferImageGranularityConflict = false;
    8775  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
    8776  {
    8777  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
    8778  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    8779  {
    8780  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    8781  {
    8782  bufferImageGranularityConflict = true;
    8783  break;
    8784  }
    8785  }
    8786  else
    8787  // Already on previous page.
    8788  break;
    8789  }
    8790  if(bufferImageGranularityConflict)
    8791  {
    8792  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
    8793  }
    8794  }
    8795 
    8796  pAllocationRequest->itemsToMakeLostCount = 0;
    8797  pAllocationRequest->sumItemSize = 0;
    8798  size_t index1st = m_1stNullItemsBeginCount;
    8799 
    8800  if(canMakeOtherLost)
    8801  {
    8802  while(index1st < suballocations1st.size() &&
    8803  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
    8804  {
    8805  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
    8806  const VmaSuballocation& suballoc = suballocations1st[index1st];
    8807  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
    8808  {
    8809  // No problem.
    8810  }
    8811  else
    8812  {
    8813  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
    8814  if(suballoc.hAllocation->CanBecomeLost() &&
    8815  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    8816  {
    8817  ++pAllocationRequest->itemsToMakeLostCount;
    8818  pAllocationRequest->sumItemSize += suballoc.size;
    8819  }
    8820  else
    8821  {
    8822  return false;
    8823  }
    8824  }
    8825  ++index1st;
    8826  }
    8827 
    8828  // Check next suballocations for BufferImageGranularity conflicts.
    8829  // If conflict exists, we must mark more allocations lost or fail.
    8830  if(bufferImageGranularity > 1)
    8831  {
    8832  while(index1st < suballocations1st.size())
    8833  {
    8834  const VmaSuballocation& suballoc = suballocations1st[index1st];
    8835  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
    8836  {
    8837  if(suballoc.hAllocation != VK_NULL_HANDLE)
    8838  {
    8839  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
    8840  if(suballoc.hAllocation->CanBecomeLost() &&
    8841  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    8842  {
    8843  ++pAllocationRequest->itemsToMakeLostCount;
    8844  pAllocationRequest->sumItemSize += suballoc.size;
    8845  }
    8846  else
    8847  {
    8848  return false;
    8849  }
    8850  }
    8851  }
    8852  else
    8853  {
    8854  // Already on next page.
    8855  break;
    8856  }
    8857  ++index1st;
    8858  }
    8859  }
    8860  }
    8861 
    8862  // There is enough free space at the end after alignment.
    8863  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
    8864  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
    8865  {
    8866  // Check next suballocations for BufferImageGranularity conflicts.
    8867  // If conflict exists, allocation cannot be made here.
    8868  if(bufferImageGranularity > 1)
    8869  {
    8870  for(size_t nextSuballocIndex = index1st;
    8871  nextSuballocIndex < suballocations1st.size();
    8872  nextSuballocIndex++)
    8873  {
    8874  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
    8875  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    8876  {
    8877  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    8878  {
    8879  return false;
    8880  }
    8881  }
    8882  else
    8883  {
    8884  // Already on next page.
    8885  break;
    8886  }
    8887  }
    8888  }
    8889 
    8890  // All tests passed: Success.
    8891  pAllocationRequest->offset = resultOffset;
    8892  pAllocationRequest->sumFreeSize =
    8893  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
    8894  - resultBaseOffset
    8895  - pAllocationRequest->sumItemSize;
    8896  // pAllocationRequest->item unused.
    8897  return true;
    8898  }
    8899  }
    8900  }
    8901 
    8902  return false;
    8903 }
    8904 
    8905 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    8906  uint32_t currentFrameIndex,
    8907  uint32_t frameInUseCount,
    8908  VmaAllocationRequest* pAllocationRequest)
    8909 {
    8910  if(pAllocationRequest->itemsToMakeLostCount == 0)
    8911  {
    8912  return true;
    8913  }
    8914 
    8915  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
    8916 
    8917  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8918  size_t index1st = m_1stNullItemsBeginCount;
    8919  size_t madeLostCount = 0;
    8920  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    8921  {
    8922  VMA_ASSERT(index1st < suballocations1st.size());
    8923  VmaSuballocation& suballoc = suballocations1st[index1st];
    8924  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    8925  {
    8926  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
    8927  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
    8928  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8929  {
    8930  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8931  suballoc.hAllocation = VK_NULL_HANDLE;
    8932  m_SumFreeSize += suballoc.size;
    8933  ++m_1stNullItemsMiddleCount;
    8934  ++madeLostCount;
    8935  }
    8936  else
    8937  {
    8938  return false;
    8939  }
    8940  }
    8941  ++index1st;
    8942  }
    8943 
    8944  CleanupAfterFree();
    8945  //VMA_HEAVY_ASSERT(Validate()); // Already called by ClanupAfterFree().
    8946 
    8947  return true;
    8948 }
    8949 
    8950 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    8951 {
    8952  uint32_t lostAllocationCount = 0;
    8953 
    8954  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8955  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    8956  {
    8957  VmaSuballocation& suballoc = suballocations1st[i];
    8958  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    8959  suballoc.hAllocation->CanBecomeLost() &&
    8960  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8961  {
    8962  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8963  suballoc.hAllocation = VK_NULL_HANDLE;
    8964  ++m_1stNullItemsMiddleCount;
    8965  m_SumFreeSize += suballoc.size;
    8966  ++lostAllocationCount;
    8967  }
    8968  }
    8969 
    8970  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8971  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    8972  {
    8973  VmaSuballocation& suballoc = suballocations2nd[i];
    8974  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    8975  suballoc.hAllocation->CanBecomeLost() &&
    8976  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8977  {
    8978  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8979  suballoc.hAllocation = VK_NULL_HANDLE;
    8980  ++m_2ndNullItemsCount;
    8981  ++lostAllocationCount;
    8982  }
    8983  }
    8984 
    8985  if(lostAllocationCount)
    8986  {
    8987  CleanupAfterFree();
    8988  }
    8989 
    8990  return lostAllocationCount;
    8991 }
    8992 
    8993 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
    8994 {
    8995  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8996  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    8997  {
    8998  const VmaSuballocation& suballoc = suballocations1st[i];
    8999  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9000  {
    9001  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    9002  {
    9003  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    9004  return VK_ERROR_VALIDATION_FAILED_EXT;
    9005  }
    9006  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    9007  {
    9008  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    9009  return VK_ERROR_VALIDATION_FAILED_EXT;
    9010  }
    9011  }
    9012  }
    9013 
    9014  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9015  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    9016  {
    9017  const VmaSuballocation& suballoc = suballocations2nd[i];
    9018  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9019  {
    9020  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    9021  {
    9022  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    9023  return VK_ERROR_VALIDATION_FAILED_EXT;
    9024  }
    9025  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    9026  {
    9027  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    9028  return VK_ERROR_VALIDATION_FAILED_EXT;
    9029  }
    9030  }
    9031  }
    9032 
    9033  return VK_SUCCESS;
    9034 }
    9035 
// Commits a previously created allocation request: appends the new
// suballocation to the appropriate vector (2nd for upper-address/double-stack,
// 1st or 2nd for lower-address depending on where the offset landed) and
// updates m_SumFreeSize. The request must come from CreateAllocationRequest.
void VmaBlockMetadata_Linear::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };

    if(upperAddress)
    {
        // Upper-address allocations always go to the 2nd vector, which then
        // acts as the upper part of a double stack.
        VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
            "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
        SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
        suballocations2nd.push_back(newSuballoc);
        m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    }
    else
    {
        SuballocationVectorType& suballocations1st = AccessSuballocations1st();

        // First allocation.
        if(suballocations1st.empty())
        {
            suballocations1st.push_back(newSuballoc);
        }
        else
        {
            // New allocation at the end of 1st vector.
            if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
            {
                // Check if it fits before the end of the block.
                VMA_ASSERT(request.offset + allocSize <= GetSize());
                suballocations1st.push_back(newSuballoc);
            }
            // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
            else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
            {
                SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

                // The 2nd vector may need to switch into ring-buffer mode.
                switch(m_2ndVectorMode)
                {
                case SECOND_VECTOR_EMPTY:
                    // First allocation from second part ring buffer.
                    VMA_ASSERT(suballocations2nd.empty());
                    m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
                    break;
                case SECOND_VECTOR_RING_BUFFER:
                    // 2-part ring buffer is already started.
                    VMA_ASSERT(!suballocations2nd.empty());
                    break;
                case SECOND_VECTOR_DOUBLE_STACK:
                    // Ring-buffer and double-stack usage are mutually exclusive.
                    VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
                    break;
                default:
                    VMA_ASSERT(0);
                }

                suballocations2nd.push_back(newSuballoc);
            }
            else
            {
                // The request's offset matches neither valid placement - it was
                // not produced by CreateAllocationRequest on this state.
                VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
            }
        }
    }

    // The new suballocation consumes part of the block's free space.
    m_SumFreeSize -= newSuballoc.size;
}
    9105 
    9106 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
    9107 {
    9108  FreeAtOffset(allocation->GetOffset());
    9109 }
    9110 
// Releases the suballocation that starts at the given offset. Fast paths
// (first item of 1st vector, last item of 2nd or 1st vector) are tried first;
// otherwise a binary search locates the item in the middle of either vector.
// Every successful path updates m_SumFreeSize and calls CleanupAfterFree().
void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(!suballocations1st.empty())
    {
        // First allocation: Mark it as next empty at the beginning.
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if(firstSuballoc.offset == offset)
        {
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.hAllocation = VK_NULL_HANDLE;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }

    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if(lastSuballoc.offset == offset)
        {
            // The last item can be popped directly instead of being nulled.
            m_SumFreeSize += lastSuballoc.size;
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }

    // Item from the middle of 1st vector.
    {
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // 1st vector is searched with the ascending-offset comparator.
        SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc);
        if(it != suballocations1st.end())
        {
            // Middle items are only marked free, not erased; CleanupAfterFree()
            // compacts the vector once enough null items accumulate.
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // Ring-buffer mode keeps the 2nd vector sorted by ascending offset,
        // double-stack mode by descending offset - hence the two comparators.
        SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
            VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
        if(it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}
    9199 
    9200 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
    9201 {
    9202  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    9203  const size_t suballocCount = AccessSuballocations1st().size();
    9204  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
    9205 }
    9206 
// Restores class invariants after one or more suballocations were freed:
// trims null items from the edges of both vectors, optionally compacts the
// 1st vector, resets the 2nd-vector mode when it drains, and swaps the roles
// of the two vectors when the 1st one becomes empty in ring-buffer mode.
void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(IsEmpty())
    {
        // No allocations remain at all - reset everything to the pristine state.
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        // (Middle nulls adjacent to the front become "begin" nulls.)
        while(m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
        {
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector.
        while(m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        if(ShouldCompact1st())
        {
            // Compact 1st vector: one left-to-right pass moving each live item
            // to the lowest free slot, then shrink to the live count.
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
                {
                    ++srcIndex;
                }
                if(dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if(suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }

        // 1st vector became empty.
        if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                // The 2nd vector's null counters carry over as middle nulls of
                // the new 1st vector; leading nulls are reclassified below.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                // Flip which underlying vector AccessSuballocations1st() returns.
                m_1stVectorIndex ^= 1;
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}
    9303 
    9304 
////////////////////////////////////////////////////////////////////////////////
// class VmaBlockMetadata_Buddy
    9307 
// Constructs empty buddy-allocator metadata. m_FreeCount starts at 1 because
// after Init() the whole usable block is a single free root node.
VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_Root(VMA_NULL),
    m_AllocationCount(0),
    m_FreeCount(1),
    m_SumFreeSize(0)
{
    // Zero all free-list heads/tails; Init() links the root node in later.
    memset(m_FreeList, 0, sizeof(m_FreeList));
}
    9317 
// Releases the whole buddy tree. m_Root is still VMA_NULL if Init() was never
// called - presumably DeleteNode tolerates a null node; TODO confirm.
VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
{
    DeleteNode(m_Root);
}
    9322 
    9323 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
    9324 {
    9325  VmaBlockMetadata::Init(size);
    9326 
    9327  m_UsableSize = VmaPrevPow2(size);
    9328  m_SumFreeSize = m_UsableSize;
    9329 
    9330  // Calculate m_LevelCount.
    9331  m_LevelCount = 1;
    9332  while(m_LevelCount < MAX_LEVELS &&
    9333  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
    9334  {
    9335  ++m_LevelCount;
    9336  }
    9337 
    9338  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
    9339  rootNode->offset = 0;
    9340  rootNode->type = Node::TYPE_FREE;
    9341  rootNode->parent = VMA_NULL;
    9342  rootNode->buddy = VMA_NULL;
    9343 
    9344  m_Root = rootNode;
    9345  AddToFreeListFront(0, rootNode);
    9346 }
    9347 
// Checks internal consistency of the buddy tree and its per-level free lists.
// Returns true on success; each VMA_VALIDATE reports and fails on violation.
bool VmaBlockMetadata_Buddy::Validate() const
{
    // Validate tree.
    ValidationContext ctx;
    if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    {
        VMA_VALIDATE(false && "ValidateNode failed.");
    }
    // Totals accumulated during the tree walk must match the cached members.
    VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);

    // Validate free node lists.
    for(uint32_t level = 0; level < m_LevelCount; ++level)
    {
        // A non-empty list's head must have no predecessor.
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
            m_FreeList[level].front->free.prev == VMA_NULL);

        for(Node* node = m_FreeList[level].front;
            node != VMA_NULL;
            node = node->free.next)
        {
            VMA_VALIDATE(node->type == Node::TYPE_FREE);

            // Doubly-linked list integrity: tail pointer and back-links.
            if(node->free.next == VMA_NULL)
            {
                VMA_VALIDATE(m_FreeList[level].back == node);
            }
            else
            {
                VMA_VALIDATE(node->free.next->free.prev == node);
            }
        }
    }

    // Validate that free lists at levels beyond m_LevelCount are empty.
    for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    }

    return true;
}
    9390 
    9391 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
    9392 {
    9393  for(uint32_t level = 0; level < m_LevelCount; ++level)
    9394  {
    9395  if(m_FreeList[level].front != VMA_NULL)
    9396  {
    9397  return LevelToNodeSize(level);
    9398  }
    9399  }
    9400  return 0;
    9401 }
    9402 
    9403 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    9404 {
    9405  const VkDeviceSize unusableSize = GetUnusableSize();
    9406 
    9407  outInfo.blockCount = 1;
    9408 
    9409  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
    9410  outInfo.usedBytes = outInfo.unusedBytes = 0;
    9411 
    9412  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
    9413  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
    9414  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
    9415 
    9416  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
    9417 
    9418  if(unusableSize > 0)
    9419  {
    9420  ++outInfo.unusedRangeCount;
    9421  outInfo.unusedBytes += unusableSize;
    9422  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
    9423  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
    9424  }
    9425 }
    9426 
    9427 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
    9428 {
    9429  const VkDeviceSize unusableSize = GetUnusableSize();
    9430 
    9431  inoutStats.size += GetSize();
    9432  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
    9433  inoutStats.allocationCount += m_AllocationCount;
    9434  inoutStats.unusedRangeCount += m_FreeCount;
    9435  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    9436 
    9437  if(unusableSize > 0)
    9438  {
    9439  ++inoutStats.unusedRangeCount;
    9440  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
    9441  }
    9442 }
    9443 
    9444 #if VMA_STATS_STRING_ENABLED
    9445 
    9446 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
    9447 {
    9448  // TODO optimize
    9449  VmaStatInfo stat;
    9450  CalcAllocationStatInfo(stat);
    9451 
    9452  PrintDetailedMap_Begin(
    9453  json,
    9454  stat.unusedBytes,
    9455  stat.allocationCount,
    9456  stat.unusedRangeCount);
    9457 
    9458  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
    9459 
    9460  const VkDeviceSize unusableSize = GetUnusableSize();
    9461  if(unusableSize > 0)
    9462  {
    9463  PrintDetailedMap_UnusedRange(json,
    9464  m_UsableSize, // offset
    9465  unusableSize); // size
    9466  }
    9467 
    9468  PrintDetailedMap_End(json);
    9469 }
    9470 
    9471 #endif // #if VMA_STATS_STRING_ENABLED
    9472 
    9473 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
    9474  uint32_t currentFrameIndex,
    9475  uint32_t frameInUseCount,
    9476  VkDeviceSize bufferImageGranularity,
    9477  VkDeviceSize allocSize,
    9478  VkDeviceSize allocAlignment,
    9479  bool upperAddress,
    9480  VmaSuballocationType allocType,
    9481  bool canMakeOtherLost,
    9482  uint32_t strategy,
    9483  VmaAllocationRequest* pAllocationRequest)
    9484 {
    9485  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
    9486 
    9487  // Simple way to respect bufferImageGranularity. May be optimized some day.
    9488  // Whenever it might be an OPTIMAL image...
    9489  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
    9490  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    9491  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
    9492  {
    9493  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
    9494  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
    9495  }
    9496 
    9497  if(allocSize > m_UsableSize)
    9498  {
    9499  return false;
    9500  }
    9501 
    9502  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    9503  for(uint32_t level = targetLevel + 1; level--; )
    9504  {
    9505  for(Node* freeNode = m_FreeList[level].front;
    9506  freeNode != VMA_NULL;
    9507  freeNode = freeNode->free.next)
    9508  {
    9509  if(freeNode->offset % allocAlignment == 0)
    9510  {
    9511  pAllocationRequest->offset = freeNode->offset;
    9512  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
    9513  pAllocationRequest->sumItemSize = 0;
    9514  pAllocationRequest->itemsToMakeLostCount = 0;
    9515  pAllocationRequest->customData = (void*)(uintptr_t)level;
    9516  return true;
    9517  }
    9518  }
    9519  }
    9520 
    9521  return false;
    9522 }
    9523 
    9524 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
    9525  uint32_t currentFrameIndex,
    9526  uint32_t frameInUseCount,
    9527  VmaAllocationRequest* pAllocationRequest)
    9528 {
    9529  /*
    9530  Lost allocations are not supported in buddy allocator at the moment.
    9531  Support might be added in the future.
    9532  */
    9533  return pAllocationRequest->itemsToMakeLostCount == 0;
    9534 }
    9535 
// Makes outdated allocations lost. The buddy metadata never supports lost
// allocations, so no allocation is ever made lost and the count is always 0.
// Parameters are accepted only to satisfy the VmaBlockMetadata interface.
uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    /*
    Lost allocations are not supported in buddy allocator at the moment.
    Support might be added in the future.
    */
    return 0;
}
    9544 
// Commits an allocation request previously produced by CreateAllocationRequest.
// Splits free nodes downward until a free node of exactly the target level
// exists at request.offset, then converts that node into an allocation node.
void VmaBlockMetadata_Buddy::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    // Level of the chosen free node, smuggled through customData by CreateAllocationRequest.
    uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;

    // Locate the free node at currLevel whose offset matches the request.
    Node* currNode = m_FreeList[currLevel].front;
    VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    while(currNode->offset != request.offset)
    {
        currNode = currNode->free.next;
        VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    }

    // Go down, splitting free nodes.
    while(currLevel < targetLevel)
    {
        // currNode is already first free node at currLevel.
        // Remove it from list of free nodes at this currLevel.
        RemoveFromFreeList(currLevel, currNode);

        const uint32_t childrenLevel = currLevel + 1;

        // Create two free sub-nodes.
        Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
        Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();

        leftChild->offset = currNode->offset;
        leftChild->type = Node::TYPE_FREE;
        leftChild->parent = currNode;
        leftChild->buddy = rightChild;

        // Right child starts halfway through the parent node.
        rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
        rightChild->type = Node::TYPE_FREE;
        rightChild->parent = currNode;
        rightChild->buddy = leftChild;

        // Convert current currNode to split type.
        currNode->type = Node::TYPE_SPLIT;
        currNode->split.leftChild = leftChild;

        // Add child nodes to free list. Order is important!
        // leftChild is pushed last so it ends up at the list front (picked up below).
        AddToFreeListFront(childrenLevel, rightChild);
        AddToFreeListFront(childrenLevel, leftChild);

        // Splitting one free node into two adds one to the free-node count.
        ++m_FreeCount;
        //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
        ++currLevel;
        currNode = m_FreeList[currLevel].front;

        /*
        We can be sure that currNode, as left child of node previously split,
        also fullfills the alignment requirement.
        */
    }

    // Remove from free list.
    VMA_ASSERT(currLevel == targetLevel &&
        currNode != VMA_NULL &&
        currNode->type == Node::TYPE_FREE);
    RemoveFromFreeList(currLevel, currNode);

    // Convert to allocation node.
    currNode->type = Node::TYPE_ALLOCATION;
    currNode->allocation.alloc = hAllocation;

    ++m_AllocationCount;
    --m_FreeCount;
    // Note: decremented by the requested size, not the node size.
    m_SumFreeSize -= allocSize;
}
    9619 
    9620 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
    9621 {
    9622  if(node->type == Node::TYPE_SPLIT)
    9623  {
    9624  DeleteNode(node->split.leftChild->buddy);
    9625  DeleteNode(node->split.leftChild);
    9626  }
    9627 
    9628  vma_delete(GetAllocationCallbacks(), node);
    9629 }
    9630 
    9631 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
    9632 {
    9633  VMA_VALIDATE(level < m_LevelCount);
    9634  VMA_VALIDATE(curr->parent == parent);
    9635  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
    9636  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
    9637  switch(curr->type)
    9638  {
    9639  case Node::TYPE_FREE:
    9640  // curr->free.prev, next are validated separately.
    9641  ctx.calculatedSumFreeSize += levelNodeSize;
    9642  ++ctx.calculatedFreeCount;
    9643  break;
    9644  case Node::TYPE_ALLOCATION:
    9645  ++ctx.calculatedAllocationCount;
    9646  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
    9647  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
    9648  break;
    9649  case Node::TYPE_SPLIT:
    9650  {
    9651  const uint32_t childrenLevel = level + 1;
    9652  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
    9653  const Node* const leftChild = curr->split.leftChild;
    9654  VMA_VALIDATE(leftChild != VMA_NULL);
    9655  VMA_VALIDATE(leftChild->offset == curr->offset);
    9656  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
    9657  {
    9658  VMA_VALIDATE(false && "ValidateNode for left child failed.");
    9659  }
    9660  const Node* const rightChild = leftChild->buddy;
    9661  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
    9662  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
    9663  {
    9664  VMA_VALIDATE(false && "ValidateNode for right child failed.");
    9665  }
    9666  }
    9667  break;
    9668  default:
    9669  return false;
    9670  }
    9671 
    9672  return true;
    9673 }
    9674 
    9675 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
    9676 {
    9677  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
    9678  uint32_t level = 0;
    9679  VkDeviceSize currLevelNodeSize = m_UsableSize;
    9680  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
    9681  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
    9682  {
    9683  ++level;
    9684  currLevelNodeSize = nextLevelNodeSize;
    9685  nextLevelNodeSize = currLevelNodeSize >> 1;
    9686  }
    9687  return level;
    9688 }
    9689 
// Frees the allocation occupying the node at the given offset: walks the tree
// from the root following the offset, converts the node back to FREE, then
// merges buddy pairs upward while both halves of a parent are free.
void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
{
    // Find node and level.
    Node* node = m_Root;
    VkDeviceSize nodeOffset = 0;
    uint32_t level = 0;
    VkDeviceSize levelNodeSize = LevelToNodeSize(0);
    while(node->type == Node::TYPE_SPLIT)
    {
        const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
        if(offset < nodeOffset + nextLevelSize)
        {
            // Target lies in the left half.
            node = node->split.leftChild;
        }
        else
        {
            // Target lies in the right half (the left child's buddy).
            node = node->split.leftChild->buddy;
            nodeOffset += nextLevelSize;
        }
        ++level;
        levelNodeSize = nextLevelSize;
    }

    VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
    VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);

    ++m_FreeCount;
    --m_AllocationCount;
    // NOTE(review): the assert above permits alloc == VK_NULL_HANDLE, yet alloc
    // is dereferenced here - confirm callers never actually pass a null handle.
    m_SumFreeSize += alloc->GetSize();

    node->type = Node::TYPE_FREE;

    // Join free nodes if possible.
    while(level > 0 && node->buddy->type == Node::TYPE_FREE)
    {
        RemoveFromFreeList(level, node->buddy);
        Node* const parent = node->parent;

        // Both children are gone; the parent becomes the free node one level up.
        vma_delete(GetAllocationCallbacks(), node->buddy);
        vma_delete(GetAllocationCallbacks(), node);
        parent->type = Node::TYPE_FREE;

        node = parent;
        --level;
        //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
        // Merging two free nodes into one parent reduces the free-node count by one.
        --m_FreeCount;
    }

    AddToFreeListFront(level, node);
}
    9740 
    9741 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
    9742 {
    9743  switch(node->type)
    9744  {
    9745  case Node::TYPE_FREE:
    9746  ++outInfo.unusedRangeCount;
    9747  outInfo.unusedBytes += levelNodeSize;
    9748  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
    9749  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, levelNodeSize);
    9750  break;
    9751  case Node::TYPE_ALLOCATION:
    9752  {
    9753  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    9754  ++outInfo.allocationCount;
    9755  outInfo.usedBytes += allocSize;
    9756  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
    9757  outInfo.allocationSizeMin = VMA_MAX(outInfo.allocationSizeMin, allocSize);
    9758 
    9759  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
    9760  if(unusedRangeSize > 0)
    9761  {
    9762  ++outInfo.unusedRangeCount;
    9763  outInfo.unusedBytes += unusedRangeSize;
    9764  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9765  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9766  }
    9767  }
    9768  break;
    9769  case Node::TYPE_SPLIT:
    9770  {
    9771  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    9772  const Node* const leftChild = node->split.leftChild;
    9773  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
    9774  const Node* const rightChild = leftChild->buddy;
    9775  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
    9776  }
    9777  break;
    9778  default:
    9779  VMA_ASSERT(0);
    9780  }
    9781 }
    9782 
    9783 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
    9784 {
    9785  VMA_ASSERT(node->type == Node::TYPE_FREE);
    9786 
    9787  // List is empty.
    9788  Node* const frontNode = m_FreeList[level].front;
    9789  if(frontNode == VMA_NULL)
    9790  {
    9791  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
    9792  node->free.prev = node->free.next = VMA_NULL;
    9793  m_FreeList[level].front = m_FreeList[level].back = node;
    9794  }
    9795  else
    9796  {
    9797  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
    9798  node->free.prev = VMA_NULL;
    9799  node->free.next = frontNode;
    9800  frontNode->free.prev = node;
    9801  m_FreeList[level].front = node;
    9802  }
    9803 }
    9804 
    9805 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
    9806 {
    9807  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
    9808 
    9809  // It is at the front.
    9810  if(node->free.prev == VMA_NULL)
    9811  {
    9812  VMA_ASSERT(m_FreeList[level].front == node);
    9813  m_FreeList[level].front = node->free.next;
    9814  }
    9815  else
    9816  {
    9817  Node* const prevFreeNode = node->free.prev;
    9818  VMA_ASSERT(prevFreeNode->free.next == node);
    9819  prevFreeNode->free.next = node->free.next;
    9820  }
    9821 
    9822  // It is at the back.
    9823  if(node->free.next == VMA_NULL)
    9824  {
    9825  VMA_ASSERT(m_FreeList[level].back == node);
    9826  m_FreeList[level].back = node->free.prev;
    9827  }
    9828  else
    9829  {
    9830  Node* const nextFreeNode = node->free.next;
    9831  VMA_ASSERT(nextFreeNode->free.prev == node);
    9832  nextFreeNode->free.prev = node->free.prev;
    9833  }
    9834 }
    9835 
    9836 #if VMA_STATS_STRING_ENABLED
    9837 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
    9838 {
    9839  switch(node->type)
    9840  {
    9841  case Node::TYPE_FREE:
    9842  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
    9843  break;
    9844  case Node::TYPE_ALLOCATION:
    9845  {
    9846  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
    9847  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    9848  if(allocSize < levelNodeSize)
    9849  {
    9850  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
    9851  }
    9852  }
    9853  break;
    9854  case Node::TYPE_SPLIT:
    9855  {
    9856  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    9857  const Node* const leftChild = node->split.leftChild;
    9858  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
    9859  const Node* const rightChild = leftChild->buddy;
    9860  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
    9861  }
    9862  break;
    9863  default:
    9864  VMA_ASSERT(0);
    9865  }
    9866 }
    9867 #endif // #if VMA_STATS_STRING_ENABLED
    9868 
    9869 
    9871 // class VmaDeviceMemoryBlock
    9872 
// Constructs an uninitialized block. Real Vulkan memory and metadata are
// attached later in Init(); until then all members hold null/sentinel values.
// The hAllocator parameter is unused here - presumably kept for interface
// symmetry with Init()/Destroy(); confirm against other constructors.
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX),
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0),
    m_pMappedData(VMA_NULL)
{
}
    9882 
    9883 void VmaDeviceMemoryBlock::Init(
    9884  VmaAllocator hAllocator,
    9885  uint32_t newMemoryTypeIndex,
    9886  VkDeviceMemory newMemory,
    9887  VkDeviceSize newSize,
    9888  uint32_t id,
    9889  uint32_t algorithm)
    9890 {
    9891  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    9892 
    9893  m_MemoryTypeIndex = newMemoryTypeIndex;
    9894  m_Id = id;
    9895  m_hMemory = newMemory;
    9896 
    9897  switch(algorithm)
    9898  {
    9900  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
    9901  break;
    9903  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
    9904  break;
    9905  default:
    9906  VMA_ASSERT(0);
    9907  // Fall-through.
    9908  case 0:
    9909  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
    9910  }
    9911  m_pMetadata->Init(newSize);
    9912 }
    9913 
    9914 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
    9915 {
    9916  // This is the most important assert in the entire library.
    9917  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    9918  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
    9919 
    9920  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    9921  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    9922  m_hMemory = VK_NULL_HANDLE;
    9923 
    9924  vma_delete(allocator, m_pMetadata);
    9925  m_pMetadata = VMA_NULL;
    9926 }
    9927 
    9928 bool VmaDeviceMemoryBlock::Validate() const
    9929 {
    9930  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
    9931  (m_pMetadata->GetSize() != 0));
    9932 
    9933  return m_pMetadata->Validate();
    9934 }
    9935 
    9936 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
    9937 {
    9938  void* pData = nullptr;
    9939  VkResult res = Map(hAllocator, 1, &pData);
    9940  if(res != VK_SUCCESS)
    9941  {
    9942  return res;
    9943  }
    9944 
    9945  res = m_pMetadata->CheckCorruption(pData);
    9946 
    9947  Unmap(hAllocator, 1);
    9948 
    9949  return res;
    9950 }
    9951 
    9952 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
    9953 {
    9954  if(count == 0)
    9955  {
    9956  return VK_SUCCESS;
    9957  }
    9958 
    9959  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    9960  if(m_MapCount != 0)
    9961  {
    9962  m_MapCount += count;
    9963  VMA_ASSERT(m_pMappedData != VMA_NULL);
    9964  if(ppData != VMA_NULL)
    9965  {
    9966  *ppData = m_pMappedData;
    9967  }
    9968  return VK_SUCCESS;
    9969  }
    9970  else
    9971  {
    9972  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    9973  hAllocator->m_hDevice,
    9974  m_hMemory,
    9975  0, // offset
    9976  VK_WHOLE_SIZE,
    9977  0, // flags
    9978  &m_pMappedData);
    9979  if(result == VK_SUCCESS)
    9980  {
    9981  if(ppData != VMA_NULL)
    9982  {
    9983  *ppData = m_pMappedData;
    9984  }
    9985  m_MapCount = count;
    9986  }
    9987  return result;
    9988  }
    9989 }
    9990 
    9991 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
    9992 {
    9993  if(count == 0)
    9994  {
    9995  return;
    9996  }
    9997 
    9998  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    9999  if(m_MapCount >= count)
    10000  {
    10001  m_MapCount -= count;
    10002  if(m_MapCount == 0)
    10003  {
    10004  m_pMappedData = VMA_NULL;
    10005  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
    10006  }
    10007  }
    10008  else
    10009  {
    10010  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    10011  }
    10012 }
    10013 
    10014 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    10015 {
    10016  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    10017  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    10018 
    10019  void* pData;
    10020  VkResult res = Map(hAllocator, 1, &pData);
    10021  if(res != VK_SUCCESS)
    10022  {
    10023  return res;
    10024  }
    10025 
    10026  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
    10027  VmaWriteMagicValue(pData, allocOffset + allocSize);
    10028 
    10029  Unmap(hAllocator, 1);
    10030 
    10031  return VK_SUCCESS;
    10032 }
    10033 
    10034 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    10035 {
    10036  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    10037  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    10038 
    10039  void* pData;
    10040  VkResult res = Map(hAllocator, 1, &pData);
    10041  if(res != VK_SUCCESS)
    10042  {
    10043  return res;
    10044  }
    10045 
    10046  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
    10047  {
    10048  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
    10049  }
    10050  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
    10051  {
    10052  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
    10053  }
    10054 
    10055  Unmap(hAllocator, 1);
    10056 
    10057  return VK_SUCCESS;
    10058 }
    10059 
    10060 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    10061  const VmaAllocator hAllocator,
    10062  const VmaAllocation hAllocation,
    10063  VkBuffer hBuffer)
    10064 {
    10065  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    10066  hAllocation->GetBlock() == this);
    10067  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    10068  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10069  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
    10070  hAllocator->m_hDevice,
    10071  hBuffer,
    10072  m_hMemory,
    10073  hAllocation->GetOffset());
    10074 }
    10075 
    10076 VkResult VmaDeviceMemoryBlock::BindImageMemory(
    10077  const VmaAllocator hAllocator,
    10078  const VmaAllocation hAllocation,
    10079  VkImage hImage)
    10080 {
    10081  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    10082  hAllocation->GetBlock() == this);
    10083  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    10084  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10085  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
    10086  hAllocator->m_hDevice,
    10087  hImage,
    10088  m_hMemory,
    10089  hAllocation->GetOffset());
    10090 }
    10091 
    10092 static void InitStatInfo(VmaStatInfo& outInfo)
    10093 {
    10094  memset(&outInfo, 0, sizeof(outInfo));
    10095  outInfo.allocationSizeMin = UINT64_MAX;
    10096  outInfo.unusedRangeSizeMin = UINT64_MAX;
    10097 }
    10098 
    10099 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
    10100 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
    10101 {
    10102  inoutInfo.blockCount += srcInfo.blockCount;
    10103  inoutInfo.allocationCount += srcInfo.allocationCount;
    10104  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    10105  inoutInfo.usedBytes += srcInfo.usedBytes;
    10106  inoutInfo.unusedBytes += srcInfo.unusedBytes;
    10107  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    10108  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    10109  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    10110  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
    10111 }
    10112 
    10113 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    10114 {
    10115  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    10116  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    10117  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    10118  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    10119 }
    10120 
// Constructs a custom pool: configures its single block vector from createInfo.
// createInfo.blockSize == 0 means "use the allocator's preferred heuristic
// size" (preferredBlockSize) and marks the block size as non-explicit.
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo,
    VkDeviceSize preferredBlockSize) :
    m_BlockVector(
        hAllocator,
        createInfo.memoryTypeIndex,
        createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        true, // isCustomPool
        createInfo.blockSize != 0, // explicitBlockSize
        createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    m_Id(0)
{
}
    10139 
// Intentionally empty: m_BlockVector's own destructor releases the blocks
// (see VmaBlockVector::~VmaBlockVector below).
VmaPool_T::~VmaPool_T()
{
}
    10143 
    10144 #if VMA_STATS_STRING_ENABLED
    10145 
    10146 #endif // #if VMA_STATS_STRING_ENABLED
    10147 
// Constructs a block vector - a collection of VkDeviceMemory blocks of one
// memory type (default pools) or of one custom pool. Only stores the
// configuration; the body is empty, so no Vulkan memory is allocated here.
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool isCustomPool,
    bool explicitBlockSize,
    uint32_t algorithm) :
    m_hAllocator(hAllocator),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_IsCustomPool(isCustomPool),
    m_ExplicitBlockSize(explicitBlockSize),
    m_Algorithm(algorithm),
    m_HasEmptyBlock(false),
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_pDefragmentator(VMA_NULL),
    m_NextBlockId(0)
{
}
    10175 
    10176 VmaBlockVector::~VmaBlockVector()
    10177 {
    10178  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
    10179 
    10180  for(size_t i = m_Blocks.size(); i--; )
    10181  {
    10182  m_Blocks[i]->Destroy(m_hAllocator);
    10183  vma_delete(m_hAllocator, m_Blocks[i]);
    10184  }
    10185 }
    10186 
    10187 VkResult VmaBlockVector::CreateMinBlocks()
    10188 {
    10189  for(size_t i = 0; i < m_MinBlockCount; ++i)
    10190  {
    10191  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    10192  if(res != VK_SUCCESS)
    10193  {
    10194  return res;
    10195  }
    10196  }
    10197  return VK_SUCCESS;
    10198 }
    10199 
    10200 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    10201 {
    10202  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10203 
    10204  const size_t blockCount = m_Blocks.size();
    10205 
    10206  pStats->size = 0;
    10207  pStats->unusedSize = 0;
    10208  pStats->allocationCount = 0;
    10209  pStats->unusedRangeCount = 0;
    10210  pStats->unusedRangeSizeMax = 0;
    10211  pStats->blockCount = blockCount;
    10212 
    10213  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    10214  {
    10215  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    10216  VMA_ASSERT(pBlock);
    10217  VMA_HEAVY_ASSERT(pBlock->Validate());
    10218  pBlock->m_pMetadata->AddPoolStats(*pStats);
    10219  }
    10220 }
    10221 
    10222 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
    10223 {
    10224  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    10225  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
    10226  (VMA_DEBUG_MARGIN > 0) &&
    10227  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
    10228 }
    10229 
// Presumably an upper bound on retry iterations in VmaBlockVector::Allocate
// (its loop is outside this view) - confirm against the allocation loop.
static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    10231 
    10232 VkResult VmaBlockVector::Allocate(
    10233  VmaPool hCurrentPool,
    10234  uint32_t currentFrameIndex,
    10235  VkDeviceSize size,
    10236  VkDeviceSize alignment,
    10237  const VmaAllocationCreateInfo& createInfo,
    10238  VmaSuballocationType suballocType,
    10239  VmaAllocation* pAllocation)
    10240 {
    10241  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    10242  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    10243  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    10244  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    10245  const bool canCreateNewBlock =
    10246  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
    10247  (m_Blocks.size() < m_MaxBlockCount);
    10248  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
    10249 
    10250  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
    10251  // Which in turn is available only when maxBlockCount = 1.
    10252  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
    10253  {
    10254  canMakeOtherLost = false;
    10255  }
    10256 
    10257  // Upper address can only be used with linear allocator and within single memory block.
    10258  if(isUpperAddress &&
    10259  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
    10260  {
    10261  return VK_ERROR_FEATURE_NOT_PRESENT;
    10262  }
    10263 
    10264  // Validate strategy.
    10265  switch(strategy)
    10266  {
    10267  case 0:
    10269  break;
    10273  break;
    10274  default:
    10275  return VK_ERROR_FEATURE_NOT_PRESENT;
    10276  }
    10277 
    10278  // Early reject: requested allocation size is larger that maximum block size for this block vector.
    10279  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
    10280  {
    10281  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10282  }
    10283 
    10284  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10285 
    10286  /*
    10287  Under certain condition, this whole section can be skipped for optimization, so
    10288  we move on directly to trying to allocate with canMakeOtherLost. That's the case
    10289  e.g. for custom pools with linear algorithm.
    10290  */
    10291  if(!canMakeOtherLost || canCreateNewBlock)
    10292  {
    10293  // 1. Search existing allocations. Try to allocate without making other allocations lost.
    10294  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
    10296 
    10297  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    10298  {
    10299  // Use only last block.
    10300  if(!m_Blocks.empty())
    10301  {
    10302  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
    10303  VMA_ASSERT(pCurrBlock);
    10304  VkResult res = AllocateFromBlock(
    10305  pCurrBlock,
    10306  hCurrentPool,
    10307  currentFrameIndex,
    10308  size,
    10309  alignment,
    10310  allocFlagsCopy,
    10311  createInfo.pUserData,
    10312  suballocType,
    10313  strategy,
    10314  pAllocation);
    10315  if(res == VK_SUCCESS)
    10316  {
    10317  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
    10318  return VK_SUCCESS;
    10319  }
    10320  }
    10321  }
    10322  else
    10323  {
    10325  {
    10326  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    10327  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    10328  {
    10329  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10330  VMA_ASSERT(pCurrBlock);
    10331  VkResult res = AllocateFromBlock(
    10332  pCurrBlock,
    10333  hCurrentPool,
    10334  currentFrameIndex,
    10335  size,
    10336  alignment,
    10337  allocFlagsCopy,
    10338  createInfo.pUserData,
    10339  suballocType,
    10340  strategy,
    10341  pAllocation);
    10342  if(res == VK_SUCCESS)
    10343  {
    10344  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    10345  return VK_SUCCESS;
    10346  }
    10347  }
    10348  }
    10349  else // WORST_FIT, FIRST_FIT
    10350  {
    10351  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    10352  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    10353  {
    10354  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10355  VMA_ASSERT(pCurrBlock);
    10356  VkResult res = AllocateFromBlock(
    10357  pCurrBlock,
    10358  hCurrentPool,
    10359  currentFrameIndex,
    10360  size,
    10361  alignment,
    10362  allocFlagsCopy,
    10363  createInfo.pUserData,
    10364  suballocType,
    10365  strategy,
    10366  pAllocation);
    10367  if(res == VK_SUCCESS)
    10368  {
    10369  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    10370  return VK_SUCCESS;
    10371  }
    10372  }
    10373  }
    10374  }
    10375 
    10376  // 2. Try to create new block.
    10377  if(canCreateNewBlock)
    10378  {
    10379  // Calculate optimal size for new block.
    10380  VkDeviceSize newBlockSize = m_PreferredBlockSize;
    10381  uint32_t newBlockSizeShift = 0;
    10382  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
    10383 
    10384  if(!m_ExplicitBlockSize)
    10385  {
    10386  // Allocate 1/8, 1/4, 1/2 as first blocks.
    10387  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
    10388  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
    10389  {
    10390  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    10391  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
    10392  {
    10393  newBlockSize = smallerNewBlockSize;
    10394  ++newBlockSizeShift;
    10395  }
    10396  else
    10397  {
    10398  break;
    10399  }
    10400  }
    10401  }
    10402 
    10403  size_t newBlockIndex = 0;
    10404  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
    10405  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
    10406  if(!m_ExplicitBlockSize)
    10407  {
    10408  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
    10409  {
    10410  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    10411  if(smallerNewBlockSize >= size)
    10412  {
    10413  newBlockSize = smallerNewBlockSize;
    10414  ++newBlockSizeShift;
    10415  res = CreateBlock(newBlockSize, &newBlockIndex);
    10416  }
    10417  else
    10418  {
    10419  break;
    10420  }
    10421  }
    10422  }
    10423 
    10424  if(res == VK_SUCCESS)
    10425  {
    10426  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
    10427  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
    10428 
    10429  res = AllocateFromBlock(
    10430  pBlock,
    10431  hCurrentPool,
    10432  currentFrameIndex,
    10433  size,
    10434  alignment,
    10435  allocFlagsCopy,
    10436  createInfo.pUserData,
    10437  suballocType,
    10438  strategy,
    10439  pAllocation);
    10440  if(res == VK_SUCCESS)
    10441  {
    10442  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
    10443  return VK_SUCCESS;
    10444  }
    10445  else
    10446  {
    10447  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
    10448  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10449  }
    10450  }
    10451  }
    10452  }
    10453 
    10454  // 3. Try to allocate from existing blocks with making other allocations lost.
    10455  if(canMakeOtherLost)
    10456  {
    10457  uint32_t tryIndex = 0;
    10458  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
    10459  {
    10460  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
    10461  VmaAllocationRequest bestRequest = {};
    10462  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
    10463 
    10464  // 1. Search existing allocations.
    10466  {
    10467  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    10468  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    10469  {
    10470  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10471  VMA_ASSERT(pCurrBlock);
    10472  VmaAllocationRequest currRequest = {};
    10473  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    10474  currentFrameIndex,
    10475  m_FrameInUseCount,
    10476  m_BufferImageGranularity,
    10477  size,
    10478  alignment,
    10479  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    10480  suballocType,
    10481  canMakeOtherLost,
    10482  strategy,
    10483  &currRequest))
    10484  {
    10485  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    10486  if(pBestRequestBlock == VMA_NULL ||
    10487  currRequestCost < bestRequestCost)
    10488  {
    10489  pBestRequestBlock = pCurrBlock;
    10490  bestRequest = currRequest;
    10491  bestRequestCost = currRequestCost;
    10492 
    10493  if(bestRequestCost == 0)
    10494  {
    10495  break;
    10496  }
    10497  }
    10498  }
    10499  }
    10500  }
    10501  else // WORST_FIT, FIRST_FIT
    10502  {
    10503  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    10504  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    10505  {
    10506  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10507  VMA_ASSERT(pCurrBlock);
    10508  VmaAllocationRequest currRequest = {};
    10509  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    10510  currentFrameIndex,
    10511  m_FrameInUseCount,
    10512  m_BufferImageGranularity,
    10513  size,
    10514  alignment,
    10515  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    10516  suballocType,
    10517  canMakeOtherLost,
    10518  strategy,
    10519  &currRequest))
    10520  {
    10521  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    10522  if(pBestRequestBlock == VMA_NULL ||
    10523  currRequestCost < bestRequestCost ||
    10525  {
    10526  pBestRequestBlock = pCurrBlock;
    10527  bestRequest = currRequest;
    10528  bestRequestCost = currRequestCost;
    10529 
    10530  if(bestRequestCost == 0 ||
    10532  {
    10533  break;
    10534  }
    10535  }
    10536  }
    10537  }
    10538  }
    10539 
    10540  if(pBestRequestBlock != VMA_NULL)
    10541  {
    10542  if(mapped)
    10543  {
    10544  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
    10545  if(res != VK_SUCCESS)
    10546  {
    10547  return res;
    10548  }
    10549  }
    10550 
    10551  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
    10552  currentFrameIndex,
    10553  m_FrameInUseCount,
    10554  &bestRequest))
    10555  {
    10556  // We no longer have an empty Allocation.
    10557  if(pBestRequestBlock->m_pMetadata->IsEmpty())
    10558  {
    10559  m_HasEmptyBlock = false;
    10560  }
    10561  // Allocate from this pBlock.
    10562  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
    10563  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
    10564  (*pAllocation)->InitBlockAllocation(
    10565  hCurrentPool,
    10566  pBestRequestBlock,
    10567  bestRequest.offset,
    10568  alignment,
    10569  size,
    10570  suballocType,
    10571  mapped,
    10572  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    10573  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
    10574  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
    10575  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
    10576  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    10577  {
    10578  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    10579  }
    10580  if(IsCorruptionDetectionEnabled())
    10581  {
    10582  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
    10583  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    10584  }
    10585  return VK_SUCCESS;
    10586  }
    10587  // else: Some allocations must have been touched while we are here. Next try.
    10588  }
    10589  else
    10590  {
    10591  // Could not find place in any of the blocks - break outer loop.
    10592  break;
    10593  }
    10594  }
    10595  /* Maximum number of tries exceeded - a very unlike event when many other
    10596  threads are simultaneously touching allocations making it impossible to make
    10597  lost at the same time as we try to allocate. */
    10598  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
    10599  {
    10600  return VK_ERROR_TOO_MANY_OBJECTS;
    10601  }
    10602  }
    10603 
    10604  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10605 }
    10606 
    10607 void VmaBlockVector::Free(
    10608  VmaAllocation hAllocation)
    10609 {
    10610  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
    10611 
    10612  // Scope for lock.
    10613  {
    10614  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10615 
    10616  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    10617 
    10618  if(IsCorruptionDetectionEnabled())
    10619  {
    10620  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
    10621  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
    10622  }
    10623 
    10624  if(hAllocation->IsPersistentMap())
    10625  {
    10626  pBlock->Unmap(m_hAllocator, 1);
    10627  }
    10628 
    10629  pBlock->m_pMetadata->Free(hAllocation);
    10630  VMA_HEAVY_ASSERT(pBlock->Validate());
    10631 
    10632  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
    10633 
    10634  // pBlock became empty after this deallocation.
    10635  if(pBlock->m_pMetadata->IsEmpty())
    10636  {
    10637  // Already has empty Allocation. We don't want to have two, so delete this one.
    10638  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
    10639  {
    10640  pBlockToDelete = pBlock;
    10641  Remove(pBlock);
    10642  }
    10643  // We now have first empty block.
    10644  else
    10645  {
    10646  m_HasEmptyBlock = true;
    10647  }
    10648  }
    10649  // pBlock didn't become empty, but we have another empty block - find and free that one.
    10650  // (This is optional, heuristics.)
    10651  else if(m_HasEmptyBlock)
    10652  {
    10653  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
    10654  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
    10655  {
    10656  pBlockToDelete = pLastBlock;
    10657  m_Blocks.pop_back();
    10658  m_HasEmptyBlock = false;
    10659  }
    10660  }
    10661 
    10662  IncrementallySortBlocks();
    10663  }
    10664 
    10665  // Destruction of a free Allocation. Deferred until this point, outside of mutex
    10666  // lock, for performance reason.
    10667  if(pBlockToDelete != VMA_NULL)
    10668  {
    10669  VMA_DEBUG_LOG(" Deleted empty allocation");
    10670  pBlockToDelete->Destroy(m_hAllocator);
    10671  vma_delete(m_hAllocator, pBlockToDelete);
    10672  }
    10673 }
    10674 
    10675 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
    10676 {
    10677  VkDeviceSize result = 0;
    10678  for(size_t i = m_Blocks.size(); i--; )
    10679  {
    10680  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
    10681  if(result >= m_PreferredBlockSize)
    10682  {
    10683  break;
    10684  }
    10685  }
    10686  return result;
    10687 }
    10688 
    10689 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    10690 {
    10691  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    10692  {
    10693  if(m_Blocks[blockIndex] == pBlock)
    10694  {
    10695  VmaVectorRemove(m_Blocks, blockIndex);
    10696  return;
    10697  }
    10698  }
    10699  VMA_ASSERT(0);
    10700 }
    10701 
    10702 void VmaBlockVector::IncrementallySortBlocks()
    10703 {
    10704  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    10705  {
    10706  // Bubble sort only until first swap.
    10707  for(size_t i = 1; i < m_Blocks.size(); ++i)
    10708  {
    10709  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
    10710  {
    10711  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    10712  return;
    10713  }
    10714  }
    10715  }
    10716 }
    10717 
// Tries to carve a suballocation of given size/alignment out of one specific
// block. On success creates and fully initializes *pAllocation and returns
// VK_SUCCESS; otherwise returns VK_ERROR_OUT_OF_DEVICE_MEMORY without side
// effects. Caller must hold m_Mutex. canMakeOtherLost is deliberately not
// supported here (asserted below) - that path is handled by the caller.
VkResult VmaBlockVector::AllocateFromBlock(
    VmaDeviceMemoryBlock* pBlock,
    VmaPool hCurrentPool,
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    VmaAllocationCreateFlags allocFlags,
    void* pUserData,
    VmaSuballocationType suballocType,
    uint32_t strategy,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;

    // Ask the block's metadata whether a region satisfying size/alignment exists.
    VmaAllocationRequest currRequest = {};
    if(pBlock->m_pMetadata->CreateAllocationRequest(
        currentFrameIndex,
        m_FrameInUseCount,
        m_BufferImageGranularity,
        size,
        alignment,
        isUpperAddress,
        suballocType,
        false, // canMakeOtherLost
        strategy,
        &currRequest))
    {
        // Allocate from pCurrBlock.
        VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);

        // Persistently mapped allocation: add one map reference to the block
        // up front, before any bookkeeping, so failure leaves no side effects.
        if(mapped)
        {
            VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
            if(res != VK_SUCCESS)
            {
                return res;
            }
        }

        // We no longer have an empty Allocation.
        if(pBlock->m_pMetadata->IsEmpty())
        {
            m_HasEmptyBlock = false;
        }

        // Commit the request in metadata and initialize the allocation handle.
        *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
        pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
        (*pAllocation)->InitBlockAllocation(
            hCurrentPool,
            pBlock,
            currRequest.offset,
            alignment,
            size,
            suballocType,
            mapped,
            (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        (*pAllocation)->SetUserData(m_hAllocator, pUserData);
        if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
        {
            // Debug feature: fill fresh memory with a recognizable bit pattern.
            m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
        }
        if(IsCorruptionDetectionEnabled())
        {
            // Surround the allocation with magic values checked on free.
            VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
        }
        return VK_SUCCESS;
    }
    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
    10792 
    10793 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    10794 {
    10795  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    10796  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    10797  allocInfo.allocationSize = blockSize;
    10798  VkDeviceMemory mem = VK_NULL_HANDLE;
    10799  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    10800  if(res < 0)
    10801  {
    10802  return res;
    10803  }
    10804 
    10805  // New VkDeviceMemory successfully created.
    10806 
    10807  // Create new Allocation for it.
    10808  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    10809  pBlock->Init(
    10810  m_hAllocator,
    10811  m_MemoryTypeIndex,
    10812  mem,
    10813  allocInfo.allocationSize,
    10814  m_NextBlockId++,
    10815  m_Algorithm);
    10816 
    10817  m_Blocks.push_back(pBlock);
    10818  if(pNewBlockIndex != VMA_NULL)
    10819  {
    10820  *pNewBlockIndex = m_Blocks.size() - 1;
    10821  }
    10822 
    10823  return VK_SUCCESS;
    10824 }
    10825 
    10826 #if VMA_STATS_STRING_ENABLED
    10827 
// Serializes this block vector as a JSON object into the given writer, for
// vmaBuildStatsString. Custom pools and default (per-memory-type) vectors
// emit slightly different headers; both end with a "Blocks" object keyed by
// block id. Takes m_Mutex for the duration of the dump.
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();

    if(m_IsCustomPool)
    {
        // Custom pool: report its explicit configuration.
        json.WriteString("MemoryTypeIndex");
        json.WriteNumber(m_MemoryTypeIndex);

        json.WriteString("BlockSize");
        json.WriteNumber(m_PreferredBlockSize);

        // Min/Max are omitted when left at their unrestricted defaults.
        json.WriteString("BlockCount");
        json.BeginObject(true);
        if(m_MinBlockCount > 0)
        {
            json.WriteString("Min");
            json.WriteNumber((uint64_t)m_MinBlockCount);
        }
        if(m_MaxBlockCount < SIZE_MAX)
        {
            json.WriteString("Max");
            json.WriteNumber((uint64_t)m_MaxBlockCount);
        }
        json.WriteString("Cur");
        json.WriteNumber((uint64_t)m_Blocks.size());
        json.EndObject();

        if(m_FrameInUseCount > 0)
        {
            json.WriteString("FrameInUseCount");
            json.WriteNumber(m_FrameInUseCount);
        }

        if(m_Algorithm != 0)
        {
            json.WriteString("Algorithm");
            json.WriteString(VmaAlgorithmToStr(m_Algorithm));
        }
    }
    else
    {
        // Default block vector: only the preferred block size is meaningful.
        json.WriteString("PreferredBlockSize");
        json.WriteNumber(m_PreferredBlockSize);
    }

    // Per-block detailed maps, keyed by each block's numeric id.
    json.WriteString("Blocks");
    json.BeginObject();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        json.BeginString();
        json.ContinueString(m_Blocks[i]->GetId());
        json.EndString();

        m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    }
    json.EndObject();

    json.EndObject();
}
    10890 
    10891 #endif // #if VMA_STATS_STRING_ENABLED
    10892 
    10893 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
    10894  VmaAllocator hAllocator,
    10895  uint32_t currentFrameIndex)
    10896 {
    10897  if(m_pDefragmentator == VMA_NULL)
    10898  {
    10899  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
    10900  hAllocator,
    10901  this,
    10902  currentFrameIndex);
    10903  }
    10904 
    10905  return m_pDefragmentator;
    10906 }
    10907 
// Runs defragmentation for this block vector (no-op when no defragmentator
// was created), accumulates the moved bytes/allocations into
// pDefragmentationStats, decrements the caller's remaining budgets
// (maxBytesToMove / maxAllocationsToMove are in-out), and finally destroys
// blocks that became empty, subject to m_MinBlockCount.
VkResult VmaBlockVector::Defragment(
    VmaDefragmentationStats* pDefragmentationStats,
    VkDeviceSize& maxBytesToMove,
    uint32_t& maxAllocationsToMove)
{
    if(m_pDefragmentator == VMA_NULL)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    // Defragment.
    VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);

    // Accumulate statistics.
    if(pDefragmentationStats != VMA_NULL)
    {
        const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
        const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
        pDefragmentationStats->bytesMoved += bytesMoved;
        pDefragmentationStats->allocationsMoved += allocationsMoved;
        // The defragmentator must have respected the budgets, so these
        // subtractions cannot underflow.
        VMA_ASSERT(bytesMoved <= maxBytesToMove);
        VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
        maxBytesToMove -= bytesMoved;
        maxAllocationsToMove -= allocationsMoved;
    }

    // Free empty blocks.
    // Iterate backward so VmaVectorRemove does not disturb indices not yet visited.
    m_HasEmptyBlock = false;
    for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    {
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if(pBlock->m_pMetadata->IsEmpty())
        {
            if(m_Blocks.size() > m_MinBlockCount)
            {
                if(pDefragmentationStats != VMA_NULL)
                {
                    ++pDefragmentationStats->deviceMemoryBlocksFreed;
                    pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
                }

                VmaVectorRemove(m_Blocks, blockIndex);
                pBlock->Destroy(m_hAllocator);
                vma_delete(m_hAllocator, pBlock);
            }
            else
            {
                // Must keep this block to satisfy m_MinBlockCount; remember
                // that an empty block exists.
                m_HasEmptyBlock = true;
            }
        }
    }

    return result;
}
    10964 
    10965 void VmaBlockVector::DestroyDefragmentator()
    10966 {
    10967  if(m_pDefragmentator != VMA_NULL)
    10968  {
    10969  vma_delete(m_hAllocator, m_pDefragmentator);
    10970  m_pDefragmentator = VMA_NULL;
    10971  }
    10972 }
    10973 
    10974 void VmaBlockVector::MakePoolAllocationsLost(
    10975  uint32_t currentFrameIndex,
    10976  size_t* pLostAllocationCount)
    10977 {
    10978  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10979  size_t lostAllocationCount = 0;
    10980  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    10981  {
    10982  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    10983  VMA_ASSERT(pBlock);
    10984  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    10985  }
    10986  if(pLostAllocationCount != VMA_NULL)
    10987  {
    10988  *pLostAllocationCount = lostAllocationCount;
    10989  }
    10990 }
    10991 
    10992 VkResult VmaBlockVector::CheckCorruption()
    10993 {
    10994  if(!IsCorruptionDetectionEnabled())
    10995  {
    10996  return VK_ERROR_FEATURE_NOT_PRESENT;
    10997  }
    10998 
    10999  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11000  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11001  {
    11002  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11003  VMA_ASSERT(pBlock);
    11004  VkResult res = pBlock->CheckCorruption(m_hAllocator);
    11005  if(res != VK_SUCCESS)
    11006  {
    11007  return res;
    11008  }
    11009  }
    11010  return VK_SUCCESS;
    11011 }
    11012 
    11013 void VmaBlockVector::AddStats(VmaStats* pStats)
    11014 {
    11015  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    11016  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    11017 
    11018  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11019 
    11020  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11021  {
    11022  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11023  VMA_ASSERT(pBlock);
    11024  VMA_HEAVY_ASSERT(pBlock->Validate());
    11025  VmaStatInfo allocationStatInfo;
    11026  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
    11027  VmaAddStatInfo(pStats->total, allocationStatInfo);
    11028  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    11029  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    11030  }
    11031 }
    11032 
    11034 // VmaDefragmentator members definition
    11035 
// Constructs a defragmentator bound to one block vector. Both bookkeeping
// vectors use the allocator's user-supplied allocation callbacks.
VmaDefragmentator::VmaDefragmentator(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex) :
    m_hAllocator(hAllocator),
    m_pBlockVector(pBlockVector),
    m_CurrentFrameIndex(currentFrameIndex),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
{
    // Defragmentation is only supported for the default algorithm (0);
    // linear/other specialized pool algorithms must not reach this point.
    VMA_ASSERT(pBlockVector->GetAlgorithm() == 0);
}
    11050 
    11051 VmaDefragmentator::~VmaDefragmentator()
    11052 {
    11053  for(size_t i = m_Blocks.size(); i--; )
    11054  {
    11055  vma_delete(m_hAllocator, m_Blocks[i]);
    11056  }
    11057 }
    11058 
    11059 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    11060 {
    11061  AllocationInfo allocInfo;
    11062  allocInfo.m_hAllocation = hAlloc;
    11063  allocInfo.m_pChanged = pChanged;
    11064  m_Allocations.push_back(allocInfo);
    11065 }
    11066 
    11067 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
    11068 {
    11069  // It has already been mapped for defragmentation.
    11070  if(m_pMappedDataForDefragmentation)
    11071  {
    11072  *ppMappedData = m_pMappedDataForDefragmentation;
    11073  return VK_SUCCESS;
    11074  }
    11075 
    11076  // It is originally mapped.
    11077  if(m_pBlock->GetMappedData())
    11078  {
    11079  *ppMappedData = m_pBlock->GetMappedData();
    11080  return VK_SUCCESS;
    11081  }
    11082 
    11083  // Map on first usage.
    11084  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
    11085  *ppMappedData = m_pMappedDataForDefragmentation;
    11086  return res;
    11087 }
    11088 
    11089 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
    11090 {
    11091  if(m_pMappedDataForDefragmentation != VMA_NULL)
    11092  {
    11093  m_pBlock->Unmap(hAllocator, 1);
    11094  }
    11095 }
    11096 
// One pass of the defragmentation algorithm: walks candidate allocations from
// the most "source" block (back of m_Blocks) to the most "destination" block
// (front), and for each one tries to re-place it at a lower block/offset,
// physically memcpy-ing its contents. Returns VK_SUCCESS when the pass
// completes, VK_INCOMPLETE when the byte/allocation budget is exhausted, or
// an error from mapping. Requires m_Blocks/m_Allocations prepared by
// Defragment().
VkResult VmaDefragmentator::DefragmentRound(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Blocks.empty())
    {
        return VK_SUCCESS;
    }

    size_t srcBlockIndex = m_Blocks.size() - 1;
    // SIZE_MAX acts as a sentinel meaning "not yet positioned in this block";
    // the while loop below then snaps it to the block's last allocation.
    size_t srcAllocIndex = SIZE_MAX;
    for(;;)
    {
        // 1. Find next allocation to move.
        // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
        // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
        while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
        {
            if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
            {
                // Finished: no more allocations to process.
                if(srcBlockIndex == 0)
                {
                    return VK_SUCCESS;
                }
                else
                {
                    --srcBlockIndex;
                    srcAllocIndex = SIZE_MAX;
                }
            }
            else
            {
                srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
            }
        }

        BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
        AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];

        const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
        const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
        const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
        const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();

        // 2. Try to find new place for this allocation in preceding or current block.
        for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
        {
            BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
            VmaAllocationRequest dstAllocRequest;
            // NOTE(review): this call appears to be missing one argument
            // between canMakeOtherLost and &dstAllocRequest (likely the
            // strategy parameter) - confirm against the full file.
            if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
                m_CurrentFrameIndex,
                m_pBlockVector->GetFrameInUseCount(),
                m_pBlockVector->GetBufferImageGranularity(),
                size,
                alignment,
                false, // upperAddress
                suballocType,
                false, // canMakeOtherLost
                &dstAllocRequest) &&
            MoveMakesSense(
                dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
            {
                VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);

                // Reached limit on number of allocations or bytes to move.
                if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
                    (m_BytesMoved + size > maxBytesToMove))
                {
                    return VK_INCOMPLETE;
                }

                // Map both blocks (reusing existing mappings when possible).
                void* pDstMappedData = VMA_NULL;
                VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                void* pSrcMappedData = VMA_NULL;
                res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
                memcpy(
                    reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
                    reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
                    static_cast<size_t>(size));

                if(VMA_DEBUG_MARGIN > 0)
                {
                    // Re-create the corruption-detection magic values around
                    // the allocation at its new location.
                    VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset - VMA_DEBUG_MARGIN);
                    VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
                }

                // Commit the move in metadata: allocate at destination, free at source.
                pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
                    dstAllocRequest,
                    suballocType,
                    size,
                    false, // upperAddress
                    allocInfo.m_hAllocation);
                pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);

                // Repoint the allocation handle at its new block/offset.
                allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);

                if(allocInfo.m_pChanged != VMA_NULL)
                {
                    *allocInfo.m_pChanged = VK_TRUE;
                }

                ++m_AllocationsMoved;
                m_BytesMoved += size;

                VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);

                break;
            }
        }

        // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.

        // Advance to the previous allocation, falling back to the previous
        // block when this one is exhausted.
        if(srcAllocIndex > 0)
        {
            --srcAllocIndex;
        }
        else
        {
            if(srcBlockIndex > 0)
            {
                --srcBlockIndex;
                srcAllocIndex = SIZE_MAX;
            }
            else
            {
                return VK_SUCCESS;
            }
        }
    }
}
    11240 
// Runs the whole defragmentation process for the registered allocations:
// 1. Builds a BlockInfo for every block of m_pBlockVector.
// 2. Distributes the allocations queued in m_Allocations into their owning
//    blocks (dropping ones that became lost in the meantime).
// 3. Sorts blocks from most "destination"-like to most "source"-like.
// 4. Executes up to 2 rounds of moves, bounded by maxBytesToMove and
//    maxAllocationsToMove.
// Returns VK_SUCCESS or the first error reported by a round.
// Caller must hold VmaBlockVector::m_Mutex (see lost-allocation check below).
VkResult VmaDefragmentator::Defragment(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    // Nothing was registered via AddAllocation() - nothing to do.
    if(m_Allocations.empty())
    {
        return VK_SUCCESS;
    }

    // Create block info for each block.
    const size_t blockCount = m_pBlockVector->m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
        pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
        m_Blocks.push_back(pBlockInfo);
    }

    // Sort them by m_pBlock pointer value.
    // Enables the binary search (VmaBinaryFindFirstNotLess) below.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());

    // Move allocation infos from m_Allocations to appropriate m_Blocks[memTypeIndex].m_Allocations.
    for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
    {
        AllocationInfo& allocInfo = m_Allocations[blockIndex];
        // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
        if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
        {
            VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
            BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
            if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
            {
                (*it)->m_Allocations.push_back(allocInfo);
            }
            else
            {
                // Every registered allocation must belong to one of the blocks
                // collected above; anything else indicates internal corruption.
                VMA_ASSERT(0);
            }
        }
    }
    m_Allocations.clear();

    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = m_Blocks[blockIndex];
        pBlockInfo->CalcHasNonMovableAllocations();
        pBlockInfo->SortAllocationsBySizeDescecnding();
    }

    // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());

    // Execute defragmentation rounds (the main part).
    VkResult result = VK_SUCCESS;
    for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
    {
        result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
    }

    // Unmap blocks that were mapped for defragmentation.
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        m_Blocks[blockIndex]->Unmap(m_hAllocator);
    }

    return result;
}
    11308 
    11309 bool VmaDefragmentator::MoveMakesSense(
    11310  size_t dstBlockIndex, VkDeviceSize dstOffset,
    11311  size_t srcBlockIndex, VkDeviceSize srcOffset)
    11312 {
    11313  if(dstBlockIndex < srcBlockIndex)
    11314  {
    11315  return true;
    11316  }
    11317  if(dstBlockIndex > srcBlockIndex)
    11318  {
    11319  return false;
    11320  }
    11321  if(dstOffset < srcOffset)
    11322  {
    11323  return true;
    11324  }
    11325  return false;
    11326 }
    11327 
    11329 // VmaRecorder
    11330 
    11331 #if VMA_RECORDING_ENABLED
    11332 
// Constructs an inactive recorder. No file is opened and the QPC timing
// fields are sentinels until Init() succeeds.
VmaRecorder::VmaRecorder() :
    m_UseMutex(true),
    m_Flags(0),
    m_File(VMA_NULL),
    m_Freq(INT64_MAX),
    m_StartCounter(INT64_MAX)
{
}
    11341 
    11342 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
    11343 {
    11344  m_UseMutex = useMutex;
    11345  m_Flags = settings.flags;
    11346 
    11347  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    11348  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
    11349 
    11350  // Open file for writing.
    11351  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    11352  if(err != 0)
    11353  {
    11354  return VK_ERROR_INITIALIZATION_FAILED;
    11355  }
    11356 
    11357  // Write header.
    11358  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    11359  fprintf(m_File, "%s\n", "1,3");
    11360 
    11361  return VK_SUCCESS;
    11362 }
    11363 
    11364 VmaRecorder::~VmaRecorder()
    11365 {
    11366  if(m_File != VMA_NULL)
    11367  {
    11368  fclose(m_File);
    11369  }
    11370 }
    11371 
    11372 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
    11373 {
    11374  CallParams callParams;
    11375  GetBasicParams(callParams);
    11376 
    11377  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11378  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
    11379  Flush();
    11380 }
    11381 
    11382 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
    11383 {
    11384  CallParams callParams;
    11385  GetBasicParams(callParams);
    11386 
    11387  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11388  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
    11389  Flush();
    11390 }
    11391 
    11392 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
    11393 {
    11394  CallParams callParams;
    11395  GetBasicParams(callParams);
    11396 
    11397  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11398  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
    11399  createInfo.memoryTypeIndex,
    11400  createInfo.flags,
    11401  createInfo.blockSize,
    11402  (uint64_t)createInfo.minBlockCount,
    11403  (uint64_t)createInfo.maxBlockCount,
    11404  createInfo.frameInUseCount,
    11405  pool);
    11406  Flush();
    11407 }
    11408 
    11409 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
    11410 {
    11411  CallParams callParams;
    11412  GetBasicParams(callParams);
    11413 
    11414  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11415  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
    11416  pool);
    11417  Flush();
    11418 }
    11419 
    11420 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    11421  const VkMemoryRequirements& vkMemReq,
    11422  const VmaAllocationCreateInfo& createInfo,
    11423  VmaAllocation allocation)
    11424 {
    11425  CallParams callParams;
    11426  GetBasicParams(callParams);
    11427 
    11428  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11429  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    11430  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11431  vkMemReq.size,
    11432  vkMemReq.alignment,
    11433  vkMemReq.memoryTypeBits,
    11434  createInfo.flags,
    11435  createInfo.usage,
    11436  createInfo.requiredFlags,
    11437  createInfo.preferredFlags,
    11438  createInfo.memoryTypeBits,
    11439  createInfo.pool,
    11440  allocation,
    11441  userDataStr.GetString());
    11442  Flush();
    11443 }
    11444 
    11445 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    11446  const VkMemoryRequirements& vkMemReq,
    11447  bool requiresDedicatedAllocation,
    11448  bool prefersDedicatedAllocation,
    11449  const VmaAllocationCreateInfo& createInfo,
    11450  VmaAllocation allocation)
    11451 {
    11452  CallParams callParams;
    11453  GetBasicParams(callParams);
    11454 
    11455  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11456  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    11457  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11458  vkMemReq.size,
    11459  vkMemReq.alignment,
    11460  vkMemReq.memoryTypeBits,
    11461  requiresDedicatedAllocation ? 1 : 0,
    11462  prefersDedicatedAllocation ? 1 : 0,
    11463  createInfo.flags,
    11464  createInfo.usage,
    11465  createInfo.requiredFlags,
    11466  createInfo.preferredFlags,
    11467  createInfo.memoryTypeBits,
    11468  createInfo.pool,
    11469  allocation,
    11470  userDataStr.GetString());
    11471  Flush();
    11472 }
    11473 
    11474 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    11475  const VkMemoryRequirements& vkMemReq,
    11476  bool requiresDedicatedAllocation,
    11477  bool prefersDedicatedAllocation,
    11478  const VmaAllocationCreateInfo& createInfo,
    11479  VmaAllocation allocation)
    11480 {
    11481  CallParams callParams;
    11482  GetBasicParams(callParams);
    11483 
    11484  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11485  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    11486  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11487  vkMemReq.size,
    11488  vkMemReq.alignment,
    11489  vkMemReq.memoryTypeBits,
    11490  requiresDedicatedAllocation ? 1 : 0,
    11491  prefersDedicatedAllocation ? 1 : 0,
    11492  createInfo.flags,
    11493  createInfo.usage,
    11494  createInfo.requiredFlags,
    11495  createInfo.preferredFlags,
    11496  createInfo.memoryTypeBits,
    11497  createInfo.pool,
    11498  allocation,
    11499  userDataStr.GetString());
    11500  Flush();
    11501 }
    11502 
    11503 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
    11504  VmaAllocation allocation)
    11505 {
    11506  CallParams callParams;
    11507  GetBasicParams(callParams);
    11508 
    11509  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11510  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11511  allocation);
    11512  Flush();
    11513 }
    11514 
    11515 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    11516  VmaAllocation allocation,
    11517  const void* pUserData)
    11518 {
    11519  CallParams callParams;
    11520  GetBasicParams(callParams);
    11521 
    11522  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11523  UserDataString userDataStr(
    11524  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
    11525  pUserData);
    11526  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11527  allocation,
    11528  userDataStr.GetString());
    11529  Flush();
    11530 }
    11531 
    11532 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
    11533  VmaAllocation allocation)
    11534 {
    11535  CallParams callParams;
    11536  GetBasicParams(callParams);
    11537 
    11538  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11539  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    11540  allocation);
    11541  Flush();
    11542 }
    11543 
    11544 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
    11545  VmaAllocation allocation)
    11546 {
    11547  CallParams callParams;
    11548  GetBasicParams(callParams);
    11549 
    11550  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11551  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11552  allocation);
    11553  Flush();
    11554 }
    11555 
    11556 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
    11557  VmaAllocation allocation)
    11558 {
    11559  CallParams callParams;
    11560  GetBasicParams(callParams);
    11561 
    11562  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11563  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11564  allocation);
    11565  Flush();
    11566 }
    11567 
    11568 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
    11569  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    11570 {
    11571  CallParams callParams;
    11572  GetBasicParams(callParams);
    11573 
    11574  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11575  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    11576  allocation,
    11577  offset,
    11578  size);
    11579  Flush();
    11580 }
    11581 
    11582 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
    11583  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    11584 {
    11585  CallParams callParams;
    11586  GetBasicParams(callParams);
    11587 
    11588  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11589  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    11590  allocation,
    11591  offset,
    11592  size);
    11593  Flush();
    11594 }
    11595 
    11596 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
    11597  const VkBufferCreateInfo& bufCreateInfo,
    11598  const VmaAllocationCreateInfo& allocCreateInfo,
    11599  VmaAllocation allocation)
    11600 {
    11601  CallParams callParams;
    11602  GetBasicParams(callParams);
    11603 
    11604  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11605  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    11606  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11607  bufCreateInfo.flags,
    11608  bufCreateInfo.size,
    11609  bufCreateInfo.usage,
    11610  bufCreateInfo.sharingMode,
    11611  allocCreateInfo.flags,
    11612  allocCreateInfo.usage,
    11613  allocCreateInfo.requiredFlags,
    11614  allocCreateInfo.preferredFlags,
    11615  allocCreateInfo.memoryTypeBits,
    11616  allocCreateInfo.pool,
    11617  allocation,
    11618  userDataStr.GetString());
    11619  Flush();
    11620 }
    11621 
    11622 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    11623  const VkImageCreateInfo& imageCreateInfo,
    11624  const VmaAllocationCreateInfo& allocCreateInfo,
    11625  VmaAllocation allocation)
    11626 {
    11627  CallParams callParams;
    11628  GetBasicParams(callParams);
    11629 
    11630  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11631  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    11632  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11633  imageCreateInfo.flags,
    11634  imageCreateInfo.imageType,
    11635  imageCreateInfo.format,
    11636  imageCreateInfo.extent.width,
    11637  imageCreateInfo.extent.height,
    11638  imageCreateInfo.extent.depth,
    11639  imageCreateInfo.mipLevels,
    11640  imageCreateInfo.arrayLayers,
    11641  imageCreateInfo.samples,
    11642  imageCreateInfo.tiling,
    11643  imageCreateInfo.usage,
    11644  imageCreateInfo.sharingMode,
    11645  imageCreateInfo.initialLayout,
    11646  allocCreateInfo.flags,
    11647  allocCreateInfo.usage,
    11648  allocCreateInfo.requiredFlags,
    11649  allocCreateInfo.preferredFlags,
    11650  allocCreateInfo.memoryTypeBits,
    11651  allocCreateInfo.pool,
    11652  allocation,
    11653  userDataStr.GetString());
    11654  Flush();
    11655 }
    11656 
    11657 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
    11658  VmaAllocation allocation)
    11659 {
    11660  CallParams callParams;
    11661  GetBasicParams(callParams);
    11662 
    11663  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11664  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
    11665  allocation);
    11666  Flush();
    11667 }
    11668 
    11669 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
    11670  VmaAllocation allocation)
    11671 {
    11672  CallParams callParams;
    11673  GetBasicParams(callParams);
    11674 
    11675  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11676  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
    11677  allocation);
    11678  Flush();
    11679 }
    11680 
    11681 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
    11682  VmaAllocation allocation)
    11683 {
    11684  CallParams callParams;
    11685  GetBasicParams(callParams);
    11686 
    11687  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11688  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    11689  allocation);
    11690  Flush();
    11691 }
    11692 
    11693 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
    11694  VmaAllocation allocation)
    11695 {
    11696  CallParams callParams;
    11697  GetBasicParams(callParams);
    11698 
    11699  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11700  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
    11701  allocation);
    11702  Flush();
    11703 }
    11704 
    11705 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
    11706  VmaPool pool)
    11707 {
    11708  CallParams callParams;
    11709  GetBasicParams(callParams);
    11710 
    11711  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11712  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
    11713  pool);
    11714  Flush();
    11715 }
    11716 
    11717 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
    11718 {
    11719  if(pUserData != VMA_NULL)
    11720  {
    11721  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
    11722  {
    11723  m_Str = (const char*)pUserData;
    11724  }
    11725  else
    11726  {
    11727  sprintf_s(m_PtrStr, "%p", pUserData);
    11728  m_Str = m_PtrStr;
    11729  }
    11730  }
    11731  else
    11732  {
    11733  m_Str = "";
    11734  }
    11735 }
    11736 
// Writes the "Config,Begin" ... "Config,End" section of the recording file:
// physical-device identity, relevant limits, the full memory heap/type
// layout, enabled extensions, and the VMA_* compile-time macros.
// The exact line order is part of the file format consumed by VmaReplay;
// do not reorder.
void VmaRecorder::WriteConfiguration(
    const VkPhysicalDeviceProperties& devProps,
    const VkPhysicalDeviceMemoryProperties& memProps,
    bool dedicatedAllocationExtensionEnabled)
{
    fprintf(m_File, "Config,Begin\n");

    // Device identity.
    fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);

    // Limits that influence allocation behavior.
    fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);

    // Memory heap and type layout.
    fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    }
    fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    }

    fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);

    // Compile-time configuration of this VMA build.
    fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    fprintf(m_File, "Config,End\n");
}
    11782 
    11783 void VmaRecorder::GetBasicParams(CallParams& outParams)
    11784 {
    11785  outParams.threadId = GetCurrentThreadId();
    11786 
    11787  LARGE_INTEGER counter;
    11788  QueryPerformanceCounter(&counter);
    11789  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
    11790 }
    11791 
    11792 void VmaRecorder::Flush()
    11793 {
    11794  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
    11795  {
    11796  fflush(m_File);
    11797  }
    11798 }
    11799 
    11800 #endif // #if VMA_RECORDING_ENABLED
    11801 
    11803 // VmaAllocator_T
    11804 
    11805 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    11806  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    11807  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    11808  m_hDevice(pCreateInfo->device),
    11809  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    11810  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
    11811  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    11812  m_PreferredLargeHeapBlockSize(0),
    11813  m_PhysicalDevice(pCreateInfo->physicalDevice),
    11814  m_CurrentFrameIndex(0),
    11815  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
    11816  m_NextPoolId(0)
    11818  ,m_pRecorder(VMA_NULL)
    11819 #endif
    11820 {
    11821  if(VMA_DEBUG_DETECT_CORRUPTION)
    11822  {
    11823  // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
    11824  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
    11825  }
    11826 
    11827  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
    11828 
    11829 #if !(VMA_DEDICATED_ALLOCATION)
    11831  {
    11832  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
    11833  }
    11834 #endif
    11835 
    11836  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
    11837  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    11838  memset(&m_MemProps, 0, sizeof(m_MemProps));
    11839 
    11840  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    11841  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
    11842 
    11843  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    11844  {
    11845  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    11846  }
    11847 
    11848  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    11849  {
    11850  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
    11851  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    11852  }
    11853 
    11854  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
    11855 
    11856  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    11857  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
    11858 
    11859  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
    11860  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
    11861  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
    11862  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
    11863 
    11864  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
    11865  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    11866 
    11867  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    11868  {
    11869  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
    11870  {
    11871  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
    11872  if(limit != VK_WHOLE_SIZE)
    11873  {
    11874  m_HeapSizeLimit[heapIndex] = limit;
    11875  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
    11876  {
    11877  m_MemProps.memoryHeaps[heapIndex].size = limit;
    11878  }
    11879  }
    11880  }
    11881  }
    11882 
    11883  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    11884  {
    11885  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
    11886 
    11887  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
    11888  this,
    11889  memTypeIndex,
    11890  preferredBlockSize,
    11891  0,
    11892  SIZE_MAX,
    11893  GetBufferImageGranularity(),
    11894  pCreateInfo->frameInUseCount,
    11895  false, // isCustomPool
    11896  false, // explicitBlockSize
    11897  false); // linearAlgorithm
    11898  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
    11899  // becase minBlockCount is 0.
    11900  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    11901 
    11902  }
    11903 }
    11904 
// Second-phase initialization, separate from the constructor so it can
// report failure. Currently only sets up call recording when
// pCreateInfo->pRecordSettings provides a non-empty file path.
// Returns VK_SUCCESS, the recorder's Init() error, or
// VK_ERROR_FEATURE_NOT_PRESENT when recording was requested but the
// library was built without VMA_RECORDING_ENABLED.
VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
{
    VkResult res = VK_SUCCESS;

    if(pCreateInfo->pRecordSettings != VMA_NULL &&
        !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    {
#if VMA_RECORDING_ENABLED
        m_pRecorder = vma_new(this, VmaRecorder)();
        res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
        if(res != VK_SUCCESS)
        {
            return res;
        }
        // Recording starts with the device configuration followed by the
        // vmaCreateAllocator call itself.
        m_pRecorder->WriteConfiguration(
            m_PhysicalDeviceProperties,
            m_MemProps,
            m_UseKhrDedicatedAllocation);
        m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
#else
        VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
        return VK_ERROR_FEATURE_NOT_PRESENT;
#endif
    }

    return res;
}
    11932 
// Destroys the allocator. The recorder (if any) logs vmaDestroyAllocator
// and is deleted first; then per-memory-type structures are torn down in
// reverse index order. All custom pools must already be destroyed.
VmaAllocator_T::~VmaAllocator_T()
{
#if VMA_RECORDING_ENABLED
    if(m_pRecorder != VMA_NULL)
    {
        m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
        vma_delete(this, m_pRecorder);
    }
#endif

    // Destroying the allocator with live custom pools is a usage error.
    VMA_ASSERT(m_Pools.empty());

    for(size_t i = GetMemoryTypeCount(); i--; )
    {
        vma_delete(this, m_pDedicatedAllocations[i]);
        vma_delete(this, m_pBlockVectors[i]);
    }
}
    11951 
// Populates m_VulkanFunctions. When VMA_STATIC_VULKAN_FUNCTIONS == 1 the
// statically linked entry points are used as defaults (with the KHR
// dedicated-allocation functions fetched via vkGetDeviceProcAddr);
// any non-null pointer in pVulkanFunctions then overrides the default.
// Finally asserts that every required function pointer is set.
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
    m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
    m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        // Extension functions have no static entry points; resolve at runtime.
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
            (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
            (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    }
#endif // #if VMA_DEDICATED_ALLOCATION
#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1

// Copies one user-supplied function pointer over the default, if provided.
#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    if(pVulkanFunctions != VMA_NULL)
    {
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
        VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
        VMA_COPY_IF_NOT_NULL(vkFreeMemory);
        VMA_COPY_IF_NOT_NULL(vkMapMemory);
        VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
        VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
        VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
        VMA_COPY_IF_NOT_NULL(vkCreateImage);
        VMA_COPY_IF_NOT_NULL(vkDestroyImage);
#if VMA_DEDICATED_ALLOCATION
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
#endif
    }

#undef VMA_COPY_IF_NOT_NULL

    // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
#endif
}
    12037 
    12038 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    12039 {
    12040  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    12041  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    12042  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
    12043  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
    12044 }
    12045 
    12046 VkResult VmaAllocator_T::AllocateMemoryOfType(
    12047  VkDeviceSize size,
    12048  VkDeviceSize alignment,
    12049  bool dedicatedAllocation,
    12050  VkBuffer dedicatedBuffer,
    12051  VkImage dedicatedImage,
    12052  const VmaAllocationCreateInfo& createInfo,
    12053  uint32_t memTypeIndex,
    12054  VmaSuballocationType suballocType,
    12055  VmaAllocation* pAllocation)
    12056 {
    12057  VMA_ASSERT(pAllocation != VMA_NULL);
    12058  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
    12059 
    12060  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    12061 
    12062  // If memory type is not HOST_VISIBLE, disable MAPPED.
    12063  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12064  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    12065  {
    12066  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    12067  }
    12068 
    12069  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    12070  VMA_ASSERT(blockVector);
    12071 
    12072  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    12073  bool preferDedicatedMemory =
    12074  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    12075  dedicatedAllocation ||
    12076  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
    12077  size > preferredBlockSize / 2;
    12078 
    12079  if(preferDedicatedMemory &&
    12080  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    12081  finalCreateInfo.pool == VK_NULL_HANDLE)
    12082  {
    12084  }
    12085 
    12086  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    12087  {
    12088  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12089  {
    12090  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12091  }
    12092  else
    12093  {
    12094  return AllocateDedicatedMemory(
    12095  size,
    12096  suballocType,
    12097  memTypeIndex,
    12098  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12099  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12100  finalCreateInfo.pUserData,
    12101  dedicatedBuffer,
    12102  dedicatedImage,
    12103  pAllocation);
    12104  }
    12105  }
    12106  else
    12107  {
    12108  VkResult res = blockVector->Allocate(
    12109  VK_NULL_HANDLE, // hCurrentPool
    12110  m_CurrentFrameIndex.load(),
    12111  size,
    12112  alignment,
    12113  finalCreateInfo,
    12114  suballocType,
    12115  pAllocation);
    12116  if(res == VK_SUCCESS)
    12117  {
    12118  return res;
    12119  }
    12120 
    12121  // 5. Try dedicated memory.
    12122  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12123  {
    12124  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12125  }
    12126  else
    12127  {
    12128  res = AllocateDedicatedMemory(
    12129  size,
    12130  suballocType,
    12131  memTypeIndex,
    12132  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12133  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12134  finalCreateInfo.pUserData,
    12135  dedicatedBuffer,
    12136  dedicatedImage,
    12137  pAllocation);
    12138  if(res == VK_SUCCESS)
    12139  {
    12140  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
    12141  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    12142  return VK_SUCCESS;
    12143  }
    12144  else
    12145  {
    12146  // Everything failed: Return error code.
    12147  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    12148  return res;
    12149  }
    12150  }
    12151  }
    12152 }
    12153 
// Allocates a dedicated VkDeviceMemory block (one VkDeviceMemory per
// allocation), optionally maps it, and registers the new allocation in
// m_pDedicatedAllocations[memTypeIndex]. On failure, any memory acquired
// along the way is released before returning the error code.
//   map             - create a persistent mapping of the whole block.
//   isUserDataString - pUserData is a string that the allocation will copy.
//   dedicatedBuffer/dedicatedImage - optional, mutually exclusive; used to
//       chain VkMemoryDedicatedAllocateInfoKHR when the KHR extension is on.
//   pAllocation     - out: the created allocation.
VkResult VmaAllocator_T::AllocateDedicatedMemory(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    bool map,
    bool isUserDataString,
    void* pUserData,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT(pAllocation);

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;

#if VMA_DEDICATED_ALLOCATION
    // With VK_KHR_dedicated_allocation, tell the driver which buffer or image
    // this memory is dedicated to by chaining the KHR struct into pNext.
    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    if(m_UseKhrDedicatedAllocation)
    {
        if(dedicatedBuffer != VK_NULL_HANDLE)
        {
            // At most one of buffer/image may be specified.
            VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
            dedicatedAllocInfo.buffer = dedicatedBuffer;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
        else if(dedicatedImage != VK_NULL_HANDLE)
        {
            dedicatedAllocInfo.image = dedicatedImage;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
    }
#endif // #if VMA_DEDICATED_ALLOCATION

    // Allocate VkDeviceMemory.
    VkDeviceMemory hMemory = VK_NULL_HANDLE;
    VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    if(res < 0)
    {
        VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
        return res;
    }

    // Optionally create the persistent mapping. If mapping fails, free the
    // memory just allocated so nothing leaks.
    void* pMappedData = VMA_NULL;
    if(map)
    {
        res = (*m_VulkanFunctions.vkMapMemory)(
            m_hDevice,
            hMemory,
            0,
            VK_WHOLE_SIZE,
            0,
            &pMappedData);
        if(res < 0)
        {
            VMA_DEBUG_LOG(" vkMapMemory FAILED");
            FreeVulkanMemory(memTypeIndex, size, hMemory);
            return res;
        }
    }

    *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
    (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    (*pAllocation)->SetUserData(this, pUserData);
    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    {
        // Fill new memory with a recognizable pattern to help catch use of
        // uninitialized allocations.
        FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    }

    // Register it in m_pDedicatedAllocations.
    {
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    }

    VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);

    return VK_SUCCESS;
}
    12236 
    12237 void VmaAllocator_T::GetBufferMemoryRequirements(
    12238  VkBuffer hBuffer,
    12239  VkMemoryRequirements& memReq,
    12240  bool& requiresDedicatedAllocation,
    12241  bool& prefersDedicatedAllocation) const
    12242 {
    12243 #if VMA_DEDICATED_ALLOCATION
    12244  if(m_UseKhrDedicatedAllocation)
    12245  {
    12246  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
    12247  memReqInfo.buffer = hBuffer;
    12248 
    12249  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    12250 
    12251  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    12252  memReq2.pNext = &memDedicatedReq;
    12253 
    12254  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    12255 
    12256  memReq = memReq2.memoryRequirements;
    12257  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    12258  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    12259  }
    12260  else
    12261 #endif // #if VMA_DEDICATED_ALLOCATION
    12262  {
    12263  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
    12264  requiresDedicatedAllocation = false;
    12265  prefersDedicatedAllocation = false;
    12266  }
    12267 }
    12268 
    12269 void VmaAllocator_T::GetImageMemoryRequirements(
    12270  VkImage hImage,
    12271  VkMemoryRequirements& memReq,
    12272  bool& requiresDedicatedAllocation,
    12273  bool& prefersDedicatedAllocation) const
    12274 {
    12275 #if VMA_DEDICATED_ALLOCATION
    12276  if(m_UseKhrDedicatedAllocation)
    12277  {
    12278  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
    12279  memReqInfo.image = hImage;
    12280 
    12281  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    12282 
    12283  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    12284  memReq2.pNext = &memDedicatedReq;
    12285 
    12286  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    12287 
    12288  memReq = memReq2.memoryRequirements;
    12289  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    12290  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    12291  }
    12292  else
    12293 #endif // #if VMA_DEDICATED_ALLOCATION
    12294  {
    12295  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
    12296  requiresDedicatedAllocation = false;
    12297  prefersDedicatedAllocation = false;
    12298  }
    12299 }
    12300 
    12301 VkResult VmaAllocator_T::AllocateMemory(
    12302  const VkMemoryRequirements& vkMemReq,
    12303  bool requiresDedicatedAllocation,
    12304  bool prefersDedicatedAllocation,
    12305  VkBuffer dedicatedBuffer,
    12306  VkImage dedicatedImage,
    12307  const VmaAllocationCreateInfo& createInfo,
    12308  VmaSuballocationType suballocType,
    12309  VmaAllocation* pAllocation)
    12310 {
    12311  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
    12312 
    12313  if(vkMemReq.size == 0)
    12314  {
    12315  return VK_ERROR_VALIDATION_FAILED_EXT;
    12316  }
    12317  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
    12318  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12319  {
    12320  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    12321  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12322  }
    12323  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12325  {
    12326  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
    12327  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12328  }
    12329  if(requiresDedicatedAllocation)
    12330  {
    12331  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12332  {
    12333  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
    12334  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12335  }
    12336  if(createInfo.pool != VK_NULL_HANDLE)
    12337  {
    12338  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
    12339  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12340  }
    12341  }
    12342  if((createInfo.pool != VK_NULL_HANDLE) &&
    12343  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    12344  {
    12345  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
    12346  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12347  }
    12348 
    12349  if(createInfo.pool != VK_NULL_HANDLE)
    12350  {
    12351  const VkDeviceSize alignmentForPool = VMA_MAX(
    12352  vkMemReq.alignment,
    12353  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
    12354  return createInfo.pool->m_BlockVector.Allocate(
    12355  createInfo.pool,
    12356  m_CurrentFrameIndex.load(),
    12357  vkMemReq.size,
    12358  alignmentForPool,
    12359  createInfo,
    12360  suballocType,
    12361  pAllocation);
    12362  }
    12363  else
    12364  {
    12365  // Bit mask of memory Vulkan types acceptable for this allocation.
    12366  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    12367  uint32_t memTypeIndex = UINT32_MAX;
    12368  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12369  if(res == VK_SUCCESS)
    12370  {
    12371  VkDeviceSize alignmentForMemType = VMA_MAX(
    12372  vkMemReq.alignment,
    12373  GetMemoryTypeMinAlignment(memTypeIndex));
    12374 
    12375  res = AllocateMemoryOfType(
    12376  vkMemReq.size,
    12377  alignmentForMemType,
    12378  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12379  dedicatedBuffer,
    12380  dedicatedImage,
    12381  createInfo,
    12382  memTypeIndex,
    12383  suballocType,
    12384  pAllocation);
    12385  // Succeeded on first try.
    12386  if(res == VK_SUCCESS)
    12387  {
    12388  return res;
    12389  }
    12390  // Allocation from this memory type failed. Try other compatible memory types.
    12391  else
    12392  {
    12393  for(;;)
    12394  {
    12395  // Remove old memTypeIndex from list of possibilities.
    12396  memoryTypeBits &= ~(1u << memTypeIndex);
    12397  // Find alternative memTypeIndex.
    12398  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12399  if(res == VK_SUCCESS)
    12400  {
    12401  alignmentForMemType = VMA_MAX(
    12402  vkMemReq.alignment,
    12403  GetMemoryTypeMinAlignment(memTypeIndex));
    12404 
    12405  res = AllocateMemoryOfType(
    12406  vkMemReq.size,
    12407  alignmentForMemType,
    12408  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12409  dedicatedBuffer,
    12410  dedicatedImage,
    12411  createInfo,
    12412  memTypeIndex,
    12413  suballocType,
    12414  pAllocation);
    12415  // Allocation from this alternative memory type succeeded.
    12416  if(res == VK_SUCCESS)
    12417  {
    12418  return res;
    12419  }
    12420  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    12421  }
    12422  // No other matching memory type index could be found.
    12423  else
    12424  {
    12425  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    12426  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12427  }
    12428  }
    12429  }
    12430  }
    12431  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    12432  else
    12433  return res;
    12434  }
    12435 }
    12436 
    12437 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
    12438 {
    12439  VMA_ASSERT(allocation);
    12440 
    12441  if(TouchAllocation(allocation))
    12442  {
    12443  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    12444  {
    12445  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
    12446  }
    12447 
    12448  switch(allocation->GetType())
    12449  {
    12450  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    12451  {
    12452  VmaBlockVector* pBlockVector = VMA_NULL;
    12453  VmaPool hPool = allocation->GetPool();
    12454  if(hPool != VK_NULL_HANDLE)
    12455  {
    12456  pBlockVector = &hPool->m_BlockVector;
    12457  }
    12458  else
    12459  {
    12460  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    12461  pBlockVector = m_pBlockVectors[memTypeIndex];
    12462  }
    12463  pBlockVector->Free(allocation);
    12464  }
    12465  break;
    12466  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12467  FreeDedicatedMemory(allocation);
    12468  break;
    12469  default:
    12470  VMA_ASSERT(0);
    12471  }
    12472  }
    12473 
    12474  allocation->SetUserData(this, VMA_NULL);
    12475  vma_delete(this, allocation);
    12476 }
    12477 
    12478 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
    12479 {
    12480  // Initialize.
    12481  InitStatInfo(pStats->total);
    12482  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
    12483  InitStatInfo(pStats->memoryType[i]);
    12484  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    12485  InitStatInfo(pStats->memoryHeap[i]);
    12486 
    12487  // Process default pools.
    12488  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12489  {
    12490  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    12491  VMA_ASSERT(pBlockVector);
    12492  pBlockVector->AddStats(pStats);
    12493  }
    12494 
    12495  // Process custom pools.
    12496  {
    12497  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12498  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    12499  {
    12500  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
    12501  }
    12502  }
    12503 
    12504  // Process dedicated allocations.
    12505  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12506  {
    12507  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    12508  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    12509  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    12510  VMA_ASSERT(pDedicatedAllocVector);
    12511  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
    12512  {
    12513  VmaStatInfo allocationStatInfo;
    12514  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
    12515  VmaAddStatInfo(pStats->total, allocationStatInfo);
    12516  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    12517  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    12518  }
    12519  }
    12520 
    12521  // Postprocess.
    12522  VmaPostprocessCalcStatInfo(pStats->total);
    12523  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    12524  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    12525  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
    12526  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
    12527 }
    12528 
// PCI vendor ID of AMD (4098 == 0x1002), compared against
// VkPhysicalDeviceProperties::vendorID.
static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    12530 
// Compacts the given allocations by moving them within their block vectors.
// Only non-dedicated, non-lost allocations in HOST_VISIBLE+HOST_COHERENT
// memory are eligible; pools using a non-default algorithm are skipped.
// Optional outputs: pAllocationsChanged (per-allocation "was moved" flags)
// and pDefragmentationStats. Stops at the first non-success result.
VkResult VmaAllocator_T::Defragment(
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo* pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats)
{
    // Zero the optional output parameters up front.
    if(pAllocationsChanged != VMA_NULL)
    {
        memset(pAllocationsChanged, 0, allocationCount * sizeof(VkBool32));
    }
    if(pDefragmentationStats != VMA_NULL)
    {
        memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
    }

    const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();

    // Hold the pools mutex for the whole operation so custom pools cannot be
    // created or destroyed while defragmentation is in progress.
    VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);

    const size_t poolCount = m_Pools.size();

    // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
    for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    {
        VmaAllocation hAlloc = pAllocations[allocIndex];
        VMA_ASSERT(hAlloc);
        const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
        // DedicatedAlloc cannot be defragmented.
        const VkMemoryPropertyFlags requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
        if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
            // Only HOST_VISIBLE and HOST_COHERENT memory types can be defragmented.
            ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags) &&
            // Lost allocation cannot be defragmented.
            (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
        {
            VmaBlockVector* pAllocBlockVector = VMA_NULL;

            const VmaPool hAllocPool = hAlloc->GetPool();
            // This allocation belongs to custom pool.
            if(hAllocPool != VK_NULL_HANDLE)
            {
                // Pools with linear or buddy algorithm are not defragmented.
                if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
                {
                    pAllocBlockVector = &hAllocPool->m_BlockVector;
                }
            }
            // This allocation belongs to general pool.
            else
            {
                pAllocBlockVector = m_pBlockVectors[memTypeIndex];
            }

            if(pAllocBlockVector != VMA_NULL)
            {
                // Lazily create the defragmentator for this block vector and
                // register the allocation with it.
                VmaDefragmentator* const pDefragmentator =
                    pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
                VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
                    &pAllocationsChanged[allocIndex] : VMA_NULL;
                pDefragmentator->AddAllocation(hAlloc, pChanged);
            }
        }
    }

    VkResult result = VK_SUCCESS;

    // ======== Main processing.

    // NOTE(review): SIZE_MAX is only 32-bit on 32-bit targets while
    // VkDeviceSize is 64-bit - UINT64_MAX may be intended here. Harmless as
    // an upper limit, but worth confirming.
    VkDeviceSize maxBytesToMove = SIZE_MAX;
    uint32_t maxAllocationsToMove = UINT32_MAX;
    if(pDefragmentationInfo != VMA_NULL)
    {
        maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
        maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    }

    // Process standard memory.
    for(uint32_t memTypeIndex = 0;
        (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
        ++memTypeIndex)
    {
        // Only HOST_VISIBLE memory types can be defragmented.
        if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
        {
            result = m_pBlockVectors[memTypeIndex]->Defragment(
                pDefragmentationStats,
                maxBytesToMove,
                maxAllocationsToMove);
        }
    }

    // Process custom pools.
    for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
    {
        result = m_Pools[poolIndex]->m_BlockVector.Defragment(
            pDefragmentationStats,
            maxBytesToMove,
            maxAllocationsToMove);
    }

    // ======== Destroy defragmentators.
    // Done unconditionally (even after a failure), in reverse order.

    // Process custom pools.
    for(size_t poolIndex = poolCount; poolIndex--; )
    {
        m_Pools[poolIndex]->m_BlockVector.DestroyDefragmentator();
    }

    // Process standard memory.
    for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
    {
        if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
        {
            m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
        }
    }

    return result;
}
    12651 
// Fills *pAllocationInfo with the current state of hAllocation. For
// lost-capable allocations this also acts as a "touch": the last-use frame
// index is advanced to the current frame via compare-exchange, and lost
// allocations are reported with no backing memory.
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    if(hAllocation->CanBecomeLost())
    {
        /*
        Warning: This is a carefully designed algorithm.
        Do not modify unless you really know what you're doing :)
        */
        const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation is lost: report no backing memory, but keep size
                // and user data so the caller can still identify it.
                pAllocationInfo->memoryType = UINT32_MAX;
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Already marked as used in the current frame: report live
                // info. pMappedData is reported as null on this path.
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else // Last use time earlier than current time.
            {
                // Try to advance last-use to the current frame; the loop
                // retries until the exchange succeeds or the allocation is
                // observed as lost.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // When statistics are enabled, keep the last-use frame index current
        // even for allocations that cannot become lost.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}
    12723 
// Marks hAllocation as used in the current frame and reports whether it is
// still alive. Returns false only for lost-capable allocations that have
// already become lost.
bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
{
    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    if(hAllocation->CanBecomeLost())
    {
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation was lost before we could touch it.
                return false;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Already marked as used in the current frame.
                return true;
            }
            else // Last use time earlier than current time.
            {
                // Try to advance last-use to the current frame; the loop
                // retries until the exchange succeeds or the allocation is
                // observed as lost.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // When statistics are enabled, keep the last-use frame index current
        // even for allocations that cannot become lost.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        // Allocations that cannot become lost are always alive.
        return true;
    }
}
    12775 
    12776 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    12777 {
    12778  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
    12779 
    12780  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    12781 
    12782  if(newCreateInfo.maxBlockCount == 0)
    12783  {
    12784  newCreateInfo.maxBlockCount = SIZE_MAX;
    12785  }
    12786  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
    12787  {
    12788  return VK_ERROR_INITIALIZATION_FAILED;
    12789  }
    12790 
    12791  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    12792 
    12793  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
    12794 
    12795  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    12796  if(res != VK_SUCCESS)
    12797  {
    12798  vma_delete(this, *pPool);
    12799  *pPool = VMA_NULL;
    12800  return res;
    12801  }
    12802 
    12803  // Add to m_Pools.
    12804  {
    12805  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12806  (*pPool)->SetId(m_NextPoolId++);
    12807  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    12808  }
    12809 
    12810  return VK_SUCCESS;
    12811 }
    12812 
    12813 void VmaAllocator_T::DestroyPool(VmaPool pool)
    12814 {
    12815  // Remove from m_Pools.
    12816  {
    12817  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12818  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    12819  VMA_ASSERT(success && "Pool not found in Allocator.");
    12820  }
    12821 
    12822  vma_delete(this, pool);
    12823 }
    12824 
    12825 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
    12826 {
    12827  pool->m_BlockVector.GetPoolStats(pPoolStats);
    12828 }
    12829 
// Atomically publishes the application's current frame index. Other code
// reads it via m_CurrentFrameIndex.load() (e.g. MakePoolAllocationsLost below).
void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
{
    m_CurrentFrameIndex.store(frameIndex);
}
    12834 
// Marks eligible allocations in the given pool as lost, judged against the
// current frame index. The number of allocations affected is written to
// *pLostAllocationCount (handled by the pool's block vector).
void VmaAllocator_T::MakePoolAllocationsLost(
    VmaPool hPool,
    size_t* pLostAllocationCount)
{
    hPool->m_BlockVector.MakePoolAllocationsLost(
        m_CurrentFrameIndex.load(),
        pLostAllocationCount);
}
    12843 
// Runs corruption detection over a single custom pool.
// Returns whatever the pool's block vector reports.
VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
{
    return hPool->m_BlockVector.CheckCorruption();
}
    12848 
    12849 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
    12850 {
    12851  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
    12852 
    12853  // Process default pools.
    12854  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12855  {
    12856  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
    12857  {
    12858  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    12859  VMA_ASSERT(pBlockVector);
    12860  VkResult localRes = pBlockVector->CheckCorruption();
    12861  switch(localRes)
    12862  {
    12863  case VK_ERROR_FEATURE_NOT_PRESENT:
    12864  break;
    12865  case VK_SUCCESS:
    12866  finalRes = VK_SUCCESS;
    12867  break;
    12868  default:
    12869  return localRes;
    12870  }
    12871  }
    12872  }
    12873 
    12874  // Process custom pools.
    12875  {
    12876  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12877  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    12878  {
    12879  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
    12880  {
    12881  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
    12882  switch(localRes)
    12883  {
    12884  case VK_ERROR_FEATURE_NOT_PRESENT:
    12885  break;
    12886  case VK_SUCCESS:
    12887  finalRes = VK_SUCCESS;
    12888  break;
    12889  default:
    12890  return localRes;
    12891  }
    12892  }
    12893  }
    12894  }
    12895 
    12896  return finalRes;
    12897 }
    12898 
// Creates a dummy allocation object that is permanently in the "lost" state
// (initialized with VMA_FRAME_INDEX_LOST and InitLost()).
void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
{
    *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
    (*pAllocation)->InitLost();
}
    12904 
    12905 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
    12906 {
    12907  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
    12908 
    12909  VkResult res;
    12910  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    12911  {
    12912  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    12913  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
    12914  {
    12915  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    12916  if(res == VK_SUCCESS)
    12917  {
    12918  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
    12919  }
    12920  }
    12921  else
    12922  {
    12923  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12924  }
    12925  }
    12926  else
    12927  {
    12928  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    12929  }
    12930 
    12931  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    12932  {
    12933  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    12934  }
    12935 
    12936  return res;
    12937 }
    12938 
    12939 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
    12940 {
    12941  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    12942  {
    12943  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    12944  }
    12945 
    12946  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
    12947 
    12948  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    12949  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    12950  {
    12951  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    12952  m_HeapSizeLimit[heapIndex] += size;
    12953  }
    12954 }
    12955 
    12956 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
    12957 {
    12958  if(hAllocation->CanBecomeLost())
    12959  {
    12960  return VK_ERROR_MEMORY_MAP_FAILED;
    12961  }
    12962 
    12963  switch(hAllocation->GetType())
    12964  {
    12965  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    12966  {
    12967  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    12968  char *pBytes = VMA_NULL;
    12969  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
    12970  if(res == VK_SUCCESS)
    12971  {
    12972  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
    12973  hAllocation->BlockAllocMap();
    12974  }
    12975  return res;
    12976  }
    12977  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12978  return hAllocation->DedicatedAllocMap(this, ppData);
    12979  default:
    12980  VMA_ASSERT(0);
    12981  return VK_ERROR_MEMORY_MAP_FAILED;
    12982  }
    12983 }
    12984 
    12985 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
    12986 {
    12987  switch(hAllocation->GetType())
    12988  {
    12989  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    12990  {
    12991  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    12992  hAllocation->BlockAllocUnmap();
    12993  pBlock->Unmap(this, 1);
    12994  }
    12995  break;
    12996  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12997  hAllocation->DedicatedAllocUnmap(this);
    12998  break;
    12999  default:
    13000  VMA_ASSERT(0);
    13001  }
    13002 }
    13003 
    13004 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
    13005 {
    13006  VkResult res = VK_SUCCESS;
    13007  switch(hAllocation->GetType())
    13008  {
    13009  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13010  res = GetVulkanFunctions().vkBindBufferMemory(
    13011  m_hDevice,
    13012  hBuffer,
    13013  hAllocation->GetMemory(),
    13014  0); //memoryOffset
    13015  break;
    13016  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13017  {
    13018  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    13019  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
    13020  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
    13021  break;
    13022  }
    13023  default:
    13024  VMA_ASSERT(0);
    13025  }
    13026  return res;
    13027 }
    13028 
    13029 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
    13030 {
    13031  VkResult res = VK_SUCCESS;
    13032  switch(hAllocation->GetType())
    13033  {
    13034  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13035  res = GetVulkanFunctions().vkBindImageMemory(
    13036  m_hDevice,
    13037  hImage,
    13038  hAllocation->GetMemory(),
    13039  0); //memoryOffset
    13040  break;
    13041  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13042  {
    13043  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    13044  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
    13045  res = pBlock->BindImageMemory(this, hAllocation, hImage);
    13046  break;
    13047  }
    13048  default:
    13049  VMA_ASSERT(0);
    13050  }
    13051  return res;
    13052 }
    13053 
// Flushes or invalidates (per `op`) a range of the allocation's memory.
// Only acts when the memory type is non-coherent; coherent memory needs no
// explicit cache maintenance, so the call becomes a no-op.
// `offset`/`size` are relative to the allocation; size == VK_WHOLE_SIZE means
// "to the end of the allocation".
void VmaAllocator_T::FlushOrInvalidateAllocation(
    VmaAllocation hAllocation,
    VkDeviceSize offset, VkDeviceSize size,
    VMA_CACHE_OPERATION op)
{
    const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    {
        const VkDeviceSize allocationSize = hAllocation->GetSize();
        VMA_ASSERT(offset <= allocationSize);

        // Mapped-memory-range offset and size are aligned to nonCoherentAtomSize
        // below, as Vulkan requires for vkFlush/InvalidateMappedMemoryRanges.
        const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;

        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
        memRange.memory = hAllocation->GetMemory();

        switch(hAllocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            // Dedicated allocation owns the whole VkDeviceMemory: align the range
            // down/up to the atom size and clamp it to the allocation size.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                memRange.size = allocationSize - memRange.offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
                    allocationSize - memRange.offset);
            }
            break;

        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            // 1. Still within this allocation.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                size = allocationSize - offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
            }
            memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);

            // 2. Adjust to whole block.
            // Translate the allocation-relative range into a block-relative one
            // and clamp it to the block's end.
            const VkDeviceSize allocationOffset = hAllocation->GetOffset();
            VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
            const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
            memRange.offset += allocationOffset;
            memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);

            break;
        }

        default:
            VMA_ASSERT(0);
        }

        switch(op)
        {
        case VMA_CACHE_FLUSH:
            (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        case VMA_CACHE_INVALIDATE:
            (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        default:
            VMA_ASSERT(0);
        }
    }
    // else: Just ignore this call.
}
    13129 
    13130 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
    13131 {
    13132  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
    13133 
    13134  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    13135  {
    13136  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    13137  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    13138  VMA_ASSERT(pDedicatedAllocations);
    13139  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
    13140  VMA_ASSERT(success);
    13141  }
    13142 
    13143  VkDeviceMemory hMemory = allocation->GetMemory();
    13144 
    13145  /*
    13146  There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
    13147  before vkFreeMemory.
    13148 
    13149  if(allocation->GetMappedData() != VMA_NULL)
    13150  {
    13151  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    13152  }
    13153  */
    13154 
    13155  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
    13156 
    13157  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
    13158 }
    13159 
    13160 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
    13161 {
    13162  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
    13163  !hAllocation->CanBecomeLost() &&
    13164  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    13165  {
    13166  void* pData = VMA_NULL;
    13167  VkResult res = Map(hAllocation, &pData);
    13168  if(res == VK_SUCCESS)
    13169  {
    13170  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
    13171  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
    13172  Unmap(hAllocation);
    13173  }
    13174  else
    13175  {
    13176  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
    13177  }
    13178  }
    13179 }
    13180 
    13181 #if VMA_STATS_STRING_ENABLED
    13182 
// Writes a detailed JSON map of the allocator's state into `json`:
// dedicated allocations per memory type, default per-type block vectors,
// and custom pools. Each section is opened lazily, only when non-empty.
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    bool dedicatedAllocationsStarted = false;
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        if(pDedicatedAllocVector->empty() == false)
        {
            // Open the "DedicatedAllocations" object only once, on first hit.
            if(dedicatedAllocationsStarted == false)
            {
                dedicatedAllocationsStarted = true;
                json.WriteString("DedicatedAllocations");
                json.BeginObject();
            }

            json.BeginString("Type ");
            json.ContinueString(memTypeIndex);
            json.EndString();

            json.BeginArray();

            for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
            {
                json.BeginObject(true);
                const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
                hAlloc->PrintParameters(json);
                json.EndObject();
            }

            json.EndArray();
        }
    }
    if(dedicatedAllocationsStarted)
    {
        json.EndObject();
    }

    {
        // Default pools: one block vector per memory type, printed only when
        // non-empty, under a lazily opened "DefaultPools" object.
        bool allocationsStarted = false;
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
            {
                if(allocationsStarted == false)
                {
                    allocationsStarted = true;
                    json.WriteString("DefaultPools");
                    json.BeginObject();
                }

                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                json.EndString();

                m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
            }
        }
        if(allocationsStarted)
        {
            json.EndObject();
        }
    }

    // Custom pools
    {
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
        const size_t poolCount = m_Pools.size();
        if(poolCount > 0)
        {
            json.WriteString("Pools");
            json.BeginObject();
            for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
            {
                // Each pool entry is keyed by its numeric id.
                json.BeginString();
                json.ContinueString(m_Pools[poolIndex]->GetId());
                json.EndString();

                m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
            }
            json.EndObject();
        }
    }
}
    13268 
    13269 #endif // #if VMA_STATS_STRING_ENABLED
    13270 
    13272 // Public interface
    13273 
    13274 VkResult vmaCreateAllocator(
    13275  const VmaAllocatorCreateInfo* pCreateInfo,
    13276  VmaAllocator* pAllocator)
    13277 {
    13278  VMA_ASSERT(pCreateInfo && pAllocator);
    13279  VMA_DEBUG_LOG("vmaCreateAllocator");
    13280  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    13281  return (*pAllocator)->Init(pCreateInfo);
    13282 }
    13283 
    13284 void vmaDestroyAllocator(
    13285  VmaAllocator allocator)
    13286 {
    13287  if(allocator != VK_NULL_HANDLE)
    13288  {
    13289  VMA_DEBUG_LOG("vmaDestroyAllocator");
    13290  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
    13291  vma_delete(&allocationCallbacks, allocator);
    13292  }
    13293 }
    13294 
    13296  VmaAllocator allocator,
    13297  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    13298 {
    13299  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    13300  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    13301 }
    13302 
    13304  VmaAllocator allocator,
    13305  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    13306 {
    13307  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    13308  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    13309 }
    13310 
    13312  VmaAllocator allocator,
    13313  uint32_t memoryTypeIndex,
    13314  VkMemoryPropertyFlags* pFlags)
    13315 {
    13316  VMA_ASSERT(allocator && pFlags);
    13317  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    13318  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    13319 }
    13320 
    13322  VmaAllocator allocator,
    13323  uint32_t frameIndex)
    13324 {
    13325  VMA_ASSERT(allocator);
    13326  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    13327 
    13328  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13329 
    13330  allocator->SetCurrentFrameIndex(frameIndex);
    13331 }
    13332 
// Fills *pStats with statistics aggregated over all memory owned by the allocator.
void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats)
{
    VMA_ASSERT(allocator && pStats);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK
    allocator->CalculateStats(pStats);
}
    13341 
    13342 #if VMA_STATS_STRING_ENABLED
    13343 
// Builds a JSON string with total statistics, per-heap and per-type
// statistics, and - when detailedMap is VK_TRUE - the full allocation map.
// The returned string is allocated with the allocator's callbacks and must be
// released with vmaFreeStatsString().
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap)
{
    VMA_ASSERT(allocator && ppStatsString);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VmaStringBuilder sb(allocator);
    {
        // Scope ensures the JSON writer finishes before the string is copied out.
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();

        VmaStats stats;
        allocator->CalculateStats(&stats);

        json.WriteString("Total");
        VmaPrintStatInfo(json, stats.total);

        for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
        {
            json.BeginString("Heap ");
            json.ContinueString(heapIndex);
            json.EndString();
            json.BeginObject();

            json.WriteString("Size");
            json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);

            json.WriteString("Flags");
            json.BeginArray(true);
            if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
            {
                json.WriteString("DEVICE_LOCAL");
            }
            json.EndArray();

            // Heap stats appear only when the heap actually holds blocks.
            if(stats.memoryHeap[heapIndex].blockCount > 0)
            {
                json.WriteString("Stats");
                VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
            }

            // Nest every memory type that belongs to this heap.
            for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
            {
                if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
                {
                    json.BeginString("Type ");
                    json.ContinueString(typeIndex);
                    json.EndString();

                    json.BeginObject();

                    json.WriteString("Flags");
                    json.BeginArray(true);
                    VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
                    if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
                    {
                        json.WriteString("DEVICE_LOCAL");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
                    {
                        json.WriteString("HOST_VISIBLE");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
                    {
                        json.WriteString("HOST_COHERENT");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
                    {
                        json.WriteString("HOST_CACHED");
                    }
                    if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
                    {
                        json.WriteString("LAZILY_ALLOCATED");
                    }
                    json.EndArray();

                    if(stats.memoryType[typeIndex].blockCount > 0)
                    {
                        json.WriteString("Stats");
                        VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
                    }

                    json.EndObject();
                }
            }

            json.EndObject();
        }
        if(detailedMap == VK_TRUE)
        {
            allocator->PrintDetailedMap(json);
        }

        json.EndObject();
    }

    // Copy the builder's contents into a caller-owned, null-terminated buffer.
    const size_t len = sb.GetLength();
    char* const pChars = vma_new_array(allocator, char, len + 1);
    if(len > 0)
    {
        memcpy(pChars, sb.GetData(), len);
    }
    pChars[len] = '\0';
    *ppStatsString = pChars;
}
    13451 
    13452 void vmaFreeStatsString(
    13453  VmaAllocator allocator,
    13454  char* pStatsString)
    13455 {
    13456  if(pStatsString != VMA_NULL)
    13457  {
    13458  VMA_ASSERT(allocator);
    13459  size_t len = strlen(pStatsString);
    13460  vma_delete_array(allocator, pStatsString, len + 1);
    13461  }
    13462 }
    13463 
    13464 #endif // #if VMA_STATS_STRING_ENABLED
    13465 
    13466 /*
    13467 This function is not protected by any mutex because it just reads immutable data.
    13468 */
    13469 VkResult vmaFindMemoryTypeIndex(
    13470  VmaAllocator allocator,
    13471  uint32_t memoryTypeBits,
    13472  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13473  uint32_t* pMemoryTypeIndex)
    13474 {
    13475  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13476  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13477  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13478 
    13479  if(pAllocationCreateInfo->memoryTypeBits != 0)
    13480  {
    13481  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
    13482  }
    13483 
    13484  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    13485  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    13486 
    13487  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    13488  if(mapped)
    13489  {
    13490  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13491  }
    13492 
    13493  // Convert usage to requiredFlags and preferredFlags.
    13494  switch(pAllocationCreateInfo->usage)
    13495  {
    13497  break;
    13499  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    13500  {
    13501  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    13502  }
    13503  break;
    13505  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    13506  break;
    13508  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13509  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    13510  {
    13511  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    13512  }
    13513  break;
    13515  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13516  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    13517  break;
    13518  default:
    13519  break;
    13520  }
    13521 
    13522  *pMemoryTypeIndex = UINT32_MAX;
    13523  uint32_t minCost = UINT32_MAX;
    13524  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    13525  memTypeIndex < allocator->GetMemoryTypeCount();
    13526  ++memTypeIndex, memTypeBit <<= 1)
    13527  {
    13528  // This memory type is acceptable according to memoryTypeBits bitmask.
    13529  if((memTypeBit & memoryTypeBits) != 0)
    13530  {
    13531  const VkMemoryPropertyFlags currFlags =
    13532  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    13533  // This memory type contains requiredFlags.
    13534  if((requiredFlags & ~currFlags) == 0)
    13535  {
    13536  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    13537  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
    13538  // Remember memory type with lowest cost.
    13539  if(currCost < minCost)
    13540  {
    13541  *pMemoryTypeIndex = memTypeIndex;
    13542  if(currCost == 0)
    13543  {
    13544  return VK_SUCCESS;
    13545  }
    13546  minCost = currCost;
    13547  }
    13548  }
    13549  }
    13550  }
    13551  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    13552 }
    13553 
    13555  VmaAllocator allocator,
    13556  const VkBufferCreateInfo* pBufferCreateInfo,
    13557  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13558  uint32_t* pMemoryTypeIndex)
    13559 {
    13560  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13561  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    13562  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13563  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13564 
    13565  const VkDevice hDev = allocator->m_hDevice;
    13566  VkBuffer hBuffer = VK_NULL_HANDLE;
    13567  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
    13568  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
    13569  if(res == VK_SUCCESS)
    13570  {
    13571  VkMemoryRequirements memReq = {};
    13572  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
    13573  hDev, hBuffer, &memReq);
    13574 
    13575  res = vmaFindMemoryTypeIndex(
    13576  allocator,
    13577  memReq.memoryTypeBits,
    13578  pAllocationCreateInfo,
    13579  pMemoryTypeIndex);
    13580 
    13581  allocator->GetVulkanFunctions().vkDestroyBuffer(
    13582  hDev, hBuffer, allocator->GetAllocationCallbacks());
    13583  }
    13584  return res;
    13585 }
    13586 
    13588  VmaAllocator allocator,
    13589  const VkImageCreateInfo* pImageCreateInfo,
    13590  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13591  uint32_t* pMemoryTypeIndex)
    13592 {
    13593  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13594  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    13595  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13596  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13597 
    13598  const VkDevice hDev = allocator->m_hDevice;
    13599  VkImage hImage = VK_NULL_HANDLE;
    13600  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
    13601  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
    13602  if(res == VK_SUCCESS)
    13603  {
    13604  VkMemoryRequirements memReq = {};
    13605  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
    13606  hDev, hImage, &memReq);
    13607 
    13608  res = vmaFindMemoryTypeIndex(
    13609  allocator,
    13610  memReq.memoryTypeBits,
    13611  pAllocationCreateInfo,
    13612  pMemoryTypeIndex);
    13613 
    13614  allocator->GetVulkanFunctions().vkDestroyImage(
    13615  hDev, hImage, allocator->GetAllocationCallbacks());
    13616  }
    13617  return res;
    13618 }
    13619 
    13620 VkResult vmaCreatePool(
    13621  VmaAllocator allocator,
    13622  const VmaPoolCreateInfo* pCreateInfo,
    13623  VmaPool* pPool)
    13624 {
    13625  VMA_ASSERT(allocator && pCreateInfo && pPool);
    13626 
    13627  VMA_DEBUG_LOG("vmaCreatePool");
    13628 
    13629  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13630 
    13631  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
    13632 
    13633 #if VMA_RECORDING_ENABLED
    13634  if(allocator->GetRecorder() != VMA_NULL)
    13635  {
    13636  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
    13637  }
    13638 #endif
    13639 
    13640  return res;
    13641 }
    13642 
    13643 void vmaDestroyPool(
    13644  VmaAllocator allocator,
    13645  VmaPool pool)
    13646 {
    13647  VMA_ASSERT(allocator);
    13648 
    13649  if(pool == VK_NULL_HANDLE)
    13650  {
    13651  return;
    13652  }
    13653 
    13654  VMA_DEBUG_LOG("vmaDestroyPool");
    13655 
    13656  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13657 
    13658 #if VMA_RECORDING_ENABLED
    13659  if(allocator->GetRecorder() != VMA_NULL)
    13660  {
    13661  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
    13662  }
    13663 #endif
    13664 
    13665  allocator->DestroyPool(pool);
    13666 }
    13667 
// Fills *pPoolStats with current statistics of the given custom pool.
void vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats)
{
    VMA_ASSERT(allocator && pool && pPoolStats);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->GetPoolStats(pool, pPoolStats);
}
    13679 
    13681  VmaAllocator allocator,
    13682  VmaPool pool,
    13683  size_t* pLostAllocationCount)
    13684 {
    13685  VMA_ASSERT(allocator && pool);
    13686 
    13687  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13688 
    13689 #if VMA_RECORDING_ENABLED
    13690  if(allocator->GetRecorder() != VMA_NULL)
    13691  {
    13692  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
    13693  }
    13694 #endif
    13695 
    13696  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    13697 }
    13698 
// Runs corruption detection over the given custom pool and returns the result.
VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
{
    VMA_ASSERT(allocator && pool);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VMA_DEBUG_LOG("vmaCheckPoolCorruption");

    return allocator->CheckPoolCorruption(pool);
}
    13709 
    13710 VkResult vmaAllocateMemory(
    13711  VmaAllocator allocator,
    13712  const VkMemoryRequirements* pVkMemoryRequirements,
    13713  const VmaAllocationCreateInfo* pCreateInfo,
    13714  VmaAllocation* pAllocation,
    13715  VmaAllocationInfo* pAllocationInfo)
    13716 {
    13717  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
    13718 
    13719  VMA_DEBUG_LOG("vmaAllocateMemory");
    13720 
    13721  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13722 
    13723  VkResult result = allocator->AllocateMemory(
    13724  *pVkMemoryRequirements,
    13725  false, // requiresDedicatedAllocation
    13726  false, // prefersDedicatedAllocation
    13727  VK_NULL_HANDLE, // dedicatedBuffer
    13728  VK_NULL_HANDLE, // dedicatedImage
    13729  *pCreateInfo,
    13730  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    13731  pAllocation);
    13732 
    13733 #if VMA_RECORDING_ENABLED
    13734  if(allocator->GetRecorder() != VMA_NULL)
    13735  {
    13736  allocator->GetRecorder()->RecordAllocateMemory(
    13737  allocator->GetCurrentFrameIndex(),
    13738  *pVkMemoryRequirements,
    13739  *pCreateInfo,
    13740  *pAllocation);
    13741  }
    13742 #endif
    13743 
    13744  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    13745  {
    13746  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    13747  }
    13748 
    13749  return result;
    13750 }
    13751 
    // vmaAllocateMemoryForBuffer: queries the buffer's memory requirements
    // (including dedicated-allocation hints), allocates matching memory, and
    // optionally fills *pAllocationInfo on success.
    // NOTE(review): the opening signature line (original line 13752,
    // "VkResult vmaAllocateMemoryForBuffer(") is missing from this generated
    // listing — see vk_mem_alloc.h for the declaration.
    13753  VmaAllocator allocator,
    13754  VkBuffer buffer,
    13755  const VmaAllocationCreateInfo* pCreateInfo,
    13756  VmaAllocation* pAllocation,
    13757  VmaAllocationInfo* pAllocationInfo)
    13758 {
    13759  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    13760 
    13761  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    13762 
    13763  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13764 
    // Ask the implementation for requirements plus dedicated-allocation hints.
    13765  VkMemoryRequirements vkMemReq = {};
    13766  bool requiresDedicatedAllocation = false;
    13767  bool prefersDedicatedAllocation = false;
    13768  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
    13769  requiresDedicatedAllocation,
    13770  prefersDedicatedAllocation);
    13771 
    // The buffer handle is forwarded as dedicatedBuffer so a dedicated
    // allocation can be bound to it if the hints request one.
    13772  VkResult result = allocator->AllocateMemory(
    13773  vkMemReq,
    13774  requiresDedicatedAllocation,
    13775  prefersDedicatedAllocation,
    13776  buffer, // dedicatedBuffer
    13777  VK_NULL_HANDLE, // dedicatedImage
    13778  *pCreateInfo,
    13779  VMA_SUBALLOCATION_TYPE_BUFFER,
    13780  pAllocation);
    13781 
    13782 #if VMA_RECORDING_ENABLED
    13783  if(allocator->GetRecorder() != VMA_NULL)
    13784  {
    13785  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
    13786  allocator->GetCurrentFrameIndex(),
    13787  vkMemReq,
    13788  requiresDedicatedAllocation,
    13789  prefersDedicatedAllocation,
    13790  *pCreateInfo,
    13791  *pAllocation);
    13792  }
    13793 #endif
    13794 
    // Only fill the optional out-struct when allocation actually succeeded.
    13795  if(pAllocationInfo && result == VK_SUCCESS)
    13796  {
    13797  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    13798  }
    13799 
    13800  return result;
    13801 }
    13802 
    13803 VkResult vmaAllocateMemoryForImage(
    13804  VmaAllocator allocator,
    13805  VkImage image,
    13806  const VmaAllocationCreateInfo* pCreateInfo,
    13807  VmaAllocation* pAllocation,
    13808  VmaAllocationInfo* pAllocationInfo)
    13809 {
    13810  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    13811 
    13812  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
    13813 
    13814  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13815 
    13816  VkMemoryRequirements vkMemReq = {};
    13817  bool requiresDedicatedAllocation = false;
    13818  bool prefersDedicatedAllocation = false;
    13819  allocator->GetImageMemoryRequirements(image, vkMemReq,
    13820  requiresDedicatedAllocation, prefersDedicatedAllocation);
    13821 
    13822  VkResult result = allocator->AllocateMemory(
    13823  vkMemReq,
    13824  requiresDedicatedAllocation,
    13825  prefersDedicatedAllocation,
    13826  VK_NULL_HANDLE, // dedicatedBuffer
    13827  image, // dedicatedImage
    13828  *pCreateInfo,
    13829  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
    13830  pAllocation);
    13831 
    13832 #if VMA_RECORDING_ENABLED
    13833  if(allocator->GetRecorder() != VMA_NULL)
    13834  {
    13835  allocator->GetRecorder()->RecordAllocateMemoryForImage(
    13836  allocator->GetCurrentFrameIndex(),
    13837  vkMemReq,
    13838  requiresDedicatedAllocation,
    13839  prefersDedicatedAllocation,
    13840  *pCreateInfo,
    13841  *pAllocation);
    13842  }
    13843 #endif
    13844 
    13845  if(pAllocationInfo && result == VK_SUCCESS)
    13846  {
    13847  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    13848  }
    13849 
    13850  return result;
    13851 }
    13852 
    13853 void vmaFreeMemory(
    13854  VmaAllocator allocator,
    13855  VmaAllocation allocation)
    13856 {
    13857  VMA_ASSERT(allocator);
    13858 
    13859  if(allocation == VK_NULL_HANDLE)
    13860  {
    13861  return;
    13862  }
    13863 
    13864  VMA_DEBUG_LOG("vmaFreeMemory");
    13865 
    13866  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13867 
    13868 #if VMA_RECORDING_ENABLED
    13869  if(allocator->GetRecorder() != VMA_NULL)
    13870  {
    13871  allocator->GetRecorder()->RecordFreeMemory(
    13872  allocator->GetCurrentFrameIndex(),
    13873  allocation);
    13874  }
    13875 #endif
    13876 
    13877  allocator->FreeMemory(allocation);
    13878 }
    13879 
    // vmaGetAllocationInfo: fills *pAllocationInfo with current information
    // about the given allocation via the allocator object.
    // NOTE(review): the opening signature line (original line 13880,
    // "void vmaGetAllocationInfo(") is missing from this generated listing.
    13881  VmaAllocator allocator,
    13882  VmaAllocation allocation,
    13883  VmaAllocationInfo* pAllocationInfo)
    13884 {
    13885  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    13886 
    13887  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13888 
    13889 #if VMA_RECORDING_ENABLED
    13890  if(allocator->GetRecorder() != VMA_NULL)
    13891  {
    // When recording is enabled, log the query with the current frame index.
    13892  allocator->GetRecorder()->RecordGetAllocationInfo(
    13893  allocator->GetCurrentFrameIndex(),
    13894  allocation);
    13895  }
    13896 #endif
    13897 
    13898  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    13899 }
    13900 
    13901 VkBool32 vmaTouchAllocation(
    13902  VmaAllocator allocator,
    13903  VmaAllocation allocation)
    13904 {
    13905  VMA_ASSERT(allocator && allocation);
    13906 
    13907  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13908 
    13909 #if VMA_RECORDING_ENABLED
    13910  if(allocator->GetRecorder() != VMA_NULL)
    13911  {
    13912  allocator->GetRecorder()->RecordTouchAllocation(
    13913  allocator->GetCurrentFrameIndex(),
    13914  allocation);
    13915  }
    13916 #endif
    13917 
    13918  return allocator->TouchAllocation(allocation);
    13919 }
    13920 
    // vmaSetAllocationUserData: stores pUserData inside the allocation object.
    // NOTE(review): the opening signature line (original line 13921,
    // "void vmaSetAllocationUserData(") is missing from this generated listing.
    13922  VmaAllocator allocator,
    13923  VmaAllocation allocation,
    13924  void* pUserData)
    13925 {
    13926  VMA_ASSERT(allocator && allocation);
    13927 
    13928  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13929 
    // The allocator is passed along so SetUserData can use its resources
    // (e.g. for string copies) — see VmaAllocation_T::SetUserData.
    13930  allocation->SetUserData(allocator, pUserData);
    13931 
    13932 #if VMA_RECORDING_ENABLED
    13933  if(allocator->GetRecorder() != VMA_NULL)
    13934  {
    13935  allocator->GetRecorder()->RecordSetAllocationUserData(
    13936  allocator->GetCurrentFrameIndex(),
    13937  allocation,
    13938  pUserData);
    13939  }
    13940 #endif
    13941 }
    13942 
    // vmaCreateLostAllocation: creates a special allocation that is already in
    // the lost state, via allocator->CreateLostAllocation().
    // NOTE(review): the opening signature line (original line 13943,
    // "void vmaCreateLostAllocation(") is missing from this generated listing.
    13944  VmaAllocator allocator,
    13945  VmaAllocation* pAllocation)
    13946 {
    13947  VMA_ASSERT(allocator && pAllocation);
    13948 
    // NOTE(review): trailing ';' here is inconsistent with the other entry
    // points, which use the macro without one; harmless (empty statement).
    13949  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
    13950 
    13951  allocator->CreateLostAllocation(pAllocation);
    13952 
    13953 #if VMA_RECORDING_ENABLED
    13954  if(allocator->GetRecorder() != VMA_NULL)
    13955  {
    13956  allocator->GetRecorder()->RecordCreateLostAllocation(
    13957  allocator->GetCurrentFrameIndex(),
    13958  *pAllocation);
    13959  }
    13960 #endif
    13961 }
    13962 
    13963 VkResult vmaMapMemory(
    13964  VmaAllocator allocator,
    13965  VmaAllocation allocation,
    13966  void** ppData)
    13967 {
    13968  VMA_ASSERT(allocator && allocation && ppData);
    13969 
    13970  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13971 
    13972  VkResult res = allocator->Map(allocation, ppData);
    13973 
    13974 #if VMA_RECORDING_ENABLED
    13975  if(allocator->GetRecorder() != VMA_NULL)
    13976  {
    13977  allocator->GetRecorder()->RecordMapMemory(
    13978  allocator->GetCurrentFrameIndex(),
    13979  allocation);
    13980  }
    13981 #endif
    13982 
    13983  return res;
    13984 }
    13985 
    13986 void vmaUnmapMemory(
    13987  VmaAllocator allocator,
    13988  VmaAllocation allocation)
    13989 {
    13990  VMA_ASSERT(allocator && allocation);
    13991 
    13992  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13993 
    13994 #if VMA_RECORDING_ENABLED
    13995  if(allocator->GetRecorder() != VMA_NULL)
    13996  {
    13997  allocator->GetRecorder()->RecordUnmapMemory(
    13998  allocator->GetCurrentFrameIndex(),
    13999  allocation);
    14000  }
    14001 #endif
    14002 
    14003  allocator->Unmap(allocation);
    14004 }
    14005 
    14006 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    14007 {
    14008  VMA_ASSERT(allocator && allocation);
    14009 
    14010  VMA_DEBUG_LOG("vmaFlushAllocation");
    14011 
    14012  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14013 
    14014  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
    14015 
    14016 #if VMA_RECORDING_ENABLED
    14017  if(allocator->GetRecorder() != VMA_NULL)
    14018  {
    14019  allocator->GetRecorder()->RecordFlushAllocation(
    14020  allocator->GetCurrentFrameIndex(),
    14021  allocation, offset, size);
    14022  }
    14023 #endif
    14024 }
    14025 
    14026 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    14027 {
    14028  VMA_ASSERT(allocator && allocation);
    14029 
    14030  VMA_DEBUG_LOG("vmaInvalidateAllocation");
    14031 
    14032  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14033 
    14034  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
    14035 
    14036 #if VMA_RECORDING_ENABLED
    14037  if(allocator->GetRecorder() != VMA_NULL)
    14038  {
    14039  allocator->GetRecorder()->RecordInvalidateAllocation(
    14040  allocator->GetCurrentFrameIndex(),
    14041  allocation, offset, size);
    14042  }
    14043 #endif
    14044 }
    14045 
    14046 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
    14047 {
    14048  VMA_ASSERT(allocator);
    14049 
    14050  VMA_DEBUG_LOG("vmaCheckCorruption");
    14051 
    14052  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14053 
    14054  return allocator->CheckCorruption(memoryTypeBits);
    14055 }
    14056 
    14057 VkResult vmaDefragment(
    14058  VmaAllocator allocator,
    14059  VmaAllocation* pAllocations,
    14060  size_t allocationCount,
    14061  VkBool32* pAllocationsChanged,
    14062  const VmaDefragmentationInfo *pDefragmentationInfo,
    14063  VmaDefragmentationStats* pDefragmentationStats)
    14064 {
    14065  VMA_ASSERT(allocator && pAllocations);
    14066 
    14067  VMA_DEBUG_LOG("vmaDefragment");
    14068 
    14069  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14070 
    14071  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
    14072 }
    14073 
    14074 VkResult vmaBindBufferMemory(
    14075  VmaAllocator allocator,
    14076  VmaAllocation allocation,
    14077  VkBuffer buffer)
    14078 {
    14079  VMA_ASSERT(allocator && allocation && buffer);
    14080 
    14081  VMA_DEBUG_LOG("vmaBindBufferMemory");
    14082 
    14083  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14084 
    14085  return allocator->BindBufferMemory(allocation, buffer);
    14086 }
    14087 
    14088 VkResult vmaBindImageMemory(
    14089  VmaAllocator allocator,
    14090  VmaAllocation allocation,
    14091  VkImage image)
    14092 {
    14093  VMA_ASSERT(allocator && allocation && image);
    14094 
    14095  VMA_DEBUG_LOG("vmaBindImageMemory");
    14096 
    14097  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14098 
    14099  return allocator->BindImageMemory(allocation, image);
    14100 }
    14101 
    // vmaCreateBuffer: creates a VkBuffer, allocates memory for it, and binds
    // them together. On any failure, everything created so far is destroyed
    // and the out-handles are reset to VK_NULL_HANDLE; the first failing
    // result code is returned. Note the strict unwind order below: memory is
    // freed before the buffer is destroyed.
    14102 VkResult vmaCreateBuffer(
    14103  VmaAllocator allocator,
    14104  const VkBufferCreateInfo* pBufferCreateInfo,
    14105  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    14106  VkBuffer* pBuffer,
    14107  VmaAllocation* pAllocation,
    14108  VmaAllocationInfo* pAllocationInfo)
    14109 {
    14110  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
    14111 
    // Reject zero-size buffers up front; Vulkan would treat this as invalid usage.
    14112  if(pBufferCreateInfo->size == 0)
    14113  {
    14114  return VK_ERROR_VALIDATION_FAILED_EXT;
    14115  }
    14116 
    14117  VMA_DEBUG_LOG("vmaCreateBuffer");
    14118 
    14119  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14120 
    // Out-params start null so callers see defined values on every path.
    14121  *pBuffer = VK_NULL_HANDLE;
    14122  *pAllocation = VK_NULL_HANDLE;
    14123 
    14124  // 1. Create VkBuffer.
    14125  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
    14126  allocator->m_hDevice,
    14127  pBufferCreateInfo,
    14128  allocator->GetAllocationCallbacks(),
    14129  pBuffer);
    14130  if(res >= 0)
    14131  {
    14132  // 2. vkGetBufferMemoryRequirements.
    14133  VkMemoryRequirements vkMemReq = {};
    14134  bool requiresDedicatedAllocation = false;
    14135  bool prefersDedicatedAllocation = false;
    14136  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
    14137  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14138 
    14139  // Make sure alignment requirements for specific buffer usages reported
    14140  // in Physical Device Properties are included in alignment reported by memory requirements.
    14141  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
    14142  {
    14143  VMA_ASSERT(vkMemReq.alignment %
    14144  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
    14145  }
    14146  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
    14147  {
    14148  VMA_ASSERT(vkMemReq.alignment %
    14149  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
    14150  }
    14151  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
    14152  {
    14153  VMA_ASSERT(vkMemReq.alignment %
    14154  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
    14155  }
    14156 
    14157  // 3. Allocate memory using allocator.
    14158  res = allocator->AllocateMemory(
    14159  vkMemReq,
    14160  requiresDedicatedAllocation,
    14161  prefersDedicatedAllocation,
    14162  *pBuffer, // dedicatedBuffer
    14163  VK_NULL_HANDLE, // dedicatedImage
    14164  *pAllocationCreateInfo,
    14165  VMA_SUBALLOCATION_TYPE_BUFFER,
    14166  pAllocation);
    14167 
    14168 #if VMA_RECORDING_ENABLED
    14169  if(allocator->GetRecorder() != VMA_NULL)
    14170  {
    14171  allocator->GetRecorder()->RecordCreateBuffer(
    14172  allocator->GetCurrentFrameIndex(),
    14173  *pBufferCreateInfo,
    14174  *pAllocationCreateInfo,
    14175  *pAllocation);
    14176  }
    14177 #endif
    14178 
    14179  if(res >= 0)
    14180  {
    14181  // 4. Bind buffer with memory.
    14182  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
    14183  if(res >= 0)
    14184  {
    14185  // All steps succeeded.
    14186  #if VMA_STATS_STRING_ENABLED
    14187  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
    14188  #endif
    14189  if(pAllocationInfo != VMA_NULL)
    14190  {
    14191  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14192  }
    14193 
    14194  return VK_SUCCESS;
    14195  }
    // Bind failed: unwind allocation, then buffer.
    14196  allocator->FreeMemory(*pAllocation);
    14197  *pAllocation = VK_NULL_HANDLE;
    14198  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    14199  *pBuffer = VK_NULL_HANDLE;
    14200  return res;
    14201  }
    // Allocation failed: only the buffer exists; destroy it.
    14202  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    14203  *pBuffer = VK_NULL_HANDLE;
    14204  return res;
    14205  }
    // Buffer creation itself failed: nothing to clean up.
    14206  return res;
    14207 }
    14208 
    14209 void vmaDestroyBuffer(
    14210  VmaAllocator allocator,
    14211  VkBuffer buffer,
    14212  VmaAllocation allocation)
    14213 {
    14214  VMA_ASSERT(allocator);
    14215 
    14216  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    14217  {
    14218  return;
    14219  }
    14220 
    14221  VMA_DEBUG_LOG("vmaDestroyBuffer");
    14222 
    14223  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14224 
    14225 #if VMA_RECORDING_ENABLED
    14226  if(allocator->GetRecorder() != VMA_NULL)
    14227  {
    14228  allocator->GetRecorder()->RecordDestroyBuffer(
    14229  allocator->GetCurrentFrameIndex(),
    14230  allocation);
    14231  }
    14232 #endif
    14233 
    14234  if(buffer != VK_NULL_HANDLE)
    14235  {
    14236  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    14237  }
    14238 
    14239  if(allocation != VK_NULL_HANDLE)
    14240  {
    14241  allocator->FreeMemory(allocation);
    14242  }
    14243 }
    14244 
    // vmaCreateImage: creates a VkImage, allocates memory for it, and binds
    // them together. On any failure, everything created so far is destroyed
    // and the out-handles are reset to VK_NULL_HANDLE; the first failing
    // result code is returned. Mirrors vmaCreateBuffer's unwind order.
    14245 VkResult vmaCreateImage(
    14246  VmaAllocator allocator,
    14247  const VkImageCreateInfo* pImageCreateInfo,
    14248  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    14249  VkImage* pImage,
    14250  VmaAllocation* pAllocation,
    14251  VmaAllocationInfo* pAllocationInfo)
    14252 {
    14253  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
    14254 
    // Reject degenerate images (any zero dimension, mip count, or layer count).
    14255  if(pImageCreateInfo->extent.width == 0 ||
    14256  pImageCreateInfo->extent.height == 0 ||
    14257  pImageCreateInfo->extent.depth == 0 ||
    14258  pImageCreateInfo->mipLevels == 0 ||
    14259  pImageCreateInfo->arrayLayers == 0)
    14260  {
    14261  return VK_ERROR_VALIDATION_FAILED_EXT;
    14262  }
    14263 
    14264  VMA_DEBUG_LOG("vmaCreateImage");
    14265 
    14266  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14267 
    // Out-params start null so callers see defined values on every path.
    14268  *pImage = VK_NULL_HANDLE;
    14269  *pAllocation = VK_NULL_HANDLE;
    14270 
    14271  // 1. Create VkImage.
    14272  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
    14273  allocator->m_hDevice,
    14274  pImageCreateInfo,
    14275  allocator->GetAllocationCallbacks(),
    14276  pImage);
    14277  if(res >= 0)
    14278  {
    // Optimal vs. linear tiling determines the suballocation type used for
    // bufferImageGranularity conflict handling inside the allocator.
    14279  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
    14280  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
    14281  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
    14282 
    14283  // 2. Allocate memory using allocator.
    14284  VkMemoryRequirements vkMemReq = {};
    14285  bool requiresDedicatedAllocation = false;
    14286  bool prefersDedicatedAllocation = false;
    14287  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
    14288  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14289 
    14290  res = allocator->AllocateMemory(
    14291  vkMemReq,
    14292  requiresDedicatedAllocation,
    14293  prefersDedicatedAllocation,
    14294  VK_NULL_HANDLE, // dedicatedBuffer
    14295  *pImage, // dedicatedImage
    14296  *pAllocationCreateInfo,
    14297  suballocType,
    14298  pAllocation);
    14299 
    14300 #if VMA_RECORDING_ENABLED
    14301  if(allocator->GetRecorder() != VMA_NULL)
    14302  {
    14303  allocator->GetRecorder()->RecordCreateImage(
    14304  allocator->GetCurrentFrameIndex(),
    14305  *pImageCreateInfo,
    14306  *pAllocationCreateInfo,
    14307  *pAllocation);
    14308  }
    14309 #endif
    14310 
    14311  if(res >= 0)
    14312  {
    14313  // 3. Bind image with memory.
    14314  res = allocator->BindImageMemory(*pAllocation, *pImage);
    14315  if(res >= 0)
    14316  {
    14317  // All steps succeeded.
    14318  #if VMA_STATS_STRING_ENABLED
    14319  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
    14320  #endif
    14321  if(pAllocationInfo != VMA_NULL)
    14322  {
    14323  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14324  }
    14325 
    14326  return VK_SUCCESS;
    14327  }
    // Bind failed: unwind allocation, then image.
    14328  allocator->FreeMemory(*pAllocation);
    14329  *pAllocation = VK_NULL_HANDLE;
    14330  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    14331  *pImage = VK_NULL_HANDLE;
    14332  return res;
    14333  }
    // Allocation failed: only the image exists; destroy it.
    14334  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    14335  *pImage = VK_NULL_HANDLE;
    14336  return res;
    14337  }
    // Image creation itself failed: nothing to clean up.
    14338  return res;
    14339 }
    14340 
    14341 void vmaDestroyImage(
    14342  VmaAllocator allocator,
    14343  VkImage image,
    14344  VmaAllocation allocation)
    14345 {
    14346  VMA_ASSERT(allocator);
    14347 
    14348  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    14349  {
    14350  return;
    14351  }
    14352 
    14353  VMA_DEBUG_LOG("vmaDestroyImage");
    14354 
    14355  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14356 
    14357 #if VMA_RECORDING_ENABLED
    14358  if(allocator->GetRecorder() != VMA_NULL)
    14359  {
    14360  allocator->GetRecorder()->RecordDestroyImage(
    14361  allocator->GetCurrentFrameIndex(),
    14362  allocation);
    14363  }
    14364 #endif
    14365 
    14366  if(image != VK_NULL_HANDLE)
    14367  {
    14368  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    14369  }
    14370  if(allocation != VK_NULL_HANDLE)
    14371  {
    14372  allocator->FreeMemory(allocation);
    14373  }
    14374 }
    14375 
    14376 #endif // #ifdef VMA_IMPLEMENTATION
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
    Definition: vk_mem_alloc.h:1584
    +Go to the documentation of this file.
    1 //
    2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    1477 /*
    1478 Define this macro to 0/1 to disable/enable support for recording functionality,
    1479 available through VmaAllocatorCreateInfo::pRecordSettings.
    1480 */
    1481 #ifndef VMA_RECORDING_ENABLED
    1482  #ifdef _WIN32
    1483  #define VMA_RECORDING_ENABLED 1
    1484  #else
    1485  #define VMA_RECORDING_ENABLED 0
    1486  #endif
    1487 #endif
    1488 
    1489 #ifndef NOMINMAX
    1490  #define NOMINMAX // For windows.h
    1491 #endif
    1492 
    1493 #include <vulkan/vulkan.h>
    1494 
    1495 #if VMA_RECORDING_ENABLED
    1496  #include <windows.h>
    1497 #endif
    1498 
    1499 #if !defined(VMA_DEDICATED_ALLOCATION)
    1500  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
    1501  #define VMA_DEDICATED_ALLOCATION 1
    1502  #else
    1503  #define VMA_DEDICATED_ALLOCATION 0
    1504  #endif
    1505 #endif
    1506 
    1516 VK_DEFINE_HANDLE(VmaAllocator)
    1517 
    1518 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    1520  VmaAllocator allocator,
    1521  uint32_t memoryType,
    1522  VkDeviceMemory memory,
    1523  VkDeviceSize size);
    1525 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    1526  VmaAllocator allocator,
    1527  uint32_t memoryType,
    1528  VkDeviceMemory memory,
    1529  VkDeviceSize size);
    1530 
    1544 
    1574 
    1577 typedef VkFlags VmaAllocatorCreateFlags;
    1578 
    1583 typedef struct VmaVulkanFunctions {
    1584  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    1585  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    1586  PFN_vkAllocateMemory vkAllocateMemory;
    1587  PFN_vkFreeMemory vkFreeMemory;
    1588  PFN_vkMapMemory vkMapMemory;
    1589  PFN_vkUnmapMemory vkUnmapMemory;
    1590  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    1591  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    1592  PFN_vkBindBufferMemory vkBindBufferMemory;
    1593  PFN_vkBindImageMemory vkBindImageMemory;
    1594  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    1595  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    1596  PFN_vkCreateBuffer vkCreateBuffer;
    1597  PFN_vkDestroyBuffer vkDestroyBuffer;
    1598  PFN_vkCreateImage vkCreateImage;
    1599  PFN_vkDestroyImage vkDestroyImage;
    1600 #if VMA_DEDICATED_ALLOCATION
    1601  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    1602  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
    1603 #endif
    1605 
    1607 typedef enum VmaRecordFlagBits {
    1614 
    1617 typedef VkFlags VmaRecordFlags;
    1618 
    1620 typedef struct VmaRecordSettings
    1621 {
    1631  const char* pFilePath;
    1633 
    1636 {
    1640 
    1641  VkPhysicalDevice physicalDevice;
    1643 
    1644  VkDevice device;
    1646 
    1649 
    1650  const VkAllocationCallbacks* pAllocationCallbacks;
    1652 
    1691  const VkDeviceSize* pHeapSizeLimit;
    1712 
    1714 VkResult vmaCreateAllocator(
    1715  const VmaAllocatorCreateInfo* pCreateInfo,
    1716  VmaAllocator* pAllocator);
    1717 
    1719 void vmaDestroyAllocator(
    1720  VmaAllocator allocator);
    1721 
    1727  VmaAllocator allocator,
    1728  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    1729 
    1735  VmaAllocator allocator,
    1736  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    1737 
    1745  VmaAllocator allocator,
    1746  uint32_t memoryTypeIndex,
    1747  VkMemoryPropertyFlags* pFlags);
    1748 
    1758  VmaAllocator allocator,
    1759  uint32_t frameIndex);
    1760 
    1763 typedef struct VmaStatInfo
    1764 {
    1766  uint32_t blockCount;
    1772  VkDeviceSize usedBytes;
    1774  VkDeviceSize unusedBytes;
    1777 } VmaStatInfo;
    1778 
    1780 typedef struct VmaStats
    1781 {
    1782  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    1783  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    1785 } VmaStats;
    1786 
    1788 void vmaCalculateStats(
    1789  VmaAllocator allocator,
    1790  VmaStats* pStats);
    1791 
    1792 #define VMA_STATS_STRING_ENABLED 1
    1793 
    1794 #if VMA_STATS_STRING_ENABLED
    1795 
    1797 
    1799 void vmaBuildStatsString(
    1800  VmaAllocator allocator,
    1801  char** ppStatsString,
    1802  VkBool32 detailedMap);
    1803 
    1804 void vmaFreeStatsString(
    1805  VmaAllocator allocator,
    1806  char* pStatsString);
    1807 
    1808 #endif // #if VMA_STATS_STRING_ENABLED
    1809 
    1818 VK_DEFINE_HANDLE(VmaPool)
    1819 
    1820 typedef enum VmaMemoryUsage
    1821 {
    1870 } VmaMemoryUsage;
    1871 
    1886 
    1941 
    1954 
    1964 
    1971 
    1975 
    1977 {
    1990  VkMemoryPropertyFlags requiredFlags;
    1995  VkMemoryPropertyFlags preferredFlags;
    2003  uint32_t memoryTypeBits;
    2016  void* pUserData;
    2018 
    2035 VkResult vmaFindMemoryTypeIndex(
    2036  VmaAllocator allocator,
    2037  uint32_t memoryTypeBits,
    2038  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2039  uint32_t* pMemoryTypeIndex);
    2040 
    2054  VmaAllocator allocator,
    2055  const VkBufferCreateInfo* pBufferCreateInfo,
    2056  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2057  uint32_t* pMemoryTypeIndex);
    2058 
    2072  VmaAllocator allocator,
    2073  const VkImageCreateInfo* pImageCreateInfo,
    2074  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2075  uint32_t* pMemoryTypeIndex);
    2076 
    2097 
    2114 
    2125 
    2131 
    2134 typedef VkFlags VmaPoolCreateFlags;
    2135 
    2138 typedef struct VmaPoolCreateInfo {
    2153  VkDeviceSize blockSize;
    2182 
    2185 typedef struct VmaPoolStats {
    2188  VkDeviceSize size;
    2191  VkDeviceSize unusedSize;
    2204  VkDeviceSize unusedRangeSizeMax;
    2207  size_t blockCount;
    2208 } VmaPoolStats;
    2209 
    2216 VkResult vmaCreatePool(
    2217  VmaAllocator allocator,
    2218  const VmaPoolCreateInfo* pCreateInfo,
    2219  VmaPool* pPool);
    2220 
    2223 void vmaDestroyPool(
    2224  VmaAllocator allocator,
    2225  VmaPool pool);
    2226 
    2233 void vmaGetPoolStats(
    2234  VmaAllocator allocator,
    2235  VmaPool pool,
    2236  VmaPoolStats* pPoolStats);
    2237 
    2245  VmaAllocator allocator,
    2246  VmaPool pool,
    2247  size_t* pLostAllocationCount);
    2248 
    2263 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
    2264 
    2289 VK_DEFINE_HANDLE(VmaAllocation)
    2290 
    2291 
    2293 typedef struct VmaAllocationInfo {
    2298  uint32_t memoryType;
    2307  VkDeviceMemory deviceMemory;
    2312  VkDeviceSize offset;
    2317  VkDeviceSize size;
    2331  void* pUserData;
    2333 
    2344 VkResult vmaAllocateMemory(
    2345  VmaAllocator allocator,
    2346  const VkMemoryRequirements* pVkMemoryRequirements,
    2347  const VmaAllocationCreateInfo* pCreateInfo,
    2348  VmaAllocation* pAllocation,
    2349  VmaAllocationInfo* pAllocationInfo);
    2350 
    2358  VmaAllocator allocator,
    2359  VkBuffer buffer,
    2360  const VmaAllocationCreateInfo* pCreateInfo,
    2361  VmaAllocation* pAllocation,
    2362  VmaAllocationInfo* pAllocationInfo);
    2363 
    2365 VkResult vmaAllocateMemoryForImage(
    2366  VmaAllocator allocator,
    2367  VkImage image,
    2368  const VmaAllocationCreateInfo* pCreateInfo,
    2369  VmaAllocation* pAllocation,
    2370  VmaAllocationInfo* pAllocationInfo);
    2371 
    2373 void vmaFreeMemory(
    2374  VmaAllocator allocator,
    2375  VmaAllocation allocation);
    2376 
    2397 VkResult vmaResizeAllocation(
    2398  VmaAllocator allocator,
    2399  VmaAllocation allocation,
    2400  VkDeviceSize newSize);
    2401 
    2419  VmaAllocator allocator,
    2420  VmaAllocation allocation,
    2421  VmaAllocationInfo* pAllocationInfo);
    2422 
    2437 VkBool32 vmaTouchAllocation(
    2438  VmaAllocator allocator,
    2439  VmaAllocation allocation);
    2440 
    2455  VmaAllocator allocator,
    2456  VmaAllocation allocation,
    2457  void* pUserData);
    2458 
    2470  VmaAllocator allocator,
    2471  VmaAllocation* pAllocation);
    2472 
    2507 VkResult vmaMapMemory(
    2508  VmaAllocator allocator,
    2509  VmaAllocation allocation,
    2510  void** ppData);
    2511 
    2516 void vmaUnmapMemory(
    2517  VmaAllocator allocator,
    2518  VmaAllocation allocation);
    2519 
    2532 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2533 
    2546 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2547 
    2564 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
    2565 
    2567 typedef struct VmaDefragmentationInfo {
    2572  VkDeviceSize maxBytesToMove;
    2579 
    2581 typedef struct VmaDefragmentationStats {
    2583  VkDeviceSize bytesMoved;
    2585  VkDeviceSize bytesFreed;
    2591 
    2630 VkResult vmaDefragment(
    2631  VmaAllocator allocator,
    2632  VmaAllocation* pAllocations,
    2633  size_t allocationCount,
    2634  VkBool32* pAllocationsChanged,
    2635  const VmaDefragmentationInfo *pDefragmentationInfo,
    2636  VmaDefragmentationStats* pDefragmentationStats);
    2637 
/// Binds buffer to the memory represented by given allocation.
VkResult vmaBindBufferMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkBuffer buffer);

/// Binds image to the memory represented by given allocation.
VkResult vmaBindImageMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkImage image);

/** \brief Creates a buffer together with a memory allocation bound to it.
\param[out] pBuffer Created buffer.
\param[out] pAllocation Allocation backing the buffer.
\param[out] pAllocationInfo Information about the created allocation. */
VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

/// Destroys the buffer and frees its backing allocation.
void vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation);

/// Function similar to vmaCreateBuffer(), for images.
VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

/// Destroys the image and frees its backing allocation.
void vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation);
    2746 
    2747 #ifdef __cplusplus
    2748 }
    2749 #endif
    2750 
    2751 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    2752 
    2753 // For Visual Studio IntelliSense.
    2754 #if defined(__cplusplus) && defined(__INTELLISENSE__)
    2755 #define VMA_IMPLEMENTATION
    2756 #endif
    2757 
    2758 #ifdef VMA_IMPLEMENTATION
    2759 #undef VMA_IMPLEMENTATION
    2760 
    2761 #include <cstdint>
    2762 #include <cstdlib>
    2763 #include <cstring>
    2764 
    2765 /*******************************************************************************
    2766 CONFIGURATION SECTION
    2767 
    2768 Define some of these macros before each #include of this header or change them
here if you need behavior other than the default, depending on your environment.
    2770 */
    2771 
    2772 /*
    2773 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    2774 internally, like:
    2775 
    2776  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    2777 
Define to 0 if you are going to provide your own pointers to Vulkan functions via
    2779 VmaAllocatorCreateInfo::pVulkanFunctions.
    2780 */
    2781 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    2782 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    2783 #endif
    2784 
    2785 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    2786 //#define VMA_USE_STL_CONTAINERS 1
    2787 
    2788 /* Set this macro to 1 to make the library including and using STL containers:
    2789 std::pair, std::vector, std::list, std::unordered_map.
    2790 
    2791 Set it to 0 or undefined to make the library using its own implementation of
    2792 the containers.
    2793 */
    2794 #if VMA_USE_STL_CONTAINERS
    2795  #define VMA_USE_STL_VECTOR 1
    2796  #define VMA_USE_STL_UNORDERED_MAP 1
    2797  #define VMA_USE_STL_LIST 1
    2798 #endif
    2799 
    2800 #if VMA_USE_STL_VECTOR
    2801  #include <vector>
    2802 #endif
    2803 
    2804 #if VMA_USE_STL_UNORDERED_MAP
    2805  #include <unordered_map>
    2806 #endif
    2807 
    2808 #if VMA_USE_STL_LIST
    2809  #include <list>
    2810 #endif
    2811 
    2812 /*
    2813 Following headers are used in this CONFIGURATION section only, so feel free to
    2814 remove them if not needed.
    2815 */
    2816 #include <cassert> // for assert
    2817 #include <algorithm> // for min, max
    2818 #include <mutex> // for std::mutex
    2819 #include <atomic> // for std::atomic
    2820 
    2821 #ifndef VMA_NULL
    2822  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    2823  #define VMA_NULL nullptr
    2824 #endif
    2825 
    2826 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
    2827 #include <cstdlib>
    2828 void *aligned_alloc(size_t alignment, size_t size)
    2829 {
    2830  // alignment must be >= sizeof(void*)
    2831  if(alignment < sizeof(void*))
    2832  {
    2833  alignment = sizeof(void*);
    2834  }
    2835 
    2836  return memalign(alignment, size);
    2837 }
    2838 #elif defined(__APPLE__) || defined(__ANDROID__)
    2839 #include <cstdlib>
    2840 void *aligned_alloc(size_t alignment, size_t size)
    2841 {
    2842  // alignment must be >= sizeof(void*)
    2843  if(alignment < sizeof(void*))
    2844  {
    2845  alignment = sizeof(void*);
    2846  }
    2847 
    2848  void *pointer;
    2849  if(posix_memalign(&pointer, alignment, size) == 0)
    2850  return pointer;
    2851  return VMA_NULL;
    2852 }
    2853 #endif
    2854 
    2855 // If your compiler is not compatible with C++11 and definition of
// aligned_alloc() function is missing, uncommenting the following line may help:
    2857 
    2858 //#include <malloc.h>
    2859 
    2860 // Normal assert to check for programmer's errors, especially in Debug configuration.
    2861 #ifndef VMA_ASSERT
    2862  #ifdef _DEBUG
    2863  #define VMA_ASSERT(expr) assert(expr)
    2864  #else
    2865  #define VMA_ASSERT(expr)
    2866  #endif
    2867 #endif
    2868 
    2869 // Assert that will be called very often, like inside data structures e.g. operator[].
    2870 // Making it non-empty can make program slow.
    2871 #ifndef VMA_HEAVY_ASSERT
    2872  #ifdef _DEBUG
    2873  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    2874  #else
    2875  #define VMA_HEAVY_ASSERT(expr)
    2876  #endif
    2877 #endif
    2878 
    2879 #ifndef VMA_ALIGN_OF
    2880  #define VMA_ALIGN_OF(type) (__alignof(type))
    2881 #endif
    2882 
    2883 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    2884  #if defined(_WIN32)
    2885  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    2886  #else
    2887  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    2888  #endif
    2889 #endif
    2890 
    2891 #ifndef VMA_SYSTEM_FREE
    2892  #if defined(_WIN32)
    2893  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    2894  #else
    2895  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    2896  #endif
    2897 #endif
    2898 
    2899 #ifndef VMA_MIN
    2900  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    2901 #endif
    2902 
    2903 #ifndef VMA_MAX
    2904  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    2905 #endif
    2906 
    2907 #ifndef VMA_SWAP
    2908  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    2909 #endif
    2910 
    2911 #ifndef VMA_SORT
    2912  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    2913 #endif
    2914 
    2915 #ifndef VMA_DEBUG_LOG
    2916  #define VMA_DEBUG_LOG(format, ...)
    2917  /*
    2918  #define VMA_DEBUG_LOG(format, ...) do { \
    2919  printf(format, __VA_ARGS__); \
    2920  printf("\n"); \
    2921  } while(false)
    2922  */
    2923 #endif
    2924 
    2925 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    2926 #if VMA_STATS_STRING_ENABLED
    // Formats (num) as decimal text into outStr, whose capacity is strLen bytes.
    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    {
        const unsigned int value = static_cast<unsigned int>(num);
        snprintf(outStr, strLen, "%u", value);
    }
    // Formats (num) as decimal text into outStr, whose capacity is strLen bytes.
    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    {
        const unsigned long long value = static_cast<unsigned long long>(num);
        snprintf(outStr, strLen, "%llu", value);
    }
    // Formats the pointer value as text into outStr, whose capacity is strLen bytes.
    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    {
        // "%p" output format is implementation-defined but stable per platform.
        snprintf(outStr, strLen, "%p", ptr);
    }
    2939 #endif
    2940 
    2941 #ifndef VMA_MUTEX
    // Default mutex type: a thin wrapper over std::mutex exposing Lock/Unlock.
    // Users may substitute their own implementation via the VMA_MUTEX macro.
    class VmaMutex
    {
    public:
        VmaMutex() { }
        ~VmaMutex() { }
        void Lock() { m_StdMutex.lock(); }
        void Unlock() { m_StdMutex.unlock(); }
    private:
        std::mutex m_StdMutex;
    };
    2952  #define VMA_MUTEX VmaMutex
    2953 #endif
    2954 
    2955 /*
    2956 If providing your own implementation, you need to implement a subset of std::atomic:
    2957 
    2958 - Constructor(uint32_t desired)
    2959 - uint32_t load() const
    2960 - void store(uint32_t desired)
    2961 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    2962 */
    2963 #ifndef VMA_ATOMIC_UINT32
    2964  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    2965 #endif
    2966 
    2967 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    2968 
    2972  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    2973 #endif
    2974 
    2975 #ifndef VMA_DEBUG_ALIGNMENT
    2976 
    2980  #define VMA_DEBUG_ALIGNMENT (1)
    2981 #endif
    2982 
    2983 #ifndef VMA_DEBUG_MARGIN
    2984 
    2988  #define VMA_DEBUG_MARGIN (0)
    2989 #endif
    2990 
    2991 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    2992 
    2996  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
    2997 #endif
    2998 
    2999 #ifndef VMA_DEBUG_DETECT_CORRUPTION
    3000 
    3005  #define VMA_DEBUG_DETECT_CORRUPTION (0)
    3006 #endif
    3007 
    3008 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    3009 
    3013  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    3014 #endif
    3015 
    3016 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    3017 
    3021  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    3022 #endif
    3023 
    3024 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    3025  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
    3027 #endif
    3028 
    3029 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    3030  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
    3032 #endif
    3033 
    3034 #ifndef VMA_CLASS_NO_COPY
    3035  #define VMA_CLASS_NO_COPY(className) \
    3036  private: \
    3037  className(const className&) = delete; \
    3038  className& operator=(const className&) = delete;
    3039 #endif
    3040 
    3041 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    3042 
    3043 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
    3044 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
    3045 
    3046 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
    3047 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
    3048 
    3049 /*******************************************************************************
    3050 END OF CONFIGURATION
    3051 */
    3052 
// All-null VkAllocationCallbacks. With null pfnAllocation/pfnFree, VmaMalloc
// and VmaFree fall through to the system aligned allocator.
static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    3055 
    3056 // Returns number of bits set to 1 in (v).
    3057 static inline uint32_t VmaCountBitsSet(uint32_t v)
    3058 {
    3059  uint32_t c = v - ((v >> 1) & 0x55555555);
    3060  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    3061  c = ((c >> 4) + c) & 0x0F0F0F0F;
    3062  c = ((c >> 8) + c) & 0x00FF00FF;
    3063  c = ((c >> 16) + c) & 0x0000FFFF;
    3064  return c;
    3065 }
    3066 
    3067 // Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
    3068 // Use types like uint32_t, uint64_t as T.
    3069 template <typename T>
    3070 static inline T VmaAlignUp(T val, T align)
    3071 {
    3072  return (val + align - 1) / align * align;
    3073 }
// Aligns given value down to the nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
// (Comment fixed: it previously said "VmaAlignUp(11, 8) = 8", naming the wrong function.)
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignDown(T val, T align)
{
    return val / align * align;
}
    3081 
    3082 // Division with mathematical rounding to nearest number.
    3083 template <typename T>
    3084 static inline T VmaRoundDiv(T x, T y)
    3085 {
    3086  return (x + (y / (T)2)) / y;
    3087 }
    3088 
    3089 /*
    3090 Returns true if given number is a power of two.
    3091 T must be unsigned integer number or signed integer but always nonnegative.
    3092 For 0 returns true.
    3093 */
    3094 template <typename T>
    3095 inline bool VmaIsPow2(T x)
    3096 {
    3097  return (x & (x-1)) == 0;
    3098 }
    3099 
    3100 // Returns smallest power of 2 greater or equal to v.
    3101 static inline uint32_t VmaNextPow2(uint32_t v)
    3102 {
    3103  v--;
    3104  v |= v >> 1;
    3105  v |= v >> 2;
    3106  v |= v >> 4;
    3107  v |= v >> 8;
    3108  v |= v >> 16;
    3109  v++;
    3110  return v;
    3111 }
    3112 static inline uint64_t VmaNextPow2(uint64_t v)
    3113 {
    3114  v--;
    3115  v |= v >> 1;
    3116  v |= v >> 2;
    3117  v |= v >> 4;
    3118  v |= v >> 8;
    3119  v |= v >> 16;
    3120  v |= v >> 32;
    3121  v++;
    3122  return v;
    3123 }
    3124 
    3125 // Returns largest power of 2 less or equal to v.
    3126 static inline uint32_t VmaPrevPow2(uint32_t v)
    3127 {
    3128  v |= v >> 1;
    3129  v |= v >> 2;
    3130  v |= v >> 4;
    3131  v |= v >> 8;
    3132  v |= v >> 16;
    3133  v = v ^ (v >> 1);
    3134  return v;
    3135 }
    3136 static inline uint64_t VmaPrevPow2(uint64_t v)
    3137 {
    3138  v |= v >> 1;
    3139  v |= v >> 2;
    3140  v |= v >> 4;
    3141  v |= v >> 8;
    3142  v |= v >> 16;
    3143  v |= v >> 32;
    3144  v = v ^ (v >> 1);
    3145  return v;
    3146 }
    3147 
    3148 static inline bool VmaStrIsEmpty(const char* pStr)
    3149 {
    3150  return pStr == VMA_NULL || *pStr == '\0';
    3151 }
    3152 
    3153 static const char* VmaAlgorithmToStr(uint32_t algorithm)
    3154 {
    3155  switch(algorithm)
    3156  {
    3158  return "Linear";
    3160  return "Buddy";
    3161  case 0:
    3162  return "Default";
    3163  default:
    3164  VMA_ASSERT(0);
    3165  return "";
    3166  }
    3167 }
    3168 
    3169 #ifndef VMA_SORT
    3170 
    3171 template<typename Iterator, typename Compare>
    3172 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
    3173 {
    3174  Iterator centerValue = end; --centerValue;
    3175  Iterator insertIndex = beg;
    3176  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    3177  {
    3178  if(cmp(*memTypeIndex, *centerValue))
    3179  {
    3180  if(insertIndex != memTypeIndex)
    3181  {
    3182  VMA_SWAP(*memTypeIndex, *insertIndex);
    3183  }
    3184  ++insertIndex;
    3185  }
    3186  }
    3187  if(insertIndex != centerValue)
    3188  {
    3189  VMA_SWAP(*insertIndex, *centerValue);
    3190  }
    3191  return insertIndex;
    3192 }
    3193 
    3194 template<typename Iterator, typename Compare>
    3195 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
    3196 {
    3197  if(beg < end)
    3198  {
    3199  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
    3200  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
    3201  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    3202  }
    3203 }
    3204 
    3205 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    3206 
    3207 #endif // #ifndef VMA_SORT
    3208 
    3209 /*
    3210 Returns true if two memory blocks occupy overlapping pages.
    3211 ResourceA must be in less memory offset than ResourceB.
    3212 
    3213 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
    3214 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
    3215 */
    3216 static inline bool VmaBlocksOnSamePage(
    3217  VkDeviceSize resourceAOffset,
    3218  VkDeviceSize resourceASize,
    3219  VkDeviceSize resourceBOffset,
    3220  VkDeviceSize pageSize)
    3221 {
    3222  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    3223  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    3224  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    3225  VkDeviceSize resourceBStart = resourceBOffset;
    3226  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    3227  return resourceAEndPage == resourceBStartPage;
    3228 }
    3229 
// Kind of content stored in a suballocation of a memory block. Used by
// VmaIsBufferImageGranularityConflict to decide whether two neighboring
// suballocations must respect bufferImageGranularity.
enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,          // Unused range.
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,       // Content unknown; treated conservatively.
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3, // Image with unknown tiling.
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};
    3240 
    3241 /*
    3242 Returns true if given suballocation types could conflict and must respect
    3243 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
    3244 or linear image and another one is optimal image. If type is unknown, behave
    3245 conservatively.
    3246 */
    3247 static inline bool VmaIsBufferImageGranularityConflict(
    3248  VmaSuballocationType suballocType1,
    3249  VmaSuballocationType suballocType2)
    3250 {
    3251  if(suballocType1 > suballocType2)
    3252  {
    3253  VMA_SWAP(suballocType1, suballocType2);
    3254  }
    3255 
    3256  switch(suballocType1)
    3257  {
    3258  case VMA_SUBALLOCATION_TYPE_FREE:
    3259  return false;
    3260  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
    3261  return true;
    3262  case VMA_SUBALLOCATION_TYPE_BUFFER:
    3263  return
    3264  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3265  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3266  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
    3267  return
    3268  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3269  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
    3270  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3271  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
    3272  return
    3273  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3274  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
    3275  return false;
    3276  default:
    3277  VMA_ASSERT(0);
    3278  return true;
    3279  }
    3280 }
    3281 
    3282 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
    3283 {
    3284  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    3285  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3286  for(size_t i = 0; i < numberCount; ++i, ++pDst)
    3287  {
    3288  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    3289  }
    3290 }
    3291 
    3292 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
    3293 {
    3294  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    3295  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3296  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    3297  {
    3298  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
    3299  {
    3300  return false;
    3301  }
    3302  }
    3303  return true;
    3304 }
    3305 
// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    // When useMutex is false, no lock is taken and the whole object is a no-op
    // (callers pass false to skip synchronization entirely).
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if(m_pMutex)
        {
            m_pMutex->Lock();
        }
    }

    ~VmaMutexLock()
    {
        if(m_pMutex)
        {
            m_pMutex->Unlock();
        }
    }

private:
    VMA_MUTEX* m_pMutex; // Null when locking is disabled.
};
    3331 
    3332 #if VMA_DEBUG_GLOBAL_MUTEX
    3333  static VMA_MUTEX gDebugGlobalMutex;
    3334  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    3335 #else
    3336  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    3337 #endif
    3338 
    3339 // Minimum size of a free suballocation to register it in the free suballocation collection.
    3340 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    3341 
    3342 /*
    3343 Performs binary search and returns iterator to first element that is greater or
    3344 equal to (key), according to comparison (cmp).
    3345 
    3346 Cmp should return true if first argument is less than second argument.
    3347 
    3348 Returned value is the found element, if present in the collection or place where
    3349 new element with value (key) should be inserted.
    3350 */
    3351 template <typename CmpLess, typename IterT, typename KeyT>
    3352 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
    3353 {
    3354  size_t down = 0, up = (end - beg);
    3355  while(down < up)
    3356  {
    3357  const size_t mid = (down + up) / 2;
    3358  if(cmp(*(beg+mid), key))
    3359  {
    3360  down = mid + 1;
    3361  }
    3362  else
    3363  {
    3364  up = mid;
    3365  }
    3366  }
    3367  return beg + down;
    3368 }
    3369 
    3371 // Memory allocation
    3372 
    3373 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
    3374 {
    3375  if((pAllocationCallbacks != VMA_NULL) &&
    3376  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    3377  {
    3378  return (*pAllocationCallbacks->pfnAllocation)(
    3379  pAllocationCallbacks->pUserData,
    3380  size,
    3381  alignment,
    3382  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    3383  }
    3384  else
    3385  {
    3386  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    3387  }
    3388 }
    3389 
    3390 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
    3391 {
    3392  if((pAllocationCallbacks != VMA_NULL) &&
    3393  (pAllocationCallbacks->pfnFree != VMA_NULL))
    3394  {
    3395  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    3396  }
    3397  else
    3398  {
    3399  VMA_SYSTEM_FREE(ptr);
    3400  }
    3401 }
    3402 
// Allocates raw, properly aligned storage for a single T (no construction —
// combine with the vma_new macro for placement-new).
template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}
    3408 
// Allocates raw, properly aligned storage for (count) objects of type T
// (no construction — combine with the vma_new_array macro for placement-new).
template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}
    3414 
    3415 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
    3416 
    3417 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    3418 
    3419 template<typename T>
    3420 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
    3421 {
    3422  ptr->~T();
    3423  VmaFree(pAllocationCallbacks, ptr);
    3424 }
    3425 
    3426 template<typename T>
    3427 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    3428 {
    3429  if(ptr != VMA_NULL)
    3430  {
    3431  for(size_t i = count; i--; )
    3432  {
    3433  ptr[i].~T();
    3434  }
    3435  VmaFree(pAllocationCallbacks, ptr);
    3436  }
    3437 }
    3438 
// STL-compatible allocator.
// Routes container allocations through VkAllocationCallbacks via
// VmaAllocateArray/VmaFree. Not assignable because m_pCallbacks is const.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    // Rebinding copy constructor required by the Allocator concept.
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    // Two allocators compare equal when they use the same callbacks.
    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};
    3466 
    3467 #if VMA_USE_STL_VECTOR
    3468 
    3469 #define VmaVector std::vector
    3470 
    3471 template<typename T, typename allocatorT>
    3472 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
    3473 {
    3474  vec.insert(vec.begin() + index, item);
    3475 }
    3476 
    3477 template<typename T, typename allocatorT>
    3478 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
    3479 {
    3480  vec.erase(vec.begin() + index);
    3481 }
    3482 
    3483 #else // #if VMA_USE_STL_VECTOR
    3484 
    3485 /* Class with interface compatible with subset of std::vector.
    3486 T must be POD because constructors and destructors are not called and memcpy is
    3487 used for these objects. */
    3488 template<typename T, typename AllocatorT>
    3489 class VmaVector
    3490 {
    3491 public:
    3492  typedef T value_type;
    3493 
    3494  VmaVector(const AllocatorT& allocator) :
    3495  m_Allocator(allocator),
    3496  m_pArray(VMA_NULL),
    3497  m_Count(0),
    3498  m_Capacity(0)
    3499  {
    3500  }
    3501 
    3502  VmaVector(size_t count, const AllocatorT& allocator) :
    3503  m_Allocator(allocator),
    3504  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    3505  m_Count(count),
    3506  m_Capacity(count)
    3507  {
    3508  }
    3509 
    3510  VmaVector(const VmaVector<T, AllocatorT>& src) :
    3511  m_Allocator(src.m_Allocator),
    3512  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    3513  m_Count(src.m_Count),
    3514  m_Capacity(src.m_Count)
    3515  {
    3516  if(m_Count != 0)
    3517  {
    3518  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    3519  }
    3520  }
    3521 
    3522  ~VmaVector()
    3523  {
    3524  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3525  }
    3526 
    3527  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    3528  {
    3529  if(&rhs != this)
    3530  {
    3531  resize(rhs.m_Count);
    3532  if(m_Count != 0)
    3533  {
    3534  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    3535  }
    3536  }
    3537  return *this;
    3538  }
    3539 
    3540  bool empty() const { return m_Count == 0; }
    3541  size_t size() const { return m_Count; }
    3542  T* data() { return m_pArray; }
    3543  const T* data() const { return m_pArray; }
    3544 
    3545  T& operator[](size_t index)
    3546  {
    3547  VMA_HEAVY_ASSERT(index < m_Count);
    3548  return m_pArray[index];
    3549  }
    3550  const T& operator[](size_t index) const
    3551  {
    3552  VMA_HEAVY_ASSERT(index < m_Count);
    3553  return m_pArray[index];
    3554  }
    3555 
    3556  T& front()
    3557  {
    3558  VMA_HEAVY_ASSERT(m_Count > 0);
    3559  return m_pArray[0];
    3560  }
    3561  const T& front() const
    3562  {
    3563  VMA_HEAVY_ASSERT(m_Count > 0);
    3564  return m_pArray[0];
    3565  }
    3566  T& back()
    3567  {
    3568  VMA_HEAVY_ASSERT(m_Count > 0);
    3569  return m_pArray[m_Count - 1];
    3570  }
    3571  const T& back() const
    3572  {
    3573  VMA_HEAVY_ASSERT(m_Count > 0);
    3574  return m_pArray[m_Count - 1];
    3575  }
    3576 
    3577  void reserve(size_t newCapacity, bool freeMemory = false)
    3578  {
    3579  newCapacity = VMA_MAX(newCapacity, m_Count);
    3580 
    3581  if((newCapacity < m_Capacity) && !freeMemory)
    3582  {
    3583  newCapacity = m_Capacity;
    3584  }
    3585 
    3586  if(newCapacity != m_Capacity)
    3587  {
    3588  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
    3589  if(m_Count != 0)
    3590  {
    3591  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    3592  }
    3593  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3594  m_Capacity = newCapacity;
    3595  m_pArray = newArray;
    3596  }
    3597  }
    3598 
    3599  void resize(size_t newCount, bool freeMemory = false)
    3600  {
    3601  size_t newCapacity = m_Capacity;
    3602  if(newCount > m_Capacity)
    3603  {
    3604  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    3605  }
    3606  else if(freeMemory)
    3607  {
    3608  newCapacity = newCount;
    3609  }
    3610 
    3611  if(newCapacity != m_Capacity)
    3612  {
    3613  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    3614  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    3615  if(elementsToCopy != 0)
    3616  {
    3617  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    3618  }
    3619  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3620  m_Capacity = newCapacity;
    3621  m_pArray = newArray;
    3622  }
    3623 
    3624  m_Count = newCount;
    3625  }
    3626 
    3627  void clear(bool freeMemory = false)
    3628  {
    3629  resize(0, freeMemory);
    3630  }
    3631 
    3632  void insert(size_t index, const T& src)
    3633  {
    3634  VMA_HEAVY_ASSERT(index <= m_Count);
    3635  const size_t oldCount = size();
    3636  resize(oldCount + 1);
    3637  if(index < oldCount)
    3638  {
    3639  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    3640  }
    3641  m_pArray[index] = src;
    3642  }
    3643 
    3644  void remove(size_t index)
    3645  {
    3646  VMA_HEAVY_ASSERT(index < m_Count);
    3647  const size_t oldCount = size();
    3648  if(index < oldCount - 1)
    3649  {
    3650  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    3651  }
    3652  resize(oldCount - 1);
    3653  }
    3654 
    3655  void push_back(const T& src)
    3656  {
    3657  const size_t newIndex = size();
    3658  resize(newIndex + 1);
    3659  m_pArray[newIndex] = src;
    3660  }
    3661 
    3662  void pop_back()
    3663  {
    3664  VMA_HEAVY_ASSERT(m_Count > 0);
    3665  resize(size() - 1);
    3666  }
    3667 
    3668  void push_front(const T& src)
    3669  {
    3670  insert(0, src);
    3671  }
    3672 
    3673  void pop_front()
    3674  {
    3675  VMA_HEAVY_ASSERT(m_Count > 0);
    3676  remove(0);
    3677  }
    3678 
    3679  typedef T* iterator;
    3680 
    3681  iterator begin() { return m_pArray; }
    3682  iterator end() { return m_pArray + m_Count; }
    3683 
    3684 private:
    3685  AllocatorT m_Allocator;
    3686  T* m_pArray;
    3687  size_t m_Count;
    3688  size_t m_Capacity;
    3689 };
    3690 
// Overload of VmaVectorInsert for VmaVector (same interface as the
// std::vector overload used when VMA_USE_STL_VECTOR is enabled).
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}
    3696 
// Overload of VmaVectorRemove for VmaVector (same interface as the
// std::vector overload used when VMA_USE_STL_VECTOR is enabled).
template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
    3702 
    3703 #endif // #if VMA_USE_STL_VECTOR
    3704 
    3705 template<typename CmpLess, typename VectorT>
    3706 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
    3707 {
    3708  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    3709  vector.data(),
    3710  vector.data() + vector.size(),
    3711  value,
    3712  CmpLess()) - vector.data();
    3713  VmaVectorInsert(vector, indexToInsert, value);
    3714  return indexToInsert;
    3715 }
    3716 
    3717 template<typename CmpLess, typename VectorT>
    3718 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
    3719 {
    3720  CmpLess comparator;
    3721  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
    3722  vector.begin(),
    3723  vector.end(),
    3724  value,
    3725  comparator);
    3726  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    3727  {
    3728  size_t indexToRemove = it - vector.begin();
    3729  VmaVectorRemove(vector, indexToRemove);
    3730  return true;
    3731  }
    3732  return false;
    3733 }
    3734 
    3735 template<typename CmpLess, typename IterT, typename KeyT>
    3736 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
    3737 {
    3738  CmpLess comparator;
    3739  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
    3740  beg, end, value, comparator);
    3741  if(it == end ||
    3742  (!comparator(*it, value) && !comparator(value, *it)))
    3743  {
    3744  return it;
    3745  }
    3746  return end;
    3747 }
    3748 
    3750 // class VmaPoolAllocator
    3751 
    3752 /*
    3753 Allocator for objects of type T using a list of arrays (pools) to speed up
    3754 allocation. Number of elements that can be allocated is not bounded because
    3755 allocator can create multiple blocks.
    3756 */
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    // itemsPerBlock: capacity of each internal block; must be > 0.
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    // Frees all blocks at once. Any items still held by callers become dangling.
    void Clear();
    // Returns storage for one T taken from a block's free list.
    T* Alloc();
    // Returns the item's slot to the free list of the block that owns it.
    void Free(T* ptr);

private:
    // Each slot is either a live T or, while free, a link in the block's
    // singly-linked free list.
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    // One fixed-capacity array of items plus the head of its free list.
    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex; // UINT32_MAX when the block is full.
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    // Appends a new block with all items chained into its free list.
    ItemBlock& CreateNewBlock();
};
    3787 
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    // A block must be able to hold at least one item.
    VMA_ASSERT(itemsPerBlock > 0);
}
    3796 
template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    // Releases all blocks; any pointers still held by callers become invalid.
    Clear();
}
    3802 
    3803 template<typename T>
    3804 void VmaPoolAllocator<T>::Clear()
    3805 {
    3806  for(size_t i = m_ItemBlocks.size(); i--; )
    3807  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    3808  m_ItemBlocks.clear();
    3809 }
    3810 
    3811 template<typename T>
    3812 T* VmaPoolAllocator<T>::Alloc()
    3813 {
    3814  for(size_t i = m_ItemBlocks.size(); i--; )
    3815  {
    3816  ItemBlock& block = m_ItemBlocks[i];
    3817  // This block has some free items: Use first one.
    3818  if(block.FirstFreeIndex != UINT32_MAX)
    3819  {
    3820  Item* const pItem = &block.pItems[block.FirstFreeIndex];
    3821  block.FirstFreeIndex = pItem->NextFreeIndex;
    3822  return &pItem->Value;
    3823  }
    3824  }
    3825 
    3826  // No block has free item: Create new one and use it.
    3827  ItemBlock& newBlock = CreateNewBlock();
    3828  Item* const pItem = &newBlock.pItems[0];
    3829  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    3830  return &pItem->Value;
    3831 }
    3832 
    3833 template<typename T>
    3834 void VmaPoolAllocator<T>::Free(T* ptr)
    3835 {
    3836  // Search all memory blocks to find ptr.
    3837  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    3838  {
    3839  ItemBlock& block = m_ItemBlocks[i];
    3840 
    3841  // Casting to union.
    3842  Item* pItemPtr;
    3843  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
    3844 
    3845  // Check if pItemPtr is in address range of this block.
    3846  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
    3847  {
    3848  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
    3849  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
    3850  block.FirstFreeIndex = index;
    3851  return;
    3852  }
    3853  }
    3854  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
    3855 }
    3856 
    3857 template<typename T>
    3858 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
    3859 {
    3860  ItemBlock newBlock = {
    3861  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
    3862 
    3863  m_ItemBlocks.push_back(newBlock);
    3864 
    3865  // Setup singly-linked list of all free items in this block.
    3866  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
    3867  newBlock.pItems[i].NextFreeIndex = i + 1;
    3868  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    3869  return m_ItemBlocks.back();
    3870 }
    3871 
    3873 // class VmaRawList, VmaList
    3874 
    3875 #if VMA_USE_STL_LIST
    3876 
    3877 #define VmaList std::list
    3878 
    3879 #else // #if VMA_USE_STL_LIST
    3880 
// Node of VmaRawList's doubly linked list.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev; // Null for the front node.
    VmaListItem* pNext; // Null for the back node.
    T Value;
};
    3888 
    3889 // Doubly linked list.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    // Frees all nodes and resets the list to empty.
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // Overloads without a value leave the new node's Value unassigned -
    // the caller is responsible for setting it.
    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    // Unlinks pItem and returns its node to the internal allocator.
    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator; // Node storage.
    ItemType* m_pFront; // First node.
    ItemType* m_pBack;  // Last node.
    size_t m_Count;     // Authoritative emptiness check (see IsEmpty).
};
    3933 
// Constructs an empty list. Node storage comes from a pool allocator
// configured with 128 items per block.
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
    3943 
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
    // Destroying m_ItemAllocator releases all node memory in one pass.
}
    3950 
    3951 template<typename T>
    3952 void VmaRawList<T>::Clear()
    3953 {
    3954  if(IsEmpty() == false)
    3955  {
    3956  ItemType* pItem = m_pBack;
    3957  while(pItem != VMA_NULL)
    3958  {
    3959  ItemType* const pPrevItem = pItem->pPrev;
    3960  m_ItemAllocator.Free(pItem);
    3961  pItem = pPrevItem;
    3962  }
    3963  m_pFront = VMA_NULL;
    3964  m_pBack = VMA_NULL;
    3965  m_Count = 0;
    3966  }
    3967 }
    3968 
    3969 template<typename T>
    3970 VmaListItem<T>* VmaRawList<T>::PushBack()
    3971 {
    3972  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    3973  pNewItem->pNext = VMA_NULL;
    3974  if(IsEmpty())
    3975  {
    3976  pNewItem->pPrev = VMA_NULL;
    3977  m_pFront = pNewItem;
    3978  m_pBack = pNewItem;
    3979  m_Count = 1;
    3980  }
    3981  else
    3982  {
    3983  pNewItem->pPrev = m_pBack;
    3984  m_pBack->pNext = pNewItem;
    3985  m_pBack = pNewItem;
    3986  ++m_Count;
    3987  }
    3988  return pNewItem;
    3989 }
    3990 
    3991 template<typename T>
    3992 VmaListItem<T>* VmaRawList<T>::PushFront()
    3993 {
    3994  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    3995  pNewItem->pPrev = VMA_NULL;
    3996  if(IsEmpty())
    3997  {
    3998  pNewItem->pNext = VMA_NULL;
    3999  m_pFront = pNewItem;
    4000  m_pBack = pNewItem;
    4001  m_Count = 1;
    4002  }
    4003  else
    4004  {
    4005  pNewItem->pNext = m_pFront;
    4006  m_pFront->pPrev = pNewItem;
    4007  m_pFront = pNewItem;
    4008  ++m_Count;
    4009  }
    4010  return pNewItem;
    4011 }
    4012 
// Appends a copy of value at the back; returns the new node.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}
    4020 
// Prepends a copy of value at the front; returns the new node.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}
    4028 
    4029 template<typename T>
    4030 void VmaRawList<T>::PopBack()
    4031 {
    4032  VMA_HEAVY_ASSERT(m_Count > 0);
    4033  ItemType* const pBackItem = m_pBack;
    4034  ItemType* const pPrevItem = pBackItem->pPrev;
    4035  if(pPrevItem != VMA_NULL)
    4036  {
    4037  pPrevItem->pNext = VMA_NULL;
    4038  }
    4039  m_pBack = pPrevItem;
    4040  m_ItemAllocator.Free(pBackItem);
    4041  --m_Count;
    4042 }
    4043 
    4044 template<typename T>
    4045 void VmaRawList<T>::PopFront()
    4046 {
    4047  VMA_HEAVY_ASSERT(m_Count > 0);
    4048  ItemType* const pFrontItem = m_pFront;
    4049  ItemType* const pNextItem = pFrontItem->pNext;
    4050  if(pNextItem != VMA_NULL)
    4051  {
    4052  pNextItem->pPrev = VMA_NULL;
    4053  }
    4054  m_pFront = pNextItem;
    4055  m_ItemAllocator.Free(pFrontItem);
    4056  --m_Count;
    4057 }
    4058 
// Unlinks pItem from the list and returns its node to the pool allocator.
// pItem must be a live node of this list.
template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    // Fix the previous node's forward link, or the list front.
    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    // Fix the next node's backward link, or the list back.
    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}
    4088 
// Inserts a new node immediately before pItem; null pItem means PushBack.
// The new node's Value is unassigned - the caller must set it.
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            // pItem was the front: the new node becomes the new front.
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}
    4114 
// Inserts a new node immediately after pItem; null pItem means PushFront.
// The new node's Value is unassigned - the caller must set it.
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            // pItem was the back: the new node becomes the new back.
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}
    4140 
// Inserts a copy of value before pItem (null pItem means PushBack).
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}
    4148 
// Inserts a copy of value after pItem (null pItem means PushFront).
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}
    4156 
// std::list-like wrapper over VmaRawList exposing a subset of the STL
// interface (iterators, begin/end, push_back, insert, erase).
template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    // Mutable bidirectional iterator over the list's nodes.
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                // Decrementing end() yields an iterator to the last element.
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            // Comparing iterators from different lists is invalid.
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem; // Null represents end().

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    // Read-only bidirectional iterator; implicitly convertible from iterator.
    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                // Decrementing cend() yields an iterator to the last element.
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem; // Null represents cend().

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    // Invalidates 'it'; all other iterators remain valid.
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    // Inserts value before 'it'; returns iterator to the new element.
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};
    4341 
    4342 #endif // #if VMA_USE_STL_LIST
    4343 
    4345 // class VmaMap
    4346 
    4347 // Unused in this version.
    4348 #if 0
    4349 
    4350 #if VMA_USE_STL_UNORDERED_MAP
    4351 
    4352 #define VmaPair std::pair
    4353 
    4354 #define VMA_MAP_TYPE(KeyT, ValueT) \
    4355  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    4356 
    4357 #else // #if VMA_USE_STL_UNORDERED_MAP
    4358 
// Minimal replacement for std::pair, used by VmaMap.
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};
    4368 
    4369 /* Class compatible with subset of interface of std::unordered_map.
    4370 KeyT, ValueT must be POD because they will be stored in VmaVector.
    4371 */
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    // Inserts the pair, keeping the underlying vector sorted by key.
    void insert(const PairType& pair);
    // Returns iterator to the pair with the given key, or end() if absent.
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    // Pairs kept sorted by key so lookups can use binary search.
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
    4391 
    4392 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    4393 
// Orders VmaPair objects by their 'first' member; also supports comparing
// a pair directly against a bare key (for heterogeneous binary search).
template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};
    4406 
// Inserts the pair at its sorted position (by key) in the vector.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}
    4417 
// Binary-searches for the pair with the given key.
// Returns pointer to the pair, or end() when not present.
template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}
    4435 
// Removes the pair that 'it' points to; shifts later elements left.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}
    4441 
    4442 #endif // #if VMA_USE_STL_UNORDERED_MAP
    4443 
    4444 #endif // #if 0
    4445 
    4447 
class VmaDeviceMemoryBlock;

// Selects the operation performed by memory cache-control helpers.
enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
    4451 
// Internal representation of a single allocation: either a suballocation
// inside a VmaDeviceMemoryBlock, or a dedicated VkDeviceMemory object.
struct VmaAllocation_T
{
    VMA_CLASS_NO_COPY(VmaAllocation_T)
private:
    // Stored in the high bit of m_MapCount (see m_MapCount below).
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    enum FLAGS
    {
        FLAG_USER_DATA_STRING = 0x01,
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,      // Constructed but not yet initialized.
        ALLOCATION_TYPE_BLOCK,     // Suballocation of a VmaDeviceMemoryBlock.
        ALLOCATION_TYPE_DEDICATED, // Owns its own VkDeviceMemory.
    };

    // userDataString: when true, m_pUserData holds an owned, copied string.
    VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_LastUseFrameIndex(currentFrameIndex),
        m_Type((uint8_t)ALLOCATION_TYPE_NONE),
        m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_MapCount(0),
        m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    {
#if VMA_STATS_STRING_ENABLED
        m_CreationFrameIndex = currentFrameIndex;
        m_BufferImageUsage = 0;
#endif
    }

    ~VmaAllocation_T()
    {
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }

    // Initializes this allocation as a suballocation of the given block.
    // May be called only once, on a freshly constructed object.
    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    // Initializes this allocation as already lost (no backing memory).
    // Requires m_LastUseFrameIndex to already equal VMA_FRAME_INDEX_LOST.
    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    // Rebinds a block allocation to another block/offset (defragmentation -
    // TODO confirm against callers).
    void ChangeBlockAllocation(
        VmaAllocator hAllocator,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset);

    // Updates m_Size in place; presumably backs vmaResizeAllocation() - verify.
    void ChangeSize(VkDeviceSize newSize);

    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Valid only for ALLOCATION_TYPE_BLOCK.
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Fills outInfo with statistics describing this single dedicated allocation.
    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }

    // Map/unmap helpers; the Block* versions operate via the owning block,
    // the Dedicated* versions on the allocation's own VkDeviceMemory.
    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

#if VMA_STATS_STRING_ENABLED
    uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }

    // May be called only once (while m_BufferImageUsage is still 0).
    void InitBufferImageUsage(uint32_t bufferImageUsage)
    {
        VMA_ASSERT(m_BufferImageUsage == 0);
        m_BufferImageUsage = bufferImageUsage;
    }

    void PrintParameters(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    // Which member is active is determined by m_Type.
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

#if VMA_STATS_STRING_ENABLED
    uint32_t m_CreationFrameIndex;
    uint32_t m_BufferImageUsage; // 0 if unknown.
#endif

    void FreeUserDataString(VmaAllocator hAllocator);
};
    4670 
    4671 /*
    4672 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
    4673 allocated memory block or free.
    4674 */
struct VmaSuballocation
{
    VkDeviceSize offset;       // Start of the range within the block.
    VkDeviceSize size;         // Length of the range in bytes.
    VmaAllocation hAllocation; // Owning allocation - presumably null/unused for free ranges; verify against metadata code.
    VmaSuballocationType type; // VMA_SUBALLOCATION_TYPE_* classification.
};
    4682 
    4683 // Comparator for offsets.
// Orders suballocations by ascending offset.
struct VmaSuballocationOffsetLess
{
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset < rhs.offset;
    }
};
// Orders suballocations by descending offset.
struct VmaSuballocationOffsetGreater
{
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset > rhs.offset;
    }
};
    4698 
// List type used to track suballocations within a single memory block.
typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;

// Cost of one additional allocation lost, as equivalent in bytes.
static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    4703 
    4704 /*
    4705 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
    4706 
    4707 If canMakeOtherLost was false:
    4708 - item points to a FREE suballocation.
    4709 - itemsToMakeLostCount is 0.
    4710 
    4711 If canMakeOtherLost was true:
    4712 - item points to first of sequence of suballocations, which are either FREE,
    4713  or point to VmaAllocations that can become lost.
    4714 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
    4715  the requested allocation to succeed.
    4716 */
struct VmaAllocationRequest
{
    VkDeviceSize offset;
    VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    VmaSuballocationList::iterator item;
    size_t itemsToMakeLostCount;
    void* customData; // NOTE(review): interpretation appears metadata-implementation specific; not visible here.

    // Heuristic cost of fulfilling this request: bytes made lost plus a
    // fixed per-allocation penalty (VMA_LOST_ALLOCATION_COST).
    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
    4731 
/*
Data structure used for bookkeeping of allocations and unused ranges of memory
in a single VkDeviceMemory block.

Abstract base class. Concrete algorithms implemented below:
VmaBlockMetadata_Generic, VmaBlockMetadata_Linear, VmaBlockMetadata_Buddy.
*/
class VmaBlockMetadata
{
public:
    VmaBlockMetadata(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata() { }
    // Must be called after construction, before any other use.
    virtual void Init(VkDeviceSize size) { m_Size = size; }

    // Validates all data structures inside this object. If not valid, returns false.
    virtual bool Validate() const = 0;
    // Total size of the managed memory block, in bytes.
    VkDeviceSize GetSize() const { return m_Size; }
    virtual size_t GetAllocationCount() const = 0;
    virtual VkDeviceSize GetSumFreeSize() const = 0;
    virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    // Returns true if this block is empty - contains only single free suballocation.
    virtual bool IsEmpty() const = 0;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    // Shouldn't modify blockCount.
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
#endif

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy, // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* flags.
        VmaAllocationRequest* pAllocationRequest) = 0;

    // Makes lost the allocations chosen by a preceding CreateAllocationRequest
    // (those counted in itemsToMakeLostCount) — confirm against implementations.
    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest) = 0;

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;

    virtual VkResult CheckCorruption(const void* pBlockData) = 0;

    // Makes actual allocation based on request. Request must already be checked and valid.
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation) = 0;

    // Frees suballocation assigned to given memory region.
    virtual void Free(const VmaAllocation allocation) = 0;
    virtual void FreeAtOffset(VkDeviceSize offset) = 0;

    // Tries to resize (grow or shrink) space for given allocation, in place.
    // Default implementation does not support resizing and always returns false.
    virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }

protected:
    const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }

#if VMA_STATS_STRING_ENABLED
    // Helpers shared by derived classes' PrintDetailedMap implementations.
    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
        VkDeviceSize unusedBytes,
        size_t allocationCount,
        size_t unusedRangeCount) const;
    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VmaAllocation hAllocation) const;
    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VkDeviceSize size) const;
    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Size;  // Set by Init(); size of the whole block in bytes.
    const VkAllocationCallbacks* m_pAllocationCallbacks;
};
    4820 
// Helper for Validate() implementations: if `cond` is false, asserts and makes
// the enclosing bool-returning function return false.
#define VMA_VALIDATE(cond) do { if(!(cond)) { \
        VMA_ASSERT(0 && "Validation failed: " #cond); \
        return false; \
    } } while(false)
    4825 
// Default metadata algorithm: keeps all suballocations in a list ordered by
// offset, plus an auxiliary size-sorted vector of free regions to speed up
// the search for free space.
class VmaBlockMetadata_Generic : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
public:
    VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Generic();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    // Every suballocation is either allocated or free, so live allocations = total - free.
    virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

    // Overrides the base: in-place grow/shrink is supported by this algorithm.
    virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);

private:
    uint32_t m_FreeCount;        // Number of free suballocations in m_Suballocations.
    VkDeviceSize m_SumFreeSize;  // Total bytes in free suballocations.
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
    4918 
    4919 /*
    4920 Allocations and their references in internal data structure look like this:
    4921 
    4922 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
    4923 
    4924  0 +-------+
    4925  | |
    4926  | |
    4927  | |
    4928  +-------+
    4929  | Alloc | 1st[m_1stNullItemsBeginCount]
    4930  +-------+
    4931  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4932  +-------+
    4933  | ... |
    4934  +-------+
    4935  | Alloc | 1st[1st.size() - 1]
    4936  +-------+
    4937  | |
    4938  | |
    4939  | |
    4940 GetSize() +-------+
    4941 
    4942 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
    4943 
    4944  0 +-------+
    4945  | Alloc | 2nd[0]
    4946  +-------+
    4947  | Alloc | 2nd[1]
    4948  +-------+
    4949  | ... |
    4950  +-------+
    4951  | Alloc | 2nd[2nd.size() - 1]
    4952  +-------+
    4953  | |
    4954  | |
    4955  | |
    4956  +-------+
    4957  | Alloc | 1st[m_1stNullItemsBeginCount]
    4958  +-------+
    4959  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4960  +-------+
    4961  | ... |
    4962  +-------+
    4963  | Alloc | 1st[1st.size() - 1]
    4964  +-------+
    4965  | |
    4966 GetSize() +-------+
    4967 
    4968 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
    4969 
    4970  0 +-------+
    4971  | |
    4972  | |
    4973  | |
    4974  +-------+
    4975  | Alloc | 1st[m_1stNullItemsBeginCount]
    4976  +-------+
    4977  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4978  +-------+
    4979  | ... |
    4980  +-------+
    4981  | Alloc | 1st[1st.size() - 1]
    4982  +-------+
    4983  | |
    4984  | |
    4985  | |
    4986  +-------+
    4987  | Alloc | 2nd[2nd.size() - 1]
    4988  +-------+
    4989  | ... |
    4990  +-------+
    4991  | Alloc | 2nd[1]
    4992  +-------+
    4993  | Alloc | 2nd[0]
    4994 GetSize() +-------+
    4995 
    4996 */
// Metadata algorithm for linear allocation: two suballocation vectors used in
// ping-pong fashion can behave as a stack, a double stack, or a ring buffer,
// depending on SECOND_VECTOR_MODE (see the layout diagram above).
class VmaBlockMetadata_Linear : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
public:
    VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Linear();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const;
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return GetAllocationCount() == 0; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    /*
    There are two suballocation vectors, used in ping-pong way.
    The one with index m_1stVectorIndex is called 1st.
    The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    2nd can be non-empty only when 1st is not empty.
    When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    */
    typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;

    enum SECOND_VECTOR_MODE
    {
        SECOND_VECTOR_EMPTY,
        /*
        Suballocations in 2nd vector are created later than the ones in 1st, but they
        all have smaller offset.
        */
        SECOND_VECTOR_RING_BUFFER,
        /*
        Suballocations in 2nd vector are upper side of double stack.
        They all have offsets higher than those in 1st vector.
        Top of this stack means smaller offsets, but higher indices in this vector.
        */
        SECOND_VECTOR_DOUBLE_STACK,
    };

    VkDeviceSize m_SumFreeSize;
    SuballocationVectorType m_Suballocations0, m_Suballocations1;
    uint32_t m_1stVectorIndex;  // Selects which of the two vectors currently plays the role of "1st".
    SECOND_VECTOR_MODE m_2ndVectorMode;

    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }

    // Number of items in 1st vector with hAllocation = null at the beginning.
    size_t m_1stNullItemsBeginCount;
    // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    size_t m_1stNullItemsMiddleCount;
    // Number of items in 2nd vector with hAllocation = null.
    size_t m_2ndNullItemsCount;

    // Returns true when 1st vector has accumulated enough null items to be worth
    // compacting — TODO confirm exact threshold in implementation.
    bool ShouldCompact1st() const;
    // Housekeeping after Free(): drops null items / updates counters — confirm details in implementation.
    void CleanupAfterFree();
};
    5095 
    5096 /*
    5097 - GetSize() is the original size of allocated memory block.
    5098 - m_UsableSize is this size aligned down to a power of two.
    5099  All allocations and calculations happen relative to m_UsableSize.
    5100 - GetUnusableSize() is the difference between them.
    5101  It is reported as separate, unused range, not available for allocations.
    5102 
    5103 Node at level 0 has size = m_UsableSize.
    5104 Each next level contains nodes with size 2 times smaller than current level.
    5105 m_LevelCount is the maximum number of levels to use in the current object.
    5106 */
// Metadata algorithm based on the buddy system: a binary tree of nodes where
// each level holds nodes half the size of the level above (see LevelToNodeSize).
class VmaBlockMetadata_Buddy : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
public:
    VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Buddy();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const { return m_AllocationCount; }
    // The unusable tail beyond the power-of-two usable size is reported as free.
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Corruption detection is not supported by this algorithm.
    virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }

private:
    static const VkDeviceSize MIN_NODE_SIZE = 32;
    static const size_t MAX_LEVELS = 30;

    // Counters recomputed while walking the tree in Validate(), to be compared
    // against the stored m_AllocationCount / m_FreeCount / m_SumFreeSize.
    struct ValidationContext
    {
        size_t calculatedAllocationCount;
        size_t calculatedFreeCount;
        VkDeviceSize calculatedSumFreeSize;

        ValidationContext() :
            calculatedAllocationCount(0),
            calculatedFreeCount(0),
            calculatedSumFreeSize(0) { }
    };

    // Node of the buddy tree.
    struct Node
    {
        VkDeviceSize offset;
        enum TYPE
        {
            TYPE_FREE,
            TYPE_ALLOCATION,
            TYPE_SPLIT,
            TYPE_COUNT
        } type;
        Node* parent;
        Node* buddy;  // Sibling node — presumably covers the other half of the parent's range.

        // Payload depends on `type`.
        union
        {
            struct
            {
                Node* prev;
                Node* next;
            } free;        // TYPE_FREE: links inside m_FreeList at this node's level.
            struct
            {
                VmaAllocation alloc;
            } allocation;  // TYPE_ALLOCATION: the allocation occupying this node.
            struct
            {
                Node* leftChild;
            } split;       // TYPE_SPLIT: left child; right child presumably reachable via leftChild->buddy.
        };
    };

    // Size of the memory block aligned down to a power of two.
    VkDeviceSize m_UsableSize;
    uint32_t m_LevelCount;

    Node* m_Root;
    // Free list per level; level 0 holds the largest nodes (size == m_UsableSize).
    struct {
        Node* front;
        Node* back;
    } m_FreeList[MAX_LEVELS];
    // Number of nodes in the tree with type == TYPE_ALLOCATION.
    size_t m_AllocationCount;
    // Number of nodes in the tree with type == TYPE_FREE.
    size_t m_FreeCount;
    // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
    VkDeviceSize m_SumFreeSize;

    VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    void DeleteNode(Node* node);
    bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    // Alloc passed just for validation. Can be null.
    void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    // Adds node to the front of FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next can be undefined.
    void AddToFreeListFront(uint32_t level, Node* node);
    // Removes node from FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next stay untouched.
    void RemoveFromFreeList(uint32_t level, Node* node);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
#endif
};
    5243 
/*
Represents a single block of device memory (`VkDeviceMemory`) with all the
data about its regions (aka suballocations, #VmaAllocation), assigned and free.

Thread-safety: This class must be externally synchronized.
*/
class VmaDeviceMemoryBlock
{
    VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
public:
    // Bookkeeping of this block's suballocations. Concrete type is chosen by
    // Init()'s `algorithm` parameter — confirm in Init() implementation.
    VmaBlockMetadata* m_pMetadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    ~VmaDeviceMemoryBlock()
    {
        VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        VmaAllocator hAllocator,
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        uint32_t id,
        uint32_t algorithm);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    uint32_t GetId() const { return m_Id; }
    void* GetMappedData() const { return m_pMappedData; }

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    VkResult CheckCorruption(VmaAllocator hAllocator);

    // Maps the block's memory; mapping appears reference-counted via m_MapCount.
    // ppData can be null.
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    void Unmap(VmaAllocator hAllocator, uint32_t count);

    VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);

    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkBuffer hBuffer);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkImage hImage);

private:
    uint32_t m_MemoryTypeIndex;
    uint32_t m_Id;
    VkDeviceMemory m_hMemory;

    // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    // Also protects m_MapCount, m_pMappedData.
    VMA_MUTEX m_Mutex;
    uint32_t m_MapCount;  // Number of outstanding map references; must be 0 at destruction (see ~VmaDeviceMemoryBlock).
    void* m_pMappedData;
};
    5312 
    5313 struct VmaPointerLess
    5314 {
    5315  bool operator()(const void* lhs, const void* rhs) const
    5316  {
    5317  return lhs < rhs;
    5318  }
    5319 };
    5320 
class VmaDefragmentator;

/*
Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
Vulkan memory type.

Synchronized internally with a mutex.
*/
struct VmaBlockVector
{
    VMA_CLASS_NO_COPY(VmaBlockVector)
public:
    VmaBlockVector(
        VmaAllocator hAllocator,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool isCustomPool,
        bool explicitBlockSize,
        uint32_t algorithm);
    ~VmaBlockVector();

    VkResult CreateMinBlocks();

    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    uint32_t GetAlgorithm() const { return m_Algorithm; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty() const { return m_Blocks.empty(); }
    bool IsCorruptionDetectionEnabled() const;

    VkResult Allocate(
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    void Free(
        VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);
    VkResult CheckCorruption();

    // Returns m_pDefragmentator, presumably creating it on first use — confirm in implementation.
    VmaDefragmentator* EnsureDefragmentator(
        VmaAllocator hAllocator,
        uint32_t currentFrameIndex);

    VkResult Defragment(
        VmaDefragmentationStats* pDefragmentationStats,
        VkDeviceSize& maxBytesToMove,
        uint32_t& maxAllocationsToMove);

    void DestroyDefragmentator();

private:
    friend class VmaDefragmentator;

    const VmaAllocator m_hAllocator;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_IsCustomPool;
    const bool m_ExplicitBlockSize;
    const uint32_t m_Algorithm;
    /* There can be at most one allocation that is completely empty - a
    hysteresis to avoid pessimistic case of alternating creation and destruction
    of a VkDeviceMemory. */
    bool m_HasEmptyBlock;
    VMA_MUTEX m_Mutex;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    VmaDefragmentator* m_pDefragmentator;  // Created lazily; see EnsureDefragmentator / DestroyDefragmentator.
    uint32_t m_NextBlockId;                // Id assigned to the next block created.

    VkDeviceSize CalcMaxBlockSize() const;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    // To be used only without CAN_MAKE_OTHER_LOST flag.
    VkResult AllocateFromBlock(
        VmaDeviceMemoryBlock* pBlock,
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        VmaAllocationCreateFlags allocFlags,
        void* pUserData,
        VmaSuballocationType suballocType,
        uint32_t strategy,
        VmaAllocation* pAllocation);

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
};
    5441 
// Internal representation of a custom memory pool (the VmaPool handle):
// wraps a single VmaBlockVector plus a numeric id.
struct VmaPool_T
{
    VMA_CLASS_NO_COPY(VmaPool_T)
public:
    VmaBlockVector m_BlockVector;

    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo,
        VkDeviceSize preferredBlockSize);
    ~VmaPool_T();

    uint32_t GetId() const { return m_Id; }
    // Id may be assigned only once, while it is still 0.
    void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    uint32_t m_Id;
};
    5464 
    5465 class VmaDefragmentator
    5466 {
    5467  VMA_CLASS_NO_COPY(VmaDefragmentator)
    5468 private:
    5469  const VmaAllocator m_hAllocator;
    5470  VmaBlockVector* const m_pBlockVector;
    5471  uint32_t m_CurrentFrameIndex;
    5472  VkDeviceSize m_BytesMoved;
    5473  uint32_t m_AllocationsMoved;
    5474 
    5475  struct AllocationInfo
    5476  {
    5477  VmaAllocation m_hAllocation;
    5478  VkBool32* m_pChanged;
    5479 
    5480  AllocationInfo() :
    5481  m_hAllocation(VK_NULL_HANDLE),
    5482  m_pChanged(VMA_NULL)
    5483  {
    5484  }
    5485  };
    5486 
    5487  struct AllocationInfoSizeGreater
    5488  {
    5489  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
    5490  {
    5491  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
    5492  }
    5493  };
    5494 
    5495  // Used between AddAllocation and Defragment.
    5496  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
    5497 
    5498  struct BlockInfo
    5499  {
    5500  VmaDeviceMemoryBlock* m_pBlock;
    5501  bool m_HasNonMovableAllocations;
    5502  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
    5503 
    5504  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
    5505  m_pBlock(VMA_NULL),
    5506  m_HasNonMovableAllocations(true),
    5507  m_Allocations(pAllocationCallbacks),
    5508  m_pMappedDataForDefragmentation(VMA_NULL)
    5509  {
    5510  }
    5511 
    5512  void CalcHasNonMovableAllocations()
    5513  {
    5514  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
    5515  const size_t defragmentAllocCount = m_Allocations.size();
    5516  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
    5517  }
    5518 
    5519  void SortAllocationsBySizeDescecnding()
    5520  {
    5521  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
    5522  }
    5523 
    5524  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
    5525  void Unmap(VmaAllocator hAllocator);
    5526 
    5527  private:
    5528  // Not null if mapped for defragmentation only, not originally mapped.
    5529  void* m_pMappedDataForDefragmentation;
    5530  };
    5531 
    5532  struct BlockPointerLess
    5533  {
    5534  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
    5535  {
    5536  return pLhsBlockInfo->m_pBlock < pRhsBlock;
    5537  }
    5538  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
    5539  {
    5540  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
    5541  }
    5542  };
    5543 
    5544  // 1. Blocks with some non-movable allocations go first.
    5545  // 2. Blocks with smaller sumFreeSize go first.
    5546  struct BlockInfoCompareMoveDestination
    5547  {
    5548  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
    5549  {
    5550  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
    5551  {
    5552  return true;
    5553  }
    5554  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
    5555  {
    5556  return false;
    5557  }
    5558  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
    5559  {
    5560  return true;
    5561  }
    5562  return false;
    5563  }
    5564  };
    5565 
typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
// One entry per block of the defragmented block vector.
BlockInfoVector m_Blocks;

// Performs one pass of the algorithm, bounded by the given limits.
VkResult DefragmentRound(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove);

// Heuristic deciding whether moving an allocation from
// (srcBlockIndex, srcOffset) to (dstBlockIndex, dstOffset) improves layout.
static bool MoveMakesSense(
    size_t dstBlockIndex, VkDeviceSize dstOffset,
    size_t srcBlockIndex, VkDeviceSize srcOffset);

public:
VmaDefragmentator(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex);

~VmaDefragmentator();

// Statistics accumulated across Defragment() calls.
VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

// Registers an allocation as a candidate to be moved.
// *pChanged is later set if the allocation actually moved.
void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);

// Runs the defragmentation, honoring the byte/allocation move limits.
VkResult Defragment(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove);
};
    5594 
    5595 #if VMA_RECORDING_ENABLED
    5596 
/*
Writes a text recording of VMA API calls to a file, replayable by VmaReplay
(see docs/Recording file format.md). Compiled only when VMA_RECORDING_ENABLED.
*/
class VmaRecorder
{
public:
    VmaRecorder();
    // Opens the output file per settings; useMutex guards concurrent writes.
    VkResult Init(const VmaRecordSettings& settings, bool useMutex);
    // Writes the "Config" section describing the physical device, memory
    // properties, and whether the dedicated-allocation extension is enabled.
    void WriteConfiguration(
        const VkPhysicalDeviceProperties& devProps,
        const VkPhysicalDeviceMemoryProperties& memProps,
        bool dedicatedAllocationExtensionEnabled);
    ~VmaRecorder();

    // One Record* method per recorded vmaXxx() entry point.
    void RecordCreateAllocator(uint32_t frameIndex);
    void RecordDestroyAllocator(uint32_t frameIndex);
    void RecordCreatePool(uint32_t frameIndex,
        const VmaPoolCreateInfo& createInfo,
        VmaPool pool);
    void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
    void RecordAllocateMemory(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForImage(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordFreeMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    // Records vmaResizeAllocation (recording file format version >= 1.4).
    void RecordResizeAllocation(
        uint32_t frameIndex,
        VmaAllocation allocation,
        VkDeviceSize newSize);
    void RecordSetAllocationUserData(uint32_t frameIndex,
        VmaAllocation allocation,
        const void* pUserData);
    void RecordCreateLostAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordUnmapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordFlushAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordInvalidateAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordCreateBuffer(uint32_t frameIndex,
        const VkBufferCreateInfo& bufCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordCreateImage(uint32_t frameIndex,
        const VkImageCreateInfo& imageCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordDestroyBuffer(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordDestroyImage(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordTouchAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordGetAllocationInfo(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMakePoolAllocationsLost(uint32_t frameIndex,
        VmaPool pool);

private:
    // Per-call metadata written at the start of every recorded line.
    struct CallParams
    {
        uint32_t threadId;
        double time; // Seconds since recording start (from m_Freq/m_StartCounter).
    };

    // Renders user data for output: either the string itself (when the
    // USER_DATA_COPY_STRING flag is set - TODO confirm) or the pointer value.
    class UserDataString
    {
    public:
        UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
        const char* GetString() const { return m_Str; }

    private:
        char m_PtrStr[17]; // Scratch for a hex pointer: 16 digits + null.
        const char* m_Str; // Points into m_PtrStr or at the user's string.
    };

    bool m_UseMutex;
    VmaRecordFlags m_Flags;
    FILE* m_File;
    VMA_MUTEX m_FileMutex; // Guards m_File when m_UseMutex is set.
    // High-resolution timer state used by GetBasicParams() to compute time.
    int64_t m_Freq;
    int64_t m_StartCounter;

    void GetBasicParams(CallParams& outParams);
    void Flush();
};
    5696 
    5697 #endif // #if VMA_RECORDING_ENABLED
    5698 
// Main allocator object. VmaAllocator is a handle to one of these.
struct VmaAllocator_T
{
    VMA_CLASS_NO_COPY(VmaAllocator_T)
public:
    // If false, the caller guarantees external synchronization.
    bool m_UseMutex;
    // True when the VK_KHR_dedicated_allocation extension path is enabled.
    bool m_UseKhrDedicatedAllocation;
    VkDevice m_hDevice;
    // Whether the user supplied custom VkAllocationCallbacks.
    bool m_AllocationCallbacksSpecified;
    VkAllocationCallbacks m_AllocationCallbacks;
    VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;

    // Number of bytes free out of limit, or VK_WHOLE_SIZE if no limit for that heap.
    VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    VMA_MUTEX m_HeapSizeLimitMutex;

    VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    VkPhysicalDeviceMemoryProperties m_MemProps;

    // Default pools: one block vector per memory type.
    VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];

    // Each vector is sorted by memory (handle value).
    typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];

    VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    ~VmaAllocator_T();

    // Returns user-specified callbacks, or null to use default CPU allocation.
    const VkAllocationCallbacks* GetAllocationCallbacks() const
    {
        return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    }
    const VmaVulkanFunctions& GetVulkanFunctions() const
    {
        return m_VulkanFunctions;
    }

    // Device bufferImageGranularity, clamped from below by the debug override.
    VkDeviceSize GetBufferImageGranularity() const
    {
        return VMA_MAX(
            static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    }

    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }

    uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    {
        VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
        return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    }
    // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    {
        return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
            VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    }
    // Minimum alignment for all allocations in specific memory type.
    // Non-coherent types must also respect nonCoherentAtomSize.
    VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    {
        return IsMemoryTypeNonCoherent(memTypeIndex) ?
            VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
            (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    }

    bool IsIntegratedGpu() const
    {
        return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    }

#if VMA_RECORDING_ENABLED
    VmaRecorder* GetRecorder() const { return m_pRecorder; }
#endif

    void GetBufferMemoryRequirements(
        VkBuffer hBuffer,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;
    void GetImageMemoryRequirements(
        VkImage hImage,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;

    // Main allocation function.
    VkResult AllocateMemory(
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Main deallocation function.
    void FreeMemory(const VmaAllocation allocation);

    // Implements vmaResizeAllocation(): grows/shrinks alloc in place to newSize.
    VkResult ResizeAllocation(
        const VmaAllocation alloc,
        VkDeviceSize newSize);

    void CalculateStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    VkResult Defragment(
        VmaAllocation* pAllocations,
        size_t allocationCount,
        VkBool32* pAllocationsChanged,
        const VmaDefragmentationInfo* pDefragmentationInfo,
        VmaDefragmentationStats* pDefragmentationStats);

    void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    bool TouchAllocation(VmaAllocation hAllocation);

    VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    void DestroyPool(VmaPool pool);
    void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);

    void SetCurrentFrameIndex(uint32_t frameIndex);
    uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }

    void MakePoolAllocationsLost(
        VmaPool hPool,
        size_t* pLostAllocationCount);
    VkResult CheckPoolCorruption(VmaPool hPool);
    VkResult CheckCorruption(uint32_t memoryTypeBits);

    void CreateLostAllocation(VmaAllocation* pAllocation);

    // Wrappers over vkAllocateMemory/vkFreeMemory that also maintain
    // heap size limits and invoke the user's device memory callbacks.
    VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);

    VkResult Map(VmaAllocation hAllocation, void** ppData);
    void Unmap(VmaAllocation hAllocation);

    VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
    VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);

    void FlushOrInvalidateAllocation(
        VmaAllocation hAllocation,
        VkDeviceSize offset, VkDeviceSize size,
        VMA_CACHE_OPERATION op);

    // Fills mapped memory of the allocation with the given byte pattern
    // (used by debug-margin/corruption-detection features).
    void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);

private:
    VkDeviceSize m_PreferredLargeHeapBlockSize;

    VkPhysicalDevice m_PhysicalDevice;
    VMA_ATOMIC_UINT32 m_CurrentFrameIndex;

    VMA_MUTEX m_PoolsMutex;
    // Protected by m_PoolsMutex. Sorted by pointer value.
    VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    uint32_t m_NextPoolId;

    VmaVulkanFunctions m_VulkanFunctions;

#if VMA_RECORDING_ENABLED
    VmaRecorder* m_pRecorder;
#endif

    void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);

    VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);

    VkResult AllocateMemoryOfType(
        VkDeviceSize size,
        VkDeviceSize alignment,
        bool dedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        uint32_t memTypeIndex,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Allocates and registers new VkDeviceMemory specifically for single allocation.
    VkResult AllocateDedicatedMemory(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        bool map,
        bool isUserDataString,
        void* pUserData,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        VmaAllocation* pAllocation);

    // Tries to free pMemory as Dedicated Memory. Returns true if found and freed.
    void FreeDedicatedMemory(VmaAllocation allocation);
};
    5900 
    5902 // Memory allocation #2 after VmaAllocator_T definition
    5903 
    5904 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
    5905 {
    5906  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
    5907 }
    5908 
    5909 static void VmaFree(VmaAllocator hAllocator, void* ptr)
    5910 {
    5911  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
    5912 }
    5913 
    5914 template<typename T>
    5915 static T* VmaAllocate(VmaAllocator hAllocator)
    5916 {
    5917  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
    5918 }
    5919 
    5920 template<typename T>
    5921 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
    5922 {
    5923  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
    5924 }
    5925 
    5926 template<typename T>
    5927 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    5928 {
    5929  if(ptr != VMA_NULL)
    5930  {
    5931  ptr->~T();
    5932  VmaFree(hAllocator, ptr);
    5933  }
    5934 }
    5935 
    5936 template<typename T>
    5937 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    5938 {
    5939  if(ptr != VMA_NULL)
    5940  {
    5941  for(size_t i = count; i--; )
    5942  ptr[i].~T();
    5943  VmaFree(hAllocator, ptr);
    5944  }
    5945 }
    5946 
    5948 // VmaStringBuilder
    5949 
    5950 #if VMA_STATS_STRING_ENABLED
    5951 
// Minimal growable character buffer used to build the statistics string.
// The buffer is not null-terminated: use GetLength() together with GetData().
class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    void AddPointer(const void* ptr);

private:
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
    5969 
    5970 void VmaStringBuilder::Add(const char* pStr)
    5971 {
    5972  const size_t strLen = strlen(pStr);
    5973  if(strLen > 0)
    5974  {
    5975  const size_t oldCount = m_Data.size();
    5976  m_Data.resize(oldCount + strLen);
    5977  memcpy(m_Data.data() + oldCount, pStr, strLen);
    5978  }
    5979 }
    5980 
    5981 void VmaStringBuilder::AddNumber(uint32_t num)
    5982 {
    5983  char buf[11];
    5984  VmaUint32ToStr(buf, sizeof(buf), num);
    5985  Add(buf);
    5986 }
    5987 
    5988 void VmaStringBuilder::AddNumber(uint64_t num)
    5989 {
    5990  char buf[21];
    5991  VmaUint64ToStr(buf, sizeof(buf), num);
    5992  Add(buf);
    5993 }
    5994 
    5995 void VmaStringBuilder::AddPointer(const void* ptr)
    5996 {
    5997  char buf[21];
    5998  VmaPtrToStr(buf, sizeof(buf), ptr);
    5999  Add(buf);
    6000 }
    6001 
    6002 #endif // #if VMA_STATS_STRING_ENABLED
    6003 
    6005 // VmaJsonWriter
    6006 
    6007 #if VMA_STATS_STRING_ENABLED
    6008 
/*
Emits pretty-printed JSON into a VmaStringBuilder.
Contract: BeginObject/EndObject and BeginArray/EndArray must be balanced,
and inside an object values alternate key,value where every key is a string
(enforced by an assert in BeginValue). BeginString/EndString bracket
multi-fragment string values built with the ContinueString* overloads.
*/
class VmaJsonWriter
{
    VMA_CLASS_NO_COPY(VmaJsonWriter)
public:
    // sb is only borrowed; it must outlive this writer.
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    void BeginObject(bool singleLine = false);
    void EndObject();

    void BeginArray(bool singleLine = false);
    void EndArray();

    // Complete string value in one call (BeginString + EndString).
    void WriteString(const char* pStr);
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Pointer(const void* ptr);
    void EndString(const char* pStr = VMA_NULL);

    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    // Indentation unit repeated once per nesting level.
    static const char* const INDENT;

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    // One entry per currently open object/array.
    struct StackItem
    {
        COLLECTION_TYPE type;
        uint32_t valueCount;   // Items written so far (keys and values both count).
        bool singleLineMode;   // Suppresses newlines/indentation inside this collection.
    };

    VmaStringBuilder& m_SB;
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    bool m_InsideString;

    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
    6057 
// Indentation unit used by WriteIndent(), repeated per nesting level.
const char* const VmaJsonWriter::INDENT = " ";
    6059 
// The writer borrows sb; the builder must outlive the writer.
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}

// All strings and collections must be closed before destruction.
VmaJsonWriter::~VmaJsonWriter()
{
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}
    6072 
// Opens a JSON object: emits '{' and pushes a stack entry that tracks
// key/value alternation. singleLine suppresses newlines/indentation inside.
void VmaJsonWriter::BeginObject(bool singleLine)
{
    VMA_ASSERT(!m_InsideString);

    BeginValue(false);
    m_SB.Add('{');

    StackItem item;
    item.type = COLLECTION_TYPE_OBJECT;
    item.valueCount = 0;
    item.singleLineMode = singleLine;
    m_Stack.push_back(item);
}

// Closes the innermost collection, which must be an object.
void VmaJsonWriter::EndObject()
{
    VMA_ASSERT(!m_InsideString);

    WriteIndent(true);
    m_SB.Add('}');

    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    m_Stack.pop_back();
}

// Opens a JSON array: emits '[' and pushes a tracking stack entry.
void VmaJsonWriter::BeginArray(bool singleLine)
{
    VMA_ASSERT(!m_InsideString);

    BeginValue(false);
    m_SB.Add('[');

    StackItem item;
    item.type = COLLECTION_TYPE_ARRAY;
    item.valueCount = 0;
    item.singleLineMode = singleLine;
    m_Stack.push_back(item);
}

// Closes the innermost collection, which must be an array.
void VmaJsonWriter::EndArray()
{
    VMA_ASSERT(!m_InsideString);

    WriteIndent(true);
    m_SB.Add(']');

    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    m_Stack.pop_back();
}
    6122 
// Writes a complete, escaped JSON string value.
void VmaJsonWriter::WriteString(const char* pStr)
{
    BeginString(pStr);
    EndString();
}

// Starts a string value; until EndString() only ContinueString* calls are
// valid. pStr, if non-empty, becomes the first fragment.
void VmaJsonWriter::BeginString(const char* pStr)
{
    VMA_ASSERT(!m_InsideString);

    BeginValue(true);
    m_SB.Add('"');
    m_InsideString = true;
    if(pStr != VMA_NULL && pStr[0] != '\0')
    {
        ContinueString(pStr);
    }
}
    6141 
    6142 void VmaJsonWriter::ContinueString(const char* pStr)
    6143 {
    6144  VMA_ASSERT(m_InsideString);
    6145 
    6146  const size_t strLen = strlen(pStr);
    6147  for(size_t i = 0; i < strLen; ++i)
    6148  {
    6149  char ch = pStr[i];
    6150  if(ch == '\\')
    6151  {
    6152  m_SB.Add("\\\\");
    6153  }
    6154  else if(ch == '"')
    6155  {
    6156  m_SB.Add("\\\"");
    6157  }
    6158  else if(ch >= 32)
    6159  {
    6160  m_SB.Add(ch);
    6161  }
    6162  else switch(ch)
    6163  {
    6164  case '\b':
    6165  m_SB.Add("\\b");
    6166  break;
    6167  case '\f':
    6168  m_SB.Add("\\f");
    6169  break;
    6170  case '\n':
    6171  m_SB.Add("\\n");
    6172  break;
    6173  case '\r':
    6174  m_SB.Add("\\r");
    6175  break;
    6176  case '\t':
    6177  m_SB.Add("\\t");
    6178  break;
    6179  default:
    6180  VMA_ASSERT(0 && "Character not currently supported.");
    6181  break;
    6182  }
    6183  }
    6184 }
    6185 
// Appends a decimal number to the string value being written.
void VmaJsonWriter::ContinueString(uint32_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}

void VmaJsonWriter::ContinueString(uint64_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}

// Appends a pointer (formatted by VmaPtrToStr) to the string being written.
void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddPointer(ptr);
}
    6203 
// Closes the string value opened by BeginString(). If pStr is provided and
// non-empty, it is appended (escaped) before the closing quote.
void VmaJsonWriter::EndString(const char* pStr)
{
    VMA_ASSERT(m_InsideString);
    if(pStr != VMA_NULL && pStr[0] != '\0')
    {
        ContinueString(pStr);
    }
    m_SB.Add('"');
    m_InsideString = false;
}
    6214 
// Writes a numeric JSON value. Must not be called inside a string.
void VmaJsonWriter::WriteNumber(uint32_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}

void VmaJsonWriter::WriteNumber(uint64_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}

// Writes JSON true/false.
void VmaJsonWriter::WriteBool(bool b)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add(b ? "true" : "false");
}

// Writes JSON null.
void VmaJsonWriter::WriteNull()
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add("null");
}
    6242 
// Emits whatever must precede a new value (": " after a key, ", " between
// items, indentation) and enforces that inside an object every even-indexed
// item - the key - is a string.
void VmaJsonWriter::BeginValue(bool isString)
{
    if(!m_Stack.empty())
    {
        StackItem& currItem = m_Stack.back();
        // Even valueCount inside an object: we are about to write a key.
        if(currItem.type == COLLECTION_TYPE_OBJECT &&
            currItem.valueCount % 2 == 0)
        {
            VMA_ASSERT(isString);
        }

        // Odd valueCount inside an object: this is the value following a key.
        if(currItem.type == COLLECTION_TYPE_OBJECT &&
            currItem.valueCount % 2 != 0)
        {
            m_SB.Add(": ");
        }
        else if(currItem.valueCount > 0)
        {
            // Subsequent item in an array, or next key in an object.
            m_SB.Add(", ");
            WriteIndent();
        }
        else
        {
            // First item of the collection.
            WriteIndent();
        }
        ++currItem.valueCount;
    }
}
    6271 
    6272 void VmaJsonWriter::WriteIndent(bool oneLess)
    6273 {
    6274  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    6275  {
    6276  m_SB.AddNewLine();
    6277 
    6278  size_t count = m_Stack.size();
    6279  if(count > 0 && oneLess)
    6280  {
    6281  --count;
    6282  }
    6283  for(size_t i = 0; i < count; ++i)
    6284  {
    6285  m_SB.Add(INDENT);
    6286  }
    6287  }
    6288 }
    6289 
    6290 #endif // #if VMA_STATS_STRING_ENABLED
    6291 
    6293 
    6294 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
    6295 {
    6296  if(IsUserDataString())
    6297  {
    6298  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
    6299 
    6300  FreeUserDataString(hAllocator);
    6301 
    6302  if(pUserData != VMA_NULL)
    6303  {
    6304  const char* const newStrSrc = (char*)pUserData;
    6305  const size_t newStrLen = strlen(newStrSrc);
    6306  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
    6307  memcpy(newStrDst, newStrSrc, newStrLen + 1);
    6308  m_pUserData = newStrDst;
    6309  }
    6310  }
    6311  else
    6312  {
    6313  m_pUserData = pUserData;
    6314  }
    6315 }
    6316 
// Re-points this block allocation at (block, offset), e.g. after
// defragmentation moved it. Any outstanding map reference count is
// transferred from the old block to the new one so that later Unmap
// bookkeeping stays balanced.
void VmaAllocation_T::ChangeBlockAllocation(
    VmaAllocator hAllocator,
    VmaDeviceMemoryBlock* block,
    VkDeviceSize offset)
{
    VMA_ASSERT(block != VMA_NULL);
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);

    // Move mapping reference counter from old block to new block.
    if(block != m_BlockAllocation.m_Block)
    {
        uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
        // A persistently mapped allocation holds one extra reference.
        if(IsPersistentMap())
            ++mapRefCount;
        m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
        block->Map(hAllocator, mapRefCount, VMA_NULL);
    }

    m_BlockAllocation.m_Block = block;
    m_BlockAllocation.m_Offset = offset;
}
    6338 
// Updates the cached allocation size; supports the ResizeAllocation path
// (vmaResizeAllocation). newSize must be greater than zero.
void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
{
    VMA_ASSERT(newSize > 0);
    m_Size = newSize;
}
    6344 
    6345 VkDeviceSize VmaAllocation_T::GetOffset() const
    6346 {
    6347  switch(m_Type)
    6348  {
    6349  case ALLOCATION_TYPE_BLOCK:
    6350  return m_BlockAllocation.m_Offset;
    6351  case ALLOCATION_TYPE_DEDICATED:
    6352  return 0;
    6353  default:
    6354  VMA_ASSERT(0);
    6355  return 0;
    6356  }
    6357 }
    6358 
    6359 VkDeviceMemory VmaAllocation_T::GetMemory() const
    6360 {
    6361  switch(m_Type)
    6362  {
    6363  case ALLOCATION_TYPE_BLOCK:
    6364  return m_BlockAllocation.m_Block->GetDeviceMemory();
    6365  case ALLOCATION_TYPE_DEDICATED:
    6366  return m_DedicatedAllocation.m_hMemory;
    6367  default:
    6368  VMA_ASSERT(0);
    6369  return VK_NULL_HANDLE;
    6370  }
    6371 }
    6372 
    6373 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    6374 {
    6375  switch(m_Type)
    6376  {
    6377  case ALLOCATION_TYPE_BLOCK:
    6378  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
    6379  case ALLOCATION_TYPE_DEDICATED:
    6380  return m_DedicatedAllocation.m_MemoryTypeIndex;
    6381  default:
    6382  VMA_ASSERT(0);
    6383  return UINT32_MAX;
    6384  }
    6385 }
    6386 
    6387 void* VmaAllocation_T::GetMappedData() const
    6388 {
    6389  switch(m_Type)
    6390  {
    6391  case ALLOCATION_TYPE_BLOCK:
    6392  if(m_MapCount != 0)
    6393  {
    6394  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
    6395  VMA_ASSERT(pBlockData != VMA_NULL);
    6396  return (char*)pBlockData + m_BlockAllocation.m_Offset;
    6397  }
    6398  else
    6399  {
    6400  return VMA_NULL;
    6401  }
    6402  break;
    6403  case ALLOCATION_TYPE_DEDICATED:
    6404  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
    6405  return m_DedicatedAllocation.m_pMappedData;
    6406  default:
    6407  VMA_ASSERT(0);
    6408  return VMA_NULL;
    6409  }
    6410 }
    6411 
    6412 bool VmaAllocation_T::CanBecomeLost() const
    6413 {
    6414  switch(m_Type)
    6415  {
    6416  case ALLOCATION_TYPE_BLOCK:
    6417  return m_BlockAllocation.m_CanBecomeLost;
    6418  case ALLOCATION_TYPE_DEDICATED:
    6419  return false;
    6420  default:
    6421  VMA_ASSERT(0);
    6422  return false;
    6423  }
    6424 }
    6425 
// Pool this allocation belongs to. Valid only for block allocations.
VmaPool VmaAllocation_T::GetPool() const
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    return m_BlockAllocation.m_hPool;
}
    6431 
// Tries to mark this allocation as lost. Returns true on success.
// Fails (returns false) when the allocation was used too recently:
// lastUseFrame + frameInUseCount >= currentFrameIndex.
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    // Lock-free CAS loop against the atomic last-use frame index.
    // NOTE(review): assumes CompareExchangeLastUseFrameIndex refreshes
    // localLastUseFrameIndex on failure (standard CAS contract) - confirm
    // against its definition, otherwise this loop would spin.
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            // Already lost; the CanBecomeLost precondition was violated.
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            // Still potentially in use - cannot be lost yet.
            return false;
        }
        else // Last use time earlier than current time.
        {
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
        }
    }
}
    6463 
    6464 #if VMA_STATS_STRING_ENABLED
    6465 
    6466 // Correspond to values of enum VmaSuballocationType.
// Correspond to values of enum VmaSuballocationType.
// Order must match the enum - indexed directly by m_SuballocationType.
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};
    6475 
// Emits this allocation's properties as key/value pairs into an
// already-open JSON object of the given writer.
void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
{
    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);

    json.WriteString("Size");
    json.WriteNumber(m_Size);

    if(m_pUserData != VMA_NULL)
    {
        json.WriteString("UserData");
        if(IsUserDataString())
        {
            // String user data is written verbatim (escaped by the writer).
            json.WriteString((const char*)m_pUserData);
        }
        else
        {
            // Opaque user data is written as its pointer value.
            json.BeginString();
            json.ContinueString_Pointer(m_pUserData);
            json.EndString();
        }
    }

    json.WriteString("CreationFrameIndex");
    json.WriteNumber(m_CreationFrameIndex);

    json.WriteString("LastUseFrameIndex");
    json.WriteNumber(GetLastUseFrameIndex());

    if(m_BufferImageUsage != 0)
    {
        json.WriteString("Usage");
        json.WriteNumber(m_BufferImageUsage);
    }
}
    6511 
    6512 #endif
    6513 
    6514 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
    6515 {
    6516  VMA_ASSERT(IsUserDataString());
    6517  if(m_pUserData != VMA_NULL)
    6518  {
    6519  char* const oldStr = (char*)m_pUserData;
    6520  const size_t oldStrLen = strlen(oldStr);
    6521  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
    6522  m_pUserData = VMA_NULL;
    6523  }
    6524 }
    6525 
    6526 void VmaAllocation_T::BlockAllocMap()
    6527 {
    6528  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    6529 
    6530  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    6531  {
    6532  ++m_MapCount;
    6533  }
    6534  else
    6535  {
    6536  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    6537  }
    6538 }
    6539 
    6540 void VmaAllocation_T::BlockAllocUnmap()
    6541 {
    6542  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    6543 
    6544  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    6545  {
    6546  --m_MapCount;
    6547  }
    6548  else
    6549  {
    6550  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    6551  }
    6552 }
    6553 
// Maps the dedicated VkDeviceMemory of this allocation with reference
// counting. The lower 7 bits of m_MapCount are the reference count; the top
// bit marks persistent mapping. On repeat calls the cached pointer is
// returned; the first call performs the actual vkMapMemory.
VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if(m_MapCount != 0)
    {
        // Already mapped: bump the reference count and return the cached pointer.
        if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
        {
            VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
            *ppData = m_DedicatedAllocation.m_pMappedData;
            ++m_MapCount;
            return VK_SUCCESS;
        }
        else
        {
            // Reference count would overflow its 7-bit storage.
            VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
            return VK_ERROR_MEMORY_MAP_FAILED;
        }
    }
    else
    {
        // First map: call Vulkan and cache the returned pointer on success.
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_DedicatedAllocation.m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            ppData);
        if(result == VK_SUCCESS)
        {
            m_DedicatedAllocation.m_pMappedData = *ppData;
            m_MapCount = 1;
        }
        return result;
    }
}
    6590 
    6591 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
    6592 {
    6593  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    6594 
    6595  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    6596  {
    6597  --m_MapCount;
    6598  if(m_MapCount == 0)
    6599  {
    6600  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
    6601  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
    6602  hAllocator->m_hDevice,
    6603  m_DedicatedAllocation.m_hMemory);
    6604  }
    6605  }
    6606  else
    6607  {
    6608  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    6609  }
    6610 }
    6611 
    6612 #if VMA_STATS_STRING_ENABLED
    6613 
// Writes a VmaStatInfo as a JSON object. The Min/Avg/Max sub-objects are
// emitted only when there is more than one item, since with 0 or 1 items
// they would add no information.
static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
{
    json.BeginObject();

    json.WriteString("Blocks");
    json.WriteNumber(stat.blockCount);

    json.WriteString("Allocations");
    json.WriteNumber(stat.allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber(stat.unusedRangeCount);

    json.WriteString("UsedBytes");
    json.WriteNumber(stat.usedBytes);

    json.WriteString("UnusedBytes");
    json.WriteNumber(stat.unusedBytes);

    if(stat.allocationCount > 1)
    {
        json.WriteString("AllocationSize");
        json.BeginObject(true); // true = single-line object
        json.WriteString("Min");
        json.WriteNumber(stat.allocationSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.allocationSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.allocationSizeMax);
        json.EndObject();
    }

    if(stat.unusedRangeCount > 1)
    {
        json.WriteString("UnusedRangeSize");
        json.BeginObject(true); // true = single-line object
        json.WriteString("Min");
        json.WriteNumber(stat.unusedRangeSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.unusedRangeSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.unusedRangeSizeMax);
        json.EndObject();
    }

    json.EndObject();
}
    6661 
    6662 #endif // #if VMA_STATS_STRING_ENABLED
    6663 
// Comparator ordering free-suballocation iterators by suballocation size.
// Used to keep m_FreeSuballocationsBySize sorted and to binary-search it.
struct VmaSuballocationItemSizeLess
{
    // Strict weak ordering between two registered free suballocations.
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        const VmaSuballocationList::iterator rhs) const
    {
        return lhs->size < rhs->size;
    }
    // Heterogeneous comparison against a plain size, for binary search.
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        VkDeviceSize rhsSize) const
    {
        return lhs->size < rhsSize;
    }
};
    6679 
    6680 
    6682 // class VmaBlockMetadata
    6683 
// Base-class constructor: size is 0 until Init() is called; allocation
// callbacks are cached from the allocator for later internal allocations.
VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    m_Size(0),
    m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
{
}
    6689 
    6690 #if VMA_STATS_STRING_ENABLED
    6691 
// Opens the JSON object describing one memory block (totals first) and
// begins the "Suballocations" array. Counterpart of PrintDetailedMap_End(),
// which must be called after the array entries are written.
void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    VkDeviceSize unusedBytes,
    size_t allocationCount,
    size_t unusedRangeCount) const
{
    json.BeginObject();

    json.WriteString("TotalBytes");
    json.WriteNumber(GetSize());

    json.WriteString("UnusedBytes");
    json.WriteNumber(unusedBytes);

    json.WriteString("Allocations");
    json.WriteNumber((uint64_t)allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber((uint64_t)unusedRangeCount);

    // Array stays open; entries are added by PrintDetailedMap_Allocation /
    // PrintDetailedMap_UnusedRange and closed by PrintDetailedMap_End.
    json.WriteString("Suballocations");
    json.BeginArray();
}
    6714 
// Writes one used suballocation as a single-line JSON object: its offset
// plus the allocation's own parameters (type, size, user data, ...).
void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VmaAllocation hAllocation) const
{
    json.BeginObject(true); // true = single-line object

    json.WriteString("Offset");
    json.WriteNumber(offset);

    hAllocation->PrintParameters(json);

    json.EndObject();
}
    6728 
// Writes one free range as a single-line JSON object with type "FREE",
// mirroring the shape of the entries written for used allocations.
void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VkDeviceSize size) const
{
    json.BeginObject(true); // true = single-line object

    json.WriteString("Offset");
    json.WriteNumber(offset);

    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);

    json.WriteString("Size");
    json.WriteNumber(size);

    json.EndObject();
}
    6746 
// Closes the "Suballocations" array and the block object opened by
// PrintDetailedMap_Begin().
void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
{
    json.EndArray();
    json.EndObject();
}
    6752 
    6753 #endif // #if VMA_STATS_STRING_ENABLED
    6754 
    6756 // class VmaBlockMetadata_Generic
    6757 
// Constructor: starts with no suballocations; real setup happens in Init().
// Both internal containers use the allocator's CPU allocation callbacks.
VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}
    6766 
// Destructor: containers clean up via their own destructors; nothing else owned.
VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
{
}
    6770 
    6771 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
    6772 {
    6773  VmaBlockMetadata::Init(size);
    6774 
    6775  m_FreeCount = 1;
    6776  m_SumFreeSize = size;
    6777 
    6778  VmaSuballocation suballoc = {};
    6779  suballoc.offset = 0;
    6780  suballoc.size = size;
    6781  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    6782  suballoc.hAllocation = VK_NULL_HANDLE;
    6783 
    6784  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    6785  m_Suballocations.push_back(suballoc);
    6786  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    6787  --suballocItem;
    6788  m_FreeSuballocationsBySize.push_back(suballocItem);
    6789 }
    6790 
// Full consistency check of this block's metadata. Walks all suballocations
// and cross-checks them against the cached totals (m_FreeCount,
// m_SumFreeSize) and the m_FreeSuballocationsBySize index. Returns false
// (via VMA_VALIDATE) on the first inconsistency found; true if everything
// is coherent.
bool VmaBlockMetadata_Generic::Validate() const
{
    VMA_VALIDATE(!m_Suballocations.empty());

    // Expected offset of new suballocation as calculated from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visited suballocation was free.
    bool prevFree = false;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        VMA_VALIDATE(subAlloc.offset == calculatedOffset);

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        VMA_VALIDATE(!prevFree || !currFree);

        // A suballocation is free exactly when it carries no allocation handle.
        VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            // Only free ranges at least this large are kept in the by-size index.
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                ++freeSuballocationsToRegister;
            }

            // Margin required between allocations - every free space must be at least that large.
            VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
        }
        else
        {
            // The allocation object must agree with the list about its placement.
            VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
            VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);

            // Margin required between allocations - previous allocation must be free.
            VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
        }

        calculatedOffset += subAlloc.size;
        prevFree = currFree;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);

    // The by-size index must contain only free items, sorted ascending by size.
    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
        // They must be sorted by size ascending.
        VMA_VALIDATE(suballocItem->size >= lastSize);

        lastSize = suballocItem->size;
    }

    // Check if totals match calculacted values.
    VMA_VALIDATE(ValidateFreeSuballocationList());
    VMA_VALIDATE(calculatedOffset == GetSize());
    VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    VMA_VALIDATE(calculatedFreeCount == m_FreeCount);

    return true;
}
    6872 
    6873 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
    6874 {
    6875  if(!m_FreeSuballocationsBySize.empty())
    6876  {
    6877  return m_FreeSuballocationsBySize.back()->size;
    6878  }
    6879  else
    6880  {
    6881  return 0;
    6882  }
    6883 }
    6884 
    6885 bool VmaBlockMetadata_Generic::IsEmpty() const
    6886 {
    6887  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
    6888 }
    6889 
    6890 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    6891 {
    6892  outInfo.blockCount = 1;
    6893 
    6894  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    6895  outInfo.allocationCount = rangeCount - m_FreeCount;
    6896  outInfo.unusedRangeCount = m_FreeCount;
    6897 
    6898  outInfo.unusedBytes = m_SumFreeSize;
    6899  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
    6900 
    6901  outInfo.allocationSizeMin = UINT64_MAX;
    6902  outInfo.allocationSizeMax = 0;
    6903  outInfo.unusedRangeSizeMin = UINT64_MAX;
    6904  outInfo.unusedRangeSizeMax = 0;
    6905 
    6906  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    6907  suballocItem != m_Suballocations.cend();
    6908  ++suballocItem)
    6909  {
    6910  const VmaSuballocation& suballoc = *suballocItem;
    6911  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    6912  {
    6913  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    6914  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    6915  }
    6916  else
    6917  {
    6918  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
    6919  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
    6920  }
    6921  }
    6922 }
    6923 
    6924 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
    6925 {
    6926  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    6927 
    6928  inoutStats.size += GetSize();
    6929  inoutStats.unusedSize += m_SumFreeSize;
    6930  inoutStats.allocationCount += rangeCount - m_FreeCount;
    6931  inoutStats.unusedRangeCount += m_FreeCount;
    6932  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    6933 }
    6934 
    6935 #if VMA_STATS_STRING_ENABLED
    6936 
    6937 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
    6938 {
    6939  PrintDetailedMap_Begin(json,
    6940  m_SumFreeSize, // unusedBytes
    6941  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
    6942  m_FreeCount); // unusedRangeCount
    6943 
    6944  size_t i = 0;
    6945  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    6946  suballocItem != m_Suballocations.cend();
    6947  ++suballocItem, ++i)
    6948  {
    6949  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    6950  {
    6951  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
    6952  }
    6953  else
    6954  {
    6955  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
    6956  }
    6957  }
    6958 
    6959  PrintDetailedMap_End(json);
    6960 }
    6961 
    6962 #endif // #if VMA_STATS_STRING_ENABLED
    6963 
    6964 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
    6965  uint32_t currentFrameIndex,
    6966  uint32_t frameInUseCount,
    6967  VkDeviceSize bufferImageGranularity,
    6968  VkDeviceSize allocSize,
    6969  VkDeviceSize allocAlignment,
    6970  bool upperAddress,
    6971  VmaSuballocationType allocType,
    6972  bool canMakeOtherLost,
    6973  uint32_t strategy,
    6974  VmaAllocationRequest* pAllocationRequest)
    6975 {
    6976  VMA_ASSERT(allocSize > 0);
    6977  VMA_ASSERT(!upperAddress);
    6978  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    6979  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    6980  VMA_HEAVY_ASSERT(Validate());
    6981 
    6982  // There is not enough total free space in this block to fullfill the request: Early return.
    6983  if(canMakeOtherLost == false &&
    6984  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
    6985  {
    6986  return false;
    6987  }
    6988 
    6989  // New algorithm, efficiently searching freeSuballocationsBySize.
    6990  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    6991  if(freeSuballocCount > 0)
    6992  {
    6994  {
    6995  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
    6996  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    6997  m_FreeSuballocationsBySize.data(),
    6998  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    6999  allocSize + 2 * VMA_DEBUG_MARGIN,
    7000  VmaSuballocationItemSizeLess());
    7001  size_t index = it - m_FreeSuballocationsBySize.data();
    7002  for(; index < freeSuballocCount; ++index)
    7003  {
    7004  if(CheckAllocation(
    7005  currentFrameIndex,
    7006  frameInUseCount,
    7007  bufferImageGranularity,
    7008  allocSize,
    7009  allocAlignment,
    7010  allocType,
    7011  m_FreeSuballocationsBySize[index],
    7012  false, // canMakeOtherLost
    7013  &pAllocationRequest->offset,
    7014  &pAllocationRequest->itemsToMakeLostCount,
    7015  &pAllocationRequest->sumFreeSize,
    7016  &pAllocationRequest->sumItemSize))
    7017  {
    7018  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    7019  return true;
    7020  }
    7021  }
    7022  }
    7023  else // WORST_FIT, FIRST_FIT
    7024  {
    7025  // Search staring from biggest suballocations.
    7026  for(size_t index = freeSuballocCount; index--; )
    7027  {
    7028  if(CheckAllocation(
    7029  currentFrameIndex,
    7030  frameInUseCount,
    7031  bufferImageGranularity,
    7032  allocSize,
    7033  allocAlignment,
    7034  allocType,
    7035  m_FreeSuballocationsBySize[index],
    7036  false, // canMakeOtherLost
    7037  &pAllocationRequest->offset,
    7038  &pAllocationRequest->itemsToMakeLostCount,
    7039  &pAllocationRequest->sumFreeSize,
    7040  &pAllocationRequest->sumItemSize))
    7041  {
    7042  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    7043  return true;
    7044  }
    7045  }
    7046  }
    7047  }
    7048 
    7049  if(canMakeOtherLost)
    7050  {
    7051  // Brute-force algorithm. TODO: Come up with something better.
    7052 
    7053  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
    7054  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
    7055 
    7056  VmaAllocationRequest tmpAllocRequest = {};
    7057  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    7058  suballocIt != m_Suballocations.end();
    7059  ++suballocIt)
    7060  {
    7061  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    7062  suballocIt->hAllocation->CanBecomeLost())
    7063  {
    7064  if(CheckAllocation(
    7065  currentFrameIndex,
    7066  frameInUseCount,
    7067  bufferImageGranularity,
    7068  allocSize,
    7069  allocAlignment,
    7070  allocType,
    7071  suballocIt,
    7072  canMakeOtherLost,
    7073  &tmpAllocRequest.offset,
    7074  &tmpAllocRequest.itemsToMakeLostCount,
    7075  &tmpAllocRequest.sumFreeSize,
    7076  &tmpAllocRequest.sumItemSize))
    7077  {
    7078  tmpAllocRequest.item = suballocIt;
    7079 
    7080  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
    7082  {
    7083  *pAllocationRequest = tmpAllocRequest;
    7084  }
    7085  }
    7086  }
    7087  }
    7088 
    7089  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
    7090  {
    7091  return true;
    7092  }
    7093  }
    7094 
    7095  return false;
    7096 }
    7097 
// Makes lost the allocations counted in pAllocationRequest->itemsToMakeLostCount,
// starting at pAllocationRequest->item, so that the requested region becomes
// one contiguous free range. Returns false if any of them cannot be made lost.
bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    while(pAllocationRequest->itemsToMakeLostCount > 0)
    {
        // Skip over the free item itself; the victims are the used items after it.
        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++pAllocationRequest->item;
        }
        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation merges adjacent free ranges and returns the merged item.
            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
            --pAllocationRequest->itemsToMakeLostCount;
        }
        else
        {
            return false;
        }
    }

    VMA_HEAVY_ASSERT(Validate());
    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);

    return true;
}
    7129 
// Makes lost every allocation in this block that can become lost and passes
// the age check performed inside MakeLost(). Returns the number of
// allocations that were actually made lost.
uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    uint32_t lostAllocationCount = 0;
    for(VmaSuballocationList::iterator it = m_Suballocations.begin();
        it != m_Suballocations.end();
        ++it)
    {
        if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
            it->hAllocation->CanBecomeLost() &&
            it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation may merge with neighbors; continue from the merged item.
            it = FreeSuballocation(it);
            ++lostAllocationCount;
        }
    }
    return lostAllocationCount;
}
    7147 
    7148 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
    7149 {
    7150  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7151  it != m_Suballocations.end();
    7152  ++it)
    7153  {
    7154  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    7155  {
    7156  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
    7157  {
    7158  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    7159  return VK_ERROR_VALIDATION_FAILED_EXT;
    7160  }
    7161  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
    7162  {
    7163  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    7164  return VK_ERROR_VALIDATION_FAILED_EXT;
    7165  }
    7166  }
    7167  }
    7168 
    7169  return VK_SUCCESS;
    7170 }
    7171 
// Commits a previously computed allocation request: converts the free
// suballocation that request.item points to into a used one of allocSize
// bytes at request.offset, splitting off any leftover space before/after it
// as new free suballocations. Statement order matters: the item must be
// unregistered from the by-size index before its fields are rewritten.
void VmaBlockMetadata_Generic::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    VMA_ASSERT(!upperAddress);
    VMA_ASSERT(request.item != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.item;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used.
    UnregisterFreeSuballocation(request.item);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;
    suballoc.hAllocation = hAllocation;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.item;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.item, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals.
    // One free range was consumed; each non-empty padding adds one back.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
    {
        ++m_FreeCount;
    }
    if(paddingEnd > 0)
    {
        ++m_FreeCount;
    }
    m_SumFreeSize -= allocSize;
}
    7237 
    7238 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
    7239 {
    7240  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7241  suballocItem != m_Suballocations.end();
    7242  ++suballocItem)
    7243  {
    7244  VmaSuballocation& suballoc = *suballocItem;
    7245  if(suballoc.hAllocation == allocation)
    7246  {
    7247  FreeSuballocation(suballocItem);
    7248  VMA_HEAVY_ASSERT(Validate());
    7249  return;
    7250  }
    7251  }
    7252  VMA_ASSERT(0 && "Not found!");
    7253 }
    7254 
    7255 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
    7256 {
    7257  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7258  suballocItem != m_Suballocations.end();
    7259  ++suballocItem)
    7260  {
    7261  VmaSuballocation& suballoc = *suballocItem;
    7262  if(suballoc.offset == offset)
    7263  {
    7264  FreeSuballocation(suballocItem);
    7265  return;
    7266  }
    7267  }
    7268  VMA_ASSERT(0 && "Not found!");
    7269 }
    7270 
// Tries to resize the given allocation in place to newSize. Shrinking always
// succeeds: the freed tail is merged into the following free range or becomes
// a new one. Growing succeeds only when the immediately following
// suballocation is free and large enough (including the debug margin).
// Returns false if growth is impossible; asserts if the allocation is not in
// this block. The allocation object's own size is updated by the caller.
bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
{
    typedef VmaSuballocationList::iterator iter_type;
    for(iter_type suballocItem = m_Suballocations.begin();
        suballocItem != m_Suballocations.end();
        ++suballocItem)
    {
        VmaSuballocation& suballoc = *suballocItem;
        if(suballoc.hAllocation == alloc)
        {
            iter_type nextItem = suballocItem;
            ++nextItem;

            // Should have been ensured on higher level.
            VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);

            // Shrinking.
            if(newSize < alloc->GetSize())
            {
                const VkDeviceSize sizeDiff = suballoc.size - newSize;

                // There is next item.
                if(nextItem != m_Suballocations.end())
                {
                    // Next item is free.
                    if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // Grow this next item backward.
                        // Must unregister before mutating size, re-register after,
                        // to keep the by-size index sorted.
                        UnregisterFreeSuballocation(nextItem);
                        nextItem->offset -= sizeDiff;
                        nextItem->size += sizeDiff;
                        RegisterFreeSuballocation(nextItem);
                    }
                    // Next item is not free.
                    else
                    {
                        // Create free item after current one.
                        VmaSuballocation newFreeSuballoc;
                        newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
                        newFreeSuballoc.offset = suballoc.offset + newSize;
                        newFreeSuballoc.size = sizeDiff;
                        newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                        iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
                        RegisterFreeSuballocation(newFreeSuballocIt);

                        ++m_FreeCount;
                    }
                }
                // This is the last item.
                else
                {
                    // Create free item at the end.
                    VmaSuballocation newFreeSuballoc;
                    newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
                    newFreeSuballoc.offset = suballoc.offset + newSize;
                    newFreeSuballoc.size = sizeDiff;
                    newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                    m_Suballocations.push_back(newFreeSuballoc);

                    iter_type newFreeSuballocIt = m_Suballocations.end();
                    RegisterFreeSuballocation(--newFreeSuballocIt);

                    ++m_FreeCount;
                }

                suballoc.size = newSize;
                m_SumFreeSize += sizeDiff;
            }
            // Growing.
            else
            {
                const VkDeviceSize sizeDiff = newSize - suballoc.size;

                // There is next item.
                if(nextItem != m_Suballocations.end())
                {
                    // Next item is free.
                    if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // There is not enough free space, including margin.
                        if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
                        {
                            return false;
                        }

                        // There is more free space than required.
                        if(nextItem->size > sizeDiff)
                        {
                            // Move and shrink this next item.
                            UnregisterFreeSuballocation(nextItem);
                            nextItem->offset += sizeDiff;
                            nextItem->size -= sizeDiff;
                            RegisterFreeSuballocation(nextItem);
                        }
                        // There is exactly the amount of free space required.
                        else
                        {
                            // Remove this next free item.
                            UnregisterFreeSuballocation(nextItem);
                            m_Suballocations.erase(nextItem);
                            --m_FreeCount;
                        }
                    }
                    // Next item is not free - there is no space to grow.
                    else
                    {
                        return false;
                    }
                }
                // This is the last item - there is no space to grow.
                else
                {
                    return false;
                }

                suballoc.size = newSize;
                m_SumFreeSize -= sizeDiff;
            }

            // We cannot call Validate() here because alloc object is updated to new size outside of this call.
            return true;
        }
    }
    VMA_ASSERT(0 && "Not found!");
    return false;
}
    7397 
    7398 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
    7399 {
    7400  VkDeviceSize lastSize = 0;
    7401  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    7402  {
    7403  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    7404 
    7405  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
    7406  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    7407  VMA_VALIDATE(it->size >= lastSize);
    7408  lastSize = it->size;
    7409  }
    7410  return true;
    7411 }
    7412 
/*
Checks whether a new allocation of given size/alignment/type can be placed
starting at suballocation `suballocItem`, possibly making other allocations
lost to free the needed space.

- currentFrameIndex / frameInUseCount: used to decide whether an existing
  allocation that CanBecomeLost() is old enough to be made lost.
- bufferImageGranularity: if > 1, neighboring suballocations of conflicting
  type (buffer vs. image) must not share a page of this size.
- suballocItem: candidate starting suballocation; must be free unless
  canMakeOtherLost is true.
- On success returns true and fills *pOffset with the final aligned offset.
  *itemsToMakeLostCount, *pSumFreeSize, *pSumItemSize are always written and
  describe how many allocations would have to be made lost and how much free /
  to-be-lost space the candidate region contains.
*/
bool VmaBlockMetadata_Generic::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    // Outputs are written even on failure paths below.
    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        // The candidate may start at a used suballocation, provided that
        // suballocation can be made lost.
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                // Starting suballocation is in use and cannot be made lost.
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(GetSize() - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                // Bumping to the next granularity page resolves the conflict.
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > GetSize())
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    // Ran off the end of the block before covering totalSize.
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        // A suballocation in the way is in use and cannot be made lost.
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        // Simple path: the candidate suballocation itself must be free and
        // large enough; no other allocations may be touched.
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
    7686 
    7687 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
    7688 {
    7689  VMA_ASSERT(item != m_Suballocations.end());
    7690  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7691 
    7692  VmaSuballocationList::iterator nextItem = item;
    7693  ++nextItem;
    7694  VMA_ASSERT(nextItem != m_Suballocations.end());
    7695  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    7696 
    7697  item->size += nextItem->size;
    7698  --m_FreeCount;
    7699  m_Suballocations.erase(nextItem);
    7700 }
    7701 
/*
Marks the suballocation pointed to by `suballocItem` as free, updates the
free-space totals, and coalesces it with adjacent free suballocations.
Returns an iterator to the resulting (possibly merged) free suballocation,
which is registered in m_FreeSuballocationsBySize.
*/
VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    if(mergeWithNext)
    {
        // Unregister before merging: MergeFreeWithNext erases nextItem, which
        // would leave a dangling iterator in m_FreeSuballocationsBySize.
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        // prevItem changes size during the merge, so it must be unregistered
        // first and re-registered afterwards to keep the size-sorted vector valid.
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        RegisterFreeSuballocation(prevItem);
        return prevItem;
    }
    else
    {
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
    7753 
    7754 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    7755 {
    7756  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7757  VMA_ASSERT(item->size > 0);
    7758 
    7759  // You may want to enable this validation at the beginning or at the end of
    7760  // this function, depending on what do you want to check.
    7761  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7762 
    7763  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    7764  {
    7765  if(m_FreeSuballocationsBySize.empty())
    7766  {
    7767  m_FreeSuballocationsBySize.push_back(item);
    7768  }
    7769  else
    7770  {
    7771  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    7772  }
    7773  }
    7774 
    7775  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7776 }
    7777 
    7778 
/*
Removes `item` from m_FreeSuballocationsBySize. Items below the registration
threshold were never inserted, so they are skipped. Because multiple entries
can share the same size, a binary search locates the first candidate and a
short linear scan over the equal-size run finds the exact iterator.
*/
void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
{
    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(item->size > 0);

    // You may want to enable this validation at the beginning or at the end of
    // this function, depending on what do you want to check.
    VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());

    if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    {
        // Binary search for the first entry whose size is not less than item's.
        VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
            m_FreeSuballocationsBySize.data(),
            m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
            item,
            VmaSuballocationItemSizeLess());
        // Scan forward through entries of equal size until the exact match.
        for(size_t index = it - m_FreeSuballocationsBySize.data();
            index < m_FreeSuballocationsBySize.size();
            ++index)
        {
            if(m_FreeSuballocationsBySize[index] == item)
            {
                VmaVectorRemove(m_FreeSuballocationsBySize, index);
                return;
            }
            // Leaving the equal-size run without a match means the item was
            // never registered - that is a logic error.
            VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
        }
        VMA_ASSERT(0 && "Not found.");
    }

    //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
}
    7811 
    7813 // class VmaBlockMetadata_Linear
    7814 
// Constructs empty linear metadata. Both suballocation vectors start empty;
// vector 0 is initially designated as the "1st" vector (m_1stVectorIndex == 0)
// and the 2nd vector is unused until the block switches to ring-buffer or
// double-stack mode.
VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_SumFreeSize(0),
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_1stVectorIndex(0),
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0)
{
}
    7827 
// Nothing to release explicitly - the suballocation vectors clean up through
// their own destructors.
VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
{
}
    7831 
// Initializes metadata for a memory block of the given size.
// The entire block starts out as free space.
void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);
    m_SumFreeSize = size;
}
    7837 
/*
Validates internal consistency of the linear metadata: vector/mode agreement,
null-item bookkeeping, strictly increasing offsets (with VMA_DEBUG_MARGIN gaps),
per-suballocation allocation-handle consistency, and that m_SumFreeSize matches
block size minus the sum of used sizes.
*/
bool VmaBlockMetadata_Linear::Validate() const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    // 2nd vector is non-empty exactly when a 2nd-vector mode is active.
    VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    // Ring-buffer mode requires a non-empty 1st vector when 2nd is non-empty.
    VMA_VALIDATE(!suballocations1st.empty() ||
        suballocations2nd.empty() ||
        m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);

    if(!suballocations1st.empty())
    {
        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
        VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    }
    if(!suballocations2nd.empty())
    {
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    }

    VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());

    VkDeviceSize sumUsedSize = 0;
    const size_t suballoc1stCount = suballocations1st.size();
    // `offset` tracks the minimum admissible offset for the next suballocation.
    VkDeviceSize offset = VMA_DEBUG_MARGIN;

    // In ring-buffer mode the 2nd vector occupies addresses BEFORE the 1st,
    // so it is walked first.
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = 0; i < suballoc2ndCount; ++i)
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    // Leading null items of the 1st vector must all be free placeholders.
    for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
            suballoc.hAllocation == VK_NULL_HANDLE);
    }

    size_t nullItem1stCount = m_1stNullItemsBeginCount;

    for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
        VMA_VALIDATE(suballoc.offset >= offset);
        // NOTE(review): i starts at m_1stNullItemsBeginCount, so this condition
        // looks trivially true - presumably historical; confirm intended check.
        VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);

        if(!currFree)
        {
            VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
            VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
            sumUsedSize += suballoc.size;
        }
        else
        {
            ++nullItem1stCount;
        }

        offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    }
    VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);

    // In double-stack mode the 2nd vector grows downward from the end of the
    // block, so it is walked in reverse to follow increasing addresses.
    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = suballoc2ndCount; i--; )
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    VMA_VALIDATE(offset <= GetSize());
    VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);

    return true;
}
    7964 
    7965 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
    7966 {
    7967  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
    7968  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
    7969 }
    7970 
// Returns the size of the largest contiguous region currently available for a
// NEW allocation, which depends on the 2nd-vector mode.
VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
{
    const VkDeviceSize size = GetSize();

    /*
    We don't consider gaps inside allocation vectors with freed allocations because
    they are not suitable for reuse in linear allocator. We consider only space that
    is available for new allocations.
    */
    if(IsEmpty())
    {
        return size;
    }

    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();

    switch(m_2ndVectorMode)
    {
    case SECOND_VECTOR_EMPTY:
        /*
        Available space is after end of 1st, as well as before beginning of 1st (which
        would make it a ring buffer).
        */
        {
            const size_t suballocations1stCount = suballocations1st.size();
            VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
            const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
            const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
            // Larger of: gap before the first live item, gap after the last item.
            return VMA_MAX(
                firstSuballoc.offset,
                size - (lastSuballoc.offset + lastSuballoc.size));
        }
        break;

    case SECOND_VECTOR_RING_BUFFER:
        /*
        Available space is only between end of 2nd and beginning of 1st.
        */
        {
            const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
            const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
            return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
        }
        break;

    case SECOND_VECTOR_DOUBLE_STACK:
        /*
        Available space is only between end of 1st and top of 2nd.
        */
        {
            const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            // back() of 2nd is its topmost (lowest-offset) item in double-stack mode.
            const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
            const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
            return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
        }
        break;

    default:
        VMA_ASSERT(0);
        return 0;
    }
}
    8034 
    8035 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    8036 {
    8037  const VkDeviceSize size = GetSize();
    8038  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8039  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8040  const size_t suballoc1stCount = suballocations1st.size();
    8041  const size_t suballoc2ndCount = suballocations2nd.size();
    8042 
    8043  outInfo.blockCount = 1;
    8044  outInfo.allocationCount = (uint32_t)GetAllocationCount();
    8045  outInfo.unusedRangeCount = 0;
    8046  outInfo.usedBytes = 0;
    8047  outInfo.allocationSizeMin = UINT64_MAX;
    8048  outInfo.allocationSizeMax = 0;
    8049  outInfo.unusedRangeSizeMin = UINT64_MAX;
    8050  outInfo.unusedRangeSizeMax = 0;
    8051 
    8052  VkDeviceSize lastOffset = 0;
    8053 
    8054  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8055  {
    8056  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8057  size_t nextAlloc2ndIndex = 0;
    8058  while(lastOffset < freeSpace2ndTo1stEnd)
    8059  {
    8060  // Find next non-null allocation or move nextAllocIndex to the end.
    8061  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8062  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8063  {
    8064  ++nextAlloc2ndIndex;
    8065  }
    8066 
    8067  // Found non-null allocation.
    8068  if(nextAlloc2ndIndex < suballoc2ndCount)
    8069  {
    8070  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8071 
    8072  // 1. Process free space before this allocation.
    8073  if(lastOffset < suballoc.offset)
    8074  {
    8075  // There is free space from lastOffset to suballoc.offset.
    8076  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8077  ++outInfo.unusedRangeCount;
    8078  outInfo.unusedBytes += unusedRangeSize;
    8079  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8080  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8081  }
    8082 
    8083  // 2. Process this allocation.
    8084  // There is allocation with suballoc.offset, suballoc.size.
    8085  outInfo.usedBytes += suballoc.size;
    8086  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8087  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8088 
    8089  // 3. Prepare for next iteration.
    8090  lastOffset = suballoc.offset + suballoc.size;
    8091  ++nextAlloc2ndIndex;
    8092  }
    8093  // We are at the end.
    8094  else
    8095  {
    8096  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8097  if(lastOffset < freeSpace2ndTo1stEnd)
    8098  {
    8099  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8100  ++outInfo.unusedRangeCount;
    8101  outInfo.unusedBytes += unusedRangeSize;
    8102  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8103  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8104  }
    8105 
    8106  // End of loop.
    8107  lastOffset = freeSpace2ndTo1stEnd;
    8108  }
    8109  }
    8110  }
    8111 
    8112  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8113  const VkDeviceSize freeSpace1stTo2ndEnd =
    8114  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8115  while(lastOffset < freeSpace1stTo2ndEnd)
    8116  {
    8117  // Find next non-null allocation or move nextAllocIndex to the end.
    8118  while(nextAlloc1stIndex < suballoc1stCount &&
    8119  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8120  {
    8121  ++nextAlloc1stIndex;
    8122  }
    8123 
    8124  // Found non-null allocation.
    8125  if(nextAlloc1stIndex < suballoc1stCount)
    8126  {
    8127  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8128 
    8129  // 1. Process free space before this allocation.
    8130  if(lastOffset < suballoc.offset)
    8131  {
    8132  // There is free space from lastOffset to suballoc.offset.
    8133  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8134  ++outInfo.unusedRangeCount;
    8135  outInfo.unusedBytes += unusedRangeSize;
    8136  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8137  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8138  }
    8139 
    8140  // 2. Process this allocation.
    8141  // There is allocation with suballoc.offset, suballoc.size.
    8142  outInfo.usedBytes += suballoc.size;
    8143  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8144  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8145 
    8146  // 3. Prepare for next iteration.
    8147  lastOffset = suballoc.offset + suballoc.size;
    8148  ++nextAlloc1stIndex;
    8149  }
    8150  // We are at the end.
    8151  else
    8152  {
    8153  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8154  if(lastOffset < freeSpace1stTo2ndEnd)
    8155  {
    8156  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8157  ++outInfo.unusedRangeCount;
    8158  outInfo.unusedBytes += unusedRangeSize;
    8159  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8160  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8161  }
    8162 
    8163  // End of loop.
    8164  lastOffset = freeSpace1stTo2ndEnd;
    8165  }
    8166  }
    8167 
    8168  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8169  {
    8170  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8171  while(lastOffset < size)
    8172  {
    8173  // Find next non-null allocation or move nextAllocIndex to the end.
    8174  while(nextAlloc2ndIndex != SIZE_MAX &&
    8175  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8176  {
    8177  --nextAlloc2ndIndex;
    8178  }
    8179 
    8180  // Found non-null allocation.
    8181  if(nextAlloc2ndIndex != SIZE_MAX)
    8182  {
    8183  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8184 
    8185  // 1. Process free space before this allocation.
    8186  if(lastOffset < suballoc.offset)
    8187  {
    8188  // There is free space from lastOffset to suballoc.offset.
    8189  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8190  ++outInfo.unusedRangeCount;
    8191  outInfo.unusedBytes += unusedRangeSize;
    8192  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8193  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8194  }
    8195 
    8196  // 2. Process this allocation.
    8197  // There is allocation with suballoc.offset, suballoc.size.
    8198  outInfo.usedBytes += suballoc.size;
    8199  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8200  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8201 
    8202  // 3. Prepare for next iteration.
    8203  lastOffset = suballoc.offset + suballoc.size;
    8204  --nextAlloc2ndIndex;
    8205  }
    8206  // We are at the end.
    8207  else
    8208  {
    8209  // There is free space from lastOffset to size.
    8210  if(lastOffset < size)
    8211  {
    8212  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8213  ++outInfo.unusedRangeCount;
    8214  outInfo.unusedBytes += unusedRangeSize;
    8215  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8216  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8217  }
    8218 
    8219  // End of loop.
    8220  lastOffset = size;
    8221  }
    8222  }
    8223  }
    8224 
    8225  outInfo.unusedBytes = size - outInfo.usedBytes;
    8226 }
    8227 
    8228 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
    8229 {
    8230  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8231  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8232  const VkDeviceSize size = GetSize();
    8233  const size_t suballoc1stCount = suballocations1st.size();
    8234  const size_t suballoc2ndCount = suballocations2nd.size();
    8235 
    8236  inoutStats.size += size;
    8237 
    8238  VkDeviceSize lastOffset = 0;
    8239 
    8240  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8241  {
    8242  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8243  size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
    8244  while(lastOffset < freeSpace2ndTo1stEnd)
    8245  {
    8246  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8247  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8248  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8249  {
    8250  ++nextAlloc2ndIndex;
    8251  }
    8252 
    8253  // Found non-null allocation.
    8254  if(nextAlloc2ndIndex < suballoc2ndCount)
    8255  {
    8256  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8257 
    8258  // 1. Process free space before this allocation.
    8259  if(lastOffset < suballoc.offset)
    8260  {
    8261  // There is free space from lastOffset to suballoc.offset.
    8262  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8263  inoutStats.unusedSize += unusedRangeSize;
    8264  ++inoutStats.unusedRangeCount;
    8265  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8266  }
    8267 
    8268  // 2. Process this allocation.
    8269  // There is allocation with suballoc.offset, suballoc.size.
    8270  ++inoutStats.allocationCount;
    8271 
    8272  // 3. Prepare for next iteration.
    8273  lastOffset = suballoc.offset + suballoc.size;
    8274  ++nextAlloc2ndIndex;
    8275  }
    8276  // We are at the end.
    8277  else
    8278  {
    8279  if(lastOffset < freeSpace2ndTo1stEnd)
    8280  {
    8281  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8282  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8283  inoutStats.unusedSize += unusedRangeSize;
    8284  ++inoutStats.unusedRangeCount;
    8285  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8286  }
    8287 
    8288  // End of loop.
    8289  lastOffset = freeSpace2ndTo1stEnd;
    8290  }
    8291  }
    8292  }
    8293 
    8294  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8295  const VkDeviceSize freeSpace1stTo2ndEnd =
    8296  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8297  while(lastOffset < freeSpace1stTo2ndEnd)
    8298  {
    8299  // Find next non-null allocation or move nextAllocIndex to the end.
    8300  while(nextAlloc1stIndex < suballoc1stCount &&
    8301  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8302  {
    8303  ++nextAlloc1stIndex;
    8304  }
    8305 
    8306  // Found non-null allocation.
    8307  if(nextAlloc1stIndex < suballoc1stCount)
    8308  {
    8309  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8310 
    8311  // 1. Process free space before this allocation.
    8312  if(lastOffset < suballoc.offset)
    8313  {
    8314  // There is free space from lastOffset to suballoc.offset.
    8315  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8316  inoutStats.unusedSize += unusedRangeSize;
    8317  ++inoutStats.unusedRangeCount;
    8318  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8319  }
    8320 
    8321  // 2. Process this allocation.
    8322  // There is allocation with suballoc.offset, suballoc.size.
    8323  ++inoutStats.allocationCount;
    8324 
    8325  // 3. Prepare for next iteration.
    8326  lastOffset = suballoc.offset + suballoc.size;
    8327  ++nextAlloc1stIndex;
    8328  }
    8329  // We are at the end.
    8330  else
    8331  {
    8332  if(lastOffset < freeSpace1stTo2ndEnd)
    8333  {
    8334  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8335  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8336  inoutStats.unusedSize += unusedRangeSize;
    8337  ++inoutStats.unusedRangeCount;
    8338  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8339  }
    8340 
    8341  // End of loop.
    8342  lastOffset = freeSpace1stTo2ndEnd;
    8343  }
    8344  }
    8345 
    8346  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8347  {
    8348  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8349  while(lastOffset < size)
    8350  {
    8351  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8352  while(nextAlloc2ndIndex != SIZE_MAX &&
    8353  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8354  {
    8355  --nextAlloc2ndIndex;
    8356  }
    8357 
    8358  // Found non-null allocation.
    8359  if(nextAlloc2ndIndex != SIZE_MAX)
    8360  {
    8361  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8362 
    8363  // 1. Process free space before this allocation.
    8364  if(lastOffset < suballoc.offset)
    8365  {
    8366  // There is free space from lastOffset to suballoc.offset.
    8367  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8368  inoutStats.unusedSize += unusedRangeSize;
    8369  ++inoutStats.unusedRangeCount;
    8370  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8371  }
    8372 
    8373  // 2. Process this allocation.
    8374  // There is allocation with suballoc.offset, suballoc.size.
    8375  ++inoutStats.allocationCount;
    8376 
    8377  // 3. Prepare for next iteration.
    8378  lastOffset = suballoc.offset + suballoc.size;
    8379  --nextAlloc2ndIndex;
    8380  }
    8381  // We are at the end.
    8382  else
    8383  {
    8384  if(lastOffset < size)
    8385  {
    8386  // There is free space from lastOffset to size.
    8387  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8388  inoutStats.unusedSize += unusedRangeSize;
    8389  ++inoutStats.unusedRangeCount;
    8390  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8391  }
    8392 
    8393  // End of loop.
    8394  lastOffset = size;
    8395  }
    8396  }
    8397  }
    8398 }
    8399 
    8400 #if VMA_STATS_STRING_ENABLED
// Writes a detailed JSON map of this block for diagnostic dumps.
// Runs in two passes over the same three address-ordered regions as AddPoolStats:
//   FIRST PASS  - only counts allocations/unused ranges and sums used bytes,
//                 because PrintDetailedMap_Begin() needs the totals up front;
//   SECOND PASS - emits one JSON entry per allocation / unused range, in
//                 ascending offset order.
// The two passes must visit exactly the same items in the same order, or the
// counts written in the header will not match the emitted entries.
void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
{
    const VkDeviceSize size = GetSize();
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    const size_t suballoc1stCount = suballocations1st.size();
    const size_t suballoc2ndCount = suballocations2nd.size();

    // FIRST PASS

    size_t unusedRangeCount = 0;
    VkDeviceSize usedBytes = 0;

    // End offset of the region processed so far.
    VkDeviceSize lastOffset = 0;

    size_t alloc2ndCount = 0;
    // Region 1 (ring-buffer mode): 2nd-vector allocations placed before the 1st.
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
        size_t nextAlloc2ndIndex = 0;
        while(lastOffset < freeSpace2ndTo1stEnd)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex < suballoc2ndCount &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                ++nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex < suballoc2ndCount)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    ++unusedRangeCount;
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                ++alloc2ndCount;
                usedBytes += suballoc.size;

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                ++nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < freeSpace2ndTo1stEnd)
                {
                    // There is free space from lastOffset to freeSpace2ndTo1stEnd.
                    ++unusedRangeCount;
                }

                // End of loop.
                lastOffset = freeSpace2ndTo1stEnd;
            }
        }
    }

    // Region 2: 1st-vector allocations, skipping leading null items.
    size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    size_t alloc1stCount = 0;
    const VkDeviceSize freeSpace1stTo2ndEnd =
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    while(lastOffset < freeSpace1stTo2ndEnd)
    {
        // Find next non-null allocation or move nextAllocIndex to the end.
        while(nextAlloc1stIndex < suballoc1stCount &&
            suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
        {
            ++nextAlloc1stIndex;
        }

        // Found non-null allocation.
        if(nextAlloc1stIndex < suballoc1stCount)
        {
            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];

            // 1. Process free space before this allocation.
            if(lastOffset < suballoc.offset)
            {
                // There is free space from lastOffset to suballoc.offset.
                ++unusedRangeCount;
            }

            // 2. Process this allocation.
            // There is allocation with suballoc.offset, suballoc.size.
            ++alloc1stCount;
            usedBytes += suballoc.size;

            // 3. Prepare for next iteration.
            lastOffset = suballoc.offset + suballoc.size;
            ++nextAlloc1stIndex;
        }
        // We are at the end.
        else
        {
            // NOTE(review): the bound here is `size` while the loop runs to
            // freeSpace1stTo2ndEnd; since lastOffset < freeSpace1stTo2ndEnd <= size
            // inside this loop, the condition is always true here and the count
            // matches the unused range emitted by the second pass. Equivalent, but
            // `freeSpace1stTo2ndEnd` would be the clearer bound.
            if(lastOffset < size)
            {
                // There is free space from lastOffset to freeSpace1stTo2ndEnd.
                ++unusedRangeCount;
            }

            // End of loop.
            lastOffset = freeSpace1stTo2ndEnd;
        }
    }

    // Region 3 (double-stack mode): upper 2nd-vector allocations, iterated
    // back-to-front because they are stored in descending address order.
    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
        while(lastOffset < size)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex != SIZE_MAX &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                --nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex != SIZE_MAX)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    ++unusedRangeCount;
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                ++alloc2ndCount;
                usedBytes += suballoc.size;

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                --nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < size)
                {
                    // There is free space from lastOffset to size.
                    ++unusedRangeCount;
                }

                // End of loop.
                lastOffset = size;
            }
        }
    }

    // Emit the JSON header with the totals gathered above.
    const VkDeviceSize unusedBytes = size - usedBytes;
    PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);

    // SECOND PASS
    lastOffset = 0;

    // Region 1 again, this time emitting entries.
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
        size_t nextAlloc2ndIndex = 0;
        while(lastOffset < freeSpace2ndTo1stEnd)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex < suballoc2ndCount &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                ++nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex < suballoc2ndCount)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                ++nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < freeSpace2ndTo1stEnd)
                {
                    // There is free space from lastOffset to freeSpace2ndTo1stEnd.
                    const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // End of loop.
                lastOffset = freeSpace2ndTo1stEnd;
            }
        }
    }

    // Region 2 again; reuses nextAlloc1stIndex/freeSpace1stTo2ndEnd from the first pass.
    nextAlloc1stIndex = m_1stNullItemsBeginCount;
    while(lastOffset < freeSpace1stTo2ndEnd)
    {
        // Find next non-null allocation or move nextAllocIndex to the end.
        while(nextAlloc1stIndex < suballoc1stCount &&
            suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
        {
            ++nextAlloc1stIndex;
        }

        // Found non-null allocation.
        if(nextAlloc1stIndex < suballoc1stCount)
        {
            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];

            // 1. Process free space before this allocation.
            if(lastOffset < suballoc.offset)
            {
                // There is free space from lastOffset to suballoc.offset.
                const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
            }

            // 2. Process this allocation.
            // There is allocation with suballoc.offset, suballoc.size.
            PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);

            // 3. Prepare for next iteration.
            lastOffset = suballoc.offset + suballoc.size;
            ++nextAlloc1stIndex;
        }
        // We are at the end.
        else
        {
            if(lastOffset < freeSpace1stTo2ndEnd)
            {
                // There is free space from lastOffset to freeSpace1stTo2ndEnd.
                const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
                PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
            }

            // End of loop.
            lastOffset = freeSpace1stTo2ndEnd;
        }
    }

    // Region 3 again, emitting entries.
    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
        while(lastOffset < size)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex != SIZE_MAX &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                --nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex != SIZE_MAX)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                --nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < size)
                {
                    // There is free space from lastOffset to size.
                    const VkDeviceSize unusedRangeSize = size - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // End of loop.
                lastOffset = size;
            }
        }
    }

    PrintDetailedMap_End(json);
}
    8715 #endif // #if VMA_STATS_STRING_ENABLED
    8716 
// Tries to find a place for a new allocation of given size/alignment inside this
// linear block. On success fills *pAllocationRequest and returns true; returns
// false if the request cannot be satisfied.
//
// Parameters:
// - currentFrameIndex/frameInUseCount: used only when canMakeOtherLost, to decide
//   whether existing allocations are old enough to be sacrificed (made lost).
// - bufferImageGranularity: if > 1, placements are checked/padded so that buffers
//   and images never share a "page" of this size (see VmaBlocksOnSamePage).
// - upperAddress: request placement at the upper end (double-stack mode).
// - canMakeOtherLost: allow counting colliding 1st-vector allocations as
//   itemsToMakeLostCount instead of failing outright.
// - strategy: not read by this linear implementation (kept for interface parity
//   with other metadata classes).
//
// Three placement strategies, in order:
// 1. upperAddress: below 2nd.back() (or at block end), growing downwards;
// 2. !upperAddress: at the end of the 1st vector, before 2nd.back()/block end;
// 3. !upperAddress fallback: wrap around past the end of the 2nd vector
//    (turning the block into a ring buffer), optionally making old 1st-vector
//    allocations lost to create room.
bool VmaBlockMetadata_Linear::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(pAllocationRequest != VMA_NULL);
    VMA_HEAVY_ASSERT(Validate());

    const VkDeviceSize size = GetSize();
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(upperAddress)
    {
        // Upper-address placement is only valid for double-stack usage; a block
        // already used as a ring buffer cannot also serve as a double stack.
        if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
        {
            VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
            return false;
        }

        // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
        if(allocSize > size)
        {
            return false;
        }
        VkDeviceSize resultBaseOffset = size - allocSize;
        if(!suballocations2nd.empty())
        {
            const VmaSuballocation& lastSuballoc = suballocations2nd.back();
            resultBaseOffset = lastSuballoc.offset - allocSize;
            // Not enough room below the lowest existing upper allocation.
            if(allocSize > lastSuballoc.offset)
            {
                return false;
            }
        }

        // Start from offset equal to end of free space.
        VkDeviceSize resultOffset = resultBaseOffset;

        // Apply VMA_DEBUG_MARGIN at the end.
        if(VMA_DEBUG_MARGIN > 0)
        {
            if(resultOffset < VMA_DEBUG_MARGIN)
            {
                return false;
            }
            resultOffset -= VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        // Growing downwards, so the offset is aligned DOWN.
        resultOffset = VmaAlignDown(resultOffset, allocAlignment);

        // Check next suballocations from 2nd for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1 && !suballocations2nd.empty())
        {
            bool bufferImageGranularityConflict = false;
            for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
            {
                const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
            }
        }

        // There is enough free space.
        const VkDeviceSize endOf1st = !suballocations1st.empty() ?
            suballocations1st.back().offset + suballocations1st.back().size :
            0;
        if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
        {
            // Check previous suballocations for BufferImageGranularity conflicts.
            // If conflict exists, allocation cannot be made here.
            if(bufferImageGranularity > 1)
            {
                for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
                        {
                            return false;
                        }
                    }
                    else
                    {
                        // Already on next page.
                        break;
                    }
                }
            }

            // All tests passed: Success.
            pAllocationRequest->offset = resultOffset;
            pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
            pAllocationRequest->sumItemSize = 0;
            // pAllocationRequest->item unused.
            pAllocationRequest->itemsToMakeLostCount = 0;
            return true;
        }
    }
    else // !upperAddress
    {
        if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
        {
            // Try to allocate at the end of 1st vector.

            VkDeviceSize resultBaseOffset = 0;
            if(!suballocations1st.empty())
            {
                const VmaSuballocation& lastSuballoc = suballocations1st.back();
                resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
            }

            // Start from offset equal to beginning of free space.
            VkDeviceSize resultOffset = resultBaseOffset;

            // Apply VMA_DEBUG_MARGIN at the beginning.
            if(VMA_DEBUG_MARGIN > 0)
            {
                resultOffset += VMA_DEBUG_MARGIN;
            }

            // Apply alignment.
            resultOffset = VmaAlignUp(resultOffset, allocAlignment);

            // Check previous suballocations for BufferImageGranularity conflicts.
            // Make bigger alignment if necessary.
            if(bufferImageGranularity > 1 && !suballocations1st.empty())
            {
                bool bufferImageGranularityConflict = false;
                for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                        {
                            bufferImageGranularityConflict = true;
                            break;
                        }
                    }
                    else
                        // Already on previous page.
                        break;
                }
                if(bufferImageGranularityConflict)
                {
                    resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
                }
            }

            // Free space ends at the lowest upper-stack allocation (double stack)
            // or at the end of the block.
            const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
                suballocations2nd.back().offset : size;

            // There is enough free space at the end after alignment.
            if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
            {
                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, allocation cannot be made here.
                if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
                {
                    for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
                    {
                        const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                        {
                            if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                            {
                                return false;
                            }
                        }
                        else
                        {
                            // Already on previous page.
                            break;
                        }
                    }
                }

                // All tests passed: Success.
                pAllocationRequest->offset = resultOffset;
                pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
                pAllocationRequest->sumItemSize = 0;
                // pAllocationRequest->item unused.
                pAllocationRequest->itemsToMakeLostCount = 0;
                return true;
            }
        }

        // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
        // beginning of 1st vector as the end of free space.
        if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
        {
            VMA_ASSERT(!suballocations1st.empty());

            VkDeviceSize resultBaseOffset = 0;
            if(!suballocations2nd.empty())
            {
                const VmaSuballocation& lastSuballoc = suballocations2nd.back();
                resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
            }

            // Start from offset equal to beginning of free space.
            VkDeviceSize resultOffset = resultBaseOffset;

            // Apply VMA_DEBUG_MARGIN at the beginning.
            if(VMA_DEBUG_MARGIN > 0)
            {
                resultOffset += VMA_DEBUG_MARGIN;
            }

            // Apply alignment.
            resultOffset = VmaAlignUp(resultOffset, allocAlignment);

            // Check previous suballocations for BufferImageGranularity conflicts.
            // Make bigger alignment if necessary.
            if(bufferImageGranularity > 1 && !suballocations2nd.empty())
            {
                bool bufferImageGranularityConflict = false;
                for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                        {
                            bufferImageGranularityConflict = true;
                            break;
                        }
                    }
                    else
                        // Already on previous page.
                        break;
                }
                if(bufferImageGranularityConflict)
                {
                    resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
                }
            }

            pAllocationRequest->itemsToMakeLostCount = 0;
            pAllocationRequest->sumItemSize = 0;
            // index1st advances over 1st-vector allocations that collide with the
            // proposed region and would have to be made lost.
            size_t index1st = m_1stNullItemsBeginCount;

            if(canMakeOtherLost)
            {
                while(index1st < suballocations1st.size() &&
                    resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
                {
                    // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
                    const VmaSuballocation& suballoc = suballocations1st[index1st];
                    if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // No problem.
                    }
                    else
                    {
                        VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
                        if(suballoc.hAllocation->CanBecomeLost() &&
                            suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++pAllocationRequest->itemsToMakeLostCount;
                            pAllocationRequest->sumItemSize += suballoc.size;
                        }
                        else
                        {
                            return false;
                        }
                    }
                    ++index1st;
                }

                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, we must mark more allocations lost or fail.
                if(bufferImageGranularity > 1)
                {
                    while(index1st < suballocations1st.size())
                    {
                        const VmaSuballocation& suballoc = suballocations1st[index1st];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
                        {
                            if(suballoc.hAllocation != VK_NULL_HANDLE)
                            {
                                // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
                                if(suballoc.hAllocation->CanBecomeLost() &&
                                    suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                                {
                                    ++pAllocationRequest->itemsToMakeLostCount;
                                    pAllocationRequest->sumItemSize += suballoc.size;
                                }
                                else
                                {
                                    return false;
                                }
                            }
                        }
                        else
                        {
                            // Already on next page.
                            break;
                        }
                        ++index1st;
                    }
                }
            }

            // There is enough free space at the end after alignment.
            if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
                (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
            {
                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, allocation cannot be made here.
                if(bufferImageGranularity > 1)
                {
                    for(size_t nextSuballocIndex = index1st;
                        nextSuballocIndex < suballocations1st.size();
                        nextSuballocIndex++)
                    {
                        const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                        {
                            if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                            {
                                return false;
                            }
                        }
                        else
                        {
                            // Already on next page.
                            break;
                        }
                    }
                }

                // All tests passed: Success.
                pAllocationRequest->offset = resultOffset;
                // Free size available in the target region, not counting the items
                // that will be made lost (their sizes are in sumItemSize).
                pAllocationRequest->sumFreeSize =
                    (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
                    - resultBaseOffset
                    - pAllocationRequest->sumItemSize;
                // pAllocationRequest->item unused.
                return true;
            }
        }
    }

    return false;
}
    9089 
    9090 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    9091  uint32_t currentFrameIndex,
    9092  uint32_t frameInUseCount,
    9093  VmaAllocationRequest* pAllocationRequest)
    9094 {
    9095  if(pAllocationRequest->itemsToMakeLostCount == 0)
    9096  {
    9097  return true;
    9098  }
    9099 
    9100  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
    9101 
    9102  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9103  size_t index1st = m_1stNullItemsBeginCount;
    9104  size_t madeLostCount = 0;
    9105  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    9106  {
    9107  VMA_ASSERT(index1st < suballocations1st.size());
    9108  VmaSuballocation& suballoc = suballocations1st[index1st];
    9109  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9110  {
    9111  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
    9112  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
    9113  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    9114  {
    9115  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    9116  suballoc.hAllocation = VK_NULL_HANDLE;
    9117  m_SumFreeSize += suballoc.size;
    9118  ++m_1stNullItemsMiddleCount;
    9119  ++madeLostCount;
    9120  }
    9121  else
    9122  {
    9123  return false;
    9124  }
    9125  }
    9126  ++index1st;
    9127  }
    9128 
    9129  CleanupAfterFree();
    9130  //VMA_HEAVY_ASSERT(Validate()); // Already called by ClanupAfterFree().
    9131 
    9132  return true;
    9133 }
    9134 
    9135 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    9136 {
    9137  uint32_t lostAllocationCount = 0;
    9138 
    9139  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9140  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    9141  {
    9142  VmaSuballocation& suballoc = suballocations1st[i];
    9143  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    9144  suballoc.hAllocation->CanBecomeLost() &&
    9145  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    9146  {
    9147  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    9148  suballoc.hAllocation = VK_NULL_HANDLE;
    9149  ++m_1stNullItemsMiddleCount;
    9150  m_SumFreeSize += suballoc.size;
    9151  ++lostAllocationCount;
    9152  }
    9153  }
    9154 
    9155  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9156  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    9157  {
    9158  VmaSuballocation& suballoc = suballocations2nd[i];
    9159  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    9160  suballoc.hAllocation->CanBecomeLost() &&
    9161  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    9162  {
    9163  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    9164  suballoc.hAllocation = VK_NULL_HANDLE;
    9165  ++m_2ndNullItemsCount;
    9166  ++lostAllocationCount;
    9167  }
    9168  }
    9169 
    9170  if(lostAllocationCount)
    9171  {
    9172  CleanupAfterFree();
    9173  }
    9174 
    9175  return lostAllocationCount;
    9176 }
    9177 
    9178 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
    9179 {
    9180  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9181  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    9182  {
    9183  const VmaSuballocation& suballoc = suballocations1st[i];
    9184  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9185  {
    9186  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    9187  {
    9188  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    9189  return VK_ERROR_VALIDATION_FAILED_EXT;
    9190  }
    9191  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    9192  {
    9193  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    9194  return VK_ERROR_VALIDATION_FAILED_EXT;
    9195  }
    9196  }
    9197  }
    9198 
    9199  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9200  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    9201  {
    9202  const VmaSuballocation& suballoc = suballocations2nd[i];
    9203  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9204  {
    9205  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    9206  {
    9207  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    9208  return VK_ERROR_VALIDATION_FAILED_EXT;
    9209  }
    9210  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    9211  {
    9212  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    9213  return VK_ERROR_VALIDATION_FAILED_EXT;
    9214  }
    9215  }
    9216  }
    9217 
    9218  return VK_SUCCESS;
    9219 }
    9220 
// Commits a previously created allocation request into this block's metadata.
// Chooses the destination vector based on upperAddress and the placement of
// request.offset relative to the existing suballocations, and updates the
// free-size accounting.
void VmaBlockMetadata_Linear::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };

    if(upperAddress)
    {
        // Upper-address allocations always go to the 2nd vector, which then
        // serves as the upper stack of a double stack.
        VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
            "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
        SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
        suballocations2nd.push_back(newSuballoc);
        m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    }
    else
    {
        SuballocationVectorType& suballocations1st = AccessSuballocations1st();

        // First allocation.
        if(suballocations1st.empty())
        {
            suballocations1st.push_back(newSuballoc);
        }
        else
        {
            // New allocation at the end of 1st vector.
            if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
            {
                // Check if it fits before the end of the block.
                VMA_ASSERT(request.offset + allocSize <= GetSize());
                suballocations1st.push_back(newSuballoc);
            }
            // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
            else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
            {
                SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

                // Wrapping around before the first live 1st-vector item may
                // start or continue ring-buffer mode, but never mix with the
                // double-stack mode.
                switch(m_2ndVectorMode)
                {
                case SECOND_VECTOR_EMPTY:
                    // First allocation from second part ring buffer.
                    VMA_ASSERT(suballocations2nd.empty());
                    m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
                    break;
                case SECOND_VECTOR_RING_BUFFER:
                    // 2-part ring buffer is already started.
                    VMA_ASSERT(!suballocations2nd.empty());
                    break;
                case SECOND_VECTOR_DOUBLE_STACK:
                    VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
                    break;
                default:
                    VMA_ASSERT(0);
                }

                suballocations2nd.push_back(newSuballoc);
            }
            else
            {
                // The request matches none of the supported placements - it
                // was not produced by CreateAllocationRequest for this state.
                VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
            }
        }
    }

    m_SumFreeSize -= newSuballoc.size;
}
    9290 
    9291 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
    9292 {
    9293  FreeAtOffset(allocation->GetOffset());
    9294 }
    9295 
// Frees the suballocation that starts at the given offset. Tries the cheap
// cases first (first item of the 1st vector, last item of the active vector),
// then falls back to binary search in the middle of either vector. Asserts
// if no suballocation with this offset exists.
void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(!suballocations1st.empty())
    {
        // First allocation: Mark it as next empty at the beginning.
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if(firstSuballoc.offset == offset)
        {
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.hAllocation = VK_NULL_HANDLE;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }

    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            // Popping is safe here: the last 2nd-vector item can be removed
            // outright instead of being marked as a null item.
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }

    // Item from the middle of 1st vector.
    {
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // Offsets in the live part of the 1st vector are sorted ascending, so
        // a binary search by offset is valid here.
        SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc);
        if(it != suballocations1st.end())
        {
            // Middle items are only marked free; CleanupAfterFree compacts later.
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // Ring-buffer mode keeps 2nd-vector offsets ascending; double-stack
        // mode keeps them descending, hence the two comparators.
        SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
            VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
        if(it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}
    9384 
    9385 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
    9386 {
    9387  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    9388  const size_t suballocCount = AccessSuballocations1st().size();
    9389  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
    9390 }
    9391 
// Housekeeping performed after every free: trims null items from the edges of
// both vectors, optionally compacts the 1st vector, and swaps the roles of
// the two vectors when the 1st becomes empty in ring-buffer mode.
void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(IsEmpty())
    {
        // No live allocations at all: reset everything to the initial state.
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        while(m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
        {
            // A middle null item that reached the front becomes a begin null item.
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector.
        while(m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        if(ShouldCompact1st())
        {
            // Compact: move all live items to the front, dropping null items.
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
                {
                    ++srcIndex;
                }
                if(dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if(suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }

        // 1st vector became empty.
        if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                // Re-classify leading null items of the promoted vector as
                // begin null items.
                while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                // Flipping this index is what actually swaps the two vectors.
                m_1stVectorIndex ^= 1;
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}
    9488 
    9489 
    9491 // class VmaBlockMetadata_Buddy
    9492 
// Initializes an empty buddy-allocator metadata object. The node tree itself
// is built later in Init(); m_FreeCount starts at 1 to match the single free
// root node that Init() creates.
VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_Root(VMA_NULL),
    m_AllocationCount(0),
    m_FreeCount(1),
    m_SumFreeSize(0)
{
    // All per-level free lists start empty.
    memset(m_FreeList, 0, sizeof(m_FreeList));
}
    9502 
// Destroys the whole node tree recursively, including the root.
VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
{
    DeleteNode(m_Root);
}
    9507 
    9508 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
    9509 {
    9510  VmaBlockMetadata::Init(size);
    9511 
    9512  m_UsableSize = VmaPrevPow2(size);
    9513  m_SumFreeSize = m_UsableSize;
    9514 
    9515  // Calculate m_LevelCount.
    9516  m_LevelCount = 1;
    9517  while(m_LevelCount < MAX_LEVELS &&
    9518  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
    9519  {
    9520  ++m_LevelCount;
    9521  }
    9522 
    9523  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
    9524  rootNode->offset = 0;
    9525  rootNode->type = Node::TYPE_FREE;
    9526  rootNode->parent = VMA_NULL;
    9527  rootNode->buddy = VMA_NULL;
    9528 
    9529  m_Root = rootNode;
    9530  AddToFreeListFront(0, rootNode);
    9531 }
    9532 
// Checks internal consistency of the buddy tree and the per-level free lists.
// Returns false (via VMA_VALIDATE) on the first inconsistency found.
bool VmaBlockMetadata_Buddy::Validate() const
{
    // Validate tree.
    ValidationContext ctx;
    if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    {
        VMA_VALIDATE(false && "ValidateNode failed.");
    }
    // Counters accumulated during the tree walk must match the cached members.
    VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);

    // Validate free node lists.
    for(uint32_t level = 0; level < m_LevelCount; ++level)
    {
        // The head of a non-empty list must have no predecessor.
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
            m_FreeList[level].front->free.prev == VMA_NULL);

        for(Node* node = m_FreeList[level].front;
            node != VMA_NULL;
            node = node->free.next)
        {
            VMA_VALIDATE(node->type == Node::TYPE_FREE);

            if(node->free.next == VMA_NULL)
            {
                // The tail must be the recorded back pointer.
                VMA_VALIDATE(m_FreeList[level].back == node);
            }
            else
            {
                // Forward and backward links must agree.
                VMA_VALIDATE(node->free.next->free.prev == node);
            }
        }
    }

    // Validate that free lists at higher levels are empty.
    for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    }

    return true;
}
    9575 
    9576 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
    9577 {
    9578  for(uint32_t level = 0; level < m_LevelCount; ++level)
    9579  {
    9580  if(m_FreeList[level].front != VMA_NULL)
    9581  {
    9582  return LevelToNodeSize(level);
    9583  }
    9584  }
    9585  return 0;
    9586 }
    9587 
// Fills outInfo with statistics for this whole block: initializes the
// aggregates, walks the tree via CalcAllocationStatInfoNode, then accounts
// for the unusable (non-power-of-2) tail as one more unused range.
void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
{
    const VkDeviceSize unusableSize = GetUnusableSize();

    outInfo.blockCount = 1;

    outInfo.allocationCount = outInfo.unusedRangeCount = 0;
    outInfo.usedBytes = outInfo.unusedBytes = 0;

    // Min fields start at UINT64_MAX so the first real value replaces them.
    outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
    outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
    outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.

    CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));

    if(unusableSize > 0)
    {
        ++outInfo.unusedRangeCount;
        outInfo.unusedBytes += unusableSize;
        outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
        outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
    }
}
    9611 
    9612 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
    9613 {
    9614  const VkDeviceSize unusableSize = GetUnusableSize();
    9615 
    9616  inoutStats.size += GetSize();
    9617  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
    9618  inoutStats.allocationCount += m_AllocationCount;
    9619  inoutStats.unusedRangeCount += m_FreeCount;
    9620  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    9621 
    9622  if(unusableSize > 0)
    9623  {
    9624  ++inoutStats.unusedRangeCount;
    9625  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
    9626  }
    9627 }
    9628 
    9629 #if VMA_STATS_STRING_ENABLED
    9630 
// Writes a JSON description of this block's layout: header statistics,
// then every node recursively, then the unusable tail (if any) as a final
// unused range.
void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
{
    // TODO optimize
    // Statistics are recomputed here just for the header values.
    VmaStatInfo stat;
    CalcAllocationStatInfo(stat);

    PrintDetailedMap_Begin(
        json,
        stat.unusedBytes,
        stat.allocationCount,
        stat.unusedRangeCount);

    PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));

    const VkDeviceSize unusableSize = GetUnusableSize();
    if(unusableSize > 0)
    {
        PrintDetailedMap_UnusedRange(json,
            m_UsableSize, // offset
            unusableSize); // size
    }

    PrintDetailedMap_End(json);
}
    9655 
    9656 #endif // #if VMA_STATS_STRING_ENABLED
    9657 
    9658 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
    9659  uint32_t currentFrameIndex,
    9660  uint32_t frameInUseCount,
    9661  VkDeviceSize bufferImageGranularity,
    9662  VkDeviceSize allocSize,
    9663  VkDeviceSize allocAlignment,
    9664  bool upperAddress,
    9665  VmaSuballocationType allocType,
    9666  bool canMakeOtherLost,
    9667  uint32_t strategy,
    9668  VmaAllocationRequest* pAllocationRequest)
    9669 {
    9670  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
    9671 
    9672  // Simple way to respect bufferImageGranularity. May be optimized some day.
    9673  // Whenever it might be an OPTIMAL image...
    9674  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
    9675  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    9676  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
    9677  {
    9678  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
    9679  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
    9680  }
    9681 
    9682  if(allocSize > m_UsableSize)
    9683  {
    9684  return false;
    9685  }
    9686 
    9687  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    9688  for(uint32_t level = targetLevel + 1; level--; )
    9689  {
    9690  for(Node* freeNode = m_FreeList[level].front;
    9691  freeNode != VMA_NULL;
    9692  freeNode = freeNode->free.next)
    9693  {
    9694  if(freeNode->offset % allocAlignment == 0)
    9695  {
    9696  pAllocationRequest->offset = freeNode->offset;
    9697  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
    9698  pAllocationRequest->sumItemSize = 0;
    9699  pAllocationRequest->itemsToMakeLostCount = 0;
    9700  pAllocationRequest->customData = (void*)(uintptr_t)level;
    9701  return true;
    9702  }
    9703  }
    9704  }
    9705 
    9706  return false;
    9707 }
    9708 
    9709 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
    9710  uint32_t currentFrameIndex,
    9711  uint32_t frameInUseCount,
    9712  VmaAllocationRequest* pAllocationRequest)
    9713 {
    9714  /*
    9715  Lost allocations are not supported in buddy allocator at the moment.
    9716  Support might be added in the future.
    9717  */
    9718  return pAllocationRequest->itemsToMakeLostCount == 0;
    9719 }
    9720 
    9721 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    9722 {
    9723  /*
    9724  Lost allocations are not supported in buddy allocator at the moment.
    9725  Support might be added in the future.
    9726  */
    9727  return 0;
    9728 }
    9729 
// Commits an allocation request produced by CreateAllocationRequest: finds
// the chosen free node (its level was stashed in request.customData), splits
// it repeatedly until the target level is reached, and converts the final
// node into an allocation node.
void VmaBlockMetadata_Buddy::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;

    // Locate the free node with the requested offset in that level's list.
    Node* currNode = m_FreeList[currLevel].front;
    VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    while(currNode->offset != request.offset)
    {
        currNode = currNode->free.next;
        VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    }

    // Go down, splitting free nodes.
    while(currLevel < targetLevel)
    {
        // currNode is already first free node at currLevel.
        // Remove it from list of free nodes at this currLevel.
        RemoveFromFreeList(currLevel, currNode);

        const uint32_t childrenLevel = currLevel + 1;

        // Create two free sub-nodes.
        Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
        Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();

        leftChild->offset = currNode->offset;
        leftChild->type = Node::TYPE_FREE;
        leftChild->parent = currNode;
        leftChild->buddy = rightChild;

        rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
        rightChild->type = Node::TYPE_FREE;
        rightChild->parent = currNode;
        rightChild->buddy = leftChild;

        // Convert current currNode to split type.
        currNode->type = Node::TYPE_SPLIT;
        currNode->split.leftChild = leftChild;

        // Add child nodes to free list. Order is important!
        // The left child must end up at the front so the descent below
        // continues through it.
        AddToFreeListFront(childrenLevel, rightChild);
        AddToFreeListFront(childrenLevel, leftChild);

        ++m_FreeCount;
        //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
        ++currLevel;
        currNode = m_FreeList[currLevel].front;

        /*
        We can be sure that currNode, as left child of node previously split,
        also fulfills the alignment requirement.
        */
    }

    // Remove from free list.
    VMA_ASSERT(currLevel == targetLevel &&
        currNode != VMA_NULL &&
        currNode->type == Node::TYPE_FREE);
    RemoveFromFreeList(currLevel, currNode);

    // Convert to allocation node.
    currNode->type = Node::TYPE_ALLOCATION;
    currNode->allocation.alloc = hAllocation;

    ++m_AllocationCount;
    --m_FreeCount;
    m_SumFreeSize -= allocSize;
}
    9804 
    9805 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
    9806 {
    9807  if(node->type == Node::TYPE_SPLIT)
    9808  {
    9809  DeleteNode(node->split.leftChild->buddy);
    9810  DeleteNode(node->split.leftChild);
    9811  }
    9812 
    9813  vma_delete(GetAllocationCallbacks(), node);
    9814 }
    9815 
    9816 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
    9817 {
    9818  VMA_VALIDATE(level < m_LevelCount);
    9819  VMA_VALIDATE(curr->parent == parent);
    9820  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
    9821  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
    9822  switch(curr->type)
    9823  {
    9824  case Node::TYPE_FREE:
    9825  // curr->free.prev, next are validated separately.
    9826  ctx.calculatedSumFreeSize += levelNodeSize;
    9827  ++ctx.calculatedFreeCount;
    9828  break;
    9829  case Node::TYPE_ALLOCATION:
    9830  ++ctx.calculatedAllocationCount;
    9831  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
    9832  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
    9833  break;
    9834  case Node::TYPE_SPLIT:
    9835  {
    9836  const uint32_t childrenLevel = level + 1;
    9837  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
    9838  const Node* const leftChild = curr->split.leftChild;
    9839  VMA_VALIDATE(leftChild != VMA_NULL);
    9840  VMA_VALIDATE(leftChild->offset == curr->offset);
    9841  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
    9842  {
    9843  VMA_VALIDATE(false && "ValidateNode for left child failed.");
    9844  }
    9845  const Node* const rightChild = leftChild->buddy;
    9846  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
    9847  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
    9848  {
    9849  VMA_VALIDATE(false && "ValidateNode for right child failed.");
    9850  }
    9851  }
    9852  break;
    9853  default:
    9854  return false;
    9855  }
    9856 
    9857  return true;
    9858 }
    9859 
    9860 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
    9861 {
    9862  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
    9863  uint32_t level = 0;
    9864  VkDeviceSize currLevelNodeSize = m_UsableSize;
    9865  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
    9866  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
    9867  {
    9868  ++level;
    9869  currLevelNodeSize = nextLevelNodeSize;
    9870  nextLevelNodeSize = currLevelNodeSize >> 1;
    9871  }
    9872  return level;
    9873 }
    9874 
// Frees the allocation node at the given offset: walks the tree from the root
// following the offset, converts the node back to free, then repeatedly joins
// it with its buddy while both halves are free, finally inserting the joined
// node into the appropriate free list.
void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
{
    // Find node and level.
    Node* node = m_Root;
    VkDeviceSize nodeOffset = 0;
    uint32_t level = 0;
    VkDeviceSize levelNodeSize = LevelToNodeSize(0);
    while(node->type == Node::TYPE_SPLIT)
    {
        const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
        if(offset < nodeOffset + nextLevelSize)
        {
            // Offset lies in the left half.
            node = node->split.leftChild;
        }
        else
        {
            // Offset lies in the right half (the left child's buddy).
            node = node->split.leftChild->buddy;
            nodeOffset += nextLevelSize;
        }
        ++level;
        levelNodeSize = nextLevelSize;
    }

    VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
    VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);

    ++m_FreeCount;
    --m_AllocationCount;
    // NOTE(review): the assert above permits alloc == VK_NULL_HANDLE, yet
    // alloc->GetSize() below dereferences it - confirm all callers pass a
    // non-null handle.
    m_SumFreeSize += alloc->GetSize();

    node->type = Node::TYPE_FREE;

    // Join free nodes if possible.
    while(level > 0 && node->buddy->type == Node::TYPE_FREE)
    {
        RemoveFromFreeList(level, node->buddy);
        Node* const parent = node->parent;

        vma_delete(GetAllocationCallbacks(), node->buddy);
        vma_delete(GetAllocationCallbacks(), node);
        parent->type = Node::TYPE_FREE;

        node = parent;
        --level;
        //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
        --m_FreeCount;
    }

    AddToFreeListFront(level, node);
}
    9925 
    9926 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
    9927 {
    9928  switch(node->type)
    9929  {
    9930  case Node::TYPE_FREE:
    9931  ++outInfo.unusedRangeCount;
    9932  outInfo.unusedBytes += levelNodeSize;
    9933  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
    9934  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, levelNodeSize);
    9935  break;
    9936  case Node::TYPE_ALLOCATION:
    9937  {
    9938  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    9939  ++outInfo.allocationCount;
    9940  outInfo.usedBytes += allocSize;
    9941  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
    9942  outInfo.allocationSizeMin = VMA_MAX(outInfo.allocationSizeMin, allocSize);
    9943 
    9944  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
    9945  if(unusedRangeSize > 0)
    9946  {
    9947  ++outInfo.unusedRangeCount;
    9948  outInfo.unusedBytes += unusedRangeSize;
    9949  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9950  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9951  }
    9952  }
    9953  break;
    9954  case Node::TYPE_SPLIT:
    9955  {
    9956  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    9957  const Node* const leftChild = node->split.leftChild;
    9958  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
    9959  const Node* const rightChild = leftChild->buddy;
    9960  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
    9961  }
    9962  break;
    9963  default:
    9964  VMA_ASSERT(0);
    9965  }
    9966 }
    9967 
    9968 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
    9969 {
    9970  VMA_ASSERT(node->type == Node::TYPE_FREE);
    9971 
    9972  // List is empty.
    9973  Node* const frontNode = m_FreeList[level].front;
    9974  if(frontNode == VMA_NULL)
    9975  {
    9976  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
    9977  node->free.prev = node->free.next = VMA_NULL;
    9978  m_FreeList[level].front = m_FreeList[level].back = node;
    9979  }
    9980  else
    9981  {
    9982  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
    9983  node->free.prev = VMA_NULL;
    9984  node->free.next = frontNode;
    9985  frontNode->free.prev = node;
    9986  m_FreeList[level].front = node;
    9987  }
    9988 }
    9989 
    9990 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
    9991 {
    9992  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
    9993 
    9994  // It is at the front.
    9995  if(node->free.prev == VMA_NULL)
    9996  {
    9997  VMA_ASSERT(m_FreeList[level].front == node);
    9998  m_FreeList[level].front = node->free.next;
    9999  }
    10000  else
    10001  {
    10002  Node* const prevFreeNode = node->free.prev;
    10003  VMA_ASSERT(prevFreeNode->free.next == node);
    10004  prevFreeNode->free.next = node->free.next;
    10005  }
    10006 
    10007  // It is at the back.
    10008  if(node->free.next == VMA_NULL)
    10009  {
    10010  VMA_ASSERT(m_FreeList[level].back == node);
    10011  m_FreeList[level].back = node->free.prev;
    10012  }
    10013  else
    10014  {
    10015  Node* const nextFreeNode = node->free.next;
    10016  VMA_ASSERT(nextFreeNode->free.prev == node);
    10017  nextFreeNode->free.prev = node->free.prev;
    10018  }
    10019 }
    10020 
    10021 #if VMA_STATS_STRING_ENABLED
    10022 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
    10023 {
    10024  switch(node->type)
    10025  {
    10026  case Node::TYPE_FREE:
    10027  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
    10028  break;
    10029  case Node::TYPE_ALLOCATION:
    10030  {
    10031  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
    10032  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    10033  if(allocSize < levelNodeSize)
    10034  {
    10035  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
    10036  }
    10037  }
    10038  break;
    10039  case Node::TYPE_SPLIT:
    10040  {
    10041  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    10042  const Node* const leftChild = node->split.leftChild;
    10043  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
    10044  const Node* const rightChild = leftChild->buddy;
    10045  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
    10046  }
    10047  break;
    10048  default:
    10049  VMA_ASSERT(0);
    10050  }
    10051 }
    10052 #endif // #if VMA_STATS_STRING_ENABLED
    10053 
    10054 
    10056 // class VmaDeviceMemoryBlock
    10057 
// Constructs an empty, uninitialized block. Real setup (memory handle,
// metadata object) happens later in Init(). The hAllocator parameter is
// accepted but unused here.
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX),
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0),
    m_pMappedData(VMA_NULL)
{
}
    10067 
    10068 void VmaDeviceMemoryBlock::Init(
    10069  VmaAllocator hAllocator,
    10070  uint32_t newMemoryTypeIndex,
    10071  VkDeviceMemory newMemory,
    10072  VkDeviceSize newSize,
    10073  uint32_t id,
    10074  uint32_t algorithm)
    10075 {
    10076  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    10077 
    10078  m_MemoryTypeIndex = newMemoryTypeIndex;
    10079  m_Id = id;
    10080  m_hMemory = newMemory;
    10081 
    10082  switch(algorithm)
    10083  {
    10085  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
    10086  break;
    10088  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
    10089  break;
    10090  default:
    10091  VMA_ASSERT(0);
    10092  // Fall-through.
    10093  case 0:
    10094  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
    10095  }
    10096  m_pMetadata->Init(newSize);
    10097 }
    10098 
    10099 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
    10100 {
    10101  // This is the most important assert in the entire library.
    10102  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    10103  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
    10104 
    10105  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    10106  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    10107  m_hMemory = VK_NULL_HANDLE;
    10108 
    10109  vma_delete(allocator, m_pMetadata);
    10110  m_pMetadata = VMA_NULL;
    10111 }
    10112 
    10113 bool VmaDeviceMemoryBlock::Validate() const
    10114 {
    10115  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
    10116  (m_pMetadata->GetSize() != 0));
    10117 
    10118  return m_pMetadata->Validate();
    10119 }
    10120 
    10121 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
    10122 {
    10123  void* pData = nullptr;
    10124  VkResult res = Map(hAllocator, 1, &pData);
    10125  if(res != VK_SUCCESS)
    10126  {
    10127  return res;
    10128  }
    10129 
    10130  res = m_pMetadata->CheckCorruption(pData);
    10131 
    10132  Unmap(hAllocator, 1);
    10133 
    10134  return res;
    10135 }
    10136 
    10137 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
    10138 {
    10139  if(count == 0)
    10140  {
    10141  return VK_SUCCESS;
    10142  }
    10143 
    10144  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10145  if(m_MapCount != 0)
    10146  {
    10147  m_MapCount += count;
    10148  VMA_ASSERT(m_pMappedData != VMA_NULL);
    10149  if(ppData != VMA_NULL)
    10150  {
    10151  *ppData = m_pMappedData;
    10152  }
    10153  return VK_SUCCESS;
    10154  }
    10155  else
    10156  {
    10157  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    10158  hAllocator->m_hDevice,
    10159  m_hMemory,
    10160  0, // offset
    10161  VK_WHOLE_SIZE,
    10162  0, // flags
    10163  &m_pMappedData);
    10164  if(result == VK_SUCCESS)
    10165  {
    10166  if(ppData != VMA_NULL)
    10167  {
    10168  *ppData = m_pMappedData;
    10169  }
    10170  m_MapCount = count;
    10171  }
    10172  return result;
    10173  }
    10174 }
    10175 
    10176 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
    10177 {
    10178  if(count == 0)
    10179  {
    10180  return;
    10181  }
    10182 
    10183  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10184  if(m_MapCount >= count)
    10185  {
    10186  m_MapCount -= count;
    10187  if(m_MapCount == 0)
    10188  {
    10189  m_pMappedData = VMA_NULL;
    10190  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
    10191  }
    10192  }
    10193  else
    10194  {
    10195  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    10196  }
    10197 }
    10198 
    10199 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    10200 {
    10201  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    10202  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    10203 
    10204  void* pData;
    10205  VkResult res = Map(hAllocator, 1, &pData);
    10206  if(res != VK_SUCCESS)
    10207  {
    10208  return res;
    10209  }
    10210 
    10211  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
    10212  VmaWriteMagicValue(pData, allocOffset + allocSize);
    10213 
    10214  Unmap(hAllocator, 1);
    10215 
    10216  return VK_SUCCESS;
    10217 }
    10218 
    10219 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    10220 {
    10221  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    10222  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    10223 
    10224  void* pData;
    10225  VkResult res = Map(hAllocator, 1, &pData);
    10226  if(res != VK_SUCCESS)
    10227  {
    10228  return res;
    10229  }
    10230 
    10231  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
    10232  {
    10233  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
    10234  }
    10235  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
    10236  {
    10237  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
    10238  }
    10239 
    10240  Unmap(hAllocator, 1);
    10241 
    10242  return VK_SUCCESS;
    10243 }
    10244 
    10245 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    10246  const VmaAllocator hAllocator,
    10247  const VmaAllocation hAllocation,
    10248  VkBuffer hBuffer)
    10249 {
    10250  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    10251  hAllocation->GetBlock() == this);
    10252  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    10253  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10254  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
    10255  hAllocator->m_hDevice,
    10256  hBuffer,
    10257  m_hMemory,
    10258  hAllocation->GetOffset());
    10259 }
    10260 
    10261 VkResult VmaDeviceMemoryBlock::BindImageMemory(
    10262  const VmaAllocator hAllocator,
    10263  const VmaAllocation hAllocation,
    10264  VkImage hImage)
    10265 {
    10266  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    10267  hAllocation->GetBlock() == this);
    10268  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    10269  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10270  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
    10271  hAllocator->m_hDevice,
    10272  hImage,
    10273  m_hMemory,
    10274  hAllocation->GetOffset());
    10275 }
    10276 
    10277 static void InitStatInfo(VmaStatInfo& outInfo)
    10278 {
    10279  memset(&outInfo, 0, sizeof(outInfo));
    10280  outInfo.allocationSizeMin = UINT64_MAX;
    10281  outInfo.unusedRangeSizeMin = UINT64_MAX;
    10282 }
    10283 
    10284 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
    10285 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
    10286 {
    10287  inoutInfo.blockCount += srcInfo.blockCount;
    10288  inoutInfo.allocationCount += srcInfo.allocationCount;
    10289  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    10290  inoutInfo.usedBytes += srcInfo.usedBytes;
    10291  inoutInfo.unusedBytes += srcInfo.unusedBytes;
    10292  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    10293  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    10294  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    10295  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
    10296 }
    10297 
    10298 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    10299 {
    10300  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    10301  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    10302  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    10303  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    10304 }
    10305 
// Constructs a custom pool by configuring its block vector from createInfo.
// blockSize == 0 means "use the heap-dependent preferredBlockSize" and also
// marks the size as non-explicit, which enables heuristic block sizing.
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo,
    VkDeviceSize preferredBlockSize) :
    m_BlockVector(
        hAllocator,
        createInfo.memoryTypeIndex,
        createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        // Granularity of 1 effectively disables buffer/image granularity handling.
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        true, // isCustomPool
        createInfo.blockSize != 0, // explicitBlockSize
        createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    m_Id(0)
{
}
    10324 
// Empty destructor: the member block vector cleans itself up.
VmaPool_T::~VmaPool_T()
{
}
    10328 
    10329 #if VMA_STATS_STRING_ENABLED
    10330 
    10331 #endif // #if VMA_STATS_STRING_ENABLED
    10332 
// Stores the configuration of this block vector; no blocks are created here
// (see CreateMinBlocks). isCustomPool distinguishes pool-owned vectors from
// the allocator's default per-memory-type vectors.
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool isCustomPool,
    bool explicitBlockSize,
    uint32_t algorithm) :
    m_hAllocator(hAllocator),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_IsCustomPool(isCustomPool),
    m_ExplicitBlockSize(explicitBlockSize),
    m_Algorithm(algorithm),
    m_HasEmptyBlock(false),
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_pDefragmentator(VMA_NULL),
    m_NextBlockId(0)
{
}
    10360 
    10361 VmaBlockVector::~VmaBlockVector()
    10362 {
    10363  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
    10364 
    10365  for(size_t i = m_Blocks.size(); i--; )
    10366  {
    10367  m_Blocks[i]->Destroy(m_hAllocator);
    10368  vma_delete(m_hAllocator, m_Blocks[i]);
    10369  }
    10370 }
    10371 
    10372 VkResult VmaBlockVector::CreateMinBlocks()
    10373 {
    10374  for(size_t i = 0; i < m_MinBlockCount; ++i)
    10375  {
    10376  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    10377  if(res != VK_SUCCESS)
    10378  {
    10379  return res;
    10380  }
    10381  }
    10382  return VK_SUCCESS;
    10383 }
    10384 
    10385 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    10386 {
    10387  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10388 
    10389  const size_t blockCount = m_Blocks.size();
    10390 
    10391  pStats->size = 0;
    10392  pStats->unusedSize = 0;
    10393  pStats->allocationCount = 0;
    10394  pStats->unusedRangeCount = 0;
    10395  pStats->unusedRangeSizeMax = 0;
    10396  pStats->blockCount = blockCount;
    10397 
    10398  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    10399  {
    10400  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    10401  VMA_ASSERT(pBlock);
    10402  VMA_HEAVY_ASSERT(pBlock->Validate());
    10403  pBlock->m_pMetadata->AddPoolStats(*pStats);
    10404  }
    10405 }
    10406 
    10407 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
    10408 {
    10409  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    10410  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
    10411  (VMA_DEBUG_MARGIN > 0) &&
    10412  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
    10413 }
    10414 
// Maximum number of retries in VmaBlockVector::Allocate when making other
// allocations lost; exceeding it returns VK_ERROR_TOO_MANY_OBJECTS.
static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    10416 
    10417 VkResult VmaBlockVector::Allocate(
    10418  VmaPool hCurrentPool,
    10419  uint32_t currentFrameIndex,
    10420  VkDeviceSize size,
    10421  VkDeviceSize alignment,
    10422  const VmaAllocationCreateInfo& createInfo,
    10423  VmaSuballocationType suballocType,
    10424  VmaAllocation* pAllocation)
    10425 {
    10426  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    10427  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    10428  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    10429  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    10430  const bool canCreateNewBlock =
    10431  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
    10432  (m_Blocks.size() < m_MaxBlockCount);
    10433  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
    10434 
    10435  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
    10436  // Which in turn is available only when maxBlockCount = 1.
    10437  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
    10438  {
    10439  canMakeOtherLost = false;
    10440  }
    10441 
    10442  // Upper address can only be used with linear allocator and within single memory block.
    10443  if(isUpperAddress &&
    10444  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
    10445  {
    10446  return VK_ERROR_FEATURE_NOT_PRESENT;
    10447  }
    10448 
    10449  // Validate strategy.
    10450  switch(strategy)
    10451  {
    10452  case 0:
    10454  break;
    10458  break;
    10459  default:
    10460  return VK_ERROR_FEATURE_NOT_PRESENT;
    10461  }
    10462 
    10463  // Early reject: requested allocation size is larger that maximum block size for this block vector.
    10464  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
    10465  {
    10466  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10467  }
    10468 
    10469  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10470 
    10471  /*
    10472  Under certain condition, this whole section can be skipped for optimization, so
    10473  we move on directly to trying to allocate with canMakeOtherLost. That's the case
    10474  e.g. for custom pools with linear algorithm.
    10475  */
    10476  if(!canMakeOtherLost || canCreateNewBlock)
    10477  {
    10478  // 1. Search existing allocations. Try to allocate without making other allocations lost.
    10479  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
    10481 
    10482  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    10483  {
    10484  // Use only last block.
    10485  if(!m_Blocks.empty())
    10486  {
    10487  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
    10488  VMA_ASSERT(pCurrBlock);
    10489  VkResult res = AllocateFromBlock(
    10490  pCurrBlock,
    10491  hCurrentPool,
    10492  currentFrameIndex,
    10493  size,
    10494  alignment,
    10495  allocFlagsCopy,
    10496  createInfo.pUserData,
    10497  suballocType,
    10498  strategy,
    10499  pAllocation);
    10500  if(res == VK_SUCCESS)
    10501  {
    10502  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
    10503  return VK_SUCCESS;
    10504  }
    10505  }
    10506  }
    10507  else
    10508  {
    10510  {
    10511  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    10512  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    10513  {
    10514  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10515  VMA_ASSERT(pCurrBlock);
    10516  VkResult res = AllocateFromBlock(
    10517  pCurrBlock,
    10518  hCurrentPool,
    10519  currentFrameIndex,
    10520  size,
    10521  alignment,
    10522  allocFlagsCopy,
    10523  createInfo.pUserData,
    10524  suballocType,
    10525  strategy,
    10526  pAllocation);
    10527  if(res == VK_SUCCESS)
    10528  {
    10529  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    10530  return VK_SUCCESS;
    10531  }
    10532  }
    10533  }
    10534  else // WORST_FIT, FIRST_FIT
    10535  {
    10536  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    10537  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    10538  {
    10539  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10540  VMA_ASSERT(pCurrBlock);
    10541  VkResult res = AllocateFromBlock(
    10542  pCurrBlock,
    10543  hCurrentPool,
    10544  currentFrameIndex,
    10545  size,
    10546  alignment,
    10547  allocFlagsCopy,
    10548  createInfo.pUserData,
    10549  suballocType,
    10550  strategy,
    10551  pAllocation);
    10552  if(res == VK_SUCCESS)
    10553  {
    10554  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    10555  return VK_SUCCESS;
    10556  }
    10557  }
    10558  }
    10559  }
    10560 
    10561  // 2. Try to create new block.
    10562  if(canCreateNewBlock)
    10563  {
    10564  // Calculate optimal size for new block.
    10565  VkDeviceSize newBlockSize = m_PreferredBlockSize;
    10566  uint32_t newBlockSizeShift = 0;
    10567  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
    10568 
    10569  if(!m_ExplicitBlockSize)
    10570  {
    10571  // Allocate 1/8, 1/4, 1/2 as first blocks.
    10572  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
    10573  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
    10574  {
    10575  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    10576  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
    10577  {
    10578  newBlockSize = smallerNewBlockSize;
    10579  ++newBlockSizeShift;
    10580  }
    10581  else
    10582  {
    10583  break;
    10584  }
    10585  }
    10586  }
    10587 
    10588  size_t newBlockIndex = 0;
    10589  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
    10590  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
    10591  if(!m_ExplicitBlockSize)
    10592  {
    10593  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
    10594  {
    10595  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    10596  if(smallerNewBlockSize >= size)
    10597  {
    10598  newBlockSize = smallerNewBlockSize;
    10599  ++newBlockSizeShift;
    10600  res = CreateBlock(newBlockSize, &newBlockIndex);
    10601  }
    10602  else
    10603  {
    10604  break;
    10605  }
    10606  }
    10607  }
    10608 
    10609  if(res == VK_SUCCESS)
    10610  {
    10611  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
    10612  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
    10613 
    10614  res = AllocateFromBlock(
    10615  pBlock,
    10616  hCurrentPool,
    10617  currentFrameIndex,
    10618  size,
    10619  alignment,
    10620  allocFlagsCopy,
    10621  createInfo.pUserData,
    10622  suballocType,
    10623  strategy,
    10624  pAllocation);
    10625  if(res == VK_SUCCESS)
    10626  {
    10627  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
    10628  return VK_SUCCESS;
    10629  }
    10630  else
    10631  {
    10632  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
    10633  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10634  }
    10635  }
    10636  }
    10637  }
    10638 
    10639  // 3. Try to allocate from existing blocks with making other allocations lost.
    10640  if(canMakeOtherLost)
    10641  {
    10642  uint32_t tryIndex = 0;
    10643  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
    10644  {
    10645  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
    10646  VmaAllocationRequest bestRequest = {};
    10647  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
    10648 
    10649  // 1. Search existing allocations.
    10651  {
    10652  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    10653  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    10654  {
    10655  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10656  VMA_ASSERT(pCurrBlock);
    10657  VmaAllocationRequest currRequest = {};
    10658  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    10659  currentFrameIndex,
    10660  m_FrameInUseCount,
    10661  m_BufferImageGranularity,
    10662  size,
    10663  alignment,
    10664  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    10665  suballocType,
    10666  canMakeOtherLost,
    10667  strategy,
    10668  &currRequest))
    10669  {
    10670  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    10671  if(pBestRequestBlock == VMA_NULL ||
    10672  currRequestCost < bestRequestCost)
    10673  {
    10674  pBestRequestBlock = pCurrBlock;
    10675  bestRequest = currRequest;
    10676  bestRequestCost = currRequestCost;
    10677 
    10678  if(bestRequestCost == 0)
    10679  {
    10680  break;
    10681  }
    10682  }
    10683  }
    10684  }
    10685  }
    10686  else // WORST_FIT, FIRST_FIT
    10687  {
    10688  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    10689  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    10690  {
    10691  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10692  VMA_ASSERT(pCurrBlock);
    10693  VmaAllocationRequest currRequest = {};
    10694  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    10695  currentFrameIndex,
    10696  m_FrameInUseCount,
    10697  m_BufferImageGranularity,
    10698  size,
    10699  alignment,
    10700  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    10701  suballocType,
    10702  canMakeOtherLost,
    10703  strategy,
    10704  &currRequest))
    10705  {
    10706  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    10707  if(pBestRequestBlock == VMA_NULL ||
    10708  currRequestCost < bestRequestCost ||
    10710  {
    10711  pBestRequestBlock = pCurrBlock;
    10712  bestRequest = currRequest;
    10713  bestRequestCost = currRequestCost;
    10714 
    10715  if(bestRequestCost == 0 ||
    10717  {
    10718  break;
    10719  }
    10720  }
    10721  }
    10722  }
    10723  }
    10724 
    10725  if(pBestRequestBlock != VMA_NULL)
    10726  {
    10727  if(mapped)
    10728  {
    10729  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
    10730  if(res != VK_SUCCESS)
    10731  {
    10732  return res;
    10733  }
    10734  }
    10735 
    10736  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
    10737  currentFrameIndex,
    10738  m_FrameInUseCount,
    10739  &bestRequest))
    10740  {
    10741  // We no longer have an empty Allocation.
    10742  if(pBestRequestBlock->m_pMetadata->IsEmpty())
    10743  {
    10744  m_HasEmptyBlock = false;
    10745  }
    10746  // Allocate from this pBlock.
    10747  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
    10748  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
    10749  (*pAllocation)->InitBlockAllocation(
    10750  hCurrentPool,
    10751  pBestRequestBlock,
    10752  bestRequest.offset,
    10753  alignment,
    10754  size,
    10755  suballocType,
    10756  mapped,
    10757  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    10758  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
    10759  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
    10760  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
    10761  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    10762  {
    10763  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    10764  }
    10765  if(IsCorruptionDetectionEnabled())
    10766  {
    10767  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
    10768  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    10769  }
    10770  return VK_SUCCESS;
    10771  }
    10772  // else: Some allocations must have been touched while we are here. Next try.
    10773  }
    10774  else
    10775  {
    10776  // Could not find place in any of the blocks - break outer loop.
    10777  break;
    10778  }
    10779  }
    10780  /* Maximum number of tries exceeded - a very unlike event when many other
    10781  threads are simultaneously touching allocations making it impossible to make
    10782  lost at the same time as we try to allocate. */
    10783  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
    10784  {
    10785  return VK_ERROR_TOO_MANY_OBJECTS;
    10786  }
    10787  }
    10788 
    10789  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10790 }
    10791 
    10792 void VmaBlockVector::Free(
    10793  VmaAllocation hAllocation)
    10794 {
    10795  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
    10796 
    10797  // Scope for lock.
    10798  {
    10799  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10800 
    10801  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    10802 
    10803  if(IsCorruptionDetectionEnabled())
    10804  {
    10805  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
    10806  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
    10807  }
    10808 
    10809  if(hAllocation->IsPersistentMap())
    10810  {
    10811  pBlock->Unmap(m_hAllocator, 1);
    10812  }
    10813 
    10814  pBlock->m_pMetadata->Free(hAllocation);
    10815  VMA_HEAVY_ASSERT(pBlock->Validate());
    10816 
    10817  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
    10818 
    10819  // pBlock became empty after this deallocation.
    10820  if(pBlock->m_pMetadata->IsEmpty())
    10821  {
    10822  // Already has empty Allocation. We don't want to have two, so delete this one.
    10823  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
    10824  {
    10825  pBlockToDelete = pBlock;
    10826  Remove(pBlock);
    10827  }
    10828  // We now have first empty block.
    10829  else
    10830  {
    10831  m_HasEmptyBlock = true;
    10832  }
    10833  }
    10834  // pBlock didn't become empty, but we have another empty block - find and free that one.
    10835  // (This is optional, heuristics.)
    10836  else if(m_HasEmptyBlock)
    10837  {
    10838  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
    10839  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
    10840  {
    10841  pBlockToDelete = pLastBlock;
    10842  m_Blocks.pop_back();
    10843  m_HasEmptyBlock = false;
    10844  }
    10845  }
    10846 
    10847  IncrementallySortBlocks();
    10848  }
    10849 
    10850  // Destruction of a free Allocation. Deferred until this point, outside of mutex
    10851  // lock, for performance reason.
    10852  if(pBlockToDelete != VMA_NULL)
    10853  {
    10854  VMA_DEBUG_LOG(" Deleted empty allocation");
    10855  pBlockToDelete->Destroy(m_hAllocator);
    10856  vma_delete(m_hAllocator, pBlockToDelete);
    10857  }
    10858 }
    10859 
    10860 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
    10861 {
    10862  VkDeviceSize result = 0;
    10863  for(size_t i = m_Blocks.size(); i--; )
    10864  {
    10865  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
    10866  if(result >= m_PreferredBlockSize)
    10867  {
    10868  break;
    10869  }
    10870  }
    10871  return result;
    10872 }
    10873 
    10874 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    10875 {
    10876  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    10877  {
    10878  if(m_Blocks[blockIndex] == pBlock)
    10879  {
    10880  VmaVectorRemove(m_Blocks, blockIndex);
    10881  return;
    10882  }
    10883  }
    10884  VMA_ASSERT(0);
    10885 }
    10886 
    10887 void VmaBlockVector::IncrementallySortBlocks()
    10888 {
    10889  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    10890  {
    10891  // Bubble sort only until first swap.
    10892  for(size_t i = 1; i < m_Blocks.size(); ++i)
    10893  {
    10894  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
    10895  {
    10896  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    10897  return;
    10898  }
    10899  }
    10900  }
    10901 }
    10902 
VkResult VmaBlockVector::AllocateFromBlock(
    VmaDeviceMemoryBlock* pBlock,
    VmaPool hCurrentPool,
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    VmaAllocationCreateFlags allocFlags,
    void* pUserData,
    VmaSuballocationType suballocType,
    uint32_t strategy,
    VmaAllocation* pAllocation)
{
    // Tries to carve a suballocation of the given size/alignment out of
    // pBlock. On success returns VK_SUCCESS and fills *pAllocation; when the
    // block has no suitable free region returns VK_ERROR_OUT_OF_DEVICE_MEMORY.
    // The "can make other lost" path is handled elsewhere, never here.
    VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;

    VmaAllocationRequest currRequest = {};
    if(pBlock->m_pMetadata->CreateAllocationRequest(
        currentFrameIndex,
        m_FrameInUseCount,
        m_BufferImageGranularity,
        size,
        alignment,
        isUpperAddress,
        suballocType,
        false, // canMakeOtherLost
        strategy,
        &currRequest))
    {
        // Allocate from pCurrBlock.
        VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);

        // Honor VMA_ALLOCATION_CREATE_MAPPED_BIT by incrementing the block's
        // map reference count before committing the allocation.
        if(mapped)
        {
            VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
            if(res != VK_SUCCESS)
            {
                return res;
            }
        }

        // We no longer have an empty Allocation.
        if(pBlock->m_pMetadata->IsEmpty())
        {
            m_HasEmptyBlock = false;
        }

        // Create the allocation object and register it in the block's metadata.
        *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
        pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
        (*pAllocation)->InitBlockAllocation(
            hCurrentPool,
            pBlock,
            currRequest.offset,
            alignment,
            size,
            suballocType,
            mapped,
            (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        (*pAllocation)->SetUserData(m_hAllocator, pUserData);
        // Optionally fill new memory with a debug pattern to expose reads of
        // uninitialized data.
        if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
        {
            m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
        }
        // With corruption detection enabled, surround the allocation with magic
        // values later verified by CheckCorruption().
        if(IsCorruptionDetectionEnabled())
        {
            VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
        }
        return VK_SUCCESS;
    }
    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
    10977 
    10978 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    10979 {
    10980  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    10981  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    10982  allocInfo.allocationSize = blockSize;
    10983  VkDeviceMemory mem = VK_NULL_HANDLE;
    10984  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    10985  if(res < 0)
    10986  {
    10987  return res;
    10988  }
    10989 
    10990  // New VkDeviceMemory successfully created.
    10991 
    10992  // Create new Allocation for it.
    10993  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    10994  pBlock->Init(
    10995  m_hAllocator,
    10996  m_MemoryTypeIndex,
    10997  mem,
    10998  allocInfo.allocationSize,
    10999  m_NextBlockId++,
    11000  m_Algorithm);
    11001 
    11002  m_Blocks.push_back(pBlock);
    11003  if(pNewBlockIndex != VMA_NULL)
    11004  {
    11005  *pNewBlockIndex = m_Blocks.size() - 1;
    11006  }
    11007 
    11008  return VK_SUCCESS;
    11009 }
    11010 
    11011 #if VMA_STATS_STRING_ENABLED
    11012 
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    // Emits this block vector as a JSON object. Custom pools print their full
    // configuration; the default pool prints only "PreferredBlockSize". In
    // both cases the per-block detailed maps follow under "Blocks".
    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();

    if(m_IsCustomPool)
    {
        json.WriteString("MemoryTypeIndex");
        json.WriteNumber(m_MemoryTypeIndex);

        json.WriteString("BlockSize");
        json.WriteNumber(m_PreferredBlockSize);

        // Min/Max limits are printed only when they differ from the defaults.
        json.WriteString("BlockCount");
        json.BeginObject(true);
        if(m_MinBlockCount > 0)
        {
            json.WriteString("Min");
            json.WriteNumber((uint64_t)m_MinBlockCount);
        }
        if(m_MaxBlockCount < SIZE_MAX)
        {
            json.WriteString("Max");
            json.WriteNumber((uint64_t)m_MaxBlockCount);
        }
        json.WriteString("Cur");
        json.WriteNumber((uint64_t)m_Blocks.size());
        json.EndObject();

        if(m_FrameInUseCount > 0)
        {
            json.WriteString("FrameInUseCount");
            json.WriteNumber(m_FrameInUseCount);
        }

        if(m_Algorithm != 0)
        {
            json.WriteString("Algorithm");
            json.WriteString(VmaAlgorithmToStr(m_Algorithm));
        }
    }
    else
    {
        json.WriteString("PreferredBlockSize");
        json.WriteNumber(m_PreferredBlockSize);
    }

    // Keys are block ids; values are each block's detailed metadata map.
    json.WriteString("Blocks");
    json.BeginObject();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        json.BeginString();
        json.ContinueString(m_Blocks[i]->GetId());
        json.EndString();

        m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    }
    json.EndObject();

    json.EndObject();
}
    11075 
    11076 #endif // #if VMA_STATS_STRING_ENABLED
    11077 
    11078 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
    11079  VmaAllocator hAllocator,
    11080  uint32_t currentFrameIndex)
    11081 {
    11082  if(m_pDefragmentator == VMA_NULL)
    11083  {
    11084  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
    11085  hAllocator,
    11086  this,
    11087  currentFrameIndex);
    11088  }
    11089 
    11090  return m_pDefragmentator;
    11091 }
    11092 
VkResult VmaBlockVector::Defragment(
    VmaDefragmentationStats* pDefragmentationStats,
    VkDeviceSize& maxBytesToMove,
    uint32_t& maxAllocationsToMove)
{
    // Runs the defragmentator (if one was created via EnsureDefragmentator),
    // accumulates optional statistics, decrements the caller's remaining
    // budgets (maxBytesToMove/maxAllocationsToMove are in-out), and frees
    // blocks that became empty in the process.
    if(m_pDefragmentator == VMA_NULL)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    // Defragment.
    VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);

    // Accumulate statistics.
    if(pDefragmentationStats != VMA_NULL)
    {
        const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
        const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
        pDefragmentationStats->bytesMoved += bytesMoved;
        pDefragmentationStats->allocationsMoved += allocationsMoved;
        VMA_ASSERT(bytesMoved <= maxBytesToMove);
        VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
        // Subtract what was consumed so the caller can spread a global budget
        // across multiple block vectors.
        maxBytesToMove -= bytesMoved;
        maxAllocationsToMove -= allocationsMoved;
    }

    // Free empty blocks. Iterate backwards so removal doesn't shift
    // still-unvisited indices.
    m_HasEmptyBlock = false;
    for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    {
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if(pBlock->m_pMetadata->IsEmpty())
        {
            // Keep at least m_MinBlockCount blocks alive; an empty block that
            // is retained is remembered via m_HasEmptyBlock instead.
            if(m_Blocks.size() > m_MinBlockCount)
            {
                if(pDefragmentationStats != VMA_NULL)
                {
                    ++pDefragmentationStats->deviceMemoryBlocksFreed;
                    pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
                }

                VmaVectorRemove(m_Blocks, blockIndex);
                pBlock->Destroy(m_hAllocator);
                vma_delete(m_hAllocator, pBlock);
            }
            else
            {
                m_HasEmptyBlock = true;
            }
        }
    }

    return result;
}
    11149 
    11150 void VmaBlockVector::DestroyDefragmentator()
    11151 {
    11152  if(m_pDefragmentator != VMA_NULL)
    11153  {
    11154  vma_delete(m_hAllocator, m_pDefragmentator);
    11155  m_pDefragmentator = VMA_NULL;
    11156  }
    11157 }
    11158 
    11159 void VmaBlockVector::MakePoolAllocationsLost(
    11160  uint32_t currentFrameIndex,
    11161  size_t* pLostAllocationCount)
    11162 {
    11163  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11164  size_t lostAllocationCount = 0;
    11165  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11166  {
    11167  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11168  VMA_ASSERT(pBlock);
    11169  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    11170  }
    11171  if(pLostAllocationCount != VMA_NULL)
    11172  {
    11173  *pLostAllocationCount = lostAllocationCount;
    11174  }
    11175 }
    11176 
    11177 VkResult VmaBlockVector::CheckCorruption()
    11178 {
    11179  if(!IsCorruptionDetectionEnabled())
    11180  {
    11181  return VK_ERROR_FEATURE_NOT_PRESENT;
    11182  }
    11183 
    11184  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11185  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11186  {
    11187  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11188  VMA_ASSERT(pBlock);
    11189  VkResult res = pBlock->CheckCorruption(m_hAllocator);
    11190  if(res != VK_SUCCESS)
    11191  {
    11192  return res;
    11193  }
    11194  }
    11195  return VK_SUCCESS;
    11196 }
    11197 
    11198 void VmaBlockVector::AddStats(VmaStats* pStats)
    11199 {
    11200  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    11201  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    11202 
    11203  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11204 
    11205  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11206  {
    11207  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11208  VMA_ASSERT(pBlock);
    11209  VMA_HEAVY_ASSERT(pBlock->Validate());
    11210  VmaStatInfo allocationStatInfo;
    11211  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
    11212  VmaAddStatInfo(pStats->total, allocationStatInfo);
    11213  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    11214  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    11215  }
    11216 }
    11217 
    11219 // VmaDefragmentator members definition
    11220 
VmaDefragmentator::VmaDefragmentator(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex) :
    m_hAllocator(hAllocator),
    m_pBlockVector(pBlockVector),
    m_CurrentFrameIndex(currentFrameIndex),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
{
    // Defragmentation supports only the default (generic) block algorithm -
    // vectors using other algorithms must not create a defragmentator.
    VMA_ASSERT(pBlockVector->GetAlgorithm() == 0);
}
    11235 
    11236 VmaDefragmentator::~VmaDefragmentator()
    11237 {
    11238  for(size_t i = m_Blocks.size(); i--; )
    11239  {
    11240  vma_delete(m_hAllocator, m_Blocks[i]);
    11241  }
    11242 }
    11243 
    11244 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    11245 {
    11246  AllocationInfo allocInfo;
    11247  allocInfo.m_hAllocation = hAlloc;
    11248  allocInfo.m_pChanged = pChanged;
    11249  m_Allocations.push_back(allocInfo);
    11250 }
    11251 
    11252 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
    11253 {
    11254  // It has already been mapped for defragmentation.
    11255  if(m_pMappedDataForDefragmentation)
    11256  {
    11257  *ppMappedData = m_pMappedDataForDefragmentation;
    11258  return VK_SUCCESS;
    11259  }
    11260 
    11261  // It is originally mapped.
    11262  if(m_pBlock->GetMappedData())
    11263  {
    11264  *ppMappedData = m_pBlock->GetMappedData();
    11265  return VK_SUCCESS;
    11266  }
    11267 
    11268  // Map on first usage.
    11269  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
    11270  *ppMappedData = m_pMappedDataForDefragmentation;
    11271  return res;
    11272 }
    11273 
void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
{
    // Balance only a mapping created by EnsureMapping(); a mapping the block
    // held on its own is left untouched.
    if(m_pMappedDataForDefragmentation != VMA_NULL)
    {
        m_pBlock->Unmap(hAllocator, 1);
    }
}
    11281 
    11282 VkResult VmaDefragmentator::DefragmentRound(
    11283  VkDeviceSize maxBytesToMove,
    11284  uint32_t maxAllocationsToMove)
    11285 {
    11286  if(m_Blocks.empty())
    11287  {
    11288  return VK_SUCCESS;
    11289  }
    11290 
    11291  size_t srcBlockIndex = m_Blocks.size() - 1;
    11292  size_t srcAllocIndex = SIZE_MAX;
    11293  for(;;)
    11294  {
    11295  // 1. Find next allocation to move.
    11296  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
    11297  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
    11298  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
    11299  {
    11300  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
    11301  {
    11302  // Finished: no more allocations to process.
    11303  if(srcBlockIndex == 0)
    11304  {
    11305  return VK_SUCCESS;
    11306  }
    11307  else
    11308  {
    11309  --srcBlockIndex;
    11310  srcAllocIndex = SIZE_MAX;
    11311  }
    11312  }
    11313  else
    11314  {
    11315  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
    11316  }
    11317  }
    11318 
    11319  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
    11320  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
    11321 
    11322  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
    11323  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
    11324  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
    11325  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
    11326 
    11327  // 2. Try to find new place for this allocation in preceding or current block.
    11328  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
    11329  {
    11330  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
    11331  VmaAllocationRequest dstAllocRequest;
    11332  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
    11333  m_CurrentFrameIndex,
    11334  m_pBlockVector->GetFrameInUseCount(),
    11335  m_pBlockVector->GetBufferImageGranularity(),
    11336  size,
    11337  alignment,
    11338  false, // upperAddress
    11339  suballocType,
    11340  false, // canMakeOtherLost
    11342  &dstAllocRequest) &&
    11343  MoveMakesSense(
    11344  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
    11345  {
    11346  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
    11347 
    11348  // Reached limit on number of allocations or bytes to move.
    11349  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
    11350  (m_BytesMoved + size > maxBytesToMove))
    11351  {
    11352  return VK_INCOMPLETE;
    11353  }
    11354 
    11355  void* pDstMappedData = VMA_NULL;
    11356  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
    11357  if(res != VK_SUCCESS)
    11358  {
    11359  return res;
    11360  }
    11361 
    11362  void* pSrcMappedData = VMA_NULL;
    11363  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
    11364  if(res != VK_SUCCESS)
    11365  {
    11366  return res;
    11367  }
    11368 
    11369  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
    11370  memcpy(
    11371  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
    11372  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
    11373  static_cast<size_t>(size));
    11374 
    11375  if(VMA_DEBUG_MARGIN > 0)
    11376  {
    11377  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset - VMA_DEBUG_MARGIN);
    11378  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
    11379  }
    11380 
    11381  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
    11382  dstAllocRequest,
    11383  suballocType,
    11384  size,
    11385  false, // upperAddress
    11386  allocInfo.m_hAllocation);
    11387  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
    11388 
    11389  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
    11390 
    11391  if(allocInfo.m_pChanged != VMA_NULL)
    11392  {
    11393  *allocInfo.m_pChanged = VK_TRUE;
    11394  }
    11395 
    11396  ++m_AllocationsMoved;
    11397  m_BytesMoved += size;
    11398 
    11399  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
    11400 
    11401  break;
    11402  }
    11403  }
    11404 
    11405  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
    11406 
    11407  if(srcAllocIndex > 0)
    11408  {
    11409  --srcAllocIndex;
    11410  }
    11411  else
    11412  {
    11413  if(srcBlockIndex > 0)
    11414  {
    11415  --srcBlockIndex;
    11416  srcAllocIndex = SIZE_MAX;
    11417  }
    11418  else
    11419  {
    11420  return VK_SUCCESS;
    11421  }
    11422  }
    11423  }
    11424 }
    11425 
VkResult VmaDefragmentator::Defragment(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    // Top-level defragmentation for one block vector: builds per-block info,
    // distributes the registered allocations among blocks, sorts everything,
    // runs up to 2 rounds of moves, then unmaps blocks that were mapped only
    // for defragmentation.
    if(m_Allocations.empty())
    {
        return VK_SUCCESS;
    }

    // Create block info for each block.
    const size_t blockCount = m_pBlockVector->m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
        pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
        m_Blocks.push_back(pBlockInfo);
    }

    // Sort them by m_pBlock pointer value - enables the binary search below.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());

    // Move allocation infos from m_Allocations to appropriate m_Blocks[memTypeIndex].m_Allocations.
    for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
    {
        AllocationInfo& allocInfo = m_Allocations[blockIndex];
        // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
        if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
        {
            VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
            BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
            if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
            {
                (*it)->m_Allocations.push_back(allocInfo);
            }
            else
            {
                // Every registered allocation must belong to one of this
                // vector's blocks.
                VMA_ASSERT(0);
            }
        }
    }
    m_Allocations.clear();

    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = m_Blocks[blockIndex];
        pBlockInfo->CalcHasNonMovableAllocations();
        pBlockInfo->SortAllocationsBySizeDescecnding();
    }

    // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());

    // Execute defragmentation rounds (the main part).
    VkResult result = VK_SUCCESS;
    for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
    {
        result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
    }

    // Unmap blocks that were mapped for defragmentation.
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        m_Blocks[blockIndex]->Unmap(m_hAllocator);
    }

    return result;
}
    11493 
    11494 bool VmaDefragmentator::MoveMakesSense(
    11495  size_t dstBlockIndex, VkDeviceSize dstOffset,
    11496  size_t srcBlockIndex, VkDeviceSize srcOffset)
    11497 {
    11498  if(dstBlockIndex < srcBlockIndex)
    11499  {
    11500  return true;
    11501  }
    11502  if(dstBlockIndex > srcBlockIndex)
    11503  {
    11504  return false;
    11505  }
    11506  if(dstOffset < srcOffset)
    11507  {
    11508  return true;
    11509  }
    11510  return false;
    11511 }
    11512 
    11514 // VmaRecorder
    11515 
    11516 #if VMA_RECORDING_ENABLED
    11517 
VmaRecorder::VmaRecorder() :
    m_UseMutex(true),
    m_Flags(0),
    m_File(VMA_NULL),
    m_Freq(INT64_MAX),
    m_StartCounter(INT64_MAX)
{
    // Members receive their real values in Init(); until then the recorder is inert.
}
    11526 
VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
{
    // Opens the recording file and writes the format header. Returns
    // VK_ERROR_INITIALIZATION_FAILED if the file cannot be created.
    m_UseMutex = useMutex;
    m_Flags = settings.flags;

    // High-resolution timer used to timestamp every recorded call.
    // NOTE(review): QueryPerformanceCounter/fopen_s make this path
    // Windows-only - consistent with VMA_RECORDING_ENABLED being gated on it.
    QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);

    // Open file for writing.
    errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    if(err != 0)
    {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Write header: file magic followed by the recording format version.
    fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    fprintf(m_File, "%s\n", "1,4");

    return VK_SUCCESS;
}
    11548 
VmaRecorder::~VmaRecorder()
{
    // Close the recording file if Init() succeeded in opening it.
    if(m_File != VMA_NULL)
    {
        fclose(m_File);
    }
}
    11556 
void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
{
    // Appends a parameterless "vmaCreateAllocator" line to the recording.
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
    Flush();
}
    11566 
void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
{
    // Appends a parameterless "vmaDestroyAllocator" line to the recording.
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
    Flush();
}
    11576 
void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
{
    // Records the full pool configuration followed by the resulting handle.
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
        createInfo.memoryTypeIndex,
        createInfo.flags,
        createInfo.blockSize,
        (uint64_t)createInfo.minBlockCount,
        (uint64_t)createInfo.maxBlockCount,
        createInfo.frameInUseCount,
        pool);
    Flush();
}
    11593 
void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
{
    // Records destruction of a pool, identified by its handle.
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
        pool);
    Flush();
}
    11604 
void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    // Records memory requirements, creation parameters, the resulting
    // allocation handle, and the (possibly copied) user-data string.
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11629 
void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    // Same as RecordAllocateMemory plus the two dedicated-allocation flags
    // (encoded as 0/1) that vmaAllocateMemoryForBuffer depends on.
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11658 
void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    // Image counterpart of RecordAllocateMemoryForBuffer - identical layout,
    // different function name in the recording.
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11687 
void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
    VmaAllocation allocation)
{
    // Records freeing of an allocation, identified by its handle.
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11699 
void VmaRecorder::RecordResizeAllocation(
    uint32_t frameIndex,
    VmaAllocation allocation,
    VkDeviceSize newSize)
{
    // Records an in-place resize: allocation handle plus requested new size.
    // Requires recording format version >= 1.4.
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
        allocation, newSize);
    Flush();
}
    11713 
void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    VmaAllocation allocation,
    const void* pUserData)
{
    // Records a user-data update. When the allocation stores user data as a
    // string, the data is serialized as text; otherwise as a pointer value.
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(
        allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
        pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11730 
void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
    VmaAllocation allocation)
{
    // Records creation of an intentionally-lost allocation.
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11742 
void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
    VmaAllocation allocation)
{
    // Records mapping of an allocation's memory.
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11754 
void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
    VmaAllocation allocation)
{
    // Records unmapping of an allocation's memory.
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11766 
void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
    VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
{
    // Records a flush of a region (offset, size) of the allocation.
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
        allocation,
        offset,
        size);
    Flush();
}
    11780 
    11781 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
    11782  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    11783 {
    11784  CallParams callParams;
    11785  GetBasicParams(callParams);
    11786 
    11787  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11788  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    11789  allocation,
    11790  offset,
    11791  size);
    11792  Flush();
    11793 }
    11794 
// Appends a "vmaCreateBuffer" entry capturing both the VkBufferCreateInfo and
// the VmaAllocationCreateInfo that produced the given allocation.
void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
    const VkBufferCreateInfo& bufCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    // pUserData is written either as the string it points to (when
    // VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is set) or as a pointer value.
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        bufCreateInfo.flags,
        bufCreateInfo.size,
        bufCreateInfo.usage,
        bufCreateInfo.sharingMode,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11820 
// Appends a "vmaCreateImage" entry capturing both the VkImageCreateInfo and
// the VmaAllocationCreateInfo that produced the given allocation.
void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    const VkImageCreateInfo& imageCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    // pUserData is written either as the string it points to (when
    // VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is set) or as a pointer value.
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        imageCreateInfo.flags,
        imageCreateInfo.imageType,
        imageCreateInfo.format,
        imageCreateInfo.extent.width,
        imageCreateInfo.extent.height,
        imageCreateInfo.extent.depth,
        imageCreateInfo.mipLevels,
        imageCreateInfo.arrayLayers,
        imageCreateInfo.samples,
        imageCreateInfo.tiling,
        imageCreateInfo.usage,
        imageCreateInfo.sharingMode,
        imageCreateInfo.initialLayout,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11855 
    11856 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
    11857  VmaAllocation allocation)
    11858 {
    11859  CallParams callParams;
    11860  GetBasicParams(callParams);
    11861 
    11862  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11863  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
    11864  allocation);
    11865  Flush();
    11866 }
    11867 
    11868 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
    11869  VmaAllocation allocation)
    11870 {
    11871  CallParams callParams;
    11872  GetBasicParams(callParams);
    11873 
    11874  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11875  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
    11876  allocation);
    11877  Flush();
    11878 }
    11879 
    11880 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
    11881  VmaAllocation allocation)
    11882 {
    11883  CallParams callParams;
    11884  GetBasicParams(callParams);
    11885 
    11886  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11887  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    11888  allocation);
    11889  Flush();
    11890 }
    11891 
    11892 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
    11893  VmaAllocation allocation)
    11894 {
    11895  CallParams callParams;
    11896  GetBasicParams(callParams);
    11897 
    11898  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11899  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
    11900  allocation);
    11901  Flush();
    11902 }
    11903 
    11904 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
    11905  VmaPool pool)
    11906 {
    11907  CallParams callParams;
    11908  GetBasicParams(callParams);
    11909 
    11910  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11911  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
    11912  pool);
    11913  Flush();
    11914 }
    11915 
    11916 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
    11917 {
    11918  if(pUserData != VMA_NULL)
    11919  {
    11920  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
    11921  {
    11922  m_Str = (const char*)pUserData;
    11923  }
    11924  else
    11925  {
    11926  sprintf_s(m_PtrStr, "%p", pUserData);
    11927  m_Str = m_PtrStr;
    11928  }
    11929  }
    11930  else
    11931  {
    11932  m_Str = "";
    11933  }
    11934 }
    11935 
// Writes the "Config" section of the recording file: physical device identity,
// relevant limits, the full memory heap/type layout, enabled extensions, and
// the VMA compile-time macro values in effect. VmaReplay uses this section to
// validate that a recording is replayed in a compatible environment.
void VmaRecorder::WriteConfiguration(
    const VkPhysicalDeviceProperties& devProps,
    const VkPhysicalDeviceMemoryProperties& memProps,
    bool dedicatedAllocationExtensionEnabled)
{
    fprintf(m_File, "Config,Begin\n");

    // Device identity.
    fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);

    // Limits that influence allocation behavior.
    fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);

    // Memory heaps and types.
    fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    }
    fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    }

    fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);

    // Compile-time configuration of this VMA build.
    fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    fprintf(m_File, "Config,End\n");
}
    11981 
// Fills outParams with the calling thread's id and the time, in seconds,
// elapsed since the recorder was started (m_StartCounter).
// NOTE(review): uses Win32 APIs (GetCurrentThreadId, QueryPerformanceCounter),
// so recording appears to be Windows-only in this version - confirm.
void VmaRecorder::GetBasicParams(CallParams& outParams)
{
    outParams.threadId = GetCurrentThreadId();

    // Convert performance-counter ticks to seconds using the cached frequency.
    LARGE_INTEGER counter;
    QueryPerformanceCounter(&counter);
    outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
}
    11990 
    11991 void VmaRecorder::Flush()
    11992 {
    11993  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
    11994  {
    11995  fflush(m_File);
    11996  }
    11997 }
    11998 
    11999 #endif // #if VMA_RECORDING_ENABLED
    12000 
    12002 // VmaAllocator_T
    12003 
    12004 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    12005  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    12006  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    12007  m_hDevice(pCreateInfo->device),
    12008  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    12009  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
    12010  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    12011  m_PreferredLargeHeapBlockSize(0),
    12012  m_PhysicalDevice(pCreateInfo->physicalDevice),
    12013  m_CurrentFrameIndex(0),
    12014  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
    12015  m_NextPoolId(0)
    12017  ,m_pRecorder(VMA_NULL)
    12018 #endif
    12019 {
    12020  if(VMA_DEBUG_DETECT_CORRUPTION)
    12021  {
    12022  // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
    12023  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
    12024  }
    12025 
    12026  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
    12027 
    12028 #if !(VMA_DEDICATED_ALLOCATION)
    12030  {
    12031  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
    12032  }
    12033 #endif
    12034 
    12035  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
    12036  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    12037  memset(&m_MemProps, 0, sizeof(m_MemProps));
    12038 
    12039  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    12040  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
    12041 
    12042  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    12043  {
    12044  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    12045  }
    12046 
    12047  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    12048  {
    12049  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
    12050  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    12051  }
    12052 
    12053  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
    12054 
    12055  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    12056  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
    12057 
    12058  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
    12059  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
    12060  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
    12061  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
    12062 
    12063  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
    12064  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    12065 
    12066  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    12067  {
    12068  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
    12069  {
    12070  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
    12071  if(limit != VK_WHOLE_SIZE)
    12072  {
    12073  m_HeapSizeLimit[heapIndex] = limit;
    12074  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
    12075  {
    12076  m_MemProps.memoryHeaps[heapIndex].size = limit;
    12077  }
    12078  }
    12079  }
    12080  }
    12081 
    12082  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12083  {
    12084  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
    12085 
    12086  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
    12087  this,
    12088  memTypeIndex,
    12089  preferredBlockSize,
    12090  0,
    12091  SIZE_MAX,
    12092  GetBufferImageGranularity(),
    12093  pCreateInfo->frameInUseCount,
    12094  false, // isCustomPool
    12095  false, // explicitBlockSize
    12096  false); // linearAlgorithm
    12097  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
    12098  // becase minBlockCount is 0.
    12099  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    12100 
    12101  }
    12102 }
    12103 
// Second-stage initialization, called after the constructor: sets up call
// recording when pCreateInfo->pRecordSettings specifies a non-empty file path.
// Returns VK_SUCCESS, an error from VmaRecorder::Init, or
// VK_ERROR_FEATURE_NOT_PRESENT when recording support is compiled out.
VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
{
    VkResult res = VK_SUCCESS;

    if(pCreateInfo->pRecordSettings != VMA_NULL &&
        !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    {
#if VMA_RECORDING_ENABLED
        m_pRecorder = vma_new(this, VmaRecorder)();
        res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
        if(res != VK_SUCCESS)
        {
            return res;
        }
        // Emit the Config section and the opening vmaCreateAllocator entry.
        m_pRecorder->WriteConfiguration(
            m_PhysicalDeviceProperties,
            m_MemProps,
            m_UseKhrDedicatedAllocation);
        m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
#else
        VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
        return VK_ERROR_FEATURE_NOT_PRESENT;
#endif
    }

    return res;
}
    12131 
    12132 VmaAllocator_T::~VmaAllocator_T()
    12133 {
    12134 #if VMA_RECORDING_ENABLED
    12135  if(m_pRecorder != VMA_NULL)
    12136  {
    12137  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
    12138  vma_delete(this, m_pRecorder);
    12139  }
    12140 #endif
    12141 
    12142  VMA_ASSERT(m_Pools.empty());
    12143 
    12144  for(size_t i = GetMemoryTypeCount(); i--; )
    12145  {
    12146  vma_delete(this, m_pDedicatedAllocations[i]);
    12147  vma_delete(this, m_pBlockVectors[i]);
    12148  }
    12149 }
    12150 
// Populates m_VulkanFunctions: first from statically linked Vulkan entry points
// (when VMA_STATIC_VULKAN_FUNCTIONS == 1), then overridden by any non-null
// pointers the user supplied in pVulkanFunctions. Asserts at the end that every
// required function pointer ended up non-null.
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
    m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
    m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
#if VMA_DEDICATED_ALLOCATION
    // Extension entry points are not statically exported; fetch them at runtime.
    if(m_UseKhrDedicatedAllocation)
    {
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
            (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
            (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    }
#endif // #if VMA_DEDICATED_ALLOCATION
#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1

// Copies one user-provided function pointer over the default, only if non-null.
#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    if(pVulkanFunctions != VMA_NULL)
    {
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
        VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
        VMA_COPY_IF_NOT_NULL(vkFreeMemory);
        VMA_COPY_IF_NOT_NULL(vkMapMemory);
        VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
        VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
        VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
        VMA_COPY_IF_NOT_NULL(vkCreateImage);
        VMA_COPY_IF_NOT_NULL(vkDestroyImage);
#if VMA_DEDICATED_ALLOCATION
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
#endif
    }

#undef VMA_COPY_IF_NOT_NULL

    // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
#endif
}
    12236 
    12237 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    12238 {
    12239  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    12240  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    12241  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
    12242  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
    12243 }
    12244 
    12245 VkResult VmaAllocator_T::AllocateMemoryOfType(
    12246  VkDeviceSize size,
    12247  VkDeviceSize alignment,
    12248  bool dedicatedAllocation,
    12249  VkBuffer dedicatedBuffer,
    12250  VkImage dedicatedImage,
    12251  const VmaAllocationCreateInfo& createInfo,
    12252  uint32_t memTypeIndex,
    12253  VmaSuballocationType suballocType,
    12254  VmaAllocation* pAllocation)
    12255 {
    12256  VMA_ASSERT(pAllocation != VMA_NULL);
    12257  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
    12258 
    12259  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    12260 
    12261  // If memory type is not HOST_VISIBLE, disable MAPPED.
    12262  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12263  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    12264  {
    12265  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    12266  }
    12267 
    12268  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    12269  VMA_ASSERT(blockVector);
    12270 
    12271  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    12272  bool preferDedicatedMemory =
    12273  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    12274  dedicatedAllocation ||
    12275  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
    12276  size > preferredBlockSize / 2;
    12277 
    12278  if(preferDedicatedMemory &&
    12279  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    12280  finalCreateInfo.pool == VK_NULL_HANDLE)
    12281  {
    12283  }
    12284 
    12285  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    12286  {
    12287  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12288  {
    12289  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12290  }
    12291  else
    12292  {
    12293  return AllocateDedicatedMemory(
    12294  size,
    12295  suballocType,
    12296  memTypeIndex,
    12297  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12298  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12299  finalCreateInfo.pUserData,
    12300  dedicatedBuffer,
    12301  dedicatedImage,
    12302  pAllocation);
    12303  }
    12304  }
    12305  else
    12306  {
    12307  VkResult res = blockVector->Allocate(
    12308  VK_NULL_HANDLE, // hCurrentPool
    12309  m_CurrentFrameIndex.load(),
    12310  size,
    12311  alignment,
    12312  finalCreateInfo,
    12313  suballocType,
    12314  pAllocation);
    12315  if(res == VK_SUCCESS)
    12316  {
    12317  return res;
    12318  }
    12319 
    12320  // 5. Try dedicated memory.
    12321  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12322  {
    12323  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12324  }
    12325  else
    12326  {
    12327  res = AllocateDedicatedMemory(
    12328  size,
    12329  suballocType,
    12330  memTypeIndex,
    12331  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12332  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12333  finalCreateInfo.pUserData,
    12334  dedicatedBuffer,
    12335  dedicatedImage,
    12336  pAllocation);
    12337  if(res == VK_SUCCESS)
    12338  {
    12339  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
    12340  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    12341  return VK_SUCCESS;
    12342  }
    12343  else
    12344  {
    12345  // Everything failed: Return error code.
    12346  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    12347  return res;
    12348  }
    12349  }
    12350  }
    12351 }
    12352 
// Allocates a whole VkDeviceMemory block dedicated to a single allocation,
// optionally attaching VkMemoryDedicatedAllocateInfoKHR (when the
// KHR_dedicated_allocation extension is in use) and optionally mapping it.
// On success *pAllocation is created, registered in m_pDedicatedAllocations,
// and VK_SUCCESS is returned; on failure the error code is propagated and any
// partially acquired resources are released.
VkResult VmaAllocator_T::AllocateDedicatedMemory(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    bool map,
    bool isUserDataString,
    void* pUserData,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT(pAllocation);

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;

#if VMA_DEDICATED_ALLOCATION
    // Chain dedicated-allocation info when the memory is tied to one buffer or image.
    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    if(m_UseKhrDedicatedAllocation)
    {
        if(dedicatedBuffer != VK_NULL_HANDLE)
        {
            VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
            dedicatedAllocInfo.buffer = dedicatedBuffer;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
        else if(dedicatedImage != VK_NULL_HANDLE)
        {
            dedicatedAllocInfo.image = dedicatedImage;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
    }
#endif // #if VMA_DEDICATED_ALLOCATION

    // Allocate VkDeviceMemory.
    VkDeviceMemory hMemory = VK_NULL_HANDLE;
    VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    if(res < 0)
    {
        VMA_DEBUG_LOG("    vkAllocateMemory FAILED");
        return res;
    }

    // Optionally map the whole block persistently.
    void* pMappedData = VMA_NULL;
    if(map)
    {
        res = (*m_VulkanFunctions.vkMapMemory)(
            m_hDevice,
            hMemory,
            0,
            VK_WHOLE_SIZE,
            0,
            &pMappedData);
        if(res < 0)
        {
            // Mapping failed: release the just-allocated memory before returning.
            VMA_DEBUG_LOG("    vkMapMemory FAILED");
            FreeVulkanMemory(memTypeIndex, size, hMemory);
            return res;
        }
    }

    *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
    (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    (*pAllocation)->SetUserData(this, pUserData);
    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    {
        // Fill new memory with a recognizable pattern to surface use of uninitialized data.
        FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    }

    // Register it in m_pDedicatedAllocations.
    {
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        // Kept sorted by pointer so later lookup/removal can binary-search.
        VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    }

    VMA_DEBUG_LOG("    Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);

    return VK_SUCCESS;
}
    12435 
// Queries memory requirements for hBuffer. When KHR_dedicated_allocation is in
// use, it additionally reports whether a dedicated allocation is required or
// preferred via vkGetBufferMemoryRequirements2KHR; otherwise both flags are
// reported as false.
void VmaAllocator_T::GetBufferMemoryRequirements(
    VkBuffer hBuffer,
    VkMemoryRequirements& memReq,
    bool& requiresDedicatedAllocation,
    bool& prefersDedicatedAllocation) const
{
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
        memReqInfo.buffer = hBuffer;

        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };

        // Chain the dedicated-requirements query onto the main query.
        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
        memReq2.pNext = &memDedicatedReq;

        (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);

        memReq = memReq2.memoryRequirements;
        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
        prefersDedicatedAllocation  = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    }
    else
#endif // #if VMA_DEDICATED_ALLOCATION
    {
        (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
        requiresDedicatedAllocation = false;
        prefersDedicatedAllocation  = false;
    }
}
    12467 
// Queries memory requirements for hImage. When KHR_dedicated_allocation is in
// use, it additionally reports whether a dedicated allocation is required or
// preferred via vkGetImageMemoryRequirements2KHR; otherwise both flags are
// reported as false.
void VmaAllocator_T::GetImageMemoryRequirements(
    VkImage hImage,
    VkMemoryRequirements& memReq,
    bool& requiresDedicatedAllocation,
    bool& prefersDedicatedAllocation) const
{
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
        memReqInfo.image = hImage;

        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };

        // Chain the dedicated-requirements query onto the main query.
        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
        memReq2.pNext = &memDedicatedReq;

        (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);

        memReq = memReq2.memoryRequirements;
        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
        prefersDedicatedAllocation  = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    }
    else
#endif // #if VMA_DEDICATED_ALLOCATION
    {
        (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
        requiresDedicatedAllocation = false;
        prefersDedicatedAllocation  = false;
    }
}
    12499 
    12500 VkResult VmaAllocator_T::AllocateMemory(
    12501  const VkMemoryRequirements& vkMemReq,
    12502  bool requiresDedicatedAllocation,
    12503  bool prefersDedicatedAllocation,
    12504  VkBuffer dedicatedBuffer,
    12505  VkImage dedicatedImage,
    12506  const VmaAllocationCreateInfo& createInfo,
    12507  VmaSuballocationType suballocType,
    12508  VmaAllocation* pAllocation)
    12509 {
    12510  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
    12511 
    12512  if(vkMemReq.size == 0)
    12513  {
    12514  return VK_ERROR_VALIDATION_FAILED_EXT;
    12515  }
    12516  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
    12517  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12518  {
    12519  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    12520  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12521  }
    12522  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12524  {
    12525  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
    12526  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12527  }
    12528  if(requiresDedicatedAllocation)
    12529  {
    12530  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12531  {
    12532  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
    12533  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12534  }
    12535  if(createInfo.pool != VK_NULL_HANDLE)
    12536  {
    12537  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
    12538  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12539  }
    12540  }
    12541  if((createInfo.pool != VK_NULL_HANDLE) &&
    12542  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    12543  {
    12544  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
    12545  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12546  }
    12547 
    12548  if(createInfo.pool != VK_NULL_HANDLE)
    12549  {
    12550  const VkDeviceSize alignmentForPool = VMA_MAX(
    12551  vkMemReq.alignment,
    12552  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
    12553  return createInfo.pool->m_BlockVector.Allocate(
    12554  createInfo.pool,
    12555  m_CurrentFrameIndex.load(),
    12556  vkMemReq.size,
    12557  alignmentForPool,
    12558  createInfo,
    12559  suballocType,
    12560  pAllocation);
    12561  }
    12562  else
    12563  {
    12564  // Bit mask of memory Vulkan types acceptable for this allocation.
    12565  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    12566  uint32_t memTypeIndex = UINT32_MAX;
    12567  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12568  if(res == VK_SUCCESS)
    12569  {
    12570  VkDeviceSize alignmentForMemType = VMA_MAX(
    12571  vkMemReq.alignment,
    12572  GetMemoryTypeMinAlignment(memTypeIndex));
    12573 
    12574  res = AllocateMemoryOfType(
    12575  vkMemReq.size,
    12576  alignmentForMemType,
    12577  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12578  dedicatedBuffer,
    12579  dedicatedImage,
    12580  createInfo,
    12581  memTypeIndex,
    12582  suballocType,
    12583  pAllocation);
    12584  // Succeeded on first try.
    12585  if(res == VK_SUCCESS)
    12586  {
    12587  return res;
    12588  }
    12589  // Allocation from this memory type failed. Try other compatible memory types.
    12590  else
    12591  {
    12592  for(;;)
    12593  {
    12594  // Remove old memTypeIndex from list of possibilities.
    12595  memoryTypeBits &= ~(1u << memTypeIndex);
    12596  // Find alternative memTypeIndex.
    12597  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12598  if(res == VK_SUCCESS)
    12599  {
    12600  alignmentForMemType = VMA_MAX(
    12601  vkMemReq.alignment,
    12602  GetMemoryTypeMinAlignment(memTypeIndex));
    12603 
    12604  res = AllocateMemoryOfType(
    12605  vkMemReq.size,
    12606  alignmentForMemType,
    12607  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12608  dedicatedBuffer,
    12609  dedicatedImage,
    12610  createInfo,
    12611  memTypeIndex,
    12612  suballocType,
    12613  pAllocation);
    12614  // Allocation from this alternative memory type succeeded.
    12615  if(res == VK_SUCCESS)
    12616  {
    12617  return res;
    12618  }
    12619  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    12620  }
    12621  // No other matching memory type index could be found.
    12622  else
    12623  {
    12624  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    12625  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12626  }
    12627  }
    12628  }
    12629  }
    12630  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    12631  else
    12632  return res;
    12633  }
    12634 }
    12635 
// Frees an allocation made by AllocateMemory() and destroys the allocation
// object itself. Safe to call on lost allocations: the backing memory is
// released only if TouchAllocation() confirms the allocation is still alive.
void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
{
    VMA_ASSERT(allocation);

    // Lost allocations have no backing memory - skip straight to destroying
    // the bookkeeping object below.
    if(TouchAllocation(allocation))
    {
        if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
        {
            // Overwrite freed memory with a recognizable pattern to help
            // diagnose use-after-free bugs.
            FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
        }

        switch(allocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
            {
                // Block suballocation: return it to the block vector it came
                // from - either the custom pool's, or the default vector for
                // its memory type.
                VmaBlockVector* pBlockVector = VMA_NULL;
                VmaPool hPool = allocation->GetPool();
                if(hPool != VK_NULL_HANDLE)
                {
                    pBlockVector = &hPool->m_BlockVector;
                }
                else
                {
                    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
                    pBlockVector = m_pBlockVectors[memTypeIndex];
                }
                pBlockVector->Free(allocation);
            }
            break;
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            FreeDedicatedMemory(allocation);
            break;
        default:
            VMA_ASSERT(0);
        }
    }

    // Reset user data before deletion (presumably releases an internally
    // copied user-data string - confirm in VmaAllocation_T::SetUserData).
    allocation->SetUserData(this, VMA_NULL);
    vma_delete(this, allocation);
}
    12676 
    12677 VkResult VmaAllocator_T::ResizeAllocation(
    12678  const VmaAllocation alloc,
    12679  VkDeviceSize newSize)
    12680 {
    12681  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
    12682  {
    12683  return VK_ERROR_VALIDATION_FAILED_EXT;
    12684  }
    12685  if(newSize == alloc->GetSize())
    12686  {
    12687  return VK_SUCCESS;
    12688  }
    12689 
    12690  switch(alloc->GetType())
    12691  {
    12692  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12693  return VK_ERROR_FEATURE_NOT_PRESENT;
    12694  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    12695  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
    12696  {
    12697  alloc->ChangeSize(newSize);
    12698  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
    12699  return VK_SUCCESS;
    12700  }
    12701  else
    12702  {
    12703  return VK_ERROR_OUT_OF_POOL_MEMORY;
    12704  }
    12705  default:
    12706  VMA_ASSERT(0);
    12707  return VK_ERROR_VALIDATION_FAILED_EXT;
    12708  }
    12709 }
    12710 
// Computes aggregate statistics across all memory owned by this allocator:
// default block vectors, custom pools, and dedicated allocations, grouped
// per memory type and per memory heap, plus a grand total.
void VmaAllocator_T::CalculateStats(VmaStats* pStats)
{
    // Initialize.
    InitStatInfo(pStats->total);
    for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
        InitStatInfo(pStats->memoryType[i]);
    for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
        InitStatInfo(pStats->memoryHeap[i]);

    // Process default pools.
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
        VMA_ASSERT(pBlockVector);
        pBlockVector->AddStats(pStats);
    }

    // Process custom pools. The pool list is guarded by m_PoolsMutex.
    {
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
        for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
        {
            m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
        }
    }

    // Process dedicated allocations. Each memory type's list has its own mutex.
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
        VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
        {
            VmaStatInfo allocationStatInfo;
            (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
            // Each dedicated allocation contributes to the total, its memory
            // type bucket, and its memory heap bucket.
            VmaAddStatInfo(pStats->total, allocationStatInfo);
            VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
            VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
        }
    }

    // Postprocess: compute derived values for every StatInfo bucket
    // (implementation of VmaPostprocessCalcStatInfo is outside this chunk).
    VmaPostprocessCalcStatInfo(pStats->total);
    for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
        VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
        VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
}
    12761 
// 4098 == 0x1002, the PCI vendor ID of AMD. Consumers of this constant are
// outside this chunk - presumably used for vendor-specific behavior.
static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    12763 
// Defragments the given allocations by compacting them within their block
// vectors. Only non-dedicated, non-lost allocations in memory types that are
// both HOST_VISIBLE and HOST_COHERENT are eligible; allocations in custom
// pools using a linear or buddy algorithm are skipped. Returns the first
// failing VkResult, or VK_SUCCESS.
//
// pAllocationsChanged (optional) receives per-allocation "was moved" flags.
// pDefragmentationInfo (optional) limits bytes/allocations moved.
// pDefragmentationStats (optional) receives summary statistics.
VkResult VmaAllocator_T::Defragment(
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo* pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats)
{
    // Zero out caller-visible outputs up front.
    if(pAllocationsChanged != VMA_NULL)
    {
        memset(pAllocationsChanged, 0, allocationCount * sizeof(VkBool32));
    }
    if(pDefragmentationStats != VMA_NULL)
    {
        memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
    }

    const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();

    // The pool list stays locked for the whole operation.
    VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);

    const size_t poolCount = m_Pools.size();

    // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
    for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    {
        VmaAllocation hAlloc = pAllocations[allocIndex];
        VMA_ASSERT(hAlloc);
        const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
        // DedicatedAlloc cannot be defragmented.
        const VkMemoryPropertyFlags requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
        if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
            // Only HOST_VISIBLE and HOST_COHERENT memory types can be defragmented.
            ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags) &&
            // Lost allocation cannot be defragmented.
            (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
        {
            VmaBlockVector* pAllocBlockVector = VMA_NULL;

            const VmaPool hAllocPool = hAlloc->GetPool();
            // This allocation belongs to custom pool.
            if(hAllocPool != VK_NULL_HANDLE)
            {
                // Pools with linear or buddy algorithm are not defragmented.
                if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
                {
                    pAllocBlockVector = &hAllocPool->m_BlockVector;
                }
            }
            // This allocation belongs to general pool.
            else
            {
                pAllocBlockVector = m_pBlockVectors[memTypeIndex];
            }

            if(pAllocBlockVector != VMA_NULL)
            {
                VmaDefragmentator* const pDefragmentator =
                    pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
                VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
                    &pAllocationsChanged[allocIndex] : VMA_NULL;
                pDefragmentator->AddAllocation(hAlloc, pChanged);
            }
        }
    }

    VkResult result = VK_SUCCESS;

    // ======== Main processing.

    // NOTE(review): SIZE_MAX is used as "unlimited" for a VkDeviceSize; on a
    // 32-bit target SIZE_MAX is smaller than UINT64_MAX - confirm intended.
    VkDeviceSize maxBytesToMove = SIZE_MAX;
    uint32_t maxAllocationsToMove = UINT32_MAX;
    if(pDefragmentationInfo != VMA_NULL)
    {
        maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
        maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    }

    // Process standard memory. Stops at the first failure.
    for(uint32_t memTypeIndex = 0;
        (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
        ++memTypeIndex)
    {
        // Only HOST_VISIBLE memory types can be defragmented.
        if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
        {
            result = m_pBlockVectors[memTypeIndex]->Defragment(
                pDefragmentationStats,
                maxBytesToMove,
                maxAllocationsToMove);
        }
    }

    // Process custom pools.
    for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
    {
        result = m_Pools[poolIndex]->m_BlockVector.Defragment(
            pDefragmentationStats,
            maxBytesToMove,
            maxAllocationsToMove);
    }

    // ======== Destroy defragmentators.
    // Runs unconditionally (also after a failure), in reverse order.

    // Process custom pools.
    for(size_t poolIndex = poolCount; poolIndex--; )
    {
        m_Pools[poolIndex]->m_BlockVector.DestroyDefragmentator();
    }

    // Process standard memory.
    for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
    {
        if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
        {
            m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
        }
    }

    return result;
}
    12884 
// Fills *pAllocationInfo with current information about hAllocation.
// For allocations that can become lost, this also "touches" the allocation:
// its last-use frame index is advanced to the current frame via a lock-free
// CAS loop, so querying info counts as using the allocation.
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    if(hAllocation->CanBecomeLost())
    {
        /*
        Warning: This is a carefully designed algorithm.
        Do not modify unless you really know what you're doing :)
        */
        const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation is lost: report placeholder values; size and
                // user data remain meaningful.
                pAllocationInfo->memoryType = UINT32_MAX;
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Already touched in this frame - report real values.
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else // Last use time earlier than current time.
            {
                // Try to advance last-use to the current frame. On CAS failure
                // the helper presumably refreshes localLastUseFrameIndex
                // (compare_exchange semantics - confirm in VmaAllocation_T)
                // and the loop retries.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Statistics build: keep the last-use frame index up to date even for
        // allocations that cannot become lost.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}
    12956 
// Marks the allocation as used in the current frame. Returns false if the
// allocation is lost, true otherwise.
bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
{
    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    if(hAllocation->CanBecomeLost())
    {
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                return false;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                return true;
            }
            else // Last use time earlier than current time.
            {
                // CAS the last-use frame forward. On failure the helper
                // presumably refreshes localLastUseFrameIndex
                // (compare_exchange semantics - confirm in VmaAllocation_T)
                // and the loop retries.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Statistics build: keep the last-use frame index up to date even for
        // allocations that cannot become lost.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        return true;
    }
}
    13008 
    13009 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    13010 {
    13011  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
    13012 
    13013  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    13014 
    13015  if(newCreateInfo.maxBlockCount == 0)
    13016  {
    13017  newCreateInfo.maxBlockCount = SIZE_MAX;
    13018  }
    13019  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
    13020  {
    13021  return VK_ERROR_INITIALIZATION_FAILED;
    13022  }
    13023 
    13024  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    13025 
    13026  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
    13027 
    13028  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    13029  if(res != VK_SUCCESS)
    13030  {
    13031  vma_delete(this, *pPool);
    13032  *pPool = VMA_NULL;
    13033  return res;
    13034  }
    13035 
    13036  // Add to m_Pools.
    13037  {
    13038  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    13039  (*pPool)->SetId(m_NextPoolId++);
    13040  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    13041  }
    13042 
    13043  return VK_SUCCESS;
    13044 }
    13045 
    13046 void VmaAllocator_T::DestroyPool(VmaPool pool)
    13047 {
    13048  // Remove from m_Pools.
    13049  {
    13050  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    13051  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    13052  VMA_ASSERT(success && "Pool not found in Allocator.");
    13053  }
    13054 
    13055  vma_delete(this, pool);
    13056 }
    13057 
// Retrieves statistics of a single custom pool by delegating to its block vector.
void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
{
    pool->m_BlockVector.GetPoolStats(pPoolStats);
}
    13062 
// Atomically publishes the new frame index, which the lost-allocation logic
// (GetAllocationInfo/TouchAllocation) reads via m_CurrentFrameIndex.load().
void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
{
    m_CurrentFrameIndex.store(frameIndex);
}
    13067 
// Marks allocations of the given pool as lost. Delegates to the pool's block
// vector, passing the current frame index; the number of allocations lost is
// reported through pLostAllocationCount (may be null - confirm in callee).
void VmaAllocator_T::MakePoolAllocationsLost(
    VmaPool hPool,
    size_t* pLostAllocationCount)
{
    hPool->m_BlockVector.MakePoolAllocationsLost(
        m_CurrentFrameIndex.load(),
        pLostAllocationCount);
}
    13076 
// Runs the corruption check over all blocks of a single custom pool.
VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
{
    return hPool->m_BlockVector.CheckCorruption();
}
    13081 
    13082 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
    13083 {
    13084  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
    13085 
    13086  // Process default pools.
    13087  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    13088  {
    13089  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
    13090  {
    13091  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    13092  VMA_ASSERT(pBlockVector);
    13093  VkResult localRes = pBlockVector->CheckCorruption();
    13094  switch(localRes)
    13095  {
    13096  case VK_ERROR_FEATURE_NOT_PRESENT:
    13097  break;
    13098  case VK_SUCCESS:
    13099  finalRes = VK_SUCCESS;
    13100  break;
    13101  default:
    13102  return localRes;
    13103  }
    13104  }
    13105  }
    13106 
    13107  // Process custom pools.
    13108  {
    13109  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    13110  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    13111  {
    13112  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
    13113  {
    13114  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
    13115  switch(localRes)
    13116  {
    13117  case VK_ERROR_FEATURE_NOT_PRESENT:
    13118  break;
    13119  case VK_SUCCESS:
    13120  finalRes = VK_SUCCESS;
    13121  break;
    13122  default:
    13123  return localRes;
    13124  }
    13125  }
    13126  }
    13127  }
    13128 
    13129  return finalRes;
    13130 }
    13131 
// Creates an allocation object that is permanently in the "lost" state.
// The second constructor argument (false) presumably disables user-data
// string copying - confirm against the VmaAllocation_T constructor.
void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
{
    *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
    (*pAllocation)->InitLost();
}
    13137 
// Wrapper over vkAllocateMemory that enforces the optional per-heap size
// limit and invokes the user's device-memory allocation callback on success.
VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
{
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);

    VkResult res;
    // m_HeapSizeLimit[heapIndex] == VK_WHOLE_SIZE means "no limit configured".
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        // Budget check and update happen under one lock so concurrent
        // allocations cannot oversubscribe the heap.
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
        {
            res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
            if(res == VK_SUCCESS)
            {
                m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
            }
        }
        else
        {
            // Allocation would exceed the configured heap limit.
            res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
    }
    else
    {
        res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    }

    // Notify the user's callback only after a successful allocation.
    if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    }

    return res;
}
    13171 
// Counterpart of AllocateVulkanMemory(): notifies the user's free callback
// (before the memory is actually freed), calls vkFreeMemory, and returns the
// freed size to the per-heap budget when a heap size limit is configured.
void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
{
    if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    }

    (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());

    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    // VK_WHOLE_SIZE means no limit is configured for this heap.
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        m_HeapSizeLimit[heapIndex] += size;
    }
}
    13188 
// Maps the allocation's memory and returns, via ppData, a pointer adjusted by
// the allocation's offset within its block. Allocations that can become lost
// cannot be mapped, because the mapping could be invalidated at any time.
VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
{
    if(hAllocation->CanBecomeLost())
    {
        return VK_ERROR_MEMORY_MAP_FAILED;
    }

    switch(hAllocation->GetType())
    {
    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
            char *pBytes = VMA_NULL;
            // Map the whole block (presumably reference-counted inside
            // VmaDeviceMemoryBlock::Map - confirm there)...
            VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
            if(res == VK_SUCCESS)
            {
                // ...then offset into it and bump this allocation's own
                // map counter.
                *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
                hAllocation->BlockAllocMap();
            }
            return res;
        }
    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
        return hAllocation->DedicatedAllocMap(this, ppData);
    default:
        VMA_ASSERT(0);
        return VK_ERROR_MEMORY_MAP_FAILED;
    }
}
    13217 
    13218 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
    13219 {
    13220  switch(hAllocation->GetType())
    13221  {
    13222  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13223  {
    13224  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    13225  hAllocation->BlockAllocUnmap();
    13226  pBlock->Unmap(this, 1);
    13227  }
    13228  break;
    13229  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13230  hAllocation->DedicatedAllocUnmap(this);
    13231  break;
    13232  default:
    13233  VMA_ASSERT(0);
    13234  }
    13235 }
    13236 
    13237 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
    13238 {
    13239  VkResult res = VK_SUCCESS;
    13240  switch(hAllocation->GetType())
    13241  {
    13242  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13243  res = GetVulkanFunctions().vkBindBufferMemory(
    13244  m_hDevice,
    13245  hBuffer,
    13246  hAllocation->GetMemory(),
    13247  0); //memoryOffset
    13248  break;
    13249  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13250  {
    13251  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    13252  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
    13253  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
    13254  break;
    13255  }
    13256  default:
    13257  VMA_ASSERT(0);
    13258  }
    13259  return res;
    13260 }
    13261 
    13262 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
    13263 {
    13264  VkResult res = VK_SUCCESS;
    13265  switch(hAllocation->GetType())
    13266  {
    13267  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13268  res = GetVulkanFunctions().vkBindImageMemory(
    13269  m_hDevice,
    13270  hImage,
    13271  hAllocation->GetMemory(),
    13272  0); //memoryOffset
    13273  break;
    13274  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13275  {
    13276  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    13277  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
    13278  res = pBlock->BindImageMemory(this, hAllocation, hImage);
    13279  break;
    13280  }
    13281  default:
    13282  VMA_ASSERT(0);
    13283  }
    13284  return res;
    13285 }
    13286 
// Flushes or invalidates a sub-range of the allocation's memory (op selects
// vkFlushMappedMemoryRanges vs vkInvalidateMappedMemoryRanges). Acts only on
// non-coherent memory types; for coherent memory the call is a no-op.
// offset and size are relative to the allocation; size may be VK_WHOLE_SIZE.
void VmaAllocator_T::FlushOrInvalidateAllocation(
    VmaAllocation hAllocation,
    VkDeviceSize offset, VkDeviceSize size,
    VMA_CACHE_OPERATION op)
{
    const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    {
        const VkDeviceSize allocationSize = hAllocation->GetSize();
        VMA_ASSERT(offset <= allocationSize);

        // Mapped-range offset/size must be aligned to nonCoherentAtomSize.
        const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;

        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
        memRange.memory = hAllocation->GetMemory();

        switch(hAllocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            // Align offset down and size up, clamping to the allocation's end.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                memRange.size = allocationSize - memRange.offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
                    allocationSize - memRange.offset);
            }
            break;

        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
            {
                // 1. Still within this allocation: align the requested range.
                memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
                if(size == VK_WHOLE_SIZE)
                {
                    size = allocationSize - offset;
                }
                else
                {
                    VMA_ASSERT(offset + size <= allocationSize);
                }
                memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);

                // 2. Adjust to whole block: translate to a block-relative
                // offset and clamp so the range stays inside the block.
                const VkDeviceSize allocationOffset = hAllocation->GetOffset();
                VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
                const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
                memRange.offset += allocationOffset;
                memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);

                break;
            }

        default:
            VMA_ASSERT(0);
        }

        switch(op)
        {
        case VMA_CACHE_FLUSH:
            (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        case VMA_CACHE_INVALIDATE:
            (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        default:
            VMA_ASSERT(0);
        }
    }
    // else: Just ignore this call.
}
    13362 
// Releases a dedicated (non-block) allocation: removes it from the
// per-memory-type registry, then frees its VkDeviceMemory.
void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
{
    VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);

    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    {
        // The dedicated-allocation registry is guarded per memory type.
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
        VMA_ASSERT(success);
    }

    VkDeviceMemory hMemory = allocation->GetMemory();

    /*
    There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
    before vkFreeMemory.

    if(allocation->GetMappedData() != VMA_NULL)
    {
        (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    }
    */

    FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);

    VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
}
    13392 
    13393 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
    13394 {
    13395  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
    13396  !hAllocation->CanBecomeLost() &&
    13397  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    13398  {
    13399  void* pData = VMA_NULL;
    13400  VkResult res = Map(hAllocation, &pData);
    13401  if(res == VK_SUCCESS)
    13402  {
    13403  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
    13404  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
    13405  Unmap(hAllocation);
    13406  }
    13407  else
    13408  {
    13409  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
    13410  }
    13411  }
    13412 }
    13413 
    13414 #if VMA_STATS_STRING_ENABLED
    13415 
// Emits the "detailed map" portion of the stats JSON: dedicated allocations,
// default per-memory-type block vectors, and custom pools. Each section is
// opened lazily so empty sections are never written. Must be called with the
// writer positioned inside an open JSON object.
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    // Section 1: "DedicatedAllocations" — grouped per memory type.
    bool dedicatedAllocationsStarted = false;
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        // Lock each type's dedicated-allocation list while reading it.
        VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        if(pDedicatedAllocVector->empty() == false)
        {
            // Open the enclosing object only when the first non-empty list is found.
            if(dedicatedAllocationsStarted == false)
            {
                dedicatedAllocationsStarted = true;
                json.WriteString("DedicatedAllocations");
                json.BeginObject();
            }

            json.BeginString("Type ");
            json.ContinueString(memTypeIndex);
            json.EndString();

            json.BeginArray();

            for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
            {
                json.BeginObject(true);
                const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
                hAlloc->PrintParameters(json);
                json.EndObject();
            }

            json.EndArray();
        }
    }
    if(dedicatedAllocationsStarted)
    {
        json.EndObject();
    }

    // Section 2: "DefaultPools" — the default block vector of each memory type,
    // also opened lazily.
    {
        bool allocationsStarted = false;
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
            {
                if(allocationsStarted == false)
                {
                    allocationsStarted = true;
                    json.WriteString("DefaultPools");
                    json.BeginObject();
                }

                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                json.EndString();

                m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
            }
        }
        if(allocationsStarted)
        {
            json.EndObject();
        }
    }

    // Custom pools — keyed by numeric pool ID, under the pools mutex.
    {
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
        const size_t poolCount = m_Pools.size();
        if(poolCount > 0)
        {
            json.WriteString("Pools");
            json.BeginObject();
            for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
            {
                json.BeginString();
                json.ContinueString(m_Pools[poolIndex]->GetId());
                json.EndString();

                m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
            }
            json.EndObject();
        }
    }
}
    13501 
    13502 #endif // #if VMA_STATS_STRING_ENABLED
    13503 
    13505 // Public interface
    13506 
    13507 VkResult vmaCreateAllocator(
    13508  const VmaAllocatorCreateInfo* pCreateInfo,
    13509  VmaAllocator* pAllocator)
    13510 {
    13511  VMA_ASSERT(pCreateInfo && pAllocator);
    13512  VMA_DEBUG_LOG("vmaCreateAllocator");
    13513  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    13514  return (*pAllocator)->Init(pCreateInfo);
    13515 }
    13516 
    13517 void vmaDestroyAllocator(
    13518  VmaAllocator allocator)
    13519 {
    13520  if(allocator != VK_NULL_HANDLE)
    13521  {
    13522  VMA_DEBUG_LOG("vmaDestroyAllocator");
    13523  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
    13524  vma_delete(&allocationCallbacks, allocator);
    13525  }
    13526 }
    13527 
    13529  VmaAllocator allocator,
    13530  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    13531 {
    13532  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    13533  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    13534 }
    13535 
    13537  VmaAllocator allocator,
    13538  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    13539 {
    13540  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    13541  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    13542 }
    13543 
    13545  VmaAllocator allocator,
    13546  uint32_t memoryTypeIndex,
    13547  VkMemoryPropertyFlags* pFlags)
    13548 {
    13549  VMA_ASSERT(allocator && pFlags);
    13550  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    13551  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    13552 }
    13553 
    13555  VmaAllocator allocator,
    13556  uint32_t frameIndex)
    13557 {
    13558  VMA_ASSERT(allocator);
    13559  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    13560 
    13561  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13562 
    13563  allocator->SetCurrentFrameIndex(frameIndex);
    13564 }
    13565 
    13566 void vmaCalculateStats(
    13567  VmaAllocator allocator,
    13568  VmaStats* pStats)
    13569 {
    13570  VMA_ASSERT(allocator && pStats);
    13571  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13572  allocator->CalculateStats(pStats);
    13573 }
    13574 
    13575 #if VMA_STATS_STRING_ENABLED
    13576 
// Builds a JSON statistics string. The returned buffer is allocated with the
// allocator's callbacks and must be released with vmaFreeStatsString().
// When detailedMap is VK_TRUE, the full detailed map is appended as well.
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap)
{
    VMA_ASSERT(allocator && ppStatsString);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VmaStringBuilder sb(allocator);
    {
        // Scope the writer so it is finished before the string is copied out.
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();

        VmaStats stats;
        allocator->CalculateStats(&stats);

        // Global totals first.
        json.WriteString("Total");
        VmaPrintStatInfo(json, stats.total);

        // One object per memory heap: size, flags, per-heap stats, and nested
        // objects for every memory type that belongs to this heap.
        for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
        {
            json.BeginString("Heap ");
            json.ContinueString(heapIndex);
            json.EndString();
            json.BeginObject();

            json.WriteString("Size");
            json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);

            json.WriteString("Flags");
            json.BeginArray(true);
            if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
            {
                json.WriteString("DEVICE_LOCAL");
            }
            json.EndArray();

            // Emit heap stats only when the heap actually has blocks.
            if(stats.memoryHeap[heapIndex].blockCount > 0)
            {
                json.WriteString("Stats");
                VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
            }

            for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
            {
                if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
                {
                    json.BeginString("Type ");
                    json.ContinueString(typeIndex);
                    json.EndString();

                    json.BeginObject();

                    // Spell out the memory-property flags by name.
                    json.WriteString("Flags");
                    json.BeginArray(true);
                    VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
                    if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
                    {
                        json.WriteString("DEVICE_LOCAL");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
                    {
                        json.WriteString("HOST_VISIBLE");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
                    {
                        json.WriteString("HOST_COHERENT");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
                    {
                        json.WriteString("HOST_CACHED");
                    }
                    if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
                    {
                        json.WriteString("LAZILY_ALLOCATED");
                    }
                    json.EndArray();

                    if(stats.memoryType[typeIndex].blockCount > 0)
                    {
                        json.WriteString("Stats");
                        VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
                    }

                    json.EndObject();
                }
            }

            json.EndObject();
        }
        if(detailedMap == VK_TRUE)
        {
            allocator->PrintDetailedMap(json);
        }

        json.EndObject();
    }

    // Copy into a caller-owned, null-terminated buffer (len + 1 bytes).
    const size_t len = sb.GetLength();
    char* const pChars = vma_new_array(allocator, char, len + 1);
    if(len > 0)
    {
        memcpy(pChars, sb.GetData(), len);
    }
    pChars[len] = '\0';
    *ppStatsString = pChars;
}
    13684 
    13685 void vmaFreeStatsString(
    13686  VmaAllocator allocator,
    13687  char* pStatsString)
    13688 {
    13689  if(pStatsString != VMA_NULL)
    13690  {
    13691  VMA_ASSERT(allocator);
    13692  size_t len = strlen(pStatsString);
    13693  vma_delete_array(allocator, pStatsString, len + 1);
    13694  }
    13695 }
    13696 
    13697 #endif // #if VMA_STATS_STRING_ENABLED
    13698 
    13699 /*
    13700 This function is not protected by any mutex because it just reads immutable data.
    13701 */
    13702 VkResult vmaFindMemoryTypeIndex(
    13703  VmaAllocator allocator,
    13704  uint32_t memoryTypeBits,
    13705  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13706  uint32_t* pMemoryTypeIndex)
    13707 {
    13708  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13709  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13710  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13711 
    13712  if(pAllocationCreateInfo->memoryTypeBits != 0)
    13713  {
    13714  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
    13715  }
    13716 
    13717  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    13718  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    13719 
    13720  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    13721  if(mapped)
    13722  {
    13723  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13724  }
    13725 
    13726  // Convert usage to requiredFlags and preferredFlags.
    13727  switch(pAllocationCreateInfo->usage)
    13728  {
    13730  break;
    13732  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    13733  {
    13734  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    13735  }
    13736  break;
    13738  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    13739  break;
    13741  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13742  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    13743  {
    13744  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    13745  }
    13746  break;
    13748  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13749  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    13750  break;
    13751  default:
    13752  break;
    13753  }
    13754 
    13755  *pMemoryTypeIndex = UINT32_MAX;
    13756  uint32_t minCost = UINT32_MAX;
    13757  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    13758  memTypeIndex < allocator->GetMemoryTypeCount();
    13759  ++memTypeIndex, memTypeBit <<= 1)
    13760  {
    13761  // This memory type is acceptable according to memoryTypeBits bitmask.
    13762  if((memTypeBit & memoryTypeBits) != 0)
    13763  {
    13764  const VkMemoryPropertyFlags currFlags =
    13765  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    13766  // This memory type contains requiredFlags.
    13767  if((requiredFlags & ~currFlags) == 0)
    13768  {
    13769  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    13770  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
    13771  // Remember memory type with lowest cost.
    13772  if(currCost < minCost)
    13773  {
    13774  *pMemoryTypeIndex = memTypeIndex;
    13775  if(currCost == 0)
    13776  {
    13777  return VK_SUCCESS;
    13778  }
    13779  minCost = currCost;
    13780  }
    13781  }
    13782  }
    13783  }
    13784  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    13785 }
    13786 
    13788  VmaAllocator allocator,
    13789  const VkBufferCreateInfo* pBufferCreateInfo,
    13790  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13791  uint32_t* pMemoryTypeIndex)
    13792 {
    13793  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13794  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    13795  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13796  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13797 
    13798  const VkDevice hDev = allocator->m_hDevice;
    13799  VkBuffer hBuffer = VK_NULL_HANDLE;
    13800  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
    13801  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
    13802  if(res == VK_SUCCESS)
    13803  {
    13804  VkMemoryRequirements memReq = {};
    13805  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
    13806  hDev, hBuffer, &memReq);
    13807 
    13808  res = vmaFindMemoryTypeIndex(
    13809  allocator,
    13810  memReq.memoryTypeBits,
    13811  pAllocationCreateInfo,
    13812  pMemoryTypeIndex);
    13813 
    13814  allocator->GetVulkanFunctions().vkDestroyBuffer(
    13815  hDev, hBuffer, allocator->GetAllocationCallbacks());
    13816  }
    13817  return res;
    13818 }
    13819 
    13821  VmaAllocator allocator,
    13822  const VkImageCreateInfo* pImageCreateInfo,
    13823  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13824  uint32_t* pMemoryTypeIndex)
    13825 {
    13826  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13827  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    13828  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13829  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13830 
    13831  const VkDevice hDev = allocator->m_hDevice;
    13832  VkImage hImage = VK_NULL_HANDLE;
    13833  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
    13834  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
    13835  if(res == VK_SUCCESS)
    13836  {
    13837  VkMemoryRequirements memReq = {};
    13838  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
    13839  hDev, hImage, &memReq);
    13840 
    13841  res = vmaFindMemoryTypeIndex(
    13842  allocator,
    13843  memReq.memoryTypeBits,
    13844  pAllocationCreateInfo,
    13845  pMemoryTypeIndex);
    13846 
    13847  allocator->GetVulkanFunctions().vkDestroyImage(
    13848  hDev, hImage, allocator->GetAllocationCallbacks());
    13849  }
    13850  return res;
    13851 }
    13852 
    13853 VkResult vmaCreatePool(
    13854  VmaAllocator allocator,
    13855  const VmaPoolCreateInfo* pCreateInfo,
    13856  VmaPool* pPool)
    13857 {
    13858  VMA_ASSERT(allocator && pCreateInfo && pPool);
    13859 
    13860  VMA_DEBUG_LOG("vmaCreatePool");
    13861 
    13862  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13863 
    13864  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
    13865 
    13866 #if VMA_RECORDING_ENABLED
    13867  if(allocator->GetRecorder() != VMA_NULL)
    13868  {
    13869  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
    13870  }
    13871 #endif
    13872 
    13873  return res;
    13874 }
    13875 
    13876 void vmaDestroyPool(
    13877  VmaAllocator allocator,
    13878  VmaPool pool)
    13879 {
    13880  VMA_ASSERT(allocator);
    13881 
    13882  if(pool == VK_NULL_HANDLE)
    13883  {
    13884  return;
    13885  }
    13886 
    13887  VMA_DEBUG_LOG("vmaDestroyPool");
    13888 
    13889  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13890 
    13891 #if VMA_RECORDING_ENABLED
    13892  if(allocator->GetRecorder() != VMA_NULL)
    13893  {
    13894  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
    13895  }
    13896 #endif
    13897 
    13898  allocator->DestroyPool(pool);
    13899 }
    13900 
    13901 void vmaGetPoolStats(
    13902  VmaAllocator allocator,
    13903  VmaPool pool,
    13904  VmaPoolStats* pPoolStats)
    13905 {
    13906  VMA_ASSERT(allocator && pool && pPoolStats);
    13907 
    13908  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13909 
    13910  allocator->GetPoolStats(pool, pPoolStats);
    13911 }
    13912 
    13914  VmaAllocator allocator,
    13915  VmaPool pool,
    13916  size_t* pLostAllocationCount)
    13917 {
    13918  VMA_ASSERT(allocator && pool);
    13919 
    13920  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13921 
    13922 #if VMA_RECORDING_ENABLED
    13923  if(allocator->GetRecorder() != VMA_NULL)
    13924  {
    13925  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
    13926  }
    13927 #endif
    13928 
    13929  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    13930 }
    13931 
    13932 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
    13933 {
    13934  VMA_ASSERT(allocator && pool);
    13935 
    13936  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13937 
    13938  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
    13939 
    13940  return allocator->CheckPoolCorruption(pool);
    13941 }
    13942 
    13943 VkResult vmaAllocateMemory(
    13944  VmaAllocator allocator,
    13945  const VkMemoryRequirements* pVkMemoryRequirements,
    13946  const VmaAllocationCreateInfo* pCreateInfo,
    13947  VmaAllocation* pAllocation,
    13948  VmaAllocationInfo* pAllocationInfo)
    13949 {
    13950  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
    13951 
    13952  VMA_DEBUG_LOG("vmaAllocateMemory");
    13953 
    13954  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13955 
    13956  VkResult result = allocator->AllocateMemory(
    13957  *pVkMemoryRequirements,
    13958  false, // requiresDedicatedAllocation
    13959  false, // prefersDedicatedAllocation
    13960  VK_NULL_HANDLE, // dedicatedBuffer
    13961  VK_NULL_HANDLE, // dedicatedImage
    13962  *pCreateInfo,
    13963  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    13964  pAllocation);
    13965 
    13966 #if VMA_RECORDING_ENABLED
    13967  if(allocator->GetRecorder() != VMA_NULL)
    13968  {
    13969  allocator->GetRecorder()->RecordAllocateMemory(
    13970  allocator->GetCurrentFrameIndex(),
    13971  *pVkMemoryRequirements,
    13972  *pCreateInfo,
    13973  *pAllocation);
    13974  }
    13975 #endif
    13976 
    13977  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    13978  {
    13979  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    13980  }
    13981 
    13982  return result;
    13983 }
    13984 
    13986  VmaAllocator allocator,
    13987  VkBuffer buffer,
    13988  const VmaAllocationCreateInfo* pCreateInfo,
    13989  VmaAllocation* pAllocation,
    13990  VmaAllocationInfo* pAllocationInfo)
    13991 {
    13992  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    13993 
    13994  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    13995 
    13996  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13997 
    13998  VkMemoryRequirements vkMemReq = {};
    13999  bool requiresDedicatedAllocation = false;
    14000  bool prefersDedicatedAllocation = false;
    14001  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
    14002  requiresDedicatedAllocation,
    14003  prefersDedicatedAllocation);
    14004 
    14005  VkResult result = allocator->AllocateMemory(
    14006  vkMemReq,
    14007  requiresDedicatedAllocation,
    14008  prefersDedicatedAllocation,
    14009  buffer, // dedicatedBuffer
    14010  VK_NULL_HANDLE, // dedicatedImage
    14011  *pCreateInfo,
    14012  VMA_SUBALLOCATION_TYPE_BUFFER,
    14013  pAllocation);
    14014 
    14015 #if VMA_RECORDING_ENABLED
    14016  if(allocator->GetRecorder() != VMA_NULL)
    14017  {
    14018  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
    14019  allocator->GetCurrentFrameIndex(),
    14020  vkMemReq,
    14021  requiresDedicatedAllocation,
    14022  prefersDedicatedAllocation,
    14023  *pCreateInfo,
    14024  *pAllocation);
    14025  }
    14026 #endif
    14027 
    14028  if(pAllocationInfo && result == VK_SUCCESS)
    14029  {
    14030  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14031  }
    14032 
    14033  return result;
    14034 }
    14035 
    14036 VkResult vmaAllocateMemoryForImage(
    14037  VmaAllocator allocator,
    14038  VkImage image,
    14039  const VmaAllocationCreateInfo* pCreateInfo,
    14040  VmaAllocation* pAllocation,
    14041  VmaAllocationInfo* pAllocationInfo)
    14042 {
    14043  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    14044 
    14045  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
    14046 
    14047  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14048 
    14049  VkMemoryRequirements vkMemReq = {};
    14050  bool requiresDedicatedAllocation = false;
    14051  bool prefersDedicatedAllocation = false;
    14052  allocator->GetImageMemoryRequirements(image, vkMemReq,
    14053  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14054 
    14055  VkResult result = allocator->AllocateMemory(
    14056  vkMemReq,
    14057  requiresDedicatedAllocation,
    14058  prefersDedicatedAllocation,
    14059  VK_NULL_HANDLE, // dedicatedBuffer
    14060  image, // dedicatedImage
    14061  *pCreateInfo,
    14062  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
    14063  pAllocation);
    14064 
    14065 #if VMA_RECORDING_ENABLED
    14066  if(allocator->GetRecorder() != VMA_NULL)
    14067  {
    14068  allocator->GetRecorder()->RecordAllocateMemoryForImage(
    14069  allocator->GetCurrentFrameIndex(),
    14070  vkMemReq,
    14071  requiresDedicatedAllocation,
    14072  prefersDedicatedAllocation,
    14073  *pCreateInfo,
    14074  *pAllocation);
    14075  }
    14076 #endif
    14077 
    14078  if(pAllocationInfo && result == VK_SUCCESS)
    14079  {
    14080  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14081  }
    14082 
    14083  return result;
    14084 }
    14085 
    14086 void vmaFreeMemory(
    14087  VmaAllocator allocator,
    14088  VmaAllocation allocation)
    14089 {
    14090  VMA_ASSERT(allocator);
    14091 
    14092  if(allocation == VK_NULL_HANDLE)
    14093  {
    14094  return;
    14095  }
    14096 
    14097  VMA_DEBUG_LOG("vmaFreeMemory");
    14098 
    14099  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14100 
    14101 #if VMA_RECORDING_ENABLED
    14102  if(allocator->GetRecorder() != VMA_NULL)
    14103  {
    14104  allocator->GetRecorder()->RecordFreeMemory(
    14105  allocator->GetCurrentFrameIndex(),
    14106  allocation);
    14107  }
    14108 #endif
    14109 
    14110  allocator->FreeMemory(allocation);
    14111 }
    14112 
    14113 VkResult vmaResizeAllocation(
    14114  VmaAllocator allocator,
    14115  VmaAllocation allocation,
    14116  VkDeviceSize newSize)
    14117 {
    14118  VMA_ASSERT(allocator && allocation);
    14119 
    14120  VMA_DEBUG_LOG("vmaResizeAllocation");
    14121 
    14122  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14123 
    14124 #if VMA_RECORDING_ENABLED
    14125  if(allocator->GetRecorder() != VMA_NULL)
    14126  {
    14127  allocator->GetRecorder()->RecordResizeAllocation(
    14128  allocator->GetCurrentFrameIndex(),
    14129  allocation,
    14130  newSize);
    14131  }
    14132 #endif
    14133 
    14134  return allocator->ResizeAllocation(allocation, newSize);
    14135 }
    14136 
    14138  VmaAllocator allocator,
    14139  VmaAllocation allocation,
    14140  VmaAllocationInfo* pAllocationInfo)
    14141 {
    14142  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    14143 
    14144  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14145 
    14146 #if VMA_RECORDING_ENABLED
    14147  if(allocator->GetRecorder() != VMA_NULL)
    14148  {
    14149  allocator->GetRecorder()->RecordGetAllocationInfo(
    14150  allocator->GetCurrentFrameIndex(),
    14151  allocation);
    14152  }
    14153 #endif
    14154 
    14155  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    14156 }
    14157 
    14158 VkBool32 vmaTouchAllocation(
    14159  VmaAllocator allocator,
    14160  VmaAllocation allocation)
    14161 {
    14162  VMA_ASSERT(allocator && allocation);
    14163 
    14164  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14165 
    14166 #if VMA_RECORDING_ENABLED
    14167  if(allocator->GetRecorder() != VMA_NULL)
    14168  {
    14169  allocator->GetRecorder()->RecordTouchAllocation(
    14170  allocator->GetCurrentFrameIndex(),
    14171  allocation);
    14172  }
    14173 #endif
    14174 
    14175  return allocator->TouchAllocation(allocation);
    14176 }
    14177 
    14179  VmaAllocator allocator,
    14180  VmaAllocation allocation,
    14181  void* pUserData)
    14182 {
    14183  VMA_ASSERT(allocator && allocation);
    14184 
    14185  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14186 
    14187  allocation->SetUserData(allocator, pUserData);
    14188 
    14189 #if VMA_RECORDING_ENABLED
    14190  if(allocator->GetRecorder() != VMA_NULL)
    14191  {
    14192  allocator->GetRecorder()->RecordSetAllocationUserData(
    14193  allocator->GetCurrentFrameIndex(),
    14194  allocation,
    14195  pUserData);
    14196  }
    14197 #endif
    14198 }
    14199 
    14201  VmaAllocator allocator,
    14202  VmaAllocation* pAllocation)
    14203 {
    14204  VMA_ASSERT(allocator && pAllocation);
    14205 
    14206  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
    14207 
    14208  allocator->CreateLostAllocation(pAllocation);
    14209 
    14210 #if VMA_RECORDING_ENABLED
    14211  if(allocator->GetRecorder() != VMA_NULL)
    14212  {
    14213  allocator->GetRecorder()->RecordCreateLostAllocation(
    14214  allocator->GetCurrentFrameIndex(),
    14215  *pAllocation);
    14216  }
    14217 #endif
    14218 }
    14219 
    14220 VkResult vmaMapMemory(
    14221  VmaAllocator allocator,
    14222  VmaAllocation allocation,
    14223  void** ppData)
    14224 {
    14225  VMA_ASSERT(allocator && allocation && ppData);
    14226 
    14227  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14228 
    14229  VkResult res = allocator->Map(allocation, ppData);
    14230 
    14231 #if VMA_RECORDING_ENABLED
    14232  if(allocator->GetRecorder() != VMA_NULL)
    14233  {
    14234  allocator->GetRecorder()->RecordMapMemory(
    14235  allocator->GetCurrentFrameIndex(),
    14236  allocation);
    14237  }
    14238 #endif
    14239 
    14240  return res;
    14241 }
    14242 
    14243 void vmaUnmapMemory(
    14244  VmaAllocator allocator,
    14245  VmaAllocation allocation)
    14246 {
    14247  VMA_ASSERT(allocator && allocation);
    14248 
    14249  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14250 
    14251 #if VMA_RECORDING_ENABLED
    14252  if(allocator->GetRecorder() != VMA_NULL)
    14253  {
    14254  allocator->GetRecorder()->RecordUnmapMemory(
    14255  allocator->GetCurrentFrameIndex(),
    14256  allocation);
    14257  }
    14258 #endif
    14259 
    14260  allocator->Unmap(allocation);
    14261 }
    14262 
    14263 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    14264 {
    14265  VMA_ASSERT(allocator && allocation);
    14266 
    14267  VMA_DEBUG_LOG("vmaFlushAllocation");
    14268 
    14269  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14270 
    14271  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
    14272 
    14273 #if VMA_RECORDING_ENABLED
    14274  if(allocator->GetRecorder() != VMA_NULL)
    14275  {
    14276  allocator->GetRecorder()->RecordFlushAllocation(
    14277  allocator->GetCurrentFrameIndex(),
    14278  allocation, offset, size);
    14279  }
    14280 #endif
    14281 }
    14282 
    14283 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    14284 {
    14285  VMA_ASSERT(allocator && allocation);
    14286 
    14287  VMA_DEBUG_LOG("vmaInvalidateAllocation");
    14288 
    14289  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14290 
    14291  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
    14292 
    14293 #if VMA_RECORDING_ENABLED
    14294  if(allocator->GetRecorder() != VMA_NULL)
    14295  {
    14296  allocator->GetRecorder()->RecordInvalidateAllocation(
    14297  allocator->GetCurrentFrameIndex(),
    14298  allocation, offset, size);
    14299  }
    14300 #endif
    14301 }
    14302 
    14303 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
    14304 {
    14305  VMA_ASSERT(allocator);
    14306 
    14307  VMA_DEBUG_LOG("vmaCheckCorruption");
    14308 
    14309  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14310 
    14311  return allocator->CheckCorruption(memoryTypeBits);
    14312 }
    14313 
    14314 VkResult vmaDefragment(
    14315  VmaAllocator allocator,
    14316  VmaAllocation* pAllocations,
    14317  size_t allocationCount,
    14318  VkBool32* pAllocationsChanged,
    14319  const VmaDefragmentationInfo *pDefragmentationInfo,
    14320  VmaDefragmentationStats* pDefragmentationStats)
    14321 {
    14322  VMA_ASSERT(allocator && pAllocations);
    14323 
    14324  VMA_DEBUG_LOG("vmaDefragment");
    14325 
    14326  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14327 
    14328  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
    14329 }
    14330 
    14331 VkResult vmaBindBufferMemory(
    14332  VmaAllocator allocator,
    14333  VmaAllocation allocation,
    14334  VkBuffer buffer)
    14335 {
    14336  VMA_ASSERT(allocator && allocation && buffer);
    14337 
    14338  VMA_DEBUG_LOG("vmaBindBufferMemory");
    14339 
    14340  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14341 
    14342  return allocator->BindBufferMemory(allocation, buffer);
    14343 }
    14344 
    14345 VkResult vmaBindImageMemory(
    14346  VmaAllocator allocator,
    14347  VmaAllocation allocation,
    14348  VkImage image)
    14349 {
    14350  VMA_ASSERT(allocator && allocation && image);
    14351 
    14352  VMA_DEBUG_LOG("vmaBindImageMemory");
    14353 
    14354  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14355 
    14356  return allocator->BindImageMemory(allocation, image);
    14357 }
    14358 
/** \brief Creates a VkBuffer, allocates memory for it, and binds them together.

Steps, each rolled back on failure of a later one:
1. vkCreateBuffer.
2. Query memory requirements (including dedicated-allocation preference) and
   sanity-check that the driver-reported alignment covers the Physical Device
   limits for the requested buffer usages.
3. Allocate memory via VmaAllocator_T::AllocateMemory().
4. Bind the allocation to the buffer.

On any failure the partially created buffer/allocation are destroyed and
*pBuffer / *pAllocation are reset to VK_NULL_HANDLE, so the caller never
receives half-initialized handles. pAllocationInfo is optional.
Note: a zero-size buffer is rejected up front with
VK_ERROR_VALIDATION_FAILED_EXT rather than being passed to the driver.
*/
    14359 VkResult vmaCreateBuffer(
    14360  VmaAllocator allocator,
    14361  const VkBufferCreateInfo* pBufferCreateInfo,
    14362  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    14363  VkBuffer* pBuffer,
    14364  VmaAllocation* pAllocation,
    14365  VmaAllocationInfo* pAllocationInfo)
    14366 {
    14367  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
    14368 
    14369  if(pBufferCreateInfo->size == 0)
    14370  {
    14371  return VK_ERROR_VALIDATION_FAILED_EXT;
    14372  }
    14373 
    14374  VMA_DEBUG_LOG("vmaCreateBuffer");
    14375 
    14376  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14377 
    14378  *pBuffer = VK_NULL_HANDLE;
    14379  *pAllocation = VK_NULL_HANDLE;
    14380 
    14381  // 1. Create VkBuffer.
    14382  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
    14383  allocator->m_hDevice,
    14384  pBufferCreateInfo,
    14385  allocator->GetAllocationCallbacks(),
    14386  pBuffer);
    14387  if(res >= 0)
    14388  {
    14389  // 2. vkGetBufferMemoryRequirements.
    14390  VkMemoryRequirements vkMemReq = {};
    14391  bool requiresDedicatedAllocation = false;
    14392  bool prefersDedicatedAllocation = false;
    14393  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
    14394  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14395 
    14396  // Make sure alignment requirements for specific buffer usages reported
    14397  // in Physical Device Properties are included in alignment reported by memory requirements.
    14398  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
    14399  {
    14400  VMA_ASSERT(vkMemReq.alignment %
    14401  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
    14402  }
    14403  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
    14404  {
    14405  VMA_ASSERT(vkMemReq.alignment %
    14406  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
    14407  }
    14408  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
    14409  {
    14410  VMA_ASSERT(vkMemReq.alignment %
    14411  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
    14412  }
    14413 
    14414  // 3. Allocate memory using allocator.
    14415  res = allocator->AllocateMemory(
    14416  vkMemReq,
    14417  requiresDedicatedAllocation,
    14418  prefersDedicatedAllocation,
    14419  *pBuffer, // dedicatedBuffer
    14420  VK_NULL_HANDLE, // dedicatedImage
    14421  *pAllocationCreateInfo,
    14422  VMA_SUBALLOCATION_TYPE_BUFFER,
    14423  pAllocation);
    14424 
    14425 #if VMA_RECORDING_ENABLED
    // NOTE: recorded regardless of the allocation result above, so
    // *pAllocation may still be VK_NULL_HANDLE at this point.
    14426  if(allocator->GetRecorder() != VMA_NULL)
    14427  {
    14428  allocator->GetRecorder()->RecordCreateBuffer(
    14429  allocator->GetCurrentFrameIndex(),
    14430  *pBufferCreateInfo,
    14431  *pAllocationCreateInfo,
    14432  *pAllocation);
    14433  }
    14434 #endif
    14435 
    14436  if(res >= 0)
    14437  {
    14438  // 4. Bind buffer with memory.
    14439  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
    14440  if(res >= 0)
    14441  {
    14442  // All steps succeeded.
    14443  #if VMA_STATS_STRING_ENABLED
    14444  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
    14445  #endif
    14446  if(pAllocationInfo != VMA_NULL)
    14447  {
    14448  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14449  }
    14450 
    14451  return VK_SUCCESS;
    14452  }
    // Bind failed: roll back allocation and buffer.
    14453  allocator->FreeMemory(*pAllocation);
    14454  *pAllocation = VK_NULL_HANDLE;
    14455  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    14456  *pBuffer = VK_NULL_HANDLE;
    14457  return res;
    14458  }
    // Allocation failed: roll back buffer creation.
    14459  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    14460  *pBuffer = VK_NULL_HANDLE;
    14461  return res;
    14462  }
    14463  return res;
    14464 }
    14465 
    14466 void vmaDestroyBuffer(
    14467  VmaAllocator allocator,
    14468  VkBuffer buffer,
    14469  VmaAllocation allocation)
    14470 {
    14471  VMA_ASSERT(allocator);
    14472 
    14473  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    14474  {
    14475  return;
    14476  }
    14477 
    14478  VMA_DEBUG_LOG("vmaDestroyBuffer");
    14479 
    14480  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14481 
    14482 #if VMA_RECORDING_ENABLED
    14483  if(allocator->GetRecorder() != VMA_NULL)
    14484  {
    14485  allocator->GetRecorder()->RecordDestroyBuffer(
    14486  allocator->GetCurrentFrameIndex(),
    14487  allocation);
    14488  }
    14489 #endif
    14490 
    14491  if(buffer != VK_NULL_HANDLE)
    14492  {
    14493  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    14494  }
    14495 
    14496  if(allocation != VK_NULL_HANDLE)
    14497  {
    14498  allocator->FreeMemory(allocation);
    14499  }
    14500 }
    14501 
/** \brief Creates a VkImage, allocates memory for it, and binds them together.

Steps, each rolled back on failure of a later one:
1. vkCreateImage.
2. Query memory requirements (including dedicated-allocation preference) and
   allocate memory via VmaAllocator_T::AllocateMemory(), using a suballocation
   type derived from the image tiling (optimal vs. linear).
3. Bind the allocation to the image.

On any failure the partially created image/allocation are destroyed and
*pImage / *pAllocation are reset to VK_NULL_HANDLE. pAllocationInfo is
optional. Note: degenerate images (any zero extent, zero mip levels, or zero
array layers) are rejected up front with VK_ERROR_VALIDATION_FAILED_EXT
rather than being passed to the driver.
*/
    14502 VkResult vmaCreateImage(
    14503  VmaAllocator allocator,
    14504  const VkImageCreateInfo* pImageCreateInfo,
    14505  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    14506  VkImage* pImage,
    14507  VmaAllocation* pAllocation,
    14508  VmaAllocationInfo* pAllocationInfo)
    14509 {
    14510  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
    14511 
    14512  if(pImageCreateInfo->extent.width == 0 ||
    14513  pImageCreateInfo->extent.height == 0 ||
    14514  pImageCreateInfo->extent.depth == 0 ||
    14515  pImageCreateInfo->mipLevels == 0 ||
    14516  pImageCreateInfo->arrayLayers == 0)
    14517  {
    14518  return VK_ERROR_VALIDATION_FAILED_EXT;
    14519  }
    14520 
    14521  VMA_DEBUG_LOG("vmaCreateImage");
    14522 
    14523  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14524 
    14525  *pImage = VK_NULL_HANDLE;
    14526  *pAllocation = VK_NULL_HANDLE;
    14527 
    14528  // 1. Create VkImage.
    14529  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
    14530  allocator->m_hDevice,
    14531  pImageCreateInfo,
    14532  allocator->GetAllocationCallbacks(),
    14533  pImage);
    14534  if(res >= 0)
    14535  {
    // Suballocation type depends on tiling so that buffer-image granularity
    // rules can be respected inside memory blocks.
    14536  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
    14537  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
    14538  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
    14539 
    14540  // 2. Allocate memory using allocator.
    14541  VkMemoryRequirements vkMemReq = {};
    14542  bool requiresDedicatedAllocation = false;
    14543  bool prefersDedicatedAllocation = false;
    14544  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
    14545  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14546 
    14547  res = allocator->AllocateMemory(
    14548  vkMemReq,
    14549  requiresDedicatedAllocation,
    14550  prefersDedicatedAllocation,
    14551  VK_NULL_HANDLE, // dedicatedBuffer
    14552  *pImage, // dedicatedImage
    14553  *pAllocationCreateInfo,
    14554  suballocType,
    14555  pAllocation);
    14556 
    14557 #if VMA_RECORDING_ENABLED
    // NOTE: recorded regardless of the allocation result above, so
    // *pAllocation may still be VK_NULL_HANDLE at this point.
    14558  if(allocator->GetRecorder() != VMA_NULL)
    14559  {
    14560  allocator->GetRecorder()->RecordCreateImage(
    14561  allocator->GetCurrentFrameIndex(),
    14562  *pImageCreateInfo,
    14563  *pAllocationCreateInfo,
    14564  *pAllocation);
    14565  }
    14566 #endif
    14567 
    14568  if(res >= 0)
    14569  {
    14570  // 3. Bind image with memory.
    14571  res = allocator->BindImageMemory(*pAllocation, *pImage);
    14572  if(res >= 0)
    14573  {
    14574  // All steps succeeded.
    14575  #if VMA_STATS_STRING_ENABLED
    14576  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
    14577  #endif
    14578  if(pAllocationInfo != VMA_NULL)
    14579  {
    14580  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14581  }
    14582 
    14583  return VK_SUCCESS;
    14584  }
    // Bind failed: roll back allocation and image.
    14585  allocator->FreeMemory(*pAllocation);
    14586  *pAllocation = VK_NULL_HANDLE;
    14587  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    14588  *pImage = VK_NULL_HANDLE;
    14589  return res;
    14590  }
    // Allocation failed: roll back image creation.
    14591  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    14592  *pImage = VK_NULL_HANDLE;
    14593  return res;
    14594  }
    14595  return res;
    14596 }
    14597 
    14598 void vmaDestroyImage(
    14599  VmaAllocator allocator,
    14600  VkImage image,
    14601  VmaAllocation allocation)
    14602 {
    14603  VMA_ASSERT(allocator);
    14604 
    14605  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    14606  {
    14607  return;
    14608  }
    14609 
    14610  VMA_DEBUG_LOG("vmaDestroyImage");
    14611 
    14612  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14613 
    14614 #if VMA_RECORDING_ENABLED
    14615  if(allocator->GetRecorder() != VMA_NULL)
    14616  {
    14617  allocator->GetRecorder()->RecordDestroyImage(
    14618  allocator->GetCurrentFrameIndex(),
    14619  allocation);
    14620  }
    14621 #endif
    14622 
    14623  if(image != VK_NULL_HANDLE)
    14624  {
    14625  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    14626  }
    14627  if(allocation != VK_NULL_HANDLE)
    14628  {
    14629  allocator->FreeMemory(allocation);
    14630  }
    14631 }
    14632 
    14633 #endif // #ifdef VMA_IMPLEMENTATION
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
    Definition: vk_mem_alloc.h:1584
    Set this flag if the allocation should have its own memory block.
    Definition: vk_mem_alloc.h:1885
    void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
    Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
    VkPhysicalDevice physicalDevice
    Vulkan physical device.
    Definition: vk_mem_alloc.h:1641
    @@ -82,7 +82,7 @@ $(function() {
    PFN_vkMapMemory vkMapMemory
    Definition: vk_mem_alloc.h:1588
    VkDeviceMemory deviceMemory
    Handle to Vulkan memory object.
    Definition: vk_mem_alloc.h:2307
    VmaAllocatorCreateFlags flags
    Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1638
    -
    uint32_t maxAllocationsToMove
    Maximum number of allocations that can be moved to different place.
    Definition: vk_mem_alloc.h:2552
    +
    uint32_t maxAllocationsToMove
    Maximum number of allocations that can be moved to different place.
    Definition: vk_mem_alloc.h:2577
    Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
    Definition: vk_mem_alloc.h:2096
    #define VMA_RECORDING_ENABLED
    Definition: vk_mem_alloc.h:1485
    void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
    Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
    @@ -102,13 +102,13 @@ $(function() {
    VkDeviceSize allocationSizeMax
    Definition: vk_mem_alloc.h:1775
    PFN_vkBindImageMemory vkBindImageMemory
    Definition: vk_mem_alloc.h:1593
    VkDeviceSize unusedBytes
    Total number of bytes occupied by unused ranges.
    Definition: vk_mem_alloc.h:1774
    -
    Statistics returned by function vmaDefragment().
    Definition: vk_mem_alloc.h:2556
    +
    Statistics returned by function vmaDefragment().
    Definition: vk_mem_alloc.h:2581
    void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
    Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:1667
    VmaStatInfo total
    Definition: vk_mem_alloc.h:1784
    -
    uint32_t deviceMemoryBlocksFreed
    Number of empty VkDeviceMemory objects that have been released to the system.
    Definition: vk_mem_alloc.h:2564
    +
    uint32_t deviceMemoryBlocksFreed
    Number of empty VkDeviceMemory objects that have been released to the system.
    Definition: vk_mem_alloc.h:2589
    VmaAllocationCreateFlags flags
    Use VmaAllocationCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1979
    -
    VkDeviceSize maxBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2547
    +
    VkDeviceSize maxBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2572
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
    Definition: vk_mem_alloc.h:1594
    void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called after successful vkAllocateMemory.
    Definition: vk_mem_alloc.h:1519
    Represents the main object of this library, created by vmaCreateAllocator().
    @@ -131,10 +131,10 @@ $(function() {
    VmaMemoryUsage
    Definition: vk_mem_alloc.h:1820
    struct VmaAllocationInfo VmaAllocationInfo
    Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
    void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    Flushes memory of given allocation.
    -
    Optional configuration parameters to be passed to function vmaDefragment().
    Definition: vk_mem_alloc.h:2542
    +
    Optional configuration parameters to be passed to function vmaDefragment().
    Definition: vk_mem_alloc.h:2567
    struct VmaPoolCreateInfo VmaPoolCreateInfo
    Describes parameter of created VmaPool.
    void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
    Destroys VmaPool object and frees Vulkan device memory.
    -
    VkDeviceSize bytesFreed
    Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
    Definition: vk_mem_alloc.h:2560
    +
    VkDeviceSize bytesFreed
    Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
    Definition: vk_mem_alloc.h:2585
    Definition: vk_mem_alloc.h:1859
    uint32_t memoryTypeBits
    Bitmask containing one bit set for every memory type acceptable for this allocation.
    Definition: vk_mem_alloc.h:2003
    PFN_vkBindBufferMemory vkBindBufferMemory
    Definition: vk_mem_alloc.h:1592
    @@ -151,7 +151,7 @@ $(function() {
    struct VmaStatInfo VmaStatInfo
    Calculated statistics of memory usage in entire allocator.
    VkFlags VmaRecordFlags
    Definition: vk_mem_alloc.h:1617
    Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
    Definition: vk_mem_alloc.h:1551
    -
    uint32_t allocationsMoved
    Number of allocations that have been moved to different places.
    Definition: vk_mem_alloc.h:2562
    +
    uint32_t allocationsMoved
    Number of allocations that have been moved to different places.
    Definition: vk_mem_alloc.h:2587
    void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
    Creates new allocation that is in lost state from the beginning.
    VkMemoryPropertyFlags requiredFlags
    Flags that must be set in a Memory Type chosen for an allocation.
    Definition: vk_mem_alloc.h:1990
    VkDeviceSize unusedRangeSizeMax
    Size of the largest continuous free memory region available for new allocation.
    Definition: vk_mem_alloc.h:2204
    @@ -210,7 +210,7 @@ $(function() {
    VkDeviceSize offset
    Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
    Definition: vk_mem_alloc.h:2312
    Definition: vk_mem_alloc.h:1960
    Definition: vk_mem_alloc.h:1972
    -
    VkDeviceSize bytesMoved
    Total number of bytes that have been copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2558
    +
    VkDeviceSize bytesMoved
    Total number of bytes that have been copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2583
    Pointers to some Vulkan functions - a subset used by the library.
    Definition: vk_mem_alloc.h:1583
    VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
    Creates Allocator object.
    uint32_t unusedRangeCount
    Number of free ranges of memory between allocations.
    Definition: vk_mem_alloc.h:1770
    @@ -230,6 +230,7 @@ $(function() {
    struct VmaDefragmentationStats VmaDefragmentationStats
    Statistics returned by function vmaDefragment().
    PFN_vkAllocateMemory vkAllocateMemory
    Definition: vk_mem_alloc.h:1586
    Enables alternative, linear allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2113
    +
    VkResult vmaResizeAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize newSize)
    Tries to resize an allocation in place, if there is enough free memory after it.
    Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
    Definition: vk_mem_alloc.h:2293
    VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    General purpose memory allocation.
    void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
    Sets index of the current frame.
    diff --git a/src/Tests.cpp b/src/Tests.cpp index e339f9c..f7536c4 100644 --- a/src/Tests.cpp +++ b/src/Tests.cpp @@ -2711,6 +2711,159 @@ static void TestPool_SameSize() vmaDestroyPool(g_hAllocator, pool); } +static void TestResize() +{ + wprintf(L"Testing vmaResizeAllocation...\n"); + + const VkDeviceSize KILOBYTE = 1024ull; + const VkDeviceSize MEGABYTE = KILOBYTE * 1024; + + VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; + bufCreateInfo.size = 2 * MEGABYTE; + bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; + + VmaAllocationCreateInfo allocCreateInfo = {}; + allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY; + + uint32_t memTypeIndex = UINT32_MAX; + TEST( vmaFindMemoryTypeIndexForBufferInfo(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &memTypeIndex) == VK_SUCCESS ); + + VmaPoolCreateInfo poolCreateInfo = {}; + poolCreateInfo.flags = VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT; + poolCreateInfo.blockSize = 8 * MEGABYTE; + poolCreateInfo.minBlockCount = 1; + poolCreateInfo.maxBlockCount = 1; + poolCreateInfo.memoryTypeIndex = memTypeIndex; + + VmaPool pool; + TEST( vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool) == VK_SUCCESS ); + + allocCreateInfo.pool = pool; + + // Fill 8 MB pool with 4 * 2 MB allocations. + VmaAllocation allocs[4] = {}; + + VkMemoryRequirements memReq = {}; + memReq.memoryTypeBits = UINT32_MAX; + memReq.alignment = 4; + memReq.size = bufCreateInfo.size; + + VmaAllocationInfo allocInfo = {}; + + for(uint32_t i = 0; i < 4; ++i) + { + TEST( vmaAllocateMemory(g_hAllocator, &memReq, &allocCreateInfo, &allocs[i], nullptr) == VK_SUCCESS ); + } + + // Now it's: a0 2MB, a1 2MB, a2 2MB, a3 2MB + + // Case: Resize to the same size always succeeds. + { + TEST( vmaResizeAllocation(g_hAllocator, allocs[0], 2 * MEGABYTE) == VK_SUCCESS); + vmaGetAllocationInfo(g_hAllocator, allocs[3], &allocInfo); + TEST(allocInfo.size == 2ull * 1024 * 1024); + } + + // Case: Shrink allocation at the end. 
+ { + TEST( vmaResizeAllocation(g_hAllocator, allocs[3], 1 * MEGABYTE) == VK_SUCCESS ); + vmaGetAllocationInfo(g_hAllocator, allocs[3], &allocInfo); + TEST(allocInfo.size == 1ull * 1024 * 1024); + } + + // Now it's: a0 2MB, a1 2MB, a2 2MB, a3 1MB, free 1MB + + // Case: Shrink allocation before free space. + { + TEST( vmaResizeAllocation(g_hAllocator, allocs[3], 512 * KILOBYTE) == VK_SUCCESS ); + vmaGetAllocationInfo(g_hAllocator, allocs[3], &allocInfo); + TEST(allocInfo.size == 512 * KILOBYTE); + } + + // Now it's: a0 2MB, a1 2MB, a2 2MB, a3 0.5MB, free 1.5MB + + // Case: Shrink allocation before next allocation. + { + TEST( vmaResizeAllocation(g_hAllocator, allocs[0], 1 * MEGABYTE) == VK_SUCCESS ); + vmaGetAllocationInfo(g_hAllocator, allocs[0], &allocInfo); + TEST(allocInfo.size == 1 * MEGABYTE); + } + + // Now it's: a0 1MB, free 1 MB, a1 2MB, a2 2MB, a3 0.5MB, free 1.5MB + + // Case: Grow allocation while there is even more space available. + { + TEST( vmaResizeAllocation(g_hAllocator, allocs[3], 1 * MEGABYTE) == VK_SUCCESS ); + vmaGetAllocationInfo(g_hAllocator, allocs[3], &allocInfo); + TEST(allocInfo.size == 1 * MEGABYTE); + } + + // Now it's: a0 1MB, free 1 MB, a1 2MB, a2 2MB, a3 1MB, free 1MB + + // Case: Grow allocation while there is exact amount of free space available. + { + TEST( vmaResizeAllocation(g_hAllocator, allocs[0], 2 * MEGABYTE) == VK_SUCCESS ); + vmaGetAllocationInfo(g_hAllocator, allocs[0], &allocInfo); + TEST(allocInfo.size == 2 * MEGABYTE); + } + + // Now it's: a0 2MB, a1 2MB, a2 2MB, a3 1MB, free 1MB + + // Case: Fail to grow when there is not enough free space due to next allocation. + { + TEST( vmaResizeAllocation(g_hAllocator, allocs[0], 3 * MEGABYTE) == VK_ERROR_OUT_OF_POOL_MEMORY ); + vmaGetAllocationInfo(g_hAllocator, allocs[0], &allocInfo); + TEST(allocInfo.size == 2 * MEGABYTE); + } + + // Case: Fail to grow when there is not enough free space due to end of memory block. 
+ { + TEST( vmaResizeAllocation(g_hAllocator, allocs[3], 3 * MEGABYTE) == VK_ERROR_OUT_OF_POOL_MEMORY ); + vmaGetAllocationInfo(g_hAllocator, allocs[3], &allocInfo); + TEST(allocInfo.size == 1 * MEGABYTE); + } + + for(uint32_t i = 4; i--; ) + { + vmaFreeMemory(g_hAllocator, allocs[i]); + } + + vmaDestroyPool(g_hAllocator, pool); + + // Test dedicated allocation + { + VmaAllocationCreateInfo dedicatedAllocCreateInfo = {}; + dedicatedAllocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY; + dedicatedAllocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; + + VmaAllocation dedicatedAlloc = VK_NULL_HANDLE; + TEST( vmaAllocateMemory(g_hAllocator, &memReq, &dedicatedAllocCreateInfo, &dedicatedAlloc, nullptr) == VK_SUCCESS ); + + // Case: Resize to the same size always succeeds. + { + TEST( vmaResizeAllocation(g_hAllocator, dedicatedAlloc, 2 * MEGABYTE) == VK_SUCCESS); + vmaGetAllocationInfo(g_hAllocator, dedicatedAlloc, &allocInfo); + TEST(allocInfo.size == 2ull * 1024 * 1024); + } + + // Case: Shrinking fails. + { + TEST( vmaResizeAllocation(g_hAllocator, dedicatedAlloc, 1 * MEGABYTE) < VK_SUCCESS); + vmaGetAllocationInfo(g_hAllocator, dedicatedAlloc, &allocInfo); + TEST(allocInfo.size == 2ull * 1024 * 1024); + } + + // Case: Growing fails. 
+ { + TEST( vmaResizeAllocation(g_hAllocator, dedicatedAlloc, 3 * MEGABYTE) < VK_SUCCESS); + vmaGetAllocationInfo(g_hAllocator, dedicatedAlloc, &allocInfo); + TEST(allocInfo.size == 2ull * 1024 * 1024); + } + + vmaFreeMemory(g_hAllocator, dedicatedAlloc); + } +} + static bool ValidatePattern(const void* pMemory, size_t size, uint8_t pattern) { const uint8_t* pBytes = (const uint8_t*)pMemory; @@ -4275,7 +4428,7 @@ void Test() // ######################################## // ######################################## - BasicTestBuddyAllocator(); + TestResize(); return; } @@ -4287,6 +4440,7 @@ void Test() #else TestPool_SameSize(); TestHeapSizeLimit(); + TestResize(); #endif #if VMA_DEBUG_INITIALIZE_ALLOCATIONS TestAllocationsInitialization(); diff --git a/src/VmaReplay/VmaReplay.cpp b/src/VmaReplay/VmaReplay.cpp index 9635b03..a9acfa5 100644 --- a/src/VmaReplay/VmaReplay.cpp +++ b/src/VmaReplay/VmaReplay.cpp @@ -82,6 +82,7 @@ enum class VMA_FUNCTION TouchAllocation, GetAllocationInfo, MakePoolAllocationsLost, + ResizeAllocation, Count }; static const char* VMA_FUNCTION_NAMES[] = { @@ -104,6 +105,7 @@ static const char* VMA_FUNCTION_NAMES[] = { "vmaTouchAllocation", "vmaGetAllocationInfo", "vmaMakePoolAllocationsLost", + "vmaResizeAllocation", }; static_assert( _countof(VMA_FUNCTION_NAMES) == (size_t)VMA_FUNCTION::Count, @@ -143,7 +145,7 @@ static size_t g_DumpStatsAfterLineNextIndex = 0; static bool ValidateFileVersion() { if(GetVersionMajor(g_FileVersion) == 1 && - GetVersionMinor(g_FileVersion) <= 3) + GetVersionMinor(g_FileVersion) <= 4) { return true; } @@ -1015,6 +1017,7 @@ private: void ExecuteTouchAllocation(size_t lineNumber, const CsvSplit& csvSplit); void ExecuteGetAllocationInfo(size_t lineNumber, const CsvSplit& csvSplit); void ExecuteMakePoolAllocationsLost(size_t lineNumber, const CsvSplit& csvSplit); + void ExecuteResizeAllocation(size_t lineNumber, const CsvSplit& csvSplit); void DestroyAllocation(size_t lineNumber, const CsvSplit& csvSplit); }; @@ 
-1156,6 +1159,8 @@ void Player::ExecuteLine(size_t lineNumber, const StrRange& line) ExecuteGetAllocationInfo(lineNumber, csvSplit); else if(StrRangeEq(functionName, "vmaMakePoolAllocationsLost")) ExecuteMakePoolAllocationsLost(lineNumber, csvSplit); + else if(StrRangeEq(functionName, "vmaResizeAllocation")) + ExecuteResizeAllocation(lineNumber, csvSplit); else { if(IssueWarning()) @@ -2599,6 +2604,45 @@ void Player::ExecuteMakePoolAllocationsLost(size_t lineNumber, const CsvSplit& c } } +void Player::ExecuteResizeAllocation(size_t lineNumber, const CsvSplit& csvSplit) +{ + m_Stats.RegisterFunctionCall(VMA_FUNCTION::ResizeAllocation); + + if(ValidateFunctionParameterCount(lineNumber, csvSplit, 2, false)) + { + uint64_t origPtr = 0; + uint64_t newSize = 0; + + if(StrRangeToPtr(csvSplit.GetRange(FIRST_PARAM_INDEX), origPtr) && + StrRangeToUint(csvSplit.GetRange(FIRST_PARAM_INDEX + 1), newSize)) + { + if(origPtr != 0) + { + const auto it = m_Allocations.find(origPtr); + if(it != m_Allocations.end()) + { + vmaResizeAllocation(m_Allocator, it->second.allocation, newSize); + UpdateMemStats(); + } + else + { + if(IssueWarning()) + { + printf("Line %zu: Allocation %llX not found.\n", lineNumber, origPtr); + } + } + } + } + else + { + if(IssueWarning()) + { + printf("Line %zu: Invalid parameters for vmaResizeAllocation.\n", lineNumber); + } + } + } +} + //////////////////////////////////////////////////////////////////////////////// // Main functions diff --git a/src/VulkanSample.cpp b/src/VulkanSample.cpp index 99727f3..8c7b2c6 100644 --- a/src/VulkanSample.cpp +++ b/src/VulkanSample.cpp @@ -1306,6 +1306,15 @@ static void InitializeApplication() allocatorInfo.pAllocationCallbacks = &cpuAllocationCallbacks; } + // Uncomment to enable recording to CSV file. 
+ /* + { + VmaRecordSettings recordSettings = {}; + recordSettings.pFilePath = "VulkanSample.csv"; + allocatorInfo.pRecordSettings = &recordSettings; + } + */ + ERR_GUARD_VULKAN( vmaCreateAllocator(&allocatorInfo, &g_hAllocator) ); // Retrieve queue (doesn't need to be destroyed) diff --git a/src/vk_mem_alloc.h b/src/vk_mem_alloc.h index 6fb57d0..6326324 100644 --- a/src/vk_mem_alloc.h +++ b/src/vk_mem_alloc.h @@ -2374,6 +2374,31 @@ void vmaFreeMemory( VmaAllocator allocator, VmaAllocation allocation); +/** \brief Tries to resize an allocation in place, if there is enough free memory after it. + +Tries to change allocation's size without moving or reallocating it. +You can both shrink and grow allocation size. +When growing, it succeeds only when the allocation belongs to a memory block with enough +free space after it. + +Returns `VK_SUCCESS` if allocation's size has been successfully changed. +Returns `VK_ERROR_OUT_OF_POOL_MEMORY` if allocation's size could not be changed. + +After successful call to this function, VmaAllocationInfo::size of this allocation changes. +All other parameters stay the same: memory pool and type, alignment, offset, mapped pointer. + +- Calling this function on allocation that is in lost state fails with result `VK_ERROR_VALIDATION_FAILED_EXT`. +- Calling this function with `newSize` same as current allocation size does nothing and returns `VK_SUCCESS`. +- Resizing dedicated allocations, as well as allocations created in pools that use linear + or buddy algorithm, is not supported. + The function returns `VK_ERROR_FEATURE_NOT_PRESENT` in such cases. + Support may be added in the future. +*/ +VkResult vmaResizeAllocation( + VmaAllocator allocator, + VmaAllocation allocation, + VkDeviceSize newSize); + /** \brief Returns current information about specified allocation and atomically marks it as used in current frame. Current paramters of given allocation are returned in `pAllocationInfo`. 
@@ -4504,7 +4529,9 @@ public: void ChangeBlockAllocation( VmaAllocator hAllocator, VmaDeviceMemoryBlock* block, - VkDeviceSize offset); + VkDeviceSize offset); + + void ChangeSize(VkDeviceSize newSize); // pMappedData not null means allocation is created with MAPPED flag. void InitDedicatedAllocation( @@ -4766,6 +4793,9 @@ public: virtual void Free(const VmaAllocation allocation) = 0; virtual void FreeAtOffset(VkDeviceSize offset) = 0; + // Tries to resize (grow or shrink) space for given allocation, in place. + virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; } + protected: const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; } @@ -4845,6 +4875,8 @@ public: virtual void Free(const VmaAllocation allocation); virtual void FreeAtOffset(VkDeviceSize offset); + virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize); + private: uint32_t m_FreeCount; VkDeviceSize m_SumFreeSize; @@ -5597,6 +5629,10 @@ public: VmaAllocation allocation); void RecordFreeMemory(uint32_t frameIndex, VmaAllocation allocation); + void RecordResizeAllocation( + uint32_t frameIndex, + VmaAllocation allocation, + VkDeviceSize newSize); void RecordSetAllocationUserData(uint32_t frameIndex, VmaAllocation allocation, const void* pUserData); @@ -5763,6 +5799,10 @@ public: // Main deallocation function. 
void FreeMemory(const VmaAllocation allocation); + VkResult ResizeAllocation( + const VmaAllocation alloc, + VkDeviceSize newSize); + void CalculateStats(VmaStats* pStats); #if VMA_STATS_STRING_ENABLED @@ -6296,6 +6336,12 @@ void VmaAllocation_T::ChangeBlockAllocation( m_BlockAllocation.m_Offset = offset; } +void VmaAllocation_T::ChangeSize(VkDeviceSize newSize) +{ + VMA_ASSERT(newSize > 0); + m_Size = newSize; +} + VkDeviceSize VmaAllocation_T::GetOffset() const { switch(m_Type) @@ -7222,6 +7268,133 @@ void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset) VMA_ASSERT(0 && "Not found!"); } +bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) +{ + typedef VmaSuballocationList::iterator iter_type; + for(iter_type suballocItem = m_Suballocations.begin(); + suballocItem != m_Suballocations.end(); + ++suballocItem) + { + VmaSuballocation& suballoc = *suballocItem; + if(suballoc.hAllocation == alloc) + { + iter_type nextItem = suballocItem; + ++nextItem; + + // Should have been ensured on higher level. + VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0); + + // Shrinking. + if(newSize < alloc->GetSize()) + { + const VkDeviceSize sizeDiff = suballoc.size - newSize; + + // There is next item. + if(nextItem != m_Suballocations.end()) + { + // Next item is free. + if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE) + { + // Grow this next item backward. + UnregisterFreeSuballocation(nextItem); + nextItem->offset -= sizeDiff; + nextItem->size += sizeDiff; + RegisterFreeSuballocation(nextItem); + } + // Next item is not free. + else + { + // Create free item after current one. 
+ VmaSuballocation newFreeSuballoc; + newFreeSuballoc.hAllocation = VK_NULL_HANDLE; + newFreeSuballoc.offset = suballoc.offset + newSize; + newFreeSuballoc.size = sizeDiff; + newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE; + iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc); + RegisterFreeSuballocation(newFreeSuballocIt); + + ++m_FreeCount; + } + } + // This is the last item. + else + { + // Create free item at the end. + VmaSuballocation newFreeSuballoc; + newFreeSuballoc.hAllocation = VK_NULL_HANDLE; + newFreeSuballoc.offset = suballoc.offset + newSize; + newFreeSuballoc.size = sizeDiff; + newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE; + m_Suballocations.push_back(newFreeSuballoc); + + iter_type newFreeSuballocIt = m_Suballocations.end(); + RegisterFreeSuballocation(--newFreeSuballocIt); + + ++m_FreeCount; + } + + suballoc.size = newSize; + m_SumFreeSize += sizeDiff; + } + // Growing. + else + { + const VkDeviceSize sizeDiff = newSize - suballoc.size; + + // There is next item. + if(nextItem != m_Suballocations.end()) + { + // Next item is free. + if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE) + { + // There is not enough free space, including margin. + if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN) + { + return false; + } + + // There is more free space than required. + if(nextItem->size > sizeDiff) + { + // Move and shrink this next item. + UnregisterFreeSuballocation(nextItem); + nextItem->offset += sizeDiff; + nextItem->size -= sizeDiff; + RegisterFreeSuballocation(nextItem); + } + // There is exactly the amount of free space required. + else + { + // Remove this next free item. + UnregisterFreeSuballocation(nextItem); + m_Suballocations.erase(nextItem); + --m_FreeCount; + } + } + // Next item is not free - there is no space to grow. + else + { + return false; + } + } + // This is the last item - there is no space to grow. 
+ else + { + return false; + } + + suballoc.size = newSize; + m_SumFreeSize -= sizeDiff; + } + + // We cannot call Validate() here because alloc object is updated to new size outside of this call. + return true; + } + } + VMA_ASSERT(0 && "Not found!"); + return false; +} + bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const { VkDeviceSize lastSize = 0; @@ -11368,7 +11541,7 @@ VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex) // Write header. fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording"); - fprintf(m_File, "%s\n", "1,3"); + fprintf(m_File, "%s\n", "1,4"); return VK_SUCCESS; } @@ -11524,6 +11697,20 @@ void VmaRecorder::RecordFreeMemory(uint32_t frameIndex, Flush(); } +void VmaRecorder::RecordResizeAllocation( + uint32_t frameIndex, + VmaAllocation allocation, + VkDeviceSize newSize) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex, + allocation, newSize); + Flush(); +} + void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex, VmaAllocation allocation, const void* pUserData) @@ -12487,6 +12674,40 @@ void VmaAllocator_T::FreeMemory(const VmaAllocation allocation) vma_delete(this, allocation); } +VkResult VmaAllocator_T::ResizeAllocation( + const VmaAllocation alloc, + VkDeviceSize newSize) +{ + if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST) + { + return VK_ERROR_VALIDATION_FAILED_EXT; + } + if(newSize == alloc->GetSize()) + { + return VK_SUCCESS; + } + + switch(alloc->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + return VK_ERROR_FEATURE_NOT_PRESENT; + case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize)) + { + alloc->ChangeSize(newSize); + VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate()); + return VK_SUCCESS; 
+ } + else + { + return VK_ERROR_OUT_OF_POOL_MEMORY; + } + default: + VMA_ASSERT(0); + return VK_ERROR_VALIDATION_FAILED_EXT; + } +} + void VmaAllocator_T::CalculateStats(VmaStats* pStats) { // Initialize. @@ -13889,6 +14110,30 @@ void vmaFreeMemory( allocator->FreeMemory(allocation); } +VkResult vmaResizeAllocation( + VmaAllocator allocator, + VmaAllocation allocation, + VkDeviceSize newSize) +{ + VMA_ASSERT(allocator && allocation); + + VMA_DEBUG_LOG("vmaResizeAllocation"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordResizeAllocation( + allocator->GetCurrentFrameIndex(), + allocation, + newSize); + } +#endif + + return allocator->ResizeAllocation(allocation, newSize); +} + void vmaGetAllocationInfo( VmaAllocator allocator, VmaAllocation allocation,