diff --git a/docs/Recording file format.md b/docs/Recording file format.md
index 7fa1344..64e3c35 100644
--- a/docs/Recording file format.md
+++ b/docs/Recording file format.md
@@ -271,7 +271,7 @@ An ordered sequence of values of some type, separated by single space.
 # Example file
     Vulkan Memory Allocator,Calls recording
-    1,5
+    1,6
     Config,Begin
     PhysicalDevice,apiVersion,4198477
     PhysicalDevice,driverVersion,8388653
@@ -299,6 +299,7 @@ An ordered sequence of values of some type, separated by single space.
     PhysicalDeviceMemory,Type,3,heapIndex,1
     PhysicalDeviceMemory,Type,3,propertyFlags,14
     Extension,VK_KHR_dedicated_allocation,1
+    Extension,VK_KHR_bind_memory2,1
     Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,0
     Macro,VMA_DEBUG_ALIGNMENT,1
     Macro,VMA_DEBUG_MARGIN,0
diff --git a/docs/html/choosing_memory_type.html b/docs/html/choosing_memory_type.html
index 5395092..623d20e 100644
--- a/docs/html/choosing_memory_type.html
+++ b/docs/html/choosing_memory_type.html
@@ -73,7 +73,7 @@
  1. If you just want to find a memory type index that meets your requirements, you can use one of the functions: vmaFindMemoryTypeIndex(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo().
  2. If you want to allocate a region of device memory without association with any specific image or buffer, you can use function vmaAllocateMemory(). Usage of this function is not recommended and usually not needed. The vmaAllocateMemoryPages() function is also provided for creating multiple allocations at once, which may be useful for sparse binding.
- 3. If you already have a buffer or an image created, want to allocate memory for it, and will then bind it yourself, you can use vmaAllocateMemoryForBuffer() or vmaAllocateMemoryForImage(). For binding you should use the functions vmaBindBufferMemory() and vmaBindImageMemory().
+ 3. If you already have a buffer or an image created, want to allocate memory for it, and will then bind it yourself, you can use vmaAllocateMemoryForBuffer() or vmaAllocateMemoryForImage(). For binding you should use the functions vmaBindBufferMemory() and vmaBindImageMemory(), or their extended versions: vmaBindBufferMemory2() and vmaBindImageMemory2().
  4. If you want to create a buffer or an image, allocate memory for it, and bind them together, all in one call, you can use vmaCreateBuffer() or vmaCreateImage(). This is the easiest and recommended way to use this library (a minimal sketch follows below).

When using 3. or 4., the library internally queries Vulkan for memory types supported for that buffer or image (function vkGetBufferMemoryRequirements()) and uses only one of these types.
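For illustration, here is a minimal sketch of the recommended path (4.), assuming a VmaAllocator was already created with vmaCreateAllocator(); the buffer size, usage flags, and function name are illustrative placeholders:

```cpp
#include "vk_mem_alloc.h"

// Creates a buffer, allocates memory for it, and binds them together in one
// call. vmaCreateBuffer() picks a suitable memory type internally.
VkResult CreateStagingBuffer(VmaAllocator allocator,
                             VkBuffer* pBuffer, VmaAllocation* pAllocation)
{
    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;                             // example size
    bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; // example usage

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY; // host-visible, mappable

    return vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
        pBuffer, pAllocation, nullptr /*pAllocationInfo, optional*/);
}
```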

diff --git a/docs/html/globals.html b/docs/html/globals.html
index d755ab2..5523d92 100644
--- a/docs/html/globals.html
+++ b/docs/html/globals.html
@@ -135,9 +135,15 @@ $(function() {
  • VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM : vk_mem_alloc.h
  • +
  • VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT +: vk_mem_alloc.h +
  • VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT : vk_mem_alloc.h
  • +
  • VMA_BIND_MEMORY2 +: vk_mem_alloc.h +
  • VMA_DEDICATED_ALLOCATION : vk_mem_alloc.h
@@ -225,9 +231,15 @@ $(function() {
  • vmaBindBufferMemory() : vk_mem_alloc.h
  • +
  • vmaBindBufferMemory2() +: vk_mem_alloc.h +
  • vmaBindImageMemory() : vk_mem_alloc.h
  • +
  • vmaBindImageMemory2() +: vk_mem_alloc.h +
  • vmaBuildStatsString() : vk_mem_alloc.h
@@ -343,7 +355,7 @@ $(function() {
 : vk_mem_alloc.h
  • VmaPoolCreateFlagBits -: vk_mem_alloc.h +: vk_mem_alloc.h
  • VmaPoolCreateFlags : vk_mem_alloc.h
diff --git a/docs/html/globals_defs.html b/docs/html/globals_defs.html
index a81f494..1bdce46 100644
--- a/docs/html/globals_defs.html
+++ b/docs/html/globals_defs.html
@@ -65,6 +65,9 @@ $(function() {
  • NOMINMAX : vk_mem_alloc.h
  • +
  • VMA_BIND_MEMORY2 +: vk_mem_alloc.h +
  • VMA_DEDICATED_ALLOCATION : vk_mem_alloc.h
diff --git a/docs/html/globals_eval.html b/docs/html/globals_eval.html
index 49662c4..127887a 100644
--- a/docs/html/globals_eval.html
+++ b/docs/html/globals_eval.html
@@ -118,6 +118,9 @@ $(function() {
  • VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM : vk_mem_alloc.h
  • +
  • VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT +: vk_mem_alloc.h +
  • VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT : vk_mem_alloc.h
diff --git a/docs/html/globals_func.html b/docs/html/globals_func.html
index 5d669a7..eac5b52 100644
--- a/docs/html/globals_func.html
+++ b/docs/html/globals_func.html
@@ -79,9 +79,15 @@ $(function() {
  • vmaBindBufferMemory() : vk_mem_alloc.h
  • +
  • vmaBindBufferMemory2() +: vk_mem_alloc.h +
  • vmaBindImageMemory() : vk_mem_alloc.h
  • +
  • vmaBindImageMemory2() +: vk_mem_alloc.h +
  • vmaBuildStatsString() : vk_mem_alloc.h
  • diff --git a/docs/html/search/all_10.js b/docs/html/search/all_10.js index c8fa58c..c3722a9 100644 --- a/docs/html/search/all_10.js +++ b/docs/html/search/all_10.js @@ -38,7 +38,9 @@ var searchData= ['vma_5fallocation_5fcreate_5fuser_5fdata_5fcopy_5fstring_5fbit',['VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT',['../vk__mem__alloc_8h.html#ad9889c10c798b040d59c92f257cae597aa6f24f821cd6a7c5e4a443f7bf59c520',1,'vk_mem_alloc.h']]], ['vma_5fallocator_5fcreate_5fexternally_5fsynchronized_5fbit',['VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT',['../vk__mem__alloc_8h.html#a4f87c9100d154a65a4ad495f7763cf7ca4816ddaed324ba110172ca608a20f29d',1,'vk_mem_alloc.h']]], ['vma_5fallocator_5fcreate_5fflag_5fbits_5fmax_5fenum',['VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM',['../vk__mem__alloc_8h.html#a4f87c9100d154a65a4ad495f7763cf7cae4d5ad929caba5f23eb502b13bd5286c',1,'vk_mem_alloc.h']]], + ['vma_5fallocator_5fcreate_5fkhr_5fbind_5fmemory2_5fbit',['VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT',['../vk__mem__alloc_8h.html#a4f87c9100d154a65a4ad495f7763cf7ca8fb75bf07cd184ab903596295e863dee',1,'vk_mem_alloc.h']]], ['vma_5fallocator_5fcreate_5fkhr_5fdedicated_5fallocation_5fbit',['VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT',['../vk__mem__alloc_8h.html#a4f87c9100d154a65a4ad495f7763cf7cace7da7cc6e71a625dfa763c55a597878',1,'vk_mem_alloc.h']]], + ['vma_5fbind_5fmemory2',['VMA_BIND_MEMORY2',['../vk__mem__alloc_8h.html#a88bef97f86d70a34a4c0746e09a2680d',1,'vk_mem_alloc.h']]], ['vma_5fdedicated_5fallocation',['VMA_DEDICATED_ALLOCATION',['../vk__mem__alloc_8h.html#af7b860e63b96d11e44ae8587ba06bbf4',1,'vk_mem_alloc.h']]], ['vma_5fdefragmentation_5fflag_5fbits_5fmax_5fenum',['VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM',['../vk__mem__alloc_8h.html#a6552a65b71d16f378c6994b3ceaef50cab87ec33154803bfeb5ac2b379f1d6a97',1,'vk_mem_alloc.h']]], ['vma_5fmemory_5fusage_5fcpu_5fonly',['VMA_MEMORY_USAGE_CPU_ONLY',['../vk__mem__alloc_8h.html#aa5846affa1e9da3800e3e78fae2305cca40bdf4cddeffeb12f43d45ca1286e0a5',1,'vk_mem_alloc.h']]], @@ -70,7 +72,9 @@ var searchData= ['vmaallocatorcreateflags',['VmaAllocatorCreateFlags',['../vk__mem__alloc_8h.html#acfe6863e160722c2c1bbcf7573fddc4d',1,'vk_mem_alloc.h']]], ['vmaallocatorcreateinfo',['VmaAllocatorCreateInfo',['../struct_vma_allocator_create_info.html',1,'VmaAllocatorCreateInfo'],['../vk__mem__alloc_8h.html#ae0f6d1d733dded220d28134da46b4283',1,'VmaAllocatorCreateInfo(): vk_mem_alloc.h']]], ['vmabindbuffermemory',['vmaBindBufferMemory',['../vk__mem__alloc_8h.html#a6b0929b914b60cf2d45cac4bf3547470',1,'vk_mem_alloc.h']]], + ['vmabindbuffermemory2',['vmaBindBufferMemory2',['../vk__mem__alloc_8h.html#a927c944f45e0f2941182abb6f608e64a',1,'vk_mem_alloc.h']]], ['vmabindimagememory',['vmaBindImageMemory',['../vk__mem__alloc_8h.html#a3d3ca45799923aa5d138e9e5f9eb2da5',1,'vk_mem_alloc.h']]], + ['vmabindimagememory2',['vmaBindImageMemory2',['../vk__mem__alloc_8h.html#aa8251ee81b0045a443e35b8e8aa021bc',1,'vk_mem_alloc.h']]], ['vmabuildstatsstring',['vmaBuildStatsString',['../vk__mem__alloc_8h.html#aa4fee7eb5253377599ef4fd38c93c2a0',1,'vk_mem_alloc.h']]], ['vmacalculatestats',['vmaCalculateStats',['../vk__mem__alloc_8h.html#a333b61c1788cb23559177531e6a93ca3',1,'vk_mem_alloc.h']]], ['vmacheckcorruption',['vmaCheckCorruption',['../vk__mem__alloc_8h.html#a49329a7f030dafcf82f7b73334c22e98',1,'vk_mem_alloc.h']]], diff --git a/docs/html/search/defines_1.js b/docs/html/search/defines_1.js index bf18592..597da07 100644 --- a/docs/html/search/defines_1.js +++ b/docs/html/search/defines_1.js @@ -1,5 +1,6 @@ var 
searchData= [ + ['vma_5fbind_5fmemory2',['VMA_BIND_MEMORY2',['../vk__mem__alloc_8h.html#a88bef97f86d70a34a4c0746e09a2680d',1,'vk_mem_alloc.h']]], ['vma_5fdedicated_5fallocation',['VMA_DEDICATED_ALLOCATION',['../vk__mem__alloc_8h.html#af7b860e63b96d11e44ae8587ba06bbf4',1,'vk_mem_alloc.h']]], ['vma_5frecording_5fenabled',['VMA_RECORDING_ENABLED',['../vk__mem__alloc_8h.html#a1f0c126759fc96ccb6e2d23c101d770c',1,'vk_mem_alloc.h']]], ['vma_5fstats_5fstring_5fenabled',['VMA_STATS_STRING_ENABLED',['../vk__mem__alloc_8h.html#ae25f0d55fd91cb166f002b63244800e1',1,'vk_mem_alloc.h']]] diff --git a/docs/html/search/enumvalues_0.js b/docs/html/search/enumvalues_0.js index 6353a4c..98e73f3 100644 --- a/docs/html/search/enumvalues_0.js +++ b/docs/html/search/enumvalues_0.js @@ -18,6 +18,7 @@ var searchData= ['vma_5fallocation_5fcreate_5fuser_5fdata_5fcopy_5fstring_5fbit',['VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT',['../vk__mem__alloc_8h.html#ad9889c10c798b040d59c92f257cae597aa6f24f821cd6a7c5e4a443f7bf59c520',1,'vk_mem_alloc.h']]], ['vma_5fallocator_5fcreate_5fexternally_5fsynchronized_5fbit',['VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT',['../vk__mem__alloc_8h.html#a4f87c9100d154a65a4ad495f7763cf7ca4816ddaed324ba110172ca608a20f29d',1,'vk_mem_alloc.h']]], ['vma_5fallocator_5fcreate_5fflag_5fbits_5fmax_5fenum',['VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM',['../vk__mem__alloc_8h.html#a4f87c9100d154a65a4ad495f7763cf7cae4d5ad929caba5f23eb502b13bd5286c',1,'vk_mem_alloc.h']]], + ['vma_5fallocator_5fcreate_5fkhr_5fbind_5fmemory2_5fbit',['VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT',['../vk__mem__alloc_8h.html#a4f87c9100d154a65a4ad495f7763cf7ca8fb75bf07cd184ab903596295e863dee',1,'vk_mem_alloc.h']]], ['vma_5fallocator_5fcreate_5fkhr_5fdedicated_5fallocation_5fbit',['VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT',['../vk__mem__alloc_8h.html#a4f87c9100d154a65a4ad495f7763cf7cace7da7cc6e71a625dfa763c55a597878',1,'vk_mem_alloc.h']]], ['vma_5fdefragmentation_5fflag_5fbits_5fmax_5fenum',['VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM',['../vk__mem__alloc_8h.html#a6552a65b71d16f378c6994b3ceaef50cab87ec33154803bfeb5ac2b379f1d6a97',1,'vk_mem_alloc.h']]], ['vma_5fmemory_5fusage_5fcpu_5fonly',['VMA_MEMORY_USAGE_CPU_ONLY',['../vk__mem__alloc_8h.html#aa5846affa1e9da3800e3e78fae2305cca40bdf4cddeffeb12f43d45ca1286e0a5',1,'vk_mem_alloc.h']]], diff --git a/docs/html/search/functions_0.js b/docs/html/search/functions_0.js index 708afb4..746638b 100644 --- a/docs/html/search/functions_0.js +++ b/docs/html/search/functions_0.js @@ -5,7 +5,9 @@ var searchData= ['vmaallocatememoryforimage',['vmaAllocateMemoryForImage',['../vk__mem__alloc_8h.html#a0faa3f9e5fb233d29d1e00390650febb',1,'vk_mem_alloc.h']]], ['vmaallocatememorypages',['vmaAllocateMemoryPages',['../vk__mem__alloc_8h.html#ad37e82e492b3de38fc3f4cffd9ad0ae1',1,'vk_mem_alloc.h']]], ['vmabindbuffermemory',['vmaBindBufferMemory',['../vk__mem__alloc_8h.html#a6b0929b914b60cf2d45cac4bf3547470',1,'vk_mem_alloc.h']]], + ['vmabindbuffermemory2',['vmaBindBufferMemory2',['../vk__mem__alloc_8h.html#a927c944f45e0f2941182abb6f608e64a',1,'vk_mem_alloc.h']]], ['vmabindimagememory',['vmaBindImageMemory',['../vk__mem__alloc_8h.html#a3d3ca45799923aa5d138e9e5f9eb2da5',1,'vk_mem_alloc.h']]], + ['vmabindimagememory2',['vmaBindImageMemory2',['../vk__mem__alloc_8h.html#aa8251ee81b0045a443e35b8e8aa021bc',1,'vk_mem_alloc.h']]], ['vmabuildstatsstring',['vmaBuildStatsString',['../vk__mem__alloc_8h.html#aa4fee7eb5253377599ef4fd38c93c2a0',1,'vk_mem_alloc.h']]], 
['vmacalculatestats',['vmaCalculateStats',['../vk__mem__alloc_8h.html#a333b61c1788cb23559177531e6a93ca3',1,'vk_mem_alloc.h']]], ['vmacheckcorruption',['vmaCheckCorruption',['../vk__mem__alloc_8h.html#a49329a7f030dafcf82f7b73334c22e98',1,'vk_mem_alloc.h']]], diff --git a/docs/html/vk__mem__alloc_8h.html b/docs/html/vk__mem__alloc_8h.html index aea8161..3f9d4bd 100644 --- a/docs/html/vk__mem__alloc_8h.html +++ b/docs/html/vk__mem__alloc_8h.html @@ -124,6 +124,8 @@ Macros   #define VMA_DEDICATED_ALLOCATION   0   +#define VMA_BIND_MEMORY2   0 +  #define VMA_STATS_STRING_ENABLED   1   @@ -205,6 +207,7 @@ Typedefs Enumerations @@ -374,9 +377,15 @@ Functions + + + + + + @@ -402,6 +411,20 @@ Functions
    enum  VmaAllocatorCreateFlagBits { VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001, VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002, +VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT = 0x00000004, VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF }
     Flags for created VmaAllocator. More...
    VkResult vmaBindBufferMemory (VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
     Binds buffer to allocation. More...
     
    VkResult vmaBindBufferMemory2 (VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize allocationLocalOffset, VkBuffer buffer, const void *pNext)
     Binds buffer to allocation with additional parameters. More...
     
    VkResult vmaBindImageMemory (VmaAllocator allocator, VmaAllocation allocation, VkImage image)
     Binds image to allocation. More...
     
    VkResult vmaBindImageMemory2 (VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize allocationLocalOffset, VkImage image, const void *pNext)
     Binds image to allocation with additional parameters. More...
     
    VkResult vmaCreateBuffer (VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
     
    void vmaDestroyBuffer (VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)

    ◆ VMA_BIND_MEMORY2

#define VMA_BIND_MEMORY2   0
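Like VMA_DEDICATED_ALLOCATION in the configuration section below, this macro is presumably meant to be overridable before including the header; a sketch of such an override (an assumption, mirroring the VMA_DEDICATED_ALLOCATION pattern):

```cpp
// Assumed usage: force-enable VK_KHR_bind_memory2 support before the header's
// own auto-detection runs (define to 0 to force-disable instead).
#define VMA_BIND_MEMORY2 1
#include "vk_mem_alloc.h"
```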
    @@ -947,6 +970,10 @@ Functions

    vkBindBufferMemory(): Binding memory to buffer 0x2d but vkGetBufferMemoryRequirements() has not been called on that buffer.

    +VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT 

    Enables usage of VK_KHR_bind_memory2 extension.

    +

You may set this flag only if you have checked that this device extension is supported, you enabled it while creating the Vulkan device passed as VmaAllocatorCreateInfo::device, and you want it to be used internally by this library.

    +

The extension provides the functions vkBindBufferMemory2KHR and vkBindImageMemory2KHR, which allow passing a chain of pNext structures while binding. This flag is required if you use the pNext parameter in vmaBindBufferMemory2() or vmaBindImageMemory2().
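A minimal sketch of setting this flag at allocator creation, assuming VK_KHR_bind_memory2 was found via vkEnumerateDeviceExtensionProperties() and enabled in ppEnabledExtensionNames when the device was created:

```cpp
VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice; // your VkPhysicalDevice
allocatorInfo.device = device; // created with VK_KHR_bind_memory2 enabled
allocatorInfo.flags = VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT;

VmaAllocator allocator;
VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
```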

VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM 
@@ -1333,6 +1360,63 @@ Functions

    Binds specified buffer to region of memory represented by specified allocation. Gets VkDeviceMemory handle and offset from the allocation. If you want to create a buffer, allocate memory for it and bind them together separately, you should use this function for binding instead of standard vkBindBufferMemory(), because it ensures proper synchronization so that when a VkDeviceMemory object is used by multiple allocations, calls to vkBind*Memory() or vkMapMemory() won't happen from multiple threads simultaneously (which is illegal in Vulkan).

    It is recommended to use function vmaCreateBuffer() instead of this one.
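A minimal sketch of this separate create + allocate + bind path, assuming device, allocator, and a filled VkBufferCreateInfo bufCreateInfo are available from earlier:

```cpp
VkBuffer buf;
VkResult res = vkCreateBuffer(device, &bufCreateInfo, nullptr, &buf);

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

// Allocate memory suitable for this buffer, then bind through the library
// (not vkBindBufferMemory()) so access to the underlying VkDeviceMemory
// block is properly synchronized.
VmaAllocation alloc;
res = vmaAllocateMemoryForBuffer(allocator, buf, &allocCreateInfo, &alloc, nullptr);
res = vmaBindBufferMemory(allocator, alloc, buf);
```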


    ◆ vmaBindBufferMemory2()

VkResult vmaBindBufferMemory2 (VmaAllocator allocator,
    VmaAllocation allocation,
    VkDeviceSize allocationLocalOffset,
    VkBuffer buffer,
    const void * pNext)

    Binds buffer to allocation with additional parameters.

Parameters
    allocationLocalOffset   Additional offset to be added while binding, relative to the beginning of the allocation. Normally it should be 0.
    pNext   A chain of structures to be attached to the VkBindBufferMemoryInfoKHR structure used internally. Normally it should be null.

    This function is similar to vmaBindBufferMemory(), but it provides additional parameters.

    +

If pNext is not null, the VmaAllocator object must have been created with the VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag. Otherwise the call fails.

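A minimal usage sketch, continuing the buf and alloc from the vmaBindBufferMemory() example above:

```cpp
// Bind at the start of the allocation; a non-null pNext would require the
// allocator to have been created with VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT.
VkResult res = vmaBindBufferMemory2(
    allocator, alloc, 0 /*allocationLocalOffset*/, buf, nullptr /*pNext*/);
```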
    @@ -1371,6 +1455,63 @@ Functions

    Binds specified image to region of memory represented by specified allocation. Gets VkDeviceMemory handle and offset from the allocation. If you want to create an image, allocate memory for it and bind them together separately, you should use this function for binding instead of standard vkBindImageMemory(), because it ensures proper synchronization so that when a VkDeviceMemory object is used by multiple allocations, calls to vkBind*Memory() or vkMapMemory() won't happen from multiple threads simultaneously (which is illegal in Vulkan).

    It is recommended to use function vmaCreateImage() instead of this one.


    ◆ vmaBindImageMemory2()

VkResult vmaBindImageMemory2 (VmaAllocator allocator,
    VmaAllocation allocation,
    VkDeviceSize allocationLocalOffset,
    VkImage image,
    const void * pNext)

    Binds image to allocation with additional parameters.

Parameters
    allocationLocalOffset   Additional offset to be added while binding, relative to the beginning of the allocation. Normally it should be 0.
    pNext   A chain of structures to be attached to the VkBindImageMemoryInfoKHR structure used internally. Normally it should be null.

    This function is similar to vmaBindImageMemory(), but it provides additional parameters.

    +

If pNext is not null, the VmaAllocator object must have been created with the VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag. Otherwise the call fails.

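The image variant mirrors the buffer one; a minimal sketch, assuming image and alloc came from vkCreateImage() and vmaAllocateMemoryForImage():

```cpp
VkResult res = vmaBindImageMemory2(
    allocator, alloc, 0 /*allocationLocalOffset*/, image, nullptr /*pNext*/);
if(res != VK_SUCCESS)
{
    // binding failed - handle the error
}
```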
diff --git a/docs/html/vk__mem__alloc_8h_source.html b/docs/html/vk__mem__alloc_8h_source.html
index a64056e..bfa03f5 100644
--- a/docs/html/vk__mem__alloc_8h_source.html
+++ b/docs/html/vk__mem__alloc_8h_source.html
@@ -65,212 +65,215 @@ $(function() {
    vk_mem_alloc.h
    -Go to the documentation of this file.
    1 //
    2 // Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    1677 /*
    1678 Define this macro to 0/1 to disable/enable support for recording functionality,
    1679 available through VmaAllocatorCreateInfo::pRecordSettings.
    1680 */
    1681 #ifndef VMA_RECORDING_ENABLED
    1682  #ifdef _WIN32
    1683  #define VMA_RECORDING_ENABLED 1
    1684  #else
    1685  #define VMA_RECORDING_ENABLED 0
    1686  #endif
    1687 #endif
    1688 
    1689 #ifndef NOMINMAX
    1690  #define NOMINMAX // For windows.h
    1691 #endif
    1692 
    1693 #ifndef VULKAN_H_
    1694  #include <vulkan/vulkan.h>
    1695 #endif
    1696 
    1697 #if VMA_RECORDING_ENABLED
    1698  #include <windows.h>
    1699 #endif
    1700 
    1701 #if !defined(VMA_DEDICATED_ALLOCATION)
    1702  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
    1703  #define VMA_DEDICATED_ALLOCATION 1
    1704  #else
    1705  #define VMA_DEDICATED_ALLOCATION 0
    1706  #endif
    1707 #endif
    1708 
    1718 VK_DEFINE_HANDLE(VmaAllocator)
    1719 
    1720 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    1722  VmaAllocator allocator,
    1723  uint32_t memoryType,
    1724  VkDeviceMemory memory,
    1725  VkDeviceSize size);
    1727 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    1728  VmaAllocator allocator,
    1729  uint32_t memoryType,
    1730  VkDeviceMemory memory,
    1731  VkDeviceSize size);
    1732 
    1746 
    1776 
    1779 typedef VkFlags VmaAllocatorCreateFlags;
    1780 
    1785 typedef struct VmaVulkanFunctions {
    1786  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    1787  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    1788  PFN_vkAllocateMemory vkAllocateMemory;
    1789  PFN_vkFreeMemory vkFreeMemory;
    1790  PFN_vkMapMemory vkMapMemory;
    1791  PFN_vkUnmapMemory vkUnmapMemory;
    1792  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    1793  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    1794  PFN_vkBindBufferMemory vkBindBufferMemory;
    1795  PFN_vkBindImageMemory vkBindImageMemory;
    1796  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    1797  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    1798  PFN_vkCreateBuffer vkCreateBuffer;
    1799  PFN_vkDestroyBuffer vkDestroyBuffer;
    1800  PFN_vkCreateImage vkCreateImage;
    1801  PFN_vkDestroyImage vkDestroyImage;
    1802  PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
    1803 #if VMA_DEDICATED_ALLOCATION
    1804  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    1805  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
    1806 #endif
    1808 
    1810 typedef enum VmaRecordFlagBits {
    1817 
    1820 typedef VkFlags VmaRecordFlags;
    1821 
    1823 typedef struct VmaRecordSettings
    1824 {
    1834  const char* pFilePath;
    1836 
    1839 {
    1843 
    1844  VkPhysicalDevice physicalDevice;
    1846 
    1847  VkDevice device;
    1849 
    1852 
    1853  const VkAllocationCallbacks* pAllocationCallbacks;
    1855 
    1895  const VkDeviceSize* pHeapSizeLimit;
    1916 
    1918 VkResult vmaCreateAllocator(
    1919  const VmaAllocatorCreateInfo* pCreateInfo,
    1920  VmaAllocator* pAllocator);
    1921 
    1923 void vmaDestroyAllocator(
    1924  VmaAllocator allocator);
    1925 
    1931  VmaAllocator allocator,
    1932  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    1933 
    1939  VmaAllocator allocator,
    1940  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    1941 
    1949  VmaAllocator allocator,
    1950  uint32_t memoryTypeIndex,
    1951  VkMemoryPropertyFlags* pFlags);
    1952 
    1962  VmaAllocator allocator,
    1963  uint32_t frameIndex);
    1964 
    1967 typedef struct VmaStatInfo
    1968 {
    1970  uint32_t blockCount;
    1976  VkDeviceSize usedBytes;
    1978  VkDeviceSize unusedBytes;
    1981 } VmaStatInfo;
    1982 
    1984 typedef struct VmaStats
    1985 {
    1986  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    1987  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    1989 } VmaStats;
    1990 
    1992 void vmaCalculateStats(
    1993  VmaAllocator allocator,
    1994  VmaStats* pStats);
    1995 
    1996 #ifndef VMA_STATS_STRING_ENABLED
    1997 #define VMA_STATS_STRING_ENABLED 1
    1998 #endif
    1999 
    2000 #if VMA_STATS_STRING_ENABLED
    2001 
    2003 
    2005 void vmaBuildStatsString(
    2006  VmaAllocator allocator,
    2007  char** ppStatsString,
    2008  VkBool32 detailedMap);
    2009 
    2010 void vmaFreeStatsString(
    2011  VmaAllocator allocator,
    2012  char* pStatsString);
    2013 
    2014 #endif // #if VMA_STATS_STRING_ENABLED
    2015 
    2024 VK_DEFINE_HANDLE(VmaPool)
    2025 
    2026 typedef enum VmaMemoryUsage
    2027 {
    2076 } VmaMemoryUsage;
    2077 
    2087 
    2148 
    2164 
    2174 
    2181 
    2185 
    2187 {
    2200  VkMemoryPropertyFlags requiredFlags;
    2205  VkMemoryPropertyFlags preferredFlags;
    2213  uint32_t memoryTypeBits;
    2226  void* pUserData;
    2228 
    2245 VkResult vmaFindMemoryTypeIndex(
    2246  VmaAllocator allocator,
    2247  uint32_t memoryTypeBits,
    2248  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2249  uint32_t* pMemoryTypeIndex);
    2250 
    2264  VmaAllocator allocator,
    2265  const VkBufferCreateInfo* pBufferCreateInfo,
    2266  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2267  uint32_t* pMemoryTypeIndex);
    2268 
    2282  VmaAllocator allocator,
    2283  const VkImageCreateInfo* pImageCreateInfo,
    2284  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2285  uint32_t* pMemoryTypeIndex);
    2286 
    2307 
    2324 
    2335 
    2341 
    2344 typedef VkFlags VmaPoolCreateFlags;
    2345 
    2348 typedef struct VmaPoolCreateInfo {
    2363  VkDeviceSize blockSize;
    2392 
    2395 typedef struct VmaPoolStats {
    2398  VkDeviceSize size;
    2401  VkDeviceSize unusedSize;
    2414  VkDeviceSize unusedRangeSizeMax;
    2417  size_t blockCount;
    2418 } VmaPoolStats;
    2419 
    2426 VkResult vmaCreatePool(
    2427  VmaAllocator allocator,
    2428  const VmaPoolCreateInfo* pCreateInfo,
    2429  VmaPool* pPool);
    2430 
    2433 void vmaDestroyPool(
    2434  VmaAllocator allocator,
    2435  VmaPool pool);
    2436 
    2443 void vmaGetPoolStats(
    2444  VmaAllocator allocator,
    2445  VmaPool pool,
    2446  VmaPoolStats* pPoolStats);
    2447 
    2455  VmaAllocator allocator,
    2456  VmaPool pool,
    2457  size_t* pLostAllocationCount);
    2458 
    2473 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
    2474 
    2499 VK_DEFINE_HANDLE(VmaAllocation)
    2500 
    2501 
    2503 typedef struct VmaAllocationInfo {
    2508  uint32_t memoryType;
    2517  VkDeviceMemory deviceMemory;
    2522  VkDeviceSize offset;
    2527  VkDeviceSize size;
    2541  void* pUserData;
    2543 
    2554 VkResult vmaAllocateMemory(
    2555  VmaAllocator allocator,
    2556  const VkMemoryRequirements* pVkMemoryRequirements,
    2557  const VmaAllocationCreateInfo* pCreateInfo,
    2558  VmaAllocation* pAllocation,
    2559  VmaAllocationInfo* pAllocationInfo);
    2560 
    2580 VkResult vmaAllocateMemoryPages(
    2581  VmaAllocator allocator,
    2582  const VkMemoryRequirements* pVkMemoryRequirements,
    2583  const VmaAllocationCreateInfo* pCreateInfo,
    2584  size_t allocationCount,
    2585  VmaAllocation* pAllocations,
    2586  VmaAllocationInfo* pAllocationInfo);
    2587 
    2595  VmaAllocator allocator,
    2596  VkBuffer buffer,
    2597  const VmaAllocationCreateInfo* pCreateInfo,
    2598  VmaAllocation* pAllocation,
    2599  VmaAllocationInfo* pAllocationInfo);
    2600 
    2602 VkResult vmaAllocateMemoryForImage(
    2603  VmaAllocator allocator,
    2604  VkImage image,
    2605  const VmaAllocationCreateInfo* pCreateInfo,
    2606  VmaAllocation* pAllocation,
    2607  VmaAllocationInfo* pAllocationInfo);
    2608 
    2613 void vmaFreeMemory(
    2614  VmaAllocator allocator,
    2615  VmaAllocation allocation);
    2616 
    2627 void vmaFreeMemoryPages(
    2628  VmaAllocator allocator,
    2629  size_t allocationCount,
    2630  VmaAllocation* pAllocations);
    2631 
    2638 VkResult vmaResizeAllocation(
    2639  VmaAllocator allocator,
    2640  VmaAllocation allocation,
    2641  VkDeviceSize newSize);
    2642 
    2660  VmaAllocator allocator,
    2661  VmaAllocation allocation,
    2662  VmaAllocationInfo* pAllocationInfo);
    2663 
    2678 VkBool32 vmaTouchAllocation(
    2679  VmaAllocator allocator,
    2680  VmaAllocation allocation);
    2681 
    2696  VmaAllocator allocator,
    2697  VmaAllocation allocation,
    2698  void* pUserData);
    2699 
    2711  VmaAllocator allocator,
    2712  VmaAllocation* pAllocation);
    2713 
    2748 VkResult vmaMapMemory(
    2749  VmaAllocator allocator,
    2750  VmaAllocation allocation,
    2751  void** ppData);
    2752 
    2757 void vmaUnmapMemory(
    2758  VmaAllocator allocator,
    2759  VmaAllocation allocation);
    2760 
    2777 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2778 
    2795 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2796 
    2813 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
    2814 
    2821 VK_DEFINE_HANDLE(VmaDefragmentationContext)
    2822 
    2823 typedef enum VmaDefragmentationFlagBits {
    2827 typedef VkFlags VmaDefragmentationFlags;
    2828 
    2833 typedef struct VmaDefragmentationInfo2 {
    2857  uint32_t poolCount;
    2878  VkDeviceSize maxCpuBytesToMove;
    2888  VkDeviceSize maxGpuBytesToMove;
    2902  VkCommandBuffer commandBuffer;
    2904 
    2909 typedef struct VmaDefragmentationInfo {
    2914  VkDeviceSize maxBytesToMove;
    2921 
    2923 typedef struct VmaDefragmentationStats {
    2925  VkDeviceSize bytesMoved;
    2927  VkDeviceSize bytesFreed;
    2933 
    2963 VkResult vmaDefragmentationBegin(
    2964  VmaAllocator allocator,
    2965  const VmaDefragmentationInfo2* pInfo,
    2966  VmaDefragmentationStats* pStats,
    2967  VmaDefragmentationContext *pContext);
    2968 
    2974 VkResult vmaDefragmentationEnd(
    2975  VmaAllocator allocator,
    2976  VmaDefragmentationContext context);
    2977 
    3018 VkResult vmaDefragment(
    3019  VmaAllocator allocator,
    3020  VmaAllocation* pAllocations,
    3021  size_t allocationCount,
    3022  VkBool32* pAllocationsChanged,
    3023  const VmaDefragmentationInfo *pDefragmentationInfo,
    3024  VmaDefragmentationStats* pDefragmentationStats);
    3025 
    3038 VkResult vmaBindBufferMemory(
    3039  VmaAllocator allocator,
    3040  VmaAllocation allocation,
    3041  VkBuffer buffer);
    3042 
    3055 VkResult vmaBindImageMemory(
    3056  VmaAllocator allocator,
    3057  VmaAllocation allocation,
    3058  VkImage image);
    3059 
    3086 VkResult vmaCreateBuffer(
    3087  VmaAllocator allocator,
    3088  const VkBufferCreateInfo* pBufferCreateInfo,
    3089  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    3090  VkBuffer* pBuffer,
    3091  VmaAllocation* pAllocation,
    3092  VmaAllocationInfo* pAllocationInfo);
    3093 
    3105 void vmaDestroyBuffer(
    3106  VmaAllocator allocator,
    3107  VkBuffer buffer,
    3108  VmaAllocation allocation);
    3109 
    3111 VkResult vmaCreateImage(
    3112  VmaAllocator allocator,
    3113  const VkImageCreateInfo* pImageCreateInfo,
    3114  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    3115  VkImage* pImage,
    3116  VmaAllocation* pAllocation,
    3117  VmaAllocationInfo* pAllocationInfo);
    3118 
    3130 void vmaDestroyImage(
    3131  VmaAllocator allocator,
    3132  VkImage image,
    3133  VmaAllocation allocation);
    3134 
    3135 #ifdef __cplusplus
    3136 }
    3137 #endif
    3138 
    3139 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    3140 
    3141 // For Visual Studio IntelliSense.
    3142 #if defined(__cplusplus) && defined(__INTELLISENSE__)
    3143 #define VMA_IMPLEMENTATION
    3144 #endif
    3145 
    3146 #ifdef VMA_IMPLEMENTATION
    3147 #undef VMA_IMPLEMENTATION
    3148 
    3149 #include <cstdint>
    3150 #include <cstdlib>
    3151 #include <cstring>
    3152 
    3153 /*******************************************************************************
    3154 CONFIGURATION SECTION
    3155 
    3156 Define some of these macros before each #include of this header or change them
3157 here if you need other than the default behavior, depending on your environment.
    3158 */
    3159 
    3160 /*
    3161 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    3162 internally, like:
    3163 
    3164  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    3165 
3166 Define to 0 if you are going to provide your own pointers to Vulkan functions via
    3167 VmaAllocatorCreateInfo::pVulkanFunctions.
    3168 */
    3169 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    3170 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    3171 #endif
    3172 
    3173 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    3174 //#define VMA_USE_STL_CONTAINERS 1
    3175 
3176 /* Set this macro to 1 to make the library include and use STL containers:
    3177 std::pair, std::vector, std::list, std::unordered_map.
    3178 
3179 Set it to 0 or leave it undefined to make the library use its own implementation of
    3180 the containers.
    3181 */
    3182 #if VMA_USE_STL_CONTAINERS
    3183  #define VMA_USE_STL_VECTOR 1
    3184  #define VMA_USE_STL_UNORDERED_MAP 1
    3185  #define VMA_USE_STL_LIST 1
    3186 #endif
    3187 
    3188 #ifndef VMA_USE_STL_SHARED_MUTEX
    3189  // Compiler conforms to C++17.
    3190  #if __cplusplus >= 201703L
    3191  #define VMA_USE_STL_SHARED_MUTEX 1
3192 // Visual Studio defines __cplusplus properly only when passed the additional parameter /Zc:__cplusplus.
3193 // Otherwise it's always 199711L, even though shared_mutex has worked since Visual Studio 2015 Update 2.
    3194  // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
    3195  #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
    3196  #define VMA_USE_STL_SHARED_MUTEX 1
    3197  #else
    3198  #define VMA_USE_STL_SHARED_MUTEX 0
    3199  #endif
    3200 #endif
    3201 
    3202 /*
    3203 THESE INCLUDES ARE NOT ENABLED BY DEFAULT.
    3204 Library has its own container implementation.
    3205 */
    3206 #if VMA_USE_STL_VECTOR
    3207  #include <vector>
    3208 #endif
    3209 
    3210 #if VMA_USE_STL_UNORDERED_MAP
    3211  #include <unordered_map>
    3212 #endif
    3213 
    3214 #if VMA_USE_STL_LIST
    3215  #include <list>
    3216 #endif
    3217 
    3218 /*
    3219 Following headers are used in this CONFIGURATION section only, so feel free to
    3220 remove them if not needed.
    3221 */
    3222 #include <cassert> // for assert
    3223 #include <algorithm> // for min, max
    3224 #include <mutex>
    3225 
    3226 #ifndef VMA_NULL
    3227  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    3228  #define VMA_NULL nullptr
    3229 #endif
    3230 
    3231 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
    3232 #include <cstdlib>
    3233 void *aligned_alloc(size_t alignment, size_t size)
    3234 {
    3235  // alignment must be >= sizeof(void*)
    3236  if(alignment < sizeof(void*))
    3237  {
    3238  alignment = sizeof(void*);
    3239  }
    3240 
    3241  return memalign(alignment, size);
    3242 }
    3243 #elif defined(__APPLE__) || defined(__ANDROID__)
    3244 #include <cstdlib>
    3245 void *aligned_alloc(size_t alignment, size_t size)
    3246 {
    3247  // alignment must be >= sizeof(void*)
    3248  if(alignment < sizeof(void*))
    3249  {
    3250  alignment = sizeof(void*);
    3251  }
    3252 
    3253  void *pointer;
    3254  if(posix_memalign(&pointer, alignment, size) == 0)
    3255  return pointer;
    3256  return VMA_NULL;
    3257 }
    3258 #endif
    3259 
    3260 // If your compiler is not compatible with C++11 and definition of
3261 // aligned_alloc() function is missing, uncommenting the following line may help:
    3262 
    3263 //#include <malloc.h>
    3264 
    3265 // Normal assert to check for programmer's errors, especially in Debug configuration.
    3266 #ifndef VMA_ASSERT
    3267  #ifdef _DEBUG
    3268  #define VMA_ASSERT(expr) assert(expr)
    3269  #else
    3270  #define VMA_ASSERT(expr)
    3271  #endif
    3272 #endif
    3273 
    3274 // Assert that will be called very often, like inside data structures e.g. operator[].
    3275 // Making it non-empty can make program slow.
    3276 #ifndef VMA_HEAVY_ASSERT
    3277  #ifdef _DEBUG
    3278  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    3279  #else
    3280  #define VMA_HEAVY_ASSERT(expr)
    3281  #endif
    3282 #endif
    3283 
    3284 #ifndef VMA_ALIGN_OF
    3285  #define VMA_ALIGN_OF(type) (__alignof(type))
    3286 #endif
    3287 
    3288 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    3289  #if defined(_WIN32)
    3290  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    3291  #else
    3292  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    3293  #endif
    3294 #endif
    3295 
    3296 #ifndef VMA_SYSTEM_FREE
    3297  #if defined(_WIN32)
    3298  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    3299  #else
    3300  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    3301  #endif
    3302 #endif
    3303 
    3304 #ifndef VMA_MIN
    3305  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    3306 #endif
    3307 
    3308 #ifndef VMA_MAX
    3309  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    3310 #endif
    3311 
    3312 #ifndef VMA_SWAP
    3313  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    3314 #endif
    3315 
    3316 #ifndef VMA_SORT
    3317  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    3318 #endif
    3319 
    3320 #ifndef VMA_DEBUG_LOG
    3321  #define VMA_DEBUG_LOG(format, ...)
    3322  /*
    3323  #define VMA_DEBUG_LOG(format, ...) do { \
    3324  printf(format, __VA_ARGS__); \
    3325  printf("\n"); \
    3326  } while(false)
    3327  */
    3328 #endif
    3329 
    3330 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    3331 #if VMA_STATS_STRING_ENABLED
    3332  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    3333  {
    3334  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    3335  }
    3336  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    3337  {
    3338  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    3339  }
    3340  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    3341  {
    3342  snprintf(outStr, strLen, "%p", ptr);
    3343  }
    3344 #endif
    3345 
    3346 #ifndef VMA_MUTEX
    3347  class VmaMutex
    3348  {
    3349  public:
    3350  void Lock() { m_Mutex.lock(); }
    3351  void Unlock() { m_Mutex.unlock(); }
    3352  private:
    3353  std::mutex m_Mutex;
    3354  };
    3355  #define VMA_MUTEX VmaMutex
    3356 #endif
    3357 
    3358 // Read-write mutex, where "read" is shared access, "write" is exclusive access.
    3359 #ifndef VMA_RW_MUTEX
    3360  #if VMA_USE_STL_SHARED_MUTEX
    3361  // Use std::shared_mutex from C++17.
    3362  #include <shared_mutex>
    3363  class VmaRWMutex
    3364  {
    3365  public:
    3366  void LockRead() { m_Mutex.lock_shared(); }
    3367  void UnlockRead() { m_Mutex.unlock_shared(); }
    3368  void LockWrite() { m_Mutex.lock(); }
    3369  void UnlockWrite() { m_Mutex.unlock(); }
    3370  private:
    3371  std::shared_mutex m_Mutex;
    3372  };
    3373  #define VMA_RW_MUTEX VmaRWMutex
    3374  #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
    3375  // Use SRWLOCK from WinAPI.
    3376  // Minimum supported client = Windows Vista, server = Windows Server 2008.
    3377  class VmaRWMutex
    3378  {
    3379  public:
    3380  VmaRWMutex() { InitializeSRWLock(&m_Lock); }
    3381  void LockRead() { AcquireSRWLockShared(&m_Lock); }
    3382  void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
    3383  void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
    3384  void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
    3385  private:
    3386  SRWLOCK m_Lock;
    3387  };
    3388  #define VMA_RW_MUTEX VmaRWMutex
    3389  #else
    3390  // Less efficient fallback: Use normal mutex.
    3391  class VmaRWMutex
    3392  {
    3393  public:
    3394  void LockRead() { m_Mutex.Lock(); }
    3395  void UnlockRead() { m_Mutex.Unlock(); }
    3396  void LockWrite() { m_Mutex.Lock(); }
    3397  void UnlockWrite() { m_Mutex.Unlock(); }
    3398  private:
    3399  VMA_MUTEX m_Mutex;
    3400  };
    3401  #define VMA_RW_MUTEX VmaRWMutex
    3402  #endif // #if VMA_USE_STL_SHARED_MUTEX
    3403 #endif // #ifndef VMA_RW_MUTEX
    3404 
    3405 /*
    3406 If providing your own implementation, you need to implement a subset of std::atomic:
    3407 
    3408 - Constructor(uint32_t desired)
    3409 - uint32_t load() const
    3410 - void store(uint32_t desired)
    3411 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    3412 */
    3413 #ifndef VMA_ATOMIC_UINT32
    3414  #include <atomic>
    3415  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    3416 #endif
    3417 
    3418 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    3419 
    3423  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    3424 #endif
    3425 
    3426 #ifndef VMA_DEBUG_ALIGNMENT
    3427 
    3431  #define VMA_DEBUG_ALIGNMENT (1)
    3432 #endif
    3433 
    3434 #ifndef VMA_DEBUG_MARGIN
    3435 
    3439  #define VMA_DEBUG_MARGIN (0)
    3440 #endif
    3441 
    3442 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    3443 
    3447  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
    3448 #endif
    3449 
    3450 #ifndef VMA_DEBUG_DETECT_CORRUPTION
    3451 
    3456  #define VMA_DEBUG_DETECT_CORRUPTION (0)
    3457 #endif
    3458 
    3459 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    3460 
    3464  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    3465 #endif
    3466 
    3467 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    3468 
    3472  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    3473 #endif
    3474 
    3475 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    3476  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
    3478 #endif
    3479 
    3480 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    3481  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
    3483 #endif
    3484 
    3485 #ifndef VMA_CLASS_NO_COPY
    3486  #define VMA_CLASS_NO_COPY(className) \
    3487  private: \
    3488  className(const className&) = delete; \
    3489  className& operator=(const className&) = delete;
    3490 #endif
    3491 
    3492 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    3493 
    3494 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
    3495 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
    3496 
    3497 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
    3498 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
    3499 
    3500 /*******************************************************************************
    3501 END OF CONFIGURATION
    3502 */
    3503 
    3504 static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
    3505 
    3506 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    3507  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    3508 
    3509 // Returns number of bits set to 1 in (v).
    3510 static inline uint32_t VmaCountBitsSet(uint32_t v)
    3511 {
    3512  uint32_t c = v - ((v >> 1) & 0x55555555);
    3513  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    3514  c = ((c >> 4) + c) & 0x0F0F0F0F;
    3515  c = ((c >> 8) + c) & 0x00FF00FF;
    3516  c = ((c >> 16) + c) & 0x0000FFFF;
    3517  return c;
    3518 }
    3519 
3520 // Aligns given value up to the nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
    3521 // Use types like uint32_t, uint64_t as T.
    3522 template <typename T>
    3523 static inline T VmaAlignUp(T val, T align)
    3524 {
    3525  return (val + align - 1) / align * align;
    3526 }
3527 // Aligns given value down to the nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
    3528 // Use types like uint32_t, uint64_t as T.
    3529 template <typename T>
    3530 static inline T VmaAlignDown(T val, T align)
    3531 {
    3532  return val / align * align;
    3533 }
    3534 
    3535 // Division with mathematical rounding to nearest number.
    3536 template <typename T>
    3537 static inline T VmaRoundDiv(T x, T y)
    3538 {
    3539  return (x + (y / (T)2)) / y;
    3540 }
    3541 
    3542 /*
    3543 Returns true if given number is a power of two.
3544 T must be an unsigned integer type, or a signed integer that is always nonnegative.
    3545 For 0 returns true.
    3546 */
    3547 template <typename T>
    3548 inline bool VmaIsPow2(T x)
    3549 {
    3550  return (x & (x-1)) == 0;
    3551 }
    3552 
3553 // Returns the smallest power of 2 greater than or equal to v.
    3554 static inline uint32_t VmaNextPow2(uint32_t v)
    3555 {
    3556  v--;
    3557  v |= v >> 1;
    3558  v |= v >> 2;
    3559  v |= v >> 4;
    3560  v |= v >> 8;
    3561  v |= v >> 16;
    3562  v++;
    3563  return v;
    3564 }
    3565 static inline uint64_t VmaNextPow2(uint64_t v)
    3566 {
    3567  v--;
    3568  v |= v >> 1;
    3569  v |= v >> 2;
    3570  v |= v >> 4;
    3571  v |= v >> 8;
    3572  v |= v >> 16;
    3573  v |= v >> 32;
    3574  v++;
    3575  return v;
    3576 }
    3577 
3578 // Returns the largest power of 2 less than or equal to v.
    3579 static inline uint32_t VmaPrevPow2(uint32_t v)
    3580 {
    3581  v |= v >> 1;
    3582  v |= v >> 2;
    3583  v |= v >> 4;
    3584  v |= v >> 8;
    3585  v |= v >> 16;
    3586  v = v ^ (v >> 1);
    3587  return v;
    3588 }
    3589 static inline uint64_t VmaPrevPow2(uint64_t v)
    3590 {
    3591  v |= v >> 1;
    3592  v |= v >> 2;
    3593  v |= v >> 4;
    3594  v |= v >> 8;
    3595  v |= v >> 16;
    3596  v |= v >> 32;
    3597  v = v ^ (v >> 1);
    3598  return v;
    3599 }
    3600 
    3601 static inline bool VmaStrIsEmpty(const char* pStr)
    3602 {
    3603  return pStr == VMA_NULL || *pStr == '\0';
    3604 }
    3605 
    3606 #if VMA_STATS_STRING_ENABLED
    3607 
    3608 static const char* VmaAlgorithmToStr(uint32_t algorithm)
    3609 {
    3610  switch(algorithm)
    3611  {
3612  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
3613  return "Linear";
3614  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
3615  return "Buddy";
    3616  case 0:
    3617  return "Default";
    3618  default:
    3619  VMA_ASSERT(0);
    3620  return "";
    3621  }
    3622 }
    3623 
    3624 #endif // #if VMA_STATS_STRING_ENABLED
    3625 
    3626 #ifndef VMA_SORT
    3627 
    3628 template<typename Iterator, typename Compare>
    3629 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
    3630 {
    3631  Iterator centerValue = end; --centerValue;
    3632  Iterator insertIndex = beg;
    3633  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    3634  {
    3635  if(cmp(*memTypeIndex, *centerValue))
    3636  {
    3637  if(insertIndex != memTypeIndex)
    3638  {
    3639  VMA_SWAP(*memTypeIndex, *insertIndex);
    3640  }
    3641  ++insertIndex;
    3642  }
    3643  }
    3644  if(insertIndex != centerValue)
    3645  {
    3646  VMA_SWAP(*insertIndex, *centerValue);
    3647  }
    3648  return insertIndex;
    3649 }
    3650 
    3651 template<typename Iterator, typename Compare>
    3652 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
    3653 {
    3654  if(beg < end)
    3655  {
    3656  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
    3657  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
    3658  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    3659  }
    3660 }
    3661 
    3662 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    3663 
    3664 #endif // #ifndef VMA_SORT
    3665 
    3666 /*
    3667 Returns true if two memory blocks occupy overlapping pages.
3668 ResourceA must be at a lower memory offset than ResourceB.
    3669 
    3670 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
    3671 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
    3672 */
    3673 static inline bool VmaBlocksOnSamePage(
    3674  VkDeviceSize resourceAOffset,
    3675  VkDeviceSize resourceASize,
    3676  VkDeviceSize resourceBOffset,
    3677  VkDeviceSize pageSize)
    3678 {
    3679  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    3680  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    3681  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    3682  VkDeviceSize resourceBStart = resourceBOffset;
    3683  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    3684  return resourceAEndPage == resourceBStartPage;
    3685 }
    3686 
    3687 enum VmaSuballocationType
    3688 {
    3689  VMA_SUBALLOCATION_TYPE_FREE = 0,
    3690  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    3691  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    3692  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    3693  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    3694  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    3695  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
    3696 };
    3697 
    3698 /*
    3699 Returns true if given suballocation types could conflict and must respect
3700 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one of them is a buffer
3701 or a linear image and the other is an optimal image. If a type is unknown, behave
3702 conservatively.
    3703 */
    3704 static inline bool VmaIsBufferImageGranularityConflict(
    3705  VmaSuballocationType suballocType1,
    3706  VmaSuballocationType suballocType2)
    3707 {
    3708  if(suballocType1 > suballocType2)
    3709  {
    3710  VMA_SWAP(suballocType1, suballocType2);
    3711  }
    3712 
    3713  switch(suballocType1)
    3714  {
    3715  case VMA_SUBALLOCATION_TYPE_FREE:
    3716  return false;
    3717  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
    3718  return true;
    3719  case VMA_SUBALLOCATION_TYPE_BUFFER:
    3720  return
    3721  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3722  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3723  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
    3724  return
    3725  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3726  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
    3727  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3728  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
    3729  return
    3730  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3731  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
    3732  return false;
    3733  default:
    3734  VMA_ASSERT(0);
    3735  return true;
    3736  }
    3737 }
    3738 
    3739 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
    3740 {
    3741 #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
    3742  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    3743  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3744  for(size_t i = 0; i < numberCount; ++i, ++pDst)
    3745  {
    3746  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    3747  }
    3748 #else
    3749  // no-op
    3750 #endif
    3751 }
    3752 
    3753 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
    3754 {
    3755 #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
    3756  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    3757  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3758  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    3759  {
    3760  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
    3761  {
    3762  return false;
    3763  }
    3764  }
    3765 #endif
    3766  return true;
    3767 }
    3768 
    3769 /*
    3770 Fills structure with parameters of an example buffer to be used for transfers
    3771 during GPU memory defragmentation.
    3772 */
    3773 static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
    3774 {
    3775  memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
    3776  outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    3777  outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    3778  outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
    3779 }
    3780 
    3781 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
    3782 struct VmaMutexLock
    3783 {
    3784  VMA_CLASS_NO_COPY(VmaMutexLock)
    3785 public:
    3786  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
    3787  m_pMutex(useMutex ? &mutex : VMA_NULL)
    3788  { if(m_pMutex) { m_pMutex->Lock(); } }
    3789  ~VmaMutexLock()
    3790  { if(m_pMutex) { m_pMutex->Unlock(); } }
    3791 private:
    3792  VMA_MUTEX* m_pMutex;
    3793 };
    3794 
    3795 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
    3796 struct VmaMutexLockRead
    3797 {
    3798  VMA_CLASS_NO_COPY(VmaMutexLockRead)
    3799 public:
    3800  VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
    3801  m_pMutex(useMutex ? &mutex : VMA_NULL)
    3802  { if(m_pMutex) { m_pMutex->LockRead(); } }
    3803  ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
    3804 private:
    3805  VMA_RW_MUTEX* m_pMutex;
    3806 };
    3807 
    3808 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
    3809 struct VmaMutexLockWrite
    3810 {
    3811  VMA_CLASS_NO_COPY(VmaMutexLockWrite)
    3812 public:
    3813  VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
    3814  m_pMutex(useMutex ? &mutex : VMA_NULL)
    3815  { if(m_pMutex) { m_pMutex->LockWrite(); } }
    3816  ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
    3817 private:
    3818  VMA_RW_MUTEX* m_pMutex;
    3819 };
    3820 
    3821 #if VMA_DEBUG_GLOBAL_MUTEX
    3822  static VMA_MUTEX gDebugGlobalMutex;
    3823  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    3824 #else
    3825  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    3826 #endif
    3827 
    3828 // Minimum size of a free suballocation to register it in the free suballocation collection.
    3829 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    3830 
    3831 /*
3832 Performs binary search and returns iterator to the first element that is greater than or
3833 equal to (key), according to comparison (cmp).
    3834 
    3835 Cmp should return true if first argument is less than second argument.
    3836 
3837 Returned value is the found element, if present in the collection, or the place where
3838 a new element with value (key) should be inserted.
    3839 */
    3840 template <typename CmpLess, typename IterT, typename KeyT>
    3841 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, const CmpLess& cmp)
    3842 {
    3843  size_t down = 0, up = (end - beg);
    3844  while(down < up)
    3845  {
    3846  const size_t mid = (down + up) / 2;
    3847  if(cmp(*(beg+mid), key))
    3848  {
    3849  down = mid + 1;
    3850  }
    3851  else
    3852  {
    3853  up = mid;
    3854  }
    3855  }
    3856  return beg + down;
    3857 }
    3858 
    3859 template<typename CmpLess, typename IterT, typename KeyT>
    3860 IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
    3861 {
    3862  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
    3863  beg, end, value, cmp);
    3864  if(it == end ||
    3865  (!cmp(*it, value) && !cmp(value, *it)))
    3866  {
    3867  return it;
    3868  }
    3869  return end;
    3870 }
    3871 
    3872 /*
3873 Returns true if all pointers in the array are non-null and unique.
    3874 Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
    3875 T must be pointer type, e.g. VmaAllocation, VmaPool.
    3876 */
    3877 template<typename T>
    3878 static bool VmaValidatePointerArray(uint32_t count, const T* arr)
    3879 {
    3880  for(uint32_t i = 0; i < count; ++i)
    3881  {
    3882  const T iPtr = arr[i];
    3883  if(iPtr == VMA_NULL)
    3884  {
    3885  return false;
    3886  }
    3887  for(uint32_t j = i + 1; j < count; ++j)
    3888  {
    3889  if(iPtr == arr[j])
    3890  {
    3891  return false;
    3892  }
    3893  }
    3894  }
    3895  return true;
    3896 }
    3897 
    3899 // Memory allocation
    3900 
    3901 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
    3902 {
    3903  if((pAllocationCallbacks != VMA_NULL) &&
    3904  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    3905  {
    3906  return (*pAllocationCallbacks->pfnAllocation)(
    3907  pAllocationCallbacks->pUserData,
    3908  size,
    3909  alignment,
    3910  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    3911  }
    3912  else
    3913  {
    3914  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    3915  }
    3916 }
    3917 
    3918 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
    3919 {
    3920  if((pAllocationCallbacks != VMA_NULL) &&
    3921  (pAllocationCallbacks->pfnFree != VMA_NULL))
    3922  {
    3923  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    3924  }
    3925  else
    3926  {
    3927  VMA_SYSTEM_FREE(ptr);
    3928  }
    3929 }
    3930 
    3931 template<typename T>
    3932 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
    3933 {
    3934  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
    3935 }
    3936 
    3937 template<typename T>
    3938 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
    3939 {
    3940  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
    3941 }
    3942 
    3943 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
    3944 
    3945 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    3946 
    3947 template<typename T>
    3948 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
    3949 {
    3950  ptr->~T();
    3951  VmaFree(pAllocationCallbacks, ptr);
    3952 }
    3953 
    3954 template<typename T>
    3955 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    3956 {
    3957  if(ptr != VMA_NULL)
    3958  {
    3959  for(size_t i = count; i--; )
    3960  {
    3961  ptr[i].~T();
    3962  }
    3963  VmaFree(pAllocationCallbacks, ptr);
    3964  }
    3965 }
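
#if 0
// Illustrative sketch, not part of the original header: the vma_new /
// vma_delete macros pair placement-new construction with memory obtained
// through the user's VkAllocationCallbacks (or the system heap when the
// callbacks are null).
static void ExampleVmaNewDelete(const VkAllocationCallbacks* pAllocationCallbacks)
{
    // Single object: allocate raw, suitably aligned memory, construct in place.
    int* pSingle = vma_new(pAllocationCallbacks, int)(42);
    // Destroys the object, then frees through the same callbacks.
    vma_delete(pAllocationCallbacks, pSingle);

    // Array variant: the element count must be repeated when deleting.
    int* pArray = vma_new_array(pAllocationCallbacks, int, 16);
    vma_delete_array(pAllocationCallbacks, pArray, 16);
}
#endif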
    3966 
    3967 // STL-compatible allocator.
    3968 template<typename T>
    3969 class VmaStlAllocator
    3970 {
    3971 public:
    3972  const VkAllocationCallbacks* const m_pCallbacks;
    3973  typedef T value_type;
    3974 
    3975  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    3976  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
    3977 
    3978  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    3979  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
    3980 
    3981  template<typename U>
    3982  bool operator==(const VmaStlAllocator<U>& rhs) const
    3983  {
    3984  return m_pCallbacks == rhs.m_pCallbacks;
    3985  }
    3986  template<typename U>
    3987  bool operator!=(const VmaStlAllocator<U>& rhs) const
    3988  {
    3989  return m_pCallbacks != rhs.m_pCallbacks;
    3990  }
    3991 
    3992  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
    3993 };
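
#if 0
// Illustrative sketch, not part of the original header: VmaStlAllocator
// adapts VkAllocationCallbacks to the STL allocator model, so standard
// containers can route their storage through the user-provided callbacks.
#include <vector>

static void ExampleStlAllocator(const VkAllocationCallbacks* pAllocationCallbacks)
{
    // Extra parentheses avoid the most vexing parse.
    std::vector< int, VmaStlAllocator<int> > v( (VmaStlAllocator<int>(pAllocationCallbacks)) );
    v.push_back(1); // Element storage is obtained via VmaAllocateArray / VmaFree.
}
#endif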
    3994 
    3995 #if VMA_USE_STL_VECTOR
    3996 
    3997 #define VmaVector std::vector
    3998 
    3999 template<typename T, typename allocatorT>
    4000 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
    4001 {
    4002  vec.insert(vec.begin() + index, item);
    4003 }
    4004 
    4005 template<typename T, typename allocatorT>
    4006 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
    4007 {
    4008  vec.erase(vec.begin() + index);
    4009 }
    4010 
    4011 #else // #if VMA_USE_STL_VECTOR
    4012 
4013 /* Class with an interface compatible with a subset of std::vector.
4014 T must be POD because constructors and destructors are not called;
4015 memcpy is used to copy these objects instead. */
    4016 template<typename T, typename AllocatorT>
    4017 class VmaVector
    4018 {
    4019 public:
    4020  typedef T value_type;
    4021 
    4022  VmaVector(const AllocatorT& allocator) :
    4023  m_Allocator(allocator),
    4024  m_pArray(VMA_NULL),
    4025  m_Count(0),
    4026  m_Capacity(0)
    4027  {
    4028  }
    4029 
    4030  VmaVector(size_t count, const AllocatorT& allocator) :
    4031  m_Allocator(allocator),
    4032  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    4033  m_Count(count),
    4034  m_Capacity(count)
    4035  {
    4036  }
    4037 
    4038  VmaVector(const VmaVector<T, AllocatorT>& src) :
    4039  m_Allocator(src.m_Allocator),
    4040  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    4041  m_Count(src.m_Count),
    4042  m_Capacity(src.m_Count)
    4043  {
    4044  if(m_Count != 0)
    4045  {
    4046  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    4047  }
    4048  }
    4049 
    4050  ~VmaVector()
    4051  {
    4052  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    4053  }
    4054 
    4055  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    4056  {
    4057  if(&rhs != this)
    4058  {
    4059  resize(rhs.m_Count);
    4060  if(m_Count != 0)
    4061  {
    4062  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    4063  }
    4064  }
    4065  return *this;
    4066  }
    4067 
    4068  bool empty() const { return m_Count == 0; }
    4069  size_t size() const { return m_Count; }
    4070  T* data() { return m_pArray; }
    4071  const T* data() const { return m_pArray; }
    4072 
    4073  T& operator[](size_t index)
    4074  {
    4075  VMA_HEAVY_ASSERT(index < m_Count);
    4076  return m_pArray[index];
    4077  }
    4078  const T& operator[](size_t index) const
    4079  {
    4080  VMA_HEAVY_ASSERT(index < m_Count);
    4081  return m_pArray[index];
    4082  }
    4083 
    4084  T& front()
    4085  {
    4086  VMA_HEAVY_ASSERT(m_Count > 0);
    4087  return m_pArray[0];
    4088  }
    4089  const T& front() const
    4090  {
    4091  VMA_HEAVY_ASSERT(m_Count > 0);
    4092  return m_pArray[0];
    4093  }
    4094  T& back()
    4095  {
    4096  VMA_HEAVY_ASSERT(m_Count > 0);
    4097  return m_pArray[m_Count - 1];
    4098  }
    4099  const T& back() const
    4100  {
    4101  VMA_HEAVY_ASSERT(m_Count > 0);
    4102  return m_pArray[m_Count - 1];
    4103  }
    4104 
    4105  void reserve(size_t newCapacity, bool freeMemory = false)
    4106  {
    4107  newCapacity = VMA_MAX(newCapacity, m_Count);
    4108 
    4109  if((newCapacity < m_Capacity) && !freeMemory)
    4110  {
    4111  newCapacity = m_Capacity;
    4112  }
    4113 
    4114  if(newCapacity != m_Capacity)
    4115  {
4116  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    4117  if(m_Count != 0)
    4118  {
    4119  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    4120  }
    4121  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    4122  m_Capacity = newCapacity;
    4123  m_pArray = newArray;
    4124  }
    4125  }
    4126 
    4127  void resize(size_t newCount, bool freeMemory = false)
    4128  {
    4129  size_t newCapacity = m_Capacity;
    4130  if(newCount > m_Capacity)
    4131  {
    4132  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    4133  }
    4134  else if(freeMemory)
    4135  {
    4136  newCapacity = newCount;
    4137  }
    4138 
    4139  if(newCapacity != m_Capacity)
    4140  {
    4141  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    4142  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    4143  if(elementsToCopy != 0)
    4144  {
    4145  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    4146  }
    4147  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    4148  m_Capacity = newCapacity;
    4149  m_pArray = newArray;
    4150  }
    4151 
    4152  m_Count = newCount;
    4153  }
    4154 
    4155  void clear(bool freeMemory = false)
    4156  {
    4157  resize(0, freeMemory);
    4158  }
    4159 
    4160  void insert(size_t index, const T& src)
    4161  {
    4162  VMA_HEAVY_ASSERT(index <= m_Count);
    4163  const size_t oldCount = size();
    4164  resize(oldCount + 1);
    4165  if(index < oldCount)
    4166  {
    4167  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    4168  }
    4169  m_pArray[index] = src;
    4170  }
    4171 
    4172  void remove(size_t index)
    4173  {
    4174  VMA_HEAVY_ASSERT(index < m_Count);
    4175  const size_t oldCount = size();
    4176  if(index < oldCount - 1)
    4177  {
    4178  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    4179  }
    4180  resize(oldCount - 1);
    4181  }
    4182 
    4183  void push_back(const T& src)
    4184  {
    4185  const size_t newIndex = size();
    4186  resize(newIndex + 1);
    4187  m_pArray[newIndex] = src;
    4188  }
    4189 
    4190  void pop_back()
    4191  {
    4192  VMA_HEAVY_ASSERT(m_Count > 0);
    4193  resize(size() - 1);
    4194  }
    4195 
    4196  void push_front(const T& src)
    4197  {
    4198  insert(0, src);
    4199  }
    4200 
    4201  void pop_front()
    4202  {
    4203  VMA_HEAVY_ASSERT(m_Count > 0);
    4204  remove(0);
    4205  }
    4206 
    4207  typedef T* iterator;
    4208 
    4209  iterator begin() { return m_pArray; }
    4210  iterator end() { return m_pArray + m_Count; }
    4211 
    4212 private:
    4213  AllocatorT m_Allocator;
    4214  T* m_pArray;
    4215  size_t m_Count;
    4216  size_t m_Capacity;
    4217 };
    4218 
    4219 template<typename T, typename allocatorT>
    4220 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
    4221 {
    4222  vec.insert(index, item);
    4223 }
    4224 
    4225 template<typename T, typename allocatorT>
    4226 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
    4227 {
    4228  vec.remove(index);
    4229 }
    4230 
    4231 #endif // #if VMA_USE_STL_VECTOR
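
#if 0
// Illustrative sketch, not part of the original header: whichever branch is
// compiled (std::vector or the custom POD vector above), VmaVector is
// constructed from an allocator and used through the common interface subset.
static void ExampleVmaVector(const VkAllocationCallbacks* pAllocationCallbacks)
{
    // Extra parentheses avoid the most vexing parse.
    VmaVector< uint32_t, VmaStlAllocator<uint32_t> > v( (VmaStlAllocator<uint32_t>(pAllocationCallbacks)) );
    v.push_back(7u);
    VmaVectorInsert(v, 0, 3u); // v = { 3, 7 }
    VmaVectorRemove(v, 1);     // v = { 3 }
    VMA_ASSERT(v.size() == 1 && v[0] == 3);
}
#endif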
    4232 
    4233 template<typename CmpLess, typename VectorT>
    4234 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
    4235 {
    4236  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    4237  vector.data(),
    4238  vector.data() + vector.size(),
    4239  value,
    4240  CmpLess()) - vector.data();
    4241  VmaVectorInsert(vector, indexToInsert, value);
    4242  return indexToInsert;
    4243 }
    4244 
    4245 template<typename CmpLess, typename VectorT>
    4246 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
    4247 {
    4248  CmpLess comparator;
    4249  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
    4250  vector.begin(),
    4251  vector.end(),
    4252  value,
    4253  comparator);
    4254  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    4255  {
    4256  size_t indexToRemove = it - vector.begin();
    4257  VmaVectorRemove(vector, indexToRemove);
    4258  return true;
    4259  }
    4260  return false;
    4261 }
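
#if 0
// Illustrative sketch, not part of the original header: maintaining a sorted
// vector with the helpers above. CmpLess must define a strict weak ordering;
// VmaVectorRemoveSorted removes only an exactly equivalent element.
struct ExampleU32Less
{
    bool operator()(uint32_t lhs, uint32_t rhs) const { return lhs < rhs; }
};

static void ExampleSortedVector(const VkAllocationCallbacks* pAllocationCallbacks)
{
    VmaVector< uint32_t, VmaStlAllocator<uint32_t> > v( (VmaStlAllocator<uint32_t>(pAllocationCallbacks)) );
    VmaVectorInsertSorted<ExampleU32Less>(v, 7u); // v = { 7 }
    VmaVectorInsertSorted<ExampleU32Less>(v, 3u); // v = { 3, 7 }
    const bool removed = VmaVectorRemoveSorted<ExampleU32Less>(v, 7u); // v = { 3 }
    VMA_ASSERT(removed && v.size() == 1);
}
#endif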
    4262 
    4264 // class VmaPoolAllocator
    4265 
    4266 /*
4267 Allocator for objects of type T using a list of arrays (pools) to speed up
4268 allocation. The number of elements that can be allocated is not bounded,
4269 because the allocator can create additional blocks on demand.
    4270 */
    4271 template<typename T>
    4272 class VmaPoolAllocator
    4273 {
    4274  VMA_CLASS_NO_COPY(VmaPoolAllocator)
    4275 public:
    4276  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
    4277  ~VmaPoolAllocator();
    4278  void Clear();
    4279  T* Alloc();
    4280  void Free(T* ptr);
    4281 
    4282 private:
    4283  union Item
    4284  {
    4285  uint32_t NextFreeIndex;
    4286  T Value;
    4287  };
    4288 
    4289  struct ItemBlock
    4290  {
    4291  Item* pItems;
    4292  uint32_t Capacity;
    4293  uint32_t FirstFreeIndex;
    4294  };
    4295 
    4296  const VkAllocationCallbacks* m_pAllocationCallbacks;
    4297  const uint32_t m_FirstBlockCapacity;
    4298  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
    4299 
    4300  ItemBlock& CreateNewBlock();
    4301 };
    4302 
    4303 template<typename T>
    4304 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
    4305  m_pAllocationCallbacks(pAllocationCallbacks),
    4306  m_FirstBlockCapacity(firstBlockCapacity),
    4307  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
    4308 {
    4309  VMA_ASSERT(m_FirstBlockCapacity > 1);
    4310 }
    4311 
    4312 template<typename T>
    4313 VmaPoolAllocator<T>::~VmaPoolAllocator()
    4314 {
    4315  Clear();
    4316 }
    4317 
    4318 template<typename T>
    4319 void VmaPoolAllocator<T>::Clear()
    4320 {
    4321  for(size_t i = m_ItemBlocks.size(); i--; )
    4322  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
    4323  m_ItemBlocks.clear();
    4324 }
    4325 
    4326 template<typename T>
    4327 T* VmaPoolAllocator<T>::Alloc()
    4328 {
    4329  for(size_t i = m_ItemBlocks.size(); i--; )
    4330  {
    4331  ItemBlock& block = m_ItemBlocks[i];
4332  // This block has some free items: Use the first one.
    4333  if(block.FirstFreeIndex != UINT32_MAX)
    4334  {
    4335  Item* const pItem = &block.pItems[block.FirstFreeIndex];
    4336  block.FirstFreeIndex = pItem->NextFreeIndex;
    4337  return &pItem->Value;
    4338  }
    4339  }
    4340 
4341  // No block has a free item: Create a new one and use it.
    4342  ItemBlock& newBlock = CreateNewBlock();
    4343  Item* const pItem = &newBlock.pItems[0];
    4344  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    4345  return &pItem->Value;
    4346 }
    4347 
    4348 template<typename T>
    4349 void VmaPoolAllocator<T>::Free(T* ptr)
    4350 {
    4351  // Search all memory blocks to find ptr.
    4352  for(size_t i = m_ItemBlocks.size(); i--; )
    4353  {
    4354  ItemBlock& block = m_ItemBlocks[i];
    4355 
4356  // Convert ptr to a pointer to the enclosing union; memcpy avoids a type-punning cast.
    4357  Item* pItemPtr;
    4358  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
    4359 
    4360  // Check if pItemPtr is in address range of this block.
    4361  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
    4362  {
    4363  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
    4364  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
    4365  block.FirstFreeIndex = index;
    4366  return;
    4367  }
    4368  }
    4369  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
    4370 }
    4371 
    4372 template<typename T>
    4373 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
    4374 {
    4375  const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
    4376  m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;
    4377 
    4378  const ItemBlock newBlock = {
    4379  vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
    4380  newBlockCapacity,
    4381  0 };
    4382 
    4383  m_ItemBlocks.push_back(newBlock);
    4384 
4385  // Set up the singly-linked list of all free items in this block.
    4386  for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
    4387  newBlock.pItems[i].NextFreeIndex = i + 1;
    4388  newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
    4389  return m_ItemBlocks.back();
    4390 }
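
#if 0
// Illustrative sketch, not part of the original header: VmaPoolAllocator
// hands out fixed-size items from progressively larger blocks. Free slots
// form an intrusive singly-linked list threaded through the unused items,
// so Alloc() is O(1) once a block with a free slot is found.
static void ExamplePoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks)
{
    VmaPoolAllocator<uint64_t> pool(pAllocationCallbacks, 32); // First block holds 32 items.
    uint64_t* a = pool.Alloc(); // Note: the item is not value-initialized.
    uint64_t* b = pool.Alloc();
    *a = 1; *b = 2;
    pool.Free(a); // The slot rejoins its block's free list.
    pool.Free(b);
} // ~VmaPoolAllocator() releases all blocks.
#endif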
    4391 
    4393 // class VmaRawList, VmaList
    4394 
    4395 #if VMA_USE_STL_LIST
    4396 
    4397 #define VmaList std::list
    4398 
    4399 #else // #if VMA_USE_STL_LIST
    4400 
    4401 template<typename T>
    4402 struct VmaListItem
    4403 {
    4404  VmaListItem* pPrev;
    4405  VmaListItem* pNext;
    4406  T Value;
    4407 };
    4408 
    4409 // Doubly linked list.
    4410 template<typename T>
    4411 class VmaRawList
    4412 {
    4413  VMA_CLASS_NO_COPY(VmaRawList)
    4414 public:
    4415  typedef VmaListItem<T> ItemType;
    4416 
    4417  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    4418  ~VmaRawList();
    4419  void Clear();
    4420 
    4421  size_t GetCount() const { return m_Count; }
    4422  bool IsEmpty() const { return m_Count == 0; }
    4423 
    4424  ItemType* Front() { return m_pFront; }
    4425  const ItemType* Front() const { return m_pFront; }
    4426  ItemType* Back() { return m_pBack; }
    4427  const ItemType* Back() const { return m_pBack; }
    4428 
    4429  ItemType* PushBack();
    4430  ItemType* PushFront();
    4431  ItemType* PushBack(const T& value);
    4432  ItemType* PushFront(const T& value);
    4433  void PopBack();
    4434  void PopFront();
    4435 
    4436  // Item can be null - it means PushBack.
    4437  ItemType* InsertBefore(ItemType* pItem);
    4438  // Item can be null - it means PushFront.
    4439  ItemType* InsertAfter(ItemType* pItem);
    4440 
    4441  ItemType* InsertBefore(ItemType* pItem, const T& value);
    4442  ItemType* InsertAfter(ItemType* pItem, const T& value);
    4443 
    4444  void Remove(ItemType* pItem);
    4445 
    4446 private:
    4447  const VkAllocationCallbacks* const m_pAllocationCallbacks;
    4448  VmaPoolAllocator<ItemType> m_ItemAllocator;
    4449  ItemType* m_pFront;
    4450  ItemType* m_pBack;
    4451  size_t m_Count;
    4452 };
    4453 
    4454 template<typename T>
    4455 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    4456  m_pAllocationCallbacks(pAllocationCallbacks),
    4457  m_ItemAllocator(pAllocationCallbacks, 128),
    4458  m_pFront(VMA_NULL),
    4459  m_pBack(VMA_NULL),
    4460  m_Count(0)
    4461 {
    4462 }
    4463 
    4464 template<typename T>
    4465 VmaRawList<T>::~VmaRawList()
    4466 {
4467  // Intentionally not calling Clear: returning all items to m_ItemAllocator
4468  // as free would be wasted work, since m_ItemAllocator is destroyed anyway.
    4469 }
    4470 
    4471 template<typename T>
    4472 void VmaRawList<T>::Clear()
    4473 {
    4474  if(IsEmpty() == false)
    4475  {
    4476  ItemType* pItem = m_pBack;
    4477  while(pItem != VMA_NULL)
    4478  {
    4479  ItemType* const pPrevItem = pItem->pPrev;
    4480  m_ItemAllocator.Free(pItem);
    4481  pItem = pPrevItem;
    4482  }
    4483  m_pFront = VMA_NULL;
    4484  m_pBack = VMA_NULL;
    4485  m_Count = 0;
    4486  }
    4487 }
    4488 
    4489 template<typename T>
    4490 VmaListItem<T>* VmaRawList<T>::PushBack()
    4491 {
    4492  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    4493  pNewItem->pNext = VMA_NULL;
    4494  if(IsEmpty())
    4495  {
    4496  pNewItem->pPrev = VMA_NULL;
    4497  m_pFront = pNewItem;
    4498  m_pBack = pNewItem;
    4499  m_Count = 1;
    4500  }
    4501  else
    4502  {
    4503  pNewItem->pPrev = m_pBack;
    4504  m_pBack->pNext = pNewItem;
    4505  m_pBack = pNewItem;
    4506  ++m_Count;
    4507  }
    4508  return pNewItem;
    4509 }
    4510 
    4511 template<typename T>
    4512 VmaListItem<T>* VmaRawList<T>::PushFront()
    4513 {
    4514  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    4515  pNewItem->pPrev = VMA_NULL;
    4516  if(IsEmpty())
    4517  {
    4518  pNewItem->pNext = VMA_NULL;
    4519  m_pFront = pNewItem;
    4520  m_pBack = pNewItem;
    4521  m_Count = 1;
    4522  }
    4523  else
    4524  {
    4525  pNewItem->pNext = m_pFront;
    4526  m_pFront->pPrev = pNewItem;
    4527  m_pFront = pNewItem;
    4528  ++m_Count;
    4529  }
    4530  return pNewItem;
    4531 }
    4532 
    4533 template<typename T>
    4534 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
    4535 {
    4536  ItemType* const pNewItem = PushBack();
    4537  pNewItem->Value = value;
    4538  return pNewItem;
    4539 }
    4540 
    4541 template<typename T>
    4542 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
    4543 {
    4544  ItemType* const pNewItem = PushFront();
    4545  pNewItem->Value = value;
    4546  return pNewItem;
    4547 }
    4548 
    4549 template<typename T>
    4550 void VmaRawList<T>::PopBack()
    4551 {
    4552  VMA_HEAVY_ASSERT(m_Count > 0);
    4553  ItemType* const pBackItem = m_pBack;
    4554  ItemType* const pPrevItem = pBackItem->pPrev;
    4555  if(pPrevItem != VMA_NULL)
    4556  {
    4557  pPrevItem->pNext = VMA_NULL;
    4558  }
    4559  m_pBack = pPrevItem;
    4560  m_ItemAllocator.Free(pBackItem);
    4561  --m_Count;
    4562 }
    4563 
    4564 template<typename T>
    4565 void VmaRawList<T>::PopFront()
    4566 {
    4567  VMA_HEAVY_ASSERT(m_Count > 0);
    4568  ItemType* const pFrontItem = m_pFront;
    4569  ItemType* const pNextItem = pFrontItem->pNext;
    4570  if(pNextItem != VMA_NULL)
    4571  {
    4572  pNextItem->pPrev = VMA_NULL;
    4573  }
    4574  m_pFront = pNextItem;
    4575  m_ItemAllocator.Free(pFrontItem);
    4576  --m_Count;
    4577 }
    4578 
    4579 template<typename T>
    4580 void VmaRawList<T>::Remove(ItemType* pItem)
    4581 {
    4582  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    4583  VMA_HEAVY_ASSERT(m_Count > 0);
    4584 
    4585  if(pItem->pPrev != VMA_NULL)
    4586  {
    4587  pItem->pPrev->pNext = pItem->pNext;
    4588  }
    4589  else
    4590  {
    4591  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4592  m_pFront = pItem->pNext;
    4593  }
    4594 
    4595  if(pItem->pNext != VMA_NULL)
    4596  {
    4597  pItem->pNext->pPrev = pItem->pPrev;
    4598  }
    4599  else
    4600  {
    4601  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4602  m_pBack = pItem->pPrev;
    4603  }
    4604 
    4605  m_ItemAllocator.Free(pItem);
    4606  --m_Count;
    4607 }
    4608 
    4609 template<typename T>
    4610 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
    4611 {
    4612  if(pItem != VMA_NULL)
    4613  {
    4614  ItemType* const prevItem = pItem->pPrev;
    4615  ItemType* const newItem = m_ItemAllocator.Alloc();
    4616  newItem->pPrev = prevItem;
    4617  newItem->pNext = pItem;
    4618  pItem->pPrev = newItem;
    4619  if(prevItem != VMA_NULL)
    4620  {
    4621  prevItem->pNext = newItem;
    4622  }
    4623  else
    4624  {
    4625  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4626  m_pFront = newItem;
    4627  }
    4628  ++m_Count;
    4629  return newItem;
    4630  }
    4631  else
    4632  return PushBack();
    4633 }
    4634 
    4635 template<typename T>
    4636 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
    4637 {
    4638  if(pItem != VMA_NULL)
    4639  {
    4640  ItemType* const nextItem = pItem->pNext;
    4641  ItemType* const newItem = m_ItemAllocator.Alloc();
    4642  newItem->pNext = nextItem;
    4643  newItem->pPrev = pItem;
    4644  pItem->pNext = newItem;
    4645  if(nextItem != VMA_NULL)
    4646  {
    4647  nextItem->pPrev = newItem;
    4648  }
    4649  else
    4650  {
    4651  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4652  m_pBack = newItem;
    4653  }
    4654  ++m_Count;
    4655  return newItem;
    4656  }
    4657  else
    4658  return PushFront();
    4659 }
    4660 
    4661 template<typename T>
    4662 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
    4663 {
    4664  ItemType* const newItem = InsertBefore(pItem);
    4665  newItem->Value = value;
    4666  return newItem;
    4667 }
    4668 
    4669 template<typename T>
    4670 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
    4671 {
    4672  ItemType* const newItem = InsertAfter(pItem);
    4673  newItem->Value = value;
    4674  return newItem;
    4675 }
    4676 
    4677 template<typename T, typename AllocatorT>
    4678 class VmaList
    4679 {
    4680  VMA_CLASS_NO_COPY(VmaList)
    4681 public:
    4682  class iterator
    4683  {
    4684  public:
    4685  iterator() :
    4686  m_pList(VMA_NULL),
    4687  m_pItem(VMA_NULL)
    4688  {
    4689  }
    4690 
    4691  T& operator*() const
    4692  {
    4693  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4694  return m_pItem->Value;
    4695  }
    4696  T* operator->() const
    4697  {
    4698  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4699  return &m_pItem->Value;
    4700  }
    4701 
    4702  iterator& operator++()
    4703  {
    4704  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4705  m_pItem = m_pItem->pNext;
    4706  return *this;
    4707  }
    4708  iterator& operator--()
    4709  {
    4710  if(m_pItem != VMA_NULL)
    4711  {
    4712  m_pItem = m_pItem->pPrev;
    4713  }
    4714  else
    4715  {
    4716  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
    4717  m_pItem = m_pList->Back();
    4718  }
    4719  return *this;
    4720  }
    4721 
    4722  iterator operator++(int)
    4723  {
    4724  iterator result = *this;
    4725  ++*this;
    4726  return result;
    4727  }
    4728  iterator operator--(int)
    4729  {
    4730  iterator result = *this;
    4731  --*this;
    4732  return result;
    4733  }
    4734 
    4735  bool operator==(const iterator& rhs) const
    4736  {
    4737  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    4738  return m_pItem == rhs.m_pItem;
    4739  }
    4740  bool operator!=(const iterator& rhs) const
    4741  {
    4742  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    4743  return m_pItem != rhs.m_pItem;
    4744  }
    4745 
    4746  private:
    4747  VmaRawList<T>* m_pList;
    4748  VmaListItem<T>* m_pItem;
    4749 
    4750  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
    4751  m_pList(pList),
    4752  m_pItem(pItem)
    4753  {
    4754  }
    4755 
    4756  friend class VmaList<T, AllocatorT>;
    4757  };
    4758 
    4759  class const_iterator
    4760  {
    4761  public:
    4762  const_iterator() :
    4763  m_pList(VMA_NULL),
    4764  m_pItem(VMA_NULL)
    4765  {
    4766  }
    4767 
    4768  const_iterator(const iterator& src) :
    4769  m_pList(src.m_pList),
    4770  m_pItem(src.m_pItem)
    4771  {
    4772  }
    4773 
    4774  const T& operator*() const
    4775  {
    4776  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4777  return m_pItem->Value;
    4778  }
    4779  const T* operator->() const
    4780  {
    4781  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4782  return &m_pItem->Value;
    4783  }
    4784 
    4785  const_iterator& operator++()
    4786  {
    4787  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4788  m_pItem = m_pItem->pNext;
    4789  return *this;
    4790  }
    4791  const_iterator& operator--()
    4792  {
    4793  if(m_pItem != VMA_NULL)
    4794  {
    4795  m_pItem = m_pItem->pPrev;
    4796  }
    4797  else
    4798  {
    4799  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
    4800  m_pItem = m_pList->Back();
    4801  }
    4802  return *this;
    4803  }
    4804 
    4805  const_iterator operator++(int)
    4806  {
    4807  const_iterator result = *this;
    4808  ++*this;
    4809  return result;
    4810  }
    4811  const_iterator operator--(int)
    4812  {
    4813  const_iterator result = *this;
    4814  --*this;
    4815  return result;
    4816  }
    4817 
    4818  bool operator==(const const_iterator& rhs) const
    4819  {
    4820  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    4821  return m_pItem == rhs.m_pItem;
    4822  }
    4823  bool operator!=(const const_iterator& rhs) const
    4824  {
    4825  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    4826  return m_pItem != rhs.m_pItem;
    4827  }
    4828 
    4829  private:
    4830  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
    4831  m_pList(pList),
    4832  m_pItem(pItem)
    4833  {
    4834  }
    4835 
    4836  const VmaRawList<T>* m_pList;
    4837  const VmaListItem<T>* m_pItem;
    4838 
    4839  friend class VmaList<T, AllocatorT>;
    4840  };
    4841 
    4842  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
    4843 
    4844  bool empty() const { return m_RawList.IsEmpty(); }
    4845  size_t size() const { return m_RawList.GetCount(); }
    4846 
    4847  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    4848  iterator end() { return iterator(&m_RawList, VMA_NULL); }
    4849 
    4850  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    4851  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
    4852 
    4853  void clear() { m_RawList.Clear(); }
    4854  void push_back(const T& value) { m_RawList.PushBack(value); }
    4855  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    4856  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
    4857 
    4858 private:
    4859  VmaRawList<T> m_RawList;
    4860 };
    4861 
    4862 #endif // #if VMA_USE_STL_LIST
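
#if 0
// Illustrative sketch, not part of the original header: the interface subset
// shared by std::list and the custom VmaList above.
static void ExampleVmaList(const VkAllocationCallbacks* pAllocationCallbacks)
{
    VmaList< uint32_t, VmaStlAllocator<uint32_t> > list( (VmaStlAllocator<uint32_t>(pAllocationCallbacks)) );
    list.push_back(2u);
    list.insert(list.begin(), 1u); // list = { 1, 2 }
    uint32_t sum = 0;
    for(VmaList< uint32_t, VmaStlAllocator<uint32_t> >::iterator it = list.begin(); it != list.end(); ++it)
    {
        sum += *it; // Visits 1, then 2.
    }
    list.erase(list.begin()); // list = { 2 }
    VMA_ASSERT(sum == 3 && list.size() == 1);
}
#endif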
    4863 
    4865 // class VmaMap
    4866 
    4867 // Unused in this version.
    4868 #if 0
    4869 
    4870 #if VMA_USE_STL_UNORDERED_MAP
    4871 
    4872 #define VmaPair std::pair
    4873 
    4874 #define VMA_MAP_TYPE(KeyT, ValueT) \
    4875  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    4876 
    4877 #else // #if VMA_USE_STL_UNORDERED_MAP
    4878 
    4879 template<typename T1, typename T2>
    4880 struct VmaPair
    4881 {
    4882  T1 first;
    4883  T2 second;
    4884 
    4885  VmaPair() : first(), second() { }
    4886  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
    4887 };
    4888 
4889 /* Class compatible with a subset of the interface of std::unordered_map.
    4890 KeyT, ValueT must be POD because they will be stored in VmaVector.
    4891 */
    4892 template<typename KeyT, typename ValueT>
    4893 class VmaMap
    4894 {
    4895 public:
    4896  typedef VmaPair<KeyT, ValueT> PairType;
    4897  typedef PairType* iterator;
    4898 
    4899  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
    4900 
    4901  iterator begin() { return m_Vector.begin(); }
    4902  iterator end() { return m_Vector.end(); }
    4903 
    4904  void insert(const PairType& pair);
    4905  iterator find(const KeyT& key);
    4906  void erase(iterator it);
    4907 
    4908 private:
    4909  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
    4910 };
    4911 
    4912 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    4913 
    4914 template<typename FirstT, typename SecondT>
    4915 struct VmaPairFirstLess
    4916 {
    4917  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    4918  {
    4919  return lhs.first < rhs.first;
    4920  }
    4921  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    4922  {
    4923  return lhs.first < rhsFirst;
    4924  }
    4925 };
    4926 
    4927 template<typename KeyT, typename ValueT>
    4928 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
    4929 {
    4930  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    4931  m_Vector.data(),
    4932  m_Vector.data() + m_Vector.size(),
    4933  pair,
    4934  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    4935  VmaVectorInsert(m_Vector, indexToInsert, pair);
    4936 }
    4937 
    4938 template<typename KeyT, typename ValueT>
    4939 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
    4940 {
    4941  PairType* it = VmaBinaryFindFirstNotLess(
    4942  m_Vector.data(),
    4943  m_Vector.data() + m_Vector.size(),
    4944  key,
    4945  VmaPairFirstLess<KeyT, ValueT>());
    4946  if((it != m_Vector.end()) && (it->first == key))
    4947  {
    4948  return it;
    4949  }
    4950  else
    4951  {
    4952  return m_Vector.end();
    4953  }
    4954 }
    4955 
    4956 template<typename KeyT, typename ValueT>
    4957 void VmaMap<KeyT, ValueT>::erase(iterator it)
    4958 {
    4959  VmaVectorRemove(m_Vector, it - m_Vector.begin());
    4960 }
    4961 
    4962 #endif // #if VMA_USE_STL_UNORDERED_MAP
    4963 
    4964 #endif // #if 0
    4965 
    4967 
    4968 class VmaDeviceMemoryBlock;
    4969 
    4970 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
    4971 
    4972 struct VmaAllocation_T
    4973 {
    4974 private:
    4975  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
    4976 
    4977  enum FLAGS
    4978  {
    4979  FLAG_USER_DATA_STRING = 0x01,
    4980  };
    4981 
    4982 public:
    4983  enum ALLOCATION_TYPE
    4984  {
    4985  ALLOCATION_TYPE_NONE,
    4986  ALLOCATION_TYPE_BLOCK,
    4987  ALLOCATION_TYPE_DEDICATED,
    4988  };
    4989 
    4990  /*
4991  This struct cannot have a constructor or destructor. It must be POD because
4992  it is allocated using VmaPoolAllocator, which never calls either.
    4993  */
    4994 
    4995  void Ctor(uint32_t currentFrameIndex, bool userDataString)
    4996  {
    4997  m_Alignment = 1;
    4998  m_Size = 0;
    4999  m_pUserData = VMA_NULL;
    5000  m_LastUseFrameIndex = currentFrameIndex;
    5001  m_Type = (uint8_t)ALLOCATION_TYPE_NONE;
    5002  m_SuballocationType = (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN;
    5003  m_MapCount = 0;
    5004  m_Flags = userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0;
    5005 
    5006 #if VMA_STATS_STRING_ENABLED
    5007  m_CreationFrameIndex = currentFrameIndex;
    5008  m_BufferImageUsage = 0;
    5009 #endif
    5010  }
    5011 
    5012  void Dtor()
    5013  {
    5014  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
    5015 
    5016  // Check if owned string was freed.
    5017  VMA_ASSERT(m_pUserData == VMA_NULL);
    5018  }
    5019 
    5020  void InitBlockAllocation(
    5021  VmaDeviceMemoryBlock* block,
    5022  VkDeviceSize offset,
    5023  VkDeviceSize alignment,
    5024  VkDeviceSize size,
    5025  VmaSuballocationType suballocationType,
    5026  bool mapped,
    5027  bool canBecomeLost)
    5028  {
    5029  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    5030  VMA_ASSERT(block != VMA_NULL);
    5031  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
    5032  m_Alignment = alignment;
    5033  m_Size = size;
    5034  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
    5035  m_SuballocationType = (uint8_t)suballocationType;
    5036  m_BlockAllocation.m_Block = block;
    5037  m_BlockAllocation.m_Offset = offset;
    5038  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    5039  }
    5040 
    5041  void InitLost()
    5042  {
    5043  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    5044  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
    5045  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
    5046  m_BlockAllocation.m_Block = VMA_NULL;
    5047  m_BlockAllocation.m_Offset = 0;
    5048  m_BlockAllocation.m_CanBecomeLost = true;
    5049  }
    5050 
    5051  void ChangeBlockAllocation(
    5052  VmaAllocator hAllocator,
    5053  VmaDeviceMemoryBlock* block,
    5054  VkDeviceSize offset);
    5055 
    5056  void ChangeOffset(VkDeviceSize newOffset);
    5057 
5058  // A non-null pMappedData means the allocation was created with the MAPPED flag.
    5059  void InitDedicatedAllocation(
    5060  uint32_t memoryTypeIndex,
    5061  VkDeviceMemory hMemory,
    5062  VmaSuballocationType suballocationType,
    5063  void* pMappedData,
    5064  VkDeviceSize size)
    5065  {
    5066  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    5067  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
    5068  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
    5069  m_Alignment = 0;
    5070  m_Size = size;
    5071  m_SuballocationType = (uint8_t)suballocationType;
    5072  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
    5073  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
    5074  m_DedicatedAllocation.m_hMemory = hMemory;
    5075  m_DedicatedAllocation.m_pMappedData = pMappedData;
    5076  }
    5077 
    5078  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    5079  VkDeviceSize GetAlignment() const { return m_Alignment; }
    5080  VkDeviceSize GetSize() const { return m_Size; }
    5081  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    5082  void* GetUserData() const { return m_pUserData; }
    5083  void SetUserData(VmaAllocator hAllocator, void* pUserData);
    5084  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
    5085 
    5086  VmaDeviceMemoryBlock* GetBlock() const
    5087  {
    5088  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    5089  return m_BlockAllocation.m_Block;
    5090  }
    5091  VkDeviceSize GetOffset() const;
    5092  VkDeviceMemory GetMemory() const;
    5093  uint32_t GetMemoryTypeIndex() const;
    5094  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    5095  void* GetMappedData() const;
    5096  bool CanBecomeLost() const;
    5097 
    5098  uint32_t GetLastUseFrameIndex() const
    5099  {
    5100  return m_LastUseFrameIndex.load();
    5101  }
    5102  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    5103  {
    5104  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    5105  }
    5106  /*
    5107  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
    5108  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    5109  - Else, returns false.
    5110 
5111  If hAllocation is already lost, asserts - you should not call this function then.
5112  If hAllocation was not created with CAN_BECOME_LOST_BIT, asserts.
    5113  */
    5114  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    5115 
    5116  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    5117  {
    5118  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
    5119  outInfo.blockCount = 1;
    5120  outInfo.allocationCount = 1;
    5121  outInfo.unusedRangeCount = 0;
    5122  outInfo.usedBytes = m_Size;
    5123  outInfo.unusedBytes = 0;
    5124  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
    5125  outInfo.unusedRangeSizeMin = UINT64_MAX;
    5126  outInfo.unusedRangeSizeMax = 0;
    5127  }
    5128 
    5129  void BlockAllocMap();
    5130  void BlockAllocUnmap();
    5131  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    5132  void DedicatedAllocUnmap(VmaAllocator hAllocator);
    5133 
    5134 #if VMA_STATS_STRING_ENABLED
    5135  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    5136  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
    5137 
    5138  void InitBufferImageUsage(uint32_t bufferImageUsage)
    5139  {
    5140  VMA_ASSERT(m_BufferImageUsage == 0);
    5141  m_BufferImageUsage = bufferImageUsage;
    5142  }
    5143 
    5144  void PrintParameters(class VmaJsonWriter& json) const;
    5145 #endif
    5146 
    5147 private:
    5148  VkDeviceSize m_Alignment;
    5149  VkDeviceSize m_Size;
    5150  void* m_pUserData;
    5151  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    5152  uint8_t m_Type; // ALLOCATION_TYPE
    5153  uint8_t m_SuballocationType; // VmaSuballocationType
    5154  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    5155  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    5156  uint8_t m_MapCount;
    5157  uint8_t m_Flags; // enum FLAGS
    5158 
    5159  // Allocation out of VmaDeviceMemoryBlock.
    5160  struct BlockAllocation
    5161  {
    5162  VmaDeviceMemoryBlock* m_Block;
    5163  VkDeviceSize m_Offset;
    5164  bool m_CanBecomeLost;
    5165  };
    5166 
    5167  // Allocation for an object that has its own private VkDeviceMemory.
    5168  struct DedicatedAllocation
    5169  {
    5170  uint32_t m_MemoryTypeIndex;
    5171  VkDeviceMemory m_hMemory;
    5172  void* m_pMappedData; // Not null means memory is mapped.
    5173  };
    5174 
    5175  union
    5176  {
    5177  // Allocation out of VmaDeviceMemoryBlock.
    5178  BlockAllocation m_BlockAllocation;
    5179  // Allocation for an object that has its own private VkDeviceMemory.
    5180  DedicatedAllocation m_DedicatedAllocation;
    5181  };
    5182 
    5183 #if VMA_STATS_STRING_ENABLED
    5184  uint32_t m_CreationFrameIndex;
    5185  uint32_t m_BufferImageUsage; // 0 if unknown.
    5186 #endif
    5187 
    5188  void FreeUserDataString(VmaAllocator hAllocator);
    5189 };
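
#if 0
// Illustrative sketch, not part of the original header, of the m_MapCount
// packing described above: bit 0x80 marks an allocation created with
// VMA_ALLOCATION_CREATE_MAPPED_BIT, while the low 7 bits count outstanding
// vmaMapMemory() calls.
static bool ExampleIsCurrentlyMapped(uint8_t mapCount)
{
    const bool persistentlyMapped = (mapCount & 0x80) != 0;
    const uint8_t mapRefCount = mapCount & 0x7F;
    return persistentlyMapped || mapRefCount > 0;
}
#endif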
    5190 
    5191 /*
5192 Represents a region of a VmaDeviceMemoryBlock that is either assigned to an
5193 allocation and returned as an allocated memory block, or free.
    5194 */
    5195 struct VmaSuballocation
    5196 {
    5197  VkDeviceSize offset;
    5198  VkDeviceSize size;
    5199  VmaAllocation hAllocation;
    5200  VmaSuballocationType type;
    5201 };
    5202 
    5203 // Comparator for offsets.
    5204 struct VmaSuballocationOffsetLess
    5205 {
    5206  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    5207  {
    5208  return lhs.offset < rhs.offset;
    5209  }
    5210 };
    5211 struct VmaSuballocationOffsetGreater
    5212 {
    5213  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    5214  {
    5215  return lhs.offset > rhs.offset;
    5216  }
    5217 };
    5218 
    5219 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
    5220 
5221 // Cost of making one additional allocation lost, expressed as an equivalent in bytes.
    5222 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    5223 
    5224 enum class VmaAllocationRequestType
    5225 {
    5226  Normal,
    5227  // Used by "Linear" algorithm.
    5228  UpperAddress,
    5229  EndOf1st,
    5230  EndOf2nd,
    5231 };
    5232 
    5233 /*
    5234 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
    5235 
    5236 If canMakeOtherLost was false:
    5237 - item points to a FREE suballocation.
    5238 - itemsToMakeLostCount is 0.
    5239 
    5240 If canMakeOtherLost was true:
5241 - item points to the first of a sequence of suballocations, each of which is
5242  either FREE or points to a VmaAllocation that can become lost.
    5243 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
    5244  the requested allocation to succeed.
    5245 */
    5246 struct VmaAllocationRequest
    5247 {
    5248  VkDeviceSize offset;
    5249  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    5250  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    5251  VmaSuballocationList::iterator item;
    5252  size_t itemsToMakeLostCount;
    5253  void* customData;
    5254  VmaAllocationRequestType type;
    5255 
    5256  VkDeviceSize CalcCost() const
    5257  {
    5258  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    5259  }
    5260 };
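
#if 0
// Illustrative sketch with hypothetical numbers, not part of the original
// header: CalcCost() reduces competing allocation requests to one number.
// A request that reuses 1000 bytes of to-be-lost allocations while making
// 2 of them lost costs 1000 + 2 * VMA_LOST_ALLOCATION_COST = 2098152,
// so an alternative that uses only free space (cost 0) is preferred.
static void ExampleCalcCost()
{
    VmaAllocationRequest request = VmaAllocationRequest();
    request.sumItemSize = 1000;
    request.itemsToMakeLostCount = 2;
    VMA_ASSERT(request.CalcCost() == 1000 + 2 * VMA_LOST_ALLOCATION_COST);
}
#endif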
    5261 
    5262 /*
    5263 Data structure used for bookkeeping of allocations and unused ranges of memory
    5264 in a single VkDeviceMemory block.
    5265 */
    5266 class VmaBlockMetadata
    5267 {
    5268 public:
    5269  VmaBlockMetadata(VmaAllocator hAllocator);
    5270  virtual ~VmaBlockMetadata() { }
    5271  virtual void Init(VkDeviceSize size) { m_Size = size; }
    5272 
    5273  // Validates all data structures inside this object. If not valid, returns false.
    5274  virtual bool Validate() const = 0;
    5275  VkDeviceSize GetSize() const { return m_Size; }
    5276  virtual size_t GetAllocationCount() const = 0;
    5277  virtual VkDeviceSize GetSumFreeSize() const = 0;
    5278  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
5279  // Returns true if this block is empty - contains only a single free suballocation.
    5280  virtual bool IsEmpty() const = 0;
    5281 
    5282  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
5283  // Shouldn't modify inoutStats.blockCount.
    5284  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
    5285 
    5286 #if VMA_STATS_STRING_ENABLED
    5287  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
    5288 #endif
    5289 
    5290  // Tries to find a place for suballocation with given parameters inside this block.
    5291  // If succeeded, fills pAllocationRequest and returns true.
    5292  // If failed, returns false.
    5293  virtual bool CreateAllocationRequest(
    5294  uint32_t currentFrameIndex,
    5295  uint32_t frameInUseCount,
    5296  VkDeviceSize bufferImageGranularity,
    5297  VkDeviceSize allocSize,
    5298  VkDeviceSize allocAlignment,
    5299  bool upperAddress,
    5300  VmaSuballocationType allocType,
    5301  bool canMakeOtherLost,
    5302  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
    5303  uint32_t strategy,
    5304  VmaAllocationRequest* pAllocationRequest) = 0;
    5305 
    5306  virtual bool MakeRequestedAllocationsLost(
    5307  uint32_t currentFrameIndex,
    5308  uint32_t frameInUseCount,
    5309  VmaAllocationRequest* pAllocationRequest) = 0;
    5310 
    5311  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
    5312 
    5313  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
    5314 
    5315  // Makes actual allocation based on request. Request must already be checked and valid.
    5316  virtual void Alloc(
    5317  const VmaAllocationRequest& request,
    5318  VmaSuballocationType type,
    5319  VkDeviceSize allocSize,
    5320  VmaAllocation hAllocation) = 0;
    5321 
    5322  // Frees suballocation assigned to given memory region.
    5323  virtual void Free(const VmaAllocation allocation) = 0;
    5324  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
    5325 
    5326 protected:
    5327  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
    5328 
    5329 #if VMA_STATS_STRING_ENABLED
    5330  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
    5331  VkDeviceSize unusedBytes,
    5332  size_t allocationCount,
    5333  size_t unusedRangeCount) const;
    5334  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    5335  VkDeviceSize offset,
    5336  VmaAllocation hAllocation) const;
    5337  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    5338  VkDeviceSize offset,
    5339  VkDeviceSize size) const;
    5340  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
    5341 #endif
    5342 
    5343 private:
    5344  VkDeviceSize m_Size;
    5345  const VkAllocationCallbacks* m_pAllocationCallbacks;
    5346 };
    5347 
    5348 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
    5349  VMA_ASSERT(0 && "Validation failed: " #cond); \
    5350  return false; \
    5351  } } while(false)
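
#if 0
// Illustrative sketch, not part of the original header: VMA_VALIDATE asserts
// in debug configurations and makes the enclosing function return false, so
// each invariant of a Validate() implementation stays a single line.
static bool ExampleValidate(VkDeviceSize offset, VkDeviceSize size, VkDeviceSize blockSize)
{
    VMA_VALIDATE(size > 0);
    VMA_VALIDATE(offset + size <= blockSize);
    return true; // All invariants held.
}
#endif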
    5352 
    5353 class VmaBlockMetadata_Generic : public VmaBlockMetadata
    5354 {
    5355  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
    5356 public:
    5357  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    5358  virtual ~VmaBlockMetadata_Generic();
    5359  virtual void Init(VkDeviceSize size);
    5360 
    5361  virtual bool Validate() const;
    5362  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    5363  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    5364  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    5365  virtual bool IsEmpty() const;
    5366 
    5367  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    5368  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
    5369 
    5370 #if VMA_STATS_STRING_ENABLED
    5371  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
    5372 #endif
    5373 
    5374  virtual bool CreateAllocationRequest(
    5375  uint32_t currentFrameIndex,
    5376  uint32_t frameInUseCount,
    5377  VkDeviceSize bufferImageGranularity,
    5378  VkDeviceSize allocSize,
    5379  VkDeviceSize allocAlignment,
    5380  bool upperAddress,
    5381  VmaSuballocationType allocType,
    5382  bool canMakeOtherLost,
    5383  uint32_t strategy,
    5384  VmaAllocationRequest* pAllocationRequest);
    5385 
    5386  virtual bool MakeRequestedAllocationsLost(
    5387  uint32_t currentFrameIndex,
    5388  uint32_t frameInUseCount,
    5389  VmaAllocationRequest* pAllocationRequest);
    5390 
    5391  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    5392 
    5393  virtual VkResult CheckCorruption(const void* pBlockData);
    5394 
    5395  virtual void Alloc(
    5396  const VmaAllocationRequest& request,
    5397  VmaSuballocationType type,
    5398  VkDeviceSize allocSize,
    5399  VmaAllocation hAllocation);
    5400 
    5401  virtual void Free(const VmaAllocation allocation);
    5402  virtual void FreeAtOffset(VkDeviceSize offset);
    5403 
    5405  // For defragmentation
    5406 
    5407  bool IsBufferImageGranularityConflictPossible(
    5408  VkDeviceSize bufferImageGranularity,
    5409  VmaSuballocationType& inOutPrevSuballocType) const;
    5410 
    5411 private:
    5412  friend class VmaDefragmentationAlgorithm_Generic;
    5413  friend class VmaDefragmentationAlgorithm_Fast;
    5414 
    5415  uint32_t m_FreeCount;
    5416  VkDeviceSize m_SumFreeSize;
    5417  VmaSuballocationList m_Suballocations;
    5418  // Suballocations that are free and have size greater than certain threshold.
    5419  // Sorted by size, ascending.
    5420  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
    5421 
    5422  bool ValidateFreeSuballocationList() const;
    5423 
    5424  // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    5425  // If yes, fills pOffset and returns true. If no, returns false.
    5426  bool CheckAllocation(
    5427  uint32_t currentFrameIndex,
    5428  uint32_t frameInUseCount,
    5429  VkDeviceSize bufferImageGranularity,
    5430  VkDeviceSize allocSize,
    5431  VkDeviceSize allocAlignment,
    5432  VmaSuballocationType allocType,
    5433  VmaSuballocationList::const_iterator suballocItem,
    5434  bool canMakeOtherLost,
    5435  VkDeviceSize* pOffset,
    5436  size_t* itemsToMakeLostCount,
    5437  VkDeviceSize* pSumFreeSize,
    5438  VkDeviceSize* pSumItemSize) const;
5439  // Given a free suballocation, merges it with the following one, which must also be free.
5440  void MergeFreeWithNext(VmaSuballocationList::iterator item);
5441  // Releases the given suballocation, making it free.
5442  // Merges it with adjacent free suballocations if applicable.
5443  // Returns an iterator to the new free suballocation at this place.
5444  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
5445  // Given a free suballocation, inserts it into the sorted list
5446  // m_FreeSuballocationsBySize if it is large enough to be registered there.
5447  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
5448  // Given a free suballocation, removes it from the sorted list
5449  // m_FreeSuballocationsBySize if it is large enough to have been registered there.
5450  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
    5451 };
    5452 
    5453 /*
    5454 Allocations and their references in internal data structure look like this:
    5455 
    5456 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
    5457 
    5458  0 +-------+
    5459  | |
    5460  | |
    5461  | |
    5462  +-------+
    5463  | Alloc | 1st[m_1stNullItemsBeginCount]
    5464  +-------+
    5465  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    5466  +-------+
    5467  | ... |
    5468  +-------+
    5469  | Alloc | 1st[1st.size() - 1]
    5470  +-------+
    5471  | |
    5472  | |
    5473  | |
    5474 GetSize() +-------+
    5475 
    5476 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
    5477 
    5478  0 +-------+
    5479  | Alloc | 2nd[0]
    5480  +-------+
    5481  | Alloc | 2nd[1]
    5482  +-------+
    5483  | ... |
    5484  +-------+
    5485  | Alloc | 2nd[2nd.size() - 1]
    5486  +-------+
    5487  | |
    5488  | |
    5489  | |
    5490  +-------+
    5491  | Alloc | 1st[m_1stNullItemsBeginCount]
    5492  +-------+
    5493  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    5494  +-------+
    5495  | ... |
    5496  +-------+
    5497  | Alloc | 1st[1st.size() - 1]
    5498  +-------+
    5499  | |
    5500 GetSize() +-------+
    5501 
    5502 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
    5503 
    5504  0 +-------+
    5505  | |
    5506  | |
    5507  | |
    5508  +-------+
    5509  | Alloc | 1st[m_1stNullItemsBeginCount]
    5510  +-------+
    5511  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    5512  +-------+
    5513  | ... |
    5514  +-------+
    5515  | Alloc | 1st[1st.size() - 1]
    5516  +-------+
    5517  | |
    5518  | |
    5519  | |
    5520  +-------+
    5521  | Alloc | 2nd[2nd.size() - 1]
    5522  +-------+
    5523  | ... |
    5524  +-------+
    5525  | Alloc | 2nd[1]
    5526  +-------+
    5527  | Alloc | 2nd[0]
    5528 GetSize() +-------+
    5529 
    5530 */
    5531 class VmaBlockMetadata_Linear : public VmaBlockMetadata
    5532 {
    5533  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
    5534 public:
    5535  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    5536  virtual ~VmaBlockMetadata_Linear();
    5537  virtual void Init(VkDeviceSize size);
    5538 
    5539  virtual bool Validate() const;
    5540  virtual size_t GetAllocationCount() const;
    5541  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    5542  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    5543  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
    5544 
    5545  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    5546  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
    5547 
    5548 #if VMA_STATS_STRING_ENABLED
    5549  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
    5550 #endif
    5551 
    5552  virtual bool CreateAllocationRequest(
    5553  uint32_t currentFrameIndex,
    5554  uint32_t frameInUseCount,
    5555  VkDeviceSize bufferImageGranularity,
    5556  VkDeviceSize allocSize,
    5557  VkDeviceSize allocAlignment,
    5558  bool upperAddress,
    5559  VmaSuballocationType allocType,
    5560  bool canMakeOtherLost,
    5561  uint32_t strategy,
    5562  VmaAllocationRequest* pAllocationRequest);
    5563 
    5564  virtual bool MakeRequestedAllocationsLost(
    5565  uint32_t currentFrameIndex,
    5566  uint32_t frameInUseCount,
    5567  VmaAllocationRequest* pAllocationRequest);
    5568 
    5569  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    5570 
    5571  virtual VkResult CheckCorruption(const void* pBlockData);
    5572 
    5573  virtual void Alloc(
    5574  const VmaAllocationRequest& request,
    5575  VmaSuballocationType type,
    5576  VkDeviceSize allocSize,
    5577  VmaAllocation hAllocation);
    5578 
    5579  virtual void Free(const VmaAllocation allocation);
    5580  virtual void FreeAtOffset(VkDeviceSize offset);
    5581 
    5582 private:
    5583  /*
    5584  There are two suballocation vectors, used in ping-pong way.
    5585  The one with index m_1stVectorIndex is called 1st.
    5586  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    5587  2nd can be non-empty only when 1st is not empty.
    5588  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    5589  */
    5590  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
    5591 
    5592  enum SECOND_VECTOR_MODE
    5593  {
    5594  SECOND_VECTOR_EMPTY,
    5595  /*
    5596  Suballocations in 2nd vector are created later than the ones in 1st, but they
    5597  all have smaller offset.
    5598  */
    5599  SECOND_VECTOR_RING_BUFFER,
    5600  /*
    5601  Suballocations in 2nd vector are upper side of double stack.
    5602  They all have offsets higher than those in 1st vector.
    5603  Top of this stack means smaller offsets, but higher indices in this vector.
    5604  */
    5605  SECOND_VECTOR_DOUBLE_STACK,
    5606  };
    5607 
    5608  VkDeviceSize m_SumFreeSize;
    5609  SuballocationVectorType m_Suballocations0, m_Suballocations1;
    5610  uint32_t m_1stVectorIndex;
    5611  SECOND_VECTOR_MODE m_2ndVectorMode;
    5612 
    5613  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    5614  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    5615  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    5616  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    5617 
    5618  // Number of items in 1st vector with hAllocation = null at the beginning.
    5619  size_t m_1stNullItemsBeginCount;
    5620  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    5621  size_t m_1stNullItemsMiddleCount;
    5622  // Number of items in 2nd vector with hAllocation = null.
    5623  size_t m_2ndNullItemsCount;
    5624 
    5625  bool ShouldCompact1st() const;
    5626  void CleanupAfterFree();
    5627 
    5628  bool CreateAllocationRequest_LowerAddress(
    5629  uint32_t currentFrameIndex,
    5630  uint32_t frameInUseCount,
    5631  VkDeviceSize bufferImageGranularity,
    5632  VkDeviceSize allocSize,
    5633  VkDeviceSize allocAlignment,
    5634  VmaSuballocationType allocType,
    5635  bool canMakeOtherLost,
    5636  uint32_t strategy,
    5637  VmaAllocationRequest* pAllocationRequest);
    5638  bool CreateAllocationRequest_UpperAddress(
    5639  uint32_t currentFrameIndex,
    5640  uint32_t frameInUseCount,
    5641  VkDeviceSize bufferImageGranularity,
    5642  VkDeviceSize allocSize,
    5643  VkDeviceSize allocAlignment,
    5644  VmaSuballocationType allocType,
    5645  bool canMakeOtherLost,
    5646  uint32_t strategy,
    5647  VmaAllocationRequest* pAllocationRequest);
    5648 };
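/*
Illustrative sketch (not part of the library): a possible lifetime of the two
vectors when this metadata is used as a ring buffer. Member names are real;
the scenario itself is hypothetical.

    // 1. Allocations are appended to 1st at increasing offsets.
    // 2. Frees at the front only grow m_1stNullItemsBeginCount.
    // 3. A request that no longer fits above 1st wraps to low offsets and is
    //    pushed to 2nd; m_2ndVectorMode becomes SECOND_VECTOR_RING_BUFFER.
    // 4. Once every item of 1st is freed, the vectors swap roles
    //    (m_1stVectorIndex ^= 1) and the old 2nd continues as the new 1st.
*/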
    5649 
    5650 /*
    5651 - GetSize() is the original size of allocated memory block.
    5652 - m_UsableSize is this size aligned down to a power of two.
    5653  All allocations and calculations happen relative to m_UsableSize.
    5654 - GetUnusableSize() is the difference between them.
     5655  It is reported as separate, unused range, not available for allocations.
    5656 
    5657 Node at level 0 has size = m_UsableSize.
    5658 Each next level contains nodes with size 2 times smaller than current level.
    5659 m_LevelCount is the maximum number of levels to use in the current object.
    5660 */
    5661 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
    5662 {
    5663  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
    5664 public:
    5665  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    5666  virtual ~VmaBlockMetadata_Buddy();
    5667  virtual void Init(VkDeviceSize size);
    5668 
    5669  virtual bool Validate() const;
    5670  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
    5671  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    5672  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    5673  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
    5674 
    5675  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    5676  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
    5677 
    5678 #if VMA_STATS_STRING_ENABLED
    5679  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
    5680 #endif
    5681 
    5682  virtual bool CreateAllocationRequest(
    5683  uint32_t currentFrameIndex,
    5684  uint32_t frameInUseCount,
    5685  VkDeviceSize bufferImageGranularity,
    5686  VkDeviceSize allocSize,
    5687  VkDeviceSize allocAlignment,
    5688  bool upperAddress,
    5689  VmaSuballocationType allocType,
    5690  bool canMakeOtherLost,
    5691  uint32_t strategy,
    5692  VmaAllocationRequest* pAllocationRequest);
    5693 
    5694  virtual bool MakeRequestedAllocationsLost(
    5695  uint32_t currentFrameIndex,
    5696  uint32_t frameInUseCount,
    5697  VmaAllocationRequest* pAllocationRequest);
    5698 
    5699  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    5700 
    5701  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
    5702 
    5703  virtual void Alloc(
    5704  const VmaAllocationRequest& request,
    5705  VmaSuballocationType type,
    5706  VkDeviceSize allocSize,
    5707  VmaAllocation hAllocation);
    5708 
    5709  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    5710  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
    5711 
    5712 private:
    5713  static const VkDeviceSize MIN_NODE_SIZE = 32;
    5714  static const size_t MAX_LEVELS = 30;
    5715 
    5716  struct ValidationContext
    5717  {
    5718  size_t calculatedAllocationCount;
    5719  size_t calculatedFreeCount;
    5720  VkDeviceSize calculatedSumFreeSize;
    5721 
    5722  ValidationContext() :
    5723  calculatedAllocationCount(0),
    5724  calculatedFreeCount(0),
    5725  calculatedSumFreeSize(0) { }
    5726  };
    5727 
    5728  struct Node
    5729  {
    5730  VkDeviceSize offset;
    5731  enum TYPE
    5732  {
    5733  TYPE_FREE,
    5734  TYPE_ALLOCATION,
    5735  TYPE_SPLIT,
    5736  TYPE_COUNT
    5737  } type;
    5738  Node* parent;
    5739  Node* buddy;
    5740 
    5741  union
    5742  {
    5743  struct
    5744  {
    5745  Node* prev;
    5746  Node* next;
    5747  } free;
    5748  struct
    5749  {
    5750  VmaAllocation alloc;
    5751  } allocation;
    5752  struct
    5753  {
    5754  Node* leftChild;
    5755  } split;
    5756  };
    5757  };
    5758 
    5759  // Size of the memory block aligned down to a power of two.
    5760  VkDeviceSize m_UsableSize;
    5761  uint32_t m_LevelCount;
    5762 
    5763  Node* m_Root;
    5764  struct {
    5765  Node* front;
    5766  Node* back;
    5767  } m_FreeList[MAX_LEVELS];
    5768  // Number of nodes in the tree with type == TYPE_ALLOCATION.
    5769  size_t m_AllocationCount;
    5770  // Number of nodes in the tree with type == TYPE_FREE.
    5771  size_t m_FreeCount;
    5772  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
    5773  VkDeviceSize m_SumFreeSize;
    5774 
    5775  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    5776  void DeleteNode(Node* node);
    5777  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    5778  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    5779  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    5780  // Alloc passed just for validation. Can be null.
    5781  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    5782  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    5783  // Adds node to the front of FreeList at given level.
    5784  // node->type must be FREE.
    5785  // node->free.prev, next can be undefined.
    5786  void AddToFreeListFront(uint32_t level, Node* node);
    5787  // Removes node from FreeList at given level.
    5788  // node->type must be FREE.
    5789  // node->free.prev, next stay untouched.
    5790  void RemoveFromFreeList(uint32_t level, Node* node);
    5791 
    5792 #if VMA_STATS_STRING_ENABLED
    5793  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
    5794 #endif
    5795 };
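/*
Worked example (illustrative numbers): for a block of 1000 bytes,
m_UsableSize = 512 (aligned down to a power of two), so GetUnusableSize()
returns 488, reported as a permanently unused range. With MIN_NODE_SIZE = 32:

    LevelToNodeSize(0) = 512   LevelToNodeSize(2) = 128   LevelToNodeSize(4) = 32
    LevelToNodeSize(1) = 256   LevelToNodeSize(3) = 64

A 100-byte request maps to the deepest level whose node still fits it - level 2
(node size 128) - wasting 28 bytes to internal fragmentation, which stays
counted in m_SumFreeSize.
*/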
    5796 
    5797 /*
    5798 Represents a single block of device memory (`VkDeviceMemory`) with all the
    5799 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
    5800 
    5801 Thread-safety: This class must be externally synchronized.
    5802 */
    5803 class VmaDeviceMemoryBlock
    5804 {
    5805  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
    5806 public:
    5807  VmaBlockMetadata* m_pMetadata;
    5808 
    5809  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
    5810 
    5811  ~VmaDeviceMemoryBlock()
    5812  {
    5813  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
    5814  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    5815  }
    5816 
    5817  // Always call after construction.
    5818  void Init(
    5819  VmaAllocator hAllocator,
    5820  VmaPool hParentPool,
    5821  uint32_t newMemoryTypeIndex,
    5822  VkDeviceMemory newMemory,
    5823  VkDeviceSize newSize,
    5824  uint32_t id,
    5825  uint32_t algorithm);
    5826  // Always call before destruction.
    5827  void Destroy(VmaAllocator allocator);
    5828 
    5829  VmaPool GetParentPool() const { return m_hParentPool; }
    5830  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    5831  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    5832  uint32_t GetId() const { return m_Id; }
    5833  void* GetMappedData() const { return m_pMappedData; }
    5834 
    5835  // Validates all data structures inside this object. If not valid, returns false.
    5836  bool Validate() const;
    5837 
    5838  VkResult CheckCorruption(VmaAllocator hAllocator);
    5839 
    5840  // ppData can be null.
    5841  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    5842  void Unmap(VmaAllocator hAllocator, uint32_t count);
    5843 
    5844  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    5845  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    5846 
    5847  VkResult BindBufferMemory(
    5848  const VmaAllocator hAllocator,
    5849  const VmaAllocation hAllocation,
    5850  VkBuffer hBuffer);
    5851  VkResult BindImageMemory(
    5852  const VmaAllocator hAllocator,
    5853  const VmaAllocation hAllocation,
    5854  VkImage hImage);
    5855 
    5856 private:
     5857  VmaPool m_hParentPool; // VK_NULL_HANDLE if this block doesn't belong to a custom pool.
    5858  uint32_t m_MemoryTypeIndex;
    5859  uint32_t m_Id;
    5860  VkDeviceMemory m_hMemory;
    5861 
    5862  /*
    5863  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    5864  Also protects m_MapCount, m_pMappedData.
    5865  Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex.
    5866  */
    5867  VMA_MUTEX m_Mutex;
    5868  uint32_t m_MapCount;
    5869  void* m_pMappedData;
    5870 };
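/*
A minimal usage sketch (assumed, for illustration only): Map()/Unmap() are
reference counted through m_MapCount, so nested mappings are legal as long as
they are balanced.

    void* pData = VMA_NULL;
    block.Map(hAllocator, 1, &pData);   // m_MapCount 0 -> 1: calls vkMapMemory.
    block.Map(hAllocator, 1, VMA_NULL); // m_MapCount 1 -> 2: reuses the mapping.
    block.Unmap(hAllocator, 1);         // m_MapCount 2 -> 1: still mapped.
    block.Unmap(hAllocator, 1);         // m_MapCount 1 -> 0: calls vkUnmapMemory.
*/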
    5871 
    5872 struct VmaPointerLess
    5873 {
    5874  bool operator()(const void* lhs, const void* rhs) const
    5875  {
    5876  return lhs < rhs;
    5877  }
    5878 };
    5879 
    5880 struct VmaDefragmentationMove
    5881 {
    5882  size_t srcBlockIndex;
    5883  size_t dstBlockIndex;
    5884  VkDeviceSize srcOffset;
    5885  VkDeviceSize dstOffset;
    5886  VkDeviceSize size;
    5887 };
    5888 
    5889 class VmaDefragmentationAlgorithm;
    5890 
    5891 /*
    5892 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
    5893 Vulkan memory type.
    5894 
    5895 Synchronized internally with a mutex.
    5896 */
    5897 struct VmaBlockVector
    5898 {
    5899  VMA_CLASS_NO_COPY(VmaBlockVector)
    5900 public:
    5901  VmaBlockVector(
    5902  VmaAllocator hAllocator,
    5903  VmaPool hParentPool,
    5904  uint32_t memoryTypeIndex,
    5905  VkDeviceSize preferredBlockSize,
    5906  size_t minBlockCount,
    5907  size_t maxBlockCount,
    5908  VkDeviceSize bufferImageGranularity,
    5909  uint32_t frameInUseCount,
    5910  bool isCustomPool,
    5911  bool explicitBlockSize,
    5912  uint32_t algorithm);
    5913  ~VmaBlockVector();
    5914 
    5915  VkResult CreateMinBlocks();
    5916 
    5917  VmaPool GetParentPool() const { return m_hParentPool; }
    5918  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    5919  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    5920  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    5921  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    5922  uint32_t GetAlgorithm() const { return m_Algorithm; }
    5923 
    5924  void GetPoolStats(VmaPoolStats* pStats);
    5925 
    5926  bool IsEmpty() const { return m_Blocks.empty(); }
    5927  bool IsCorruptionDetectionEnabled() const;
    5928 
    5929  VkResult Allocate(
    5930  uint32_t currentFrameIndex,
    5931  VkDeviceSize size,
    5932  VkDeviceSize alignment,
    5933  const VmaAllocationCreateInfo& createInfo,
    5934  VmaSuballocationType suballocType,
    5935  size_t allocationCount,
    5936  VmaAllocation* pAllocations);
    5937 
    5938  void Free(
    5939  VmaAllocation hAllocation);
    5940 
    5941  // Adds statistics of this BlockVector to pStats.
    5942  void AddStats(VmaStats* pStats);
    5943 
    5944 #if VMA_STATS_STRING_ENABLED
    5945  void PrintDetailedMap(class VmaJsonWriter& json);
    5946 #endif
    5947 
    5948  void MakePoolAllocationsLost(
    5949  uint32_t currentFrameIndex,
    5950  size_t* pLostAllocationCount);
    5951  VkResult CheckCorruption();
    5952 
    5953  // Saves results in pCtx->res.
    5954  void Defragment(
    5955  class VmaBlockVectorDefragmentationContext* pCtx,
    5956  VmaDefragmentationStats* pStats,
    5957  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
    5958  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
    5959  VkCommandBuffer commandBuffer);
    5960  void DefragmentationEnd(
    5961  class VmaBlockVectorDefragmentationContext* pCtx,
    5962  VmaDefragmentationStats* pStats);
    5963 
     5964  ////////////////////////////////////////////////////////////////////////////////
     5965  // To be used only while the m_Mutex is locked. Used during defragmentation.
    5966 
    5967  size_t GetBlockCount() const { return m_Blocks.size(); }
    5968  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
    5969  size_t CalcAllocationCount() const;
    5970  bool IsBufferImageGranularityConflictPossible() const;
    5971 
    5972 private:
    5973  friend class VmaDefragmentationAlgorithm_Generic;
    5974 
    5975  const VmaAllocator m_hAllocator;
    5976  const VmaPool m_hParentPool;
    5977  const uint32_t m_MemoryTypeIndex;
    5978  const VkDeviceSize m_PreferredBlockSize;
    5979  const size_t m_MinBlockCount;
    5980  const size_t m_MaxBlockCount;
    5981  const VkDeviceSize m_BufferImageGranularity;
    5982  const uint32_t m_FrameInUseCount;
    5983  const bool m_IsCustomPool;
    5984  const bool m_ExplicitBlockSize;
    5985  const uint32_t m_Algorithm;
     5986  /* There can be at most one memory block that is completely empty - a
     5987  hysteresis to avoid the pessimistic case of alternating creation and
     5988  destruction of a VkDeviceMemory. */
    5989  bool m_HasEmptyBlock;
    5990  VMA_RW_MUTEX m_Mutex;
    5991  // Incrementally sorted by sumFreeSize, ascending.
    5992  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    5993  uint32_t m_NextBlockId;
    5994 
    5995  VkDeviceSize CalcMaxBlockSize() const;
    5996 
    5997  // Finds and removes given block from vector.
    5998  void Remove(VmaDeviceMemoryBlock* pBlock);
    5999 
    6000  // Performs single step in sorting m_Blocks. They may not be fully sorted
    6001  // after this call.
    6002  void IncrementallySortBlocks();
    6003 
    6004  VkResult AllocatePage(
    6005  uint32_t currentFrameIndex,
    6006  VkDeviceSize size,
    6007  VkDeviceSize alignment,
    6008  const VmaAllocationCreateInfo& createInfo,
    6009  VmaSuballocationType suballocType,
    6010  VmaAllocation* pAllocation);
    6011 
    6012  // To be used only without CAN_MAKE_OTHER_LOST flag.
    6013  VkResult AllocateFromBlock(
    6014  VmaDeviceMemoryBlock* pBlock,
    6015  uint32_t currentFrameIndex,
    6016  VkDeviceSize size,
    6017  VkDeviceSize alignment,
    6018  VmaAllocationCreateFlags allocFlags,
    6019  void* pUserData,
    6020  VmaSuballocationType suballocType,
    6021  uint32_t strategy,
    6022  VmaAllocation* pAllocation);
    6023 
    6024  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
    6025 
    6026  // Saves result to pCtx->res.
    6027  void ApplyDefragmentationMovesCpu(
    6028  class VmaBlockVectorDefragmentationContext* pDefragCtx,
    6029  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
    6030  // Saves result to pCtx->res.
    6031  void ApplyDefragmentationMovesGpu(
    6032  class VmaBlockVectorDefragmentationContext* pDefragCtx,
    6033  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    6034  VkCommandBuffer commandBuffer);
    6035 
    6036  /*
    6037  Used during defragmentation. pDefragmentationStats is optional. It's in/out
    6038  - updated with new data.
    6039  */
    6040  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
    6041 };
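/*
Sketch of the "incremental" sort mentioned above (illustrative, assuming the
implementation follows this idea): at most one adjacent swap per call keeps
m_Blocks approximately sorted by sumFreeSize without paying for a full sort on
every allocation.

    for(size_t i = 1; i < m_Blocks.size(); ++i)
    {
        if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() >
            m_Blocks[i]->m_pMetadata->GetSumFreeSize())
        {
            VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
            return; // One swap per call - hence "incrementally".
        }
    }
*/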
    6042 
    6043 struct VmaPool_T
    6044 {
    6045  VMA_CLASS_NO_COPY(VmaPool_T)
    6046 public:
    6047  VmaBlockVector m_BlockVector;
    6048 
    6049  VmaPool_T(
    6050  VmaAllocator hAllocator,
    6051  const VmaPoolCreateInfo& createInfo,
    6052  VkDeviceSize preferredBlockSize);
    6053  ~VmaPool_T();
    6054 
    6055  uint32_t GetId() const { return m_Id; }
    6056  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
    6057 
    6058 #if VMA_STATS_STRING_ENABLED
    6059  //void PrintDetailedMap(class VmaStringBuilder& sb);
    6060 #endif
    6061 
    6062 private:
    6063  uint32_t m_Id;
    6064 };
    6065 
    6066 /*
    6067 Performs defragmentation:
    6068 
    6069 - Updates `pBlockVector->m_pMetadata`.
    6070 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
    6071 - Does not move actual data, only returns requested moves as `moves`.
    6072 */
    6073 class VmaDefragmentationAlgorithm
    6074 {
    6075  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
    6076 public:
    6077  VmaDefragmentationAlgorithm(
    6078  VmaAllocator hAllocator,
    6079  VmaBlockVector* pBlockVector,
    6080  uint32_t currentFrameIndex) :
    6081  m_hAllocator(hAllocator),
    6082  m_pBlockVector(pBlockVector),
    6083  m_CurrentFrameIndex(currentFrameIndex)
    6084  {
    6085  }
    6086  virtual ~VmaDefragmentationAlgorithm()
    6087  {
    6088  }
    6089 
    6090  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
    6091  virtual void AddAll() = 0;
    6092 
    6093  virtual VkResult Defragment(
    6094  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    6095  VkDeviceSize maxBytesToMove,
    6096  uint32_t maxAllocationsToMove) = 0;
    6097 
    6098  virtual VkDeviceSize GetBytesMoved() const = 0;
    6099  virtual uint32_t GetAllocationsMoved() const = 0;
    6100 
    6101 protected:
    6102  VmaAllocator const m_hAllocator;
    6103  VmaBlockVector* const m_pBlockVector;
    6104  const uint32_t m_CurrentFrameIndex;
    6105 
    6106  struct AllocationInfo
    6107  {
    6108  VmaAllocation m_hAllocation;
    6109  VkBool32* m_pChanged;
    6110 
    6111  AllocationInfo() :
    6112  m_hAllocation(VK_NULL_HANDLE),
    6113  m_pChanged(VMA_NULL)
    6114  {
    6115  }
    6116  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
    6117  m_hAllocation(hAlloc),
    6118  m_pChanged(pChanged)
    6119  {
    6120  }
    6121  };
    6122 };
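/*
Illustrative driver loop (hypothetical, matching the contract described above):
the algorithm only reports moves; the actual data transfer happens elsewhere.

    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >
        moves(VmaStlAllocator<VmaDefragmentationMove>(allocationCallbacks));
    VkResult res = pAlgorithm->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
    // Each VmaDefragmentationMove carries src/dst block index, offsets and size.
    // Copying is performed later by ApplyDefragmentationMovesCpu/Gpu().
*/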
    6123 
    6124 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
    6125 {
    6126  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
    6127 public:
    6128  VmaDefragmentationAlgorithm_Generic(
    6129  VmaAllocator hAllocator,
    6130  VmaBlockVector* pBlockVector,
    6131  uint32_t currentFrameIndex,
    6132  bool overlappingMoveSupported);
    6133  virtual ~VmaDefragmentationAlgorithm_Generic();
    6134 
    6135  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    6136  virtual void AddAll() { m_AllAllocations = true; }
    6137 
    6138  virtual VkResult Defragment(
    6139  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    6140  VkDeviceSize maxBytesToMove,
    6141  uint32_t maxAllocationsToMove);
    6142 
    6143  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    6144  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
    6145 
    6146 private:
    6147  uint32_t m_AllocationCount;
    6148  bool m_AllAllocations;
    6149 
    6150  VkDeviceSize m_BytesMoved;
    6151  uint32_t m_AllocationsMoved;
    6152 
    6153  struct AllocationInfoSizeGreater
    6154  {
    6155  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
    6156  {
    6157  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
    6158  }
    6159  };
    6160 
    6161  struct AllocationInfoOffsetGreater
    6162  {
    6163  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
    6164  {
    6165  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
    6166  }
    6167  };
    6168 
    6169  struct BlockInfo
    6170  {
    6171  size_t m_OriginalBlockIndex;
    6172  VmaDeviceMemoryBlock* m_pBlock;
    6173  bool m_HasNonMovableAllocations;
    6174  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
    6175 
    6176  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
    6177  m_OriginalBlockIndex(SIZE_MAX),
    6178  m_pBlock(VMA_NULL),
    6179  m_HasNonMovableAllocations(true),
    6180  m_Allocations(pAllocationCallbacks)
    6181  {
    6182  }
    6183 
    6184  void CalcHasNonMovableAllocations()
    6185  {
    6186  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
    6187  const size_t defragmentAllocCount = m_Allocations.size();
    6188  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
    6189  }
    6190 
    6191  void SortAllocationsBySizeDescending()
    6192  {
    6193  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
    6194  }
    6195 
    6196  void SortAllocationsByOffsetDescending()
    6197  {
    6198  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
    6199  }
    6200  };
    6201 
    6202  struct BlockPointerLess
    6203  {
    6204  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
    6205  {
    6206  return pLhsBlockInfo->m_pBlock < pRhsBlock;
    6207  }
    6208  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
    6209  {
    6210  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
    6211  }
    6212  };
    6213 
    6214  // 1. Blocks with some non-movable allocations go first.
    6215  // 2. Blocks with smaller sumFreeSize go first.
    6216  struct BlockInfoCompareMoveDestination
    6217  {
    6218  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
    6219  {
    6220  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
    6221  {
    6222  return true;
    6223  }
    6224  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
    6225  {
    6226  return false;
    6227  }
    6228  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
    6229  {
    6230  return true;
    6231  }
    6232  return false;
    6233  }
    6234  };
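/*
Example of the resulting order (illustrative): among candidate destination
blocks { A: has non-movable allocations, 100 B free } and { B: fully movable,
50 B free }, A sorts first by rule 1. Between two blocks equal on rule 1, the
one with less free space sorts first by rule 2, so moved allocations are
packed into the fullest viable block.
*/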
    6235 
    6236  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    6237  BlockInfoVector m_Blocks;
    6238 
    6239  VkResult DefragmentRound(
    6240  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    6241  VkDeviceSize maxBytesToMove,
    6242  uint32_t maxAllocationsToMove);
    6243 
    6244  size_t CalcBlocksWithNonMovableCount() const;
    6245 
    6246  static bool MoveMakesSense(
    6247  size_t dstBlockIndex, VkDeviceSize dstOffset,
    6248  size_t srcBlockIndex, VkDeviceSize srcOffset);
    6249 };
    6250 
    6251 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
    6252 {
    6253  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
    6254 public:
    6255  VmaDefragmentationAlgorithm_Fast(
    6256  VmaAllocator hAllocator,
    6257  VmaBlockVector* pBlockVector,
    6258  uint32_t currentFrameIndex,
    6259  bool overlappingMoveSupported);
    6260  virtual ~VmaDefragmentationAlgorithm_Fast();
    6261 
    6262  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
    6263  virtual void AddAll() { m_AllAllocations = true; }
    6264 
    6265  virtual VkResult Defragment(
    6266  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    6267  VkDeviceSize maxBytesToMove,
    6268  uint32_t maxAllocationsToMove);
    6269 
    6270  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    6271  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
    6272 
    6273 private:
    6274  struct BlockInfo
    6275  {
    6276  size_t origBlockIndex;
    6277  };
    6278 
    6279  class FreeSpaceDatabase
    6280  {
    6281  public:
    6282  FreeSpaceDatabase()
    6283  {
    6284  FreeSpace s = {};
    6285  s.blockInfoIndex = SIZE_MAX;
    6286  for(size_t i = 0; i < MAX_COUNT; ++i)
    6287  {
    6288  m_FreeSpaces[i] = s;
    6289  }
    6290  }
    6291 
    6292  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
    6293  {
    6294  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    6295  {
    6296  return;
    6297  }
    6298 
     6299  // Find the first unused slot or, failing that, the smallest registered space that is smaller than the new one.
    6300  size_t bestIndex = SIZE_MAX;
    6301  for(size_t i = 0; i < MAX_COUNT; ++i)
    6302  {
    6303  // Empty structure.
    6304  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
    6305  {
    6306  bestIndex = i;
    6307  break;
    6308  }
    6309  if(m_FreeSpaces[i].size < size &&
    6310  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
    6311  {
    6312  bestIndex = i;
    6313  }
    6314  }
    6315 
    6316  if(bestIndex != SIZE_MAX)
    6317  {
    6318  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
    6319  m_FreeSpaces[bestIndex].offset = offset;
    6320  m_FreeSpaces[bestIndex].size = size;
    6321  }
    6322  }
    6323 
    6324  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
    6325  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
    6326  {
    6327  size_t bestIndex = SIZE_MAX;
    6328  VkDeviceSize bestFreeSpaceAfter = 0;
    6329  for(size_t i = 0; i < MAX_COUNT; ++i)
    6330  {
    6331  // Structure is valid.
    6332  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
    6333  {
    6334  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
    6335  // Allocation fits into this structure.
    6336  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
    6337  {
    6338  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
    6339  (dstOffset + size);
    6340  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
    6341  {
    6342  bestIndex = i;
    6343  bestFreeSpaceAfter = freeSpaceAfter;
    6344  }
    6345  }
    6346  }
    6347  }
    6348 
    6349  if(bestIndex != SIZE_MAX)
    6350  {
    6351  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
    6352  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
    6353 
    6354  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    6355  {
    6356  // Leave this structure for remaining empty space.
    6357  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
    6358  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
    6359  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
    6360  }
    6361  else
    6362  {
    6363  // This structure becomes invalid.
    6364  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
    6365  }
    6366 
    6367  return true;
    6368  }
    6369 
    6370  return false;
    6371  }
    6372 
    6373  private:
    6374  static const size_t MAX_COUNT = 4;
    6375 
    6376  struct FreeSpace
    6377  {
    6378  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
    6379  VkDeviceSize offset;
    6380  VkDeviceSize size;
    6381  } m_FreeSpaces[MAX_COUNT];
    6382  };
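    /*
    Worked example for Fetch() (illustrative numbers): given a registered free
    space { offset = 70, size = 100 } and a request { alignment = 16, size = 80 }:

        dstOffset      = VmaAlignUp(70, 16)     = 80
        fits, because    80 + 80 <= 70 + 100
        freeSpaceAfter = (70 + 100) - (80 + 80) = 10

    Among all fitting candidates the one leaving the LARGEST freeSpaceAfter
    wins. A leftover smaller than VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER
    invalidates the slot instead of shrinking it.
    */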
    6383 
    6384  const bool m_OverlappingMoveSupported;
    6385 
    6386  uint32_t m_AllocationCount;
    6387  bool m_AllAllocations;
    6388 
    6389  VkDeviceSize m_BytesMoved;
    6390  uint32_t m_AllocationsMoved;
    6391 
    6392  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
    6393 
    6394  void PreprocessMetadata();
    6395  void PostprocessMetadata();
    6396  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
    6397 };
    6398 
    6399 struct VmaBlockDefragmentationContext
    6400 {
    6401  enum BLOCK_FLAG
    6402  {
    6403  BLOCK_FLAG_USED = 0x00000001,
    6404  };
    6405  uint32_t flags;
    6406  VkBuffer hBuffer;
    6407 };
    6408 
    6409 class VmaBlockVectorDefragmentationContext
    6410 {
    6411  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
    6412 public:
    6413  VkResult res;
    6414  bool mutexLocked;
    6415  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
    6416 
    6417  VmaBlockVectorDefragmentationContext(
    6418  VmaAllocator hAllocator,
    6419  VmaPool hCustomPool, // Optional.
    6420  VmaBlockVector* pBlockVector,
    6421  uint32_t currFrameIndex);
    6422  ~VmaBlockVectorDefragmentationContext();
    6423 
    6424  VmaPool GetCustomPool() const { return m_hCustomPool; }
    6425  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
    6426  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
    6427 
    6428  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    6429  void AddAll() { m_AllAllocations = true; }
    6430 
    6431  void Begin(bool overlappingMoveSupported);
    6432 
    6433 private:
    6434  const VmaAllocator m_hAllocator;
    6435  // Null if not from custom pool.
    6436  const VmaPool m_hCustomPool;
     6437  // Redundant, kept for convenience so it doesn't have to be fetched from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
    6438  VmaBlockVector* const m_pBlockVector;
    6439  const uint32_t m_CurrFrameIndex;
    6440  // Owner of this object.
    6441  VmaDefragmentationAlgorithm* m_pAlgorithm;
    6442 
    6443  struct AllocInfo
    6444  {
    6445  VmaAllocation hAlloc;
    6446  VkBool32* pChanged;
    6447  };
    6448  // Used between constructor and Begin.
    6449  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
    6450  bool m_AllAllocations;
    6451 };
    6452 
    6453 struct VmaDefragmentationContext_T
    6454 {
    6455 private:
    6456  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
    6457 public:
    6458  VmaDefragmentationContext_T(
    6459  VmaAllocator hAllocator,
    6460  uint32_t currFrameIndex,
    6461  uint32_t flags,
    6462  VmaDefragmentationStats* pStats);
    6463  ~VmaDefragmentationContext_T();
    6464 
    6465  void AddPools(uint32_t poolCount, VmaPool* pPools);
    6466  void AddAllocations(
    6467  uint32_t allocationCount,
    6468  VmaAllocation* pAllocations,
    6469  VkBool32* pAllocationsChanged);
    6470 
    6471  /*
    6472  Returns:
     6473  - `VK_SUCCESS` if succeeded and the object can be destroyed immediately.
     6474  - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
     6475  - Negative value if an error occurred and the object can be destroyed immediately.
    6476  */
    6477  VkResult Defragment(
    6478  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
    6479  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
    6480  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats);
    6481 
    6482 private:
    6483  const VmaAllocator m_hAllocator;
    6484  const uint32_t m_CurrFrameIndex;
    6485  const uint32_t m_Flags;
    6486  VmaDefragmentationStats* const m_pStats;
    6487  // Owner of these objects.
    6488  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
    6489  // Owner of these objects.
    6490  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
    6491 };
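/*
Assumed call flow (illustrative pseudo-code, following the return-value
contract of Defragment() documented above):

    VkResult res = ctx->Defragment(maxCpuBytes, maxCpuAllocs,
        maxGpuBytes, maxGpuAllocs, cmdBuf, pStats);
    res == VK_SUCCESS   -> destroy ctx immediately
    res == VK_NOT_READY -> keep ctx alive until vmaDefragmentationEnd()
    res < 0             -> error; destroy ctx immediately
*/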
    6492 
    6493 #if VMA_RECORDING_ENABLED
    6494 
    6495 class VmaRecorder
    6496 {
    6497 public:
    6498  VmaRecorder();
    6499  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
    6500  void WriteConfiguration(
    6501  const VkPhysicalDeviceProperties& devProps,
    6502  const VkPhysicalDeviceMemoryProperties& memProps,
    6503  bool dedicatedAllocationExtensionEnabled);
    6504  ~VmaRecorder();
    6505 
    6506  void RecordCreateAllocator(uint32_t frameIndex);
    6507  void RecordDestroyAllocator(uint32_t frameIndex);
    6508  void RecordCreatePool(uint32_t frameIndex,
    6509  const VmaPoolCreateInfo& createInfo,
    6510  VmaPool pool);
    6511  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
    6512  void RecordAllocateMemory(uint32_t frameIndex,
    6513  const VkMemoryRequirements& vkMemReq,
    6514  const VmaAllocationCreateInfo& createInfo,
    6515  VmaAllocation allocation);
    6516  void RecordAllocateMemoryPages(uint32_t frameIndex,
    6517  const VkMemoryRequirements& vkMemReq,
    6518  const VmaAllocationCreateInfo& createInfo,
    6519  uint64_t allocationCount,
    6520  const VmaAllocation* pAllocations);
    6521  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    6522  const VkMemoryRequirements& vkMemReq,
    6523  bool requiresDedicatedAllocation,
    6524  bool prefersDedicatedAllocation,
    6525  const VmaAllocationCreateInfo& createInfo,
    6526  VmaAllocation allocation);
    6527  void RecordAllocateMemoryForImage(uint32_t frameIndex,
    6528  const VkMemoryRequirements& vkMemReq,
    6529  bool requiresDedicatedAllocation,
    6530  bool prefersDedicatedAllocation,
    6531  const VmaAllocationCreateInfo& createInfo,
    6532  VmaAllocation allocation);
    6533  void RecordFreeMemory(uint32_t frameIndex,
    6534  VmaAllocation allocation);
    6535  void RecordFreeMemoryPages(uint32_t frameIndex,
    6536  uint64_t allocationCount,
    6537  const VmaAllocation* pAllocations);
    6538  void RecordSetAllocationUserData(uint32_t frameIndex,
    6539  VmaAllocation allocation,
    6540  const void* pUserData);
    6541  void RecordCreateLostAllocation(uint32_t frameIndex,
    6542  VmaAllocation allocation);
    6543  void RecordMapMemory(uint32_t frameIndex,
    6544  VmaAllocation allocation);
    6545  void RecordUnmapMemory(uint32_t frameIndex,
    6546  VmaAllocation allocation);
    6547  void RecordFlushAllocation(uint32_t frameIndex,
    6548  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    6549  void RecordInvalidateAllocation(uint32_t frameIndex,
    6550  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    6551  void RecordCreateBuffer(uint32_t frameIndex,
    6552  const VkBufferCreateInfo& bufCreateInfo,
    6553  const VmaAllocationCreateInfo& allocCreateInfo,
    6554  VmaAllocation allocation);
    6555  void RecordCreateImage(uint32_t frameIndex,
    6556  const VkImageCreateInfo& imageCreateInfo,
    6557  const VmaAllocationCreateInfo& allocCreateInfo,
    6558  VmaAllocation allocation);
    6559  void RecordDestroyBuffer(uint32_t frameIndex,
    6560  VmaAllocation allocation);
    6561  void RecordDestroyImage(uint32_t frameIndex,
    6562  VmaAllocation allocation);
    6563  void RecordTouchAllocation(uint32_t frameIndex,
    6564  VmaAllocation allocation);
    6565  void RecordGetAllocationInfo(uint32_t frameIndex,
    6566  VmaAllocation allocation);
    6567  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
    6568  VmaPool pool);
    6569  void RecordDefragmentationBegin(uint32_t frameIndex,
     6570  const VmaDefragmentationInfo2& info,
     6571  VmaDefragmentationContext ctx);
     6572  void RecordDefragmentationEnd(uint32_t frameIndex,
     6573  VmaDefragmentationContext ctx);
     6574 
    6575 private:
    6576  struct CallParams
    6577  {
    6578  uint32_t threadId;
    6579  double time;
    6580  };
    6581 
    6582  class UserDataString
    6583  {
    6584  public:
    6585  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
    6586  const char* GetString() const { return m_Str; }
    6587 
    6588  private:
    6589  char m_PtrStr[17];
    6590  const char* m_Str;
    6591  };
    6592 
    6593  bool m_UseMutex;
    6594  VmaRecordFlags m_Flags;
    6595  FILE* m_File;
    6596  VMA_MUTEX m_FileMutex;
    6597  int64_t m_Freq;
    6598  int64_t m_StartCounter;
    6599 
    6600  void GetBasicParams(CallParams& outParams);
    6601 
    6602  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
    6603  template<typename T>
    6604  void PrintPointerList(uint64_t count, const T* pItems)
    6605  {
    6606  if(count)
    6607  {
    6608  fprintf(m_File, "%p", pItems[0]);
    6609  for(uint64_t i = 1; i < count; ++i)
    6610  {
    6611  fprintf(m_File, " %p", pItems[i]);
    6612  }
    6613  }
    6614  }
    6615 
    6616  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
    6617  void Flush();
    6618 };
    6619 
    6620 #endif // #if VMA_RECORDING_ENABLED
    6621 
    6622 /*
    6623 Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
    6624 */
    6625 class VmaAllocationObjectAllocator
    6626 {
    6627  VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
    6628 public:
    6629  VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);
    6630 
    6631  VmaAllocation Allocate();
    6632  void Free(VmaAllocation hAlloc);
    6633 
    6634 private:
    6635  VMA_MUTEX m_Mutex;
    6636  VmaPoolAllocator<VmaAllocation_T> m_Allocator;
    6637 };
    6638 
    6639 // Main allocator object.
    6640 struct VmaAllocator_T
    6641 {
    6642  VMA_CLASS_NO_COPY(VmaAllocator_T)
    6643 public:
    6644  bool m_UseMutex;
    6645  bool m_UseKhrDedicatedAllocation;
    6646  VkDevice m_hDevice;
    6647  bool m_AllocationCallbacksSpecified;
    6648  VkAllocationCallbacks m_AllocationCallbacks;
    6649  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
    6650  VmaAllocationObjectAllocator m_AllocationObjectAllocator;
    6651 
     6652  // Number of bytes still free under the heap size limit, or VK_WHOLE_SIZE if that heap has no limit.
    6653  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    6654  VMA_MUTEX m_HeapSizeLimitMutex;
    6655 
    6656  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    6657  VkPhysicalDeviceMemoryProperties m_MemProps;
    6658 
    6659  // Default pools.
    6660  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
    6661 
    6662  // Each vector is sorted by memory (handle value).
    6663  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    6664  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    6665  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
    6666 
    6667  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    6668  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    6669  ~VmaAllocator_T();
    6670 
    6671  const VkAllocationCallbacks* GetAllocationCallbacks() const
    6672  {
    6673  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    6674  }
    6675  const VmaVulkanFunctions& GetVulkanFunctions() const
    6676  {
    6677  return m_VulkanFunctions;
    6678  }
    6679 
    6680  VkDeviceSize GetBufferImageGranularity() const
    6681  {
    6682  return VMA_MAX(
    6683  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
    6684  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    6685  }
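    /*
    Example (illustrative numbers): with the default
    VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY of 1 and a device reporting
    bufferImageGranularity = 1024, this returns 1024. Linear (buffer) and
    non-linear (optimal image) suballocations within one block must then not
    share any 1024-byte page.
    */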
    6686 
    6687  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    6688  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
    6689 
    6690  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    6691  {
    6692  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
    6693  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    6694  }
    6695  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    6696  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    6697  {
    6698  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
    6699  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    6700  }
    6701  // Minimum alignment for all allocations in specific memory type.
    6702  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    6703  {
    6704  return IsMemoryTypeNonCoherent(memTypeIndex) ?
    6705  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
    6706  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    6707  }
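    /*
    Example (illustrative numbers): a HOST_VISIBLE but not HOST_COHERENT memory
    type on a device with nonCoherentAtomSize = 64 and VMA_DEBUG_ALIGNMENT = 1
    yields a minimum alignment of 64, so ranges passed to
    vkFlushMappedMemoryRanges()/vkInvalidateMappedMemoryRanges() can be rounded
    to atom boundaries without touching a neighboring allocation. A coherent
    type keeps the plain VMA_DEBUG_ALIGNMENT.
    */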
    6708 
    6709  bool IsIntegratedGpu() const
    6710  {
    6711  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    6712  }
    6713 
    6714 #if VMA_RECORDING_ENABLED
    6715  VmaRecorder* GetRecorder() const { return m_pRecorder; }
    6716 #endif
    6717 
    6718  void GetBufferMemoryRequirements(
    6719  VkBuffer hBuffer,
    6720  VkMemoryRequirements& memReq,
    6721  bool& requiresDedicatedAllocation,
    6722  bool& prefersDedicatedAllocation) const;
    6723  void GetImageMemoryRequirements(
    6724  VkImage hImage,
    6725  VkMemoryRequirements& memReq,
    6726  bool& requiresDedicatedAllocation,
    6727  bool& prefersDedicatedAllocation) const;
    6728 
    6729  // Main allocation function.
    6730  VkResult AllocateMemory(
    6731  const VkMemoryRequirements& vkMemReq,
    6732  bool requiresDedicatedAllocation,
    6733  bool prefersDedicatedAllocation,
    6734  VkBuffer dedicatedBuffer,
    6735  VkImage dedicatedImage,
    6736  const VmaAllocationCreateInfo& createInfo,
    6737  VmaSuballocationType suballocType,
    6738  size_t allocationCount,
    6739  VmaAllocation* pAllocations);
    6740 
    6741  // Main deallocation function.
    6742  void FreeMemory(
    6743  size_t allocationCount,
    6744  const VmaAllocation* pAllocations);
    6745 
    6746  VkResult ResizeAllocation(
    6747  const VmaAllocation alloc,
    6748  VkDeviceSize newSize);
    6749 
    6750  void CalculateStats(VmaStats* pStats);
    6751 
    6752 #if VMA_STATS_STRING_ENABLED
    6753  void PrintDetailedMap(class VmaJsonWriter& json);
    6754 #endif
    6755 
    6756  VkResult DefragmentationBegin(
    6757  const VmaDefragmentationInfo2& info,
    6758  VmaDefragmentationStats* pStats,
    6759  VmaDefragmentationContext* pContext);
    6760  VkResult DefragmentationEnd(
    6761  VmaDefragmentationContext context);
    6762 
    6763  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    6764  bool TouchAllocation(VmaAllocation hAllocation);
    6765 
    6766  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    6767  void DestroyPool(VmaPool pool);
    6768  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
    6769 
    6770  void SetCurrentFrameIndex(uint32_t frameIndex);
    6771  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
    6772 
    6773  void MakePoolAllocationsLost(
    6774  VmaPool hPool,
    6775  size_t* pLostAllocationCount);
    6776  VkResult CheckPoolCorruption(VmaPool hPool);
    6777  VkResult CheckCorruption(uint32_t memoryTypeBits);
    6778 
    6779  void CreateLostAllocation(VmaAllocation* pAllocation);
    6780 
    6781  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    6782  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
    6783 
    6784  VkResult Map(VmaAllocation hAllocation, void** ppData);
    6785  void Unmap(VmaAllocation hAllocation);
    6786 
    6787  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
    6788  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
    6789 
    6790  void FlushOrInvalidateAllocation(
    6791  VmaAllocation hAllocation,
    6792  VkDeviceSize offset, VkDeviceSize size,
    6793  VMA_CACHE_OPERATION op);
    6794 
    6795  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
    6796 
    6797  /*
    6798  Returns bit mask of memory types that can support defragmentation on GPU as
    6799  they support creation of required buffer for copy operations.
    6800  */
    6801  uint32_t GetGpuDefragmentationMemoryTypeBits();
    6802 
    6803 private:
    6804  VkDeviceSize m_PreferredLargeHeapBlockSize;
    6805 
    6806  VkPhysicalDevice m_PhysicalDevice;
    6807  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
    6808  VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.
    6809 
    6810  VMA_RW_MUTEX m_PoolsMutex;
    6811  // Protected by m_PoolsMutex. Sorted by pointer value.
    6812  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    6813  uint32_t m_NextPoolId;
    6814 
    6815  VmaVulkanFunctions m_VulkanFunctions;
    6816 
    6817 #if VMA_RECORDING_ENABLED
    6818  VmaRecorder* m_pRecorder;
    6819 #endif
    6820 
    6821  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
    6822 
    6823  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
    6824 
    6825  VkResult AllocateMemoryOfType(
    6826  VkDeviceSize size,
    6827  VkDeviceSize alignment,
    6828  bool dedicatedAllocation,
    6829  VkBuffer dedicatedBuffer,
    6830  VkImage dedicatedImage,
    6831  const VmaAllocationCreateInfo& createInfo,
    6832  uint32_t memTypeIndex,
    6833  VmaSuballocationType suballocType,
    6834  size_t allocationCount,
    6835  VmaAllocation* pAllocations);
    6836 
    6837  // Helper function only to be used inside AllocateDedicatedMemory.
    6838  VkResult AllocateDedicatedMemoryPage(
    6839  VkDeviceSize size,
    6840  VmaSuballocationType suballocType,
    6841  uint32_t memTypeIndex,
    6842  const VkMemoryAllocateInfo& allocInfo,
    6843  bool map,
    6844  bool isUserDataString,
    6845  void* pUserData,
    6846  VmaAllocation* pAllocation);
    6847 
    6848  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
    6849  VkResult AllocateDedicatedMemory(
    6850  VkDeviceSize size,
    6851  VmaSuballocationType suballocType,
    6852  uint32_t memTypeIndex,
    6853  bool map,
    6854  bool isUserDataString,
    6855  void* pUserData,
    6856  VkBuffer dedicatedBuffer,
    6857  VkImage dedicatedImage,
    6858  size_t allocationCount,
    6859  VmaAllocation* pAllocations);
    6860 
    6861  void FreeDedicatedMemory(VmaAllocation allocation);
    6862 
    6863  /*
    6864  Calculates and returns bit mask of memory types that can support defragmentation
    6865  on GPU as they support creation of required buffer for copy operations.
    6866  */
    6867  uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
    6868 };
    6869 
     6870 ////////////////////////////////////////////////////////////////////////////////
     6871 // Memory allocation #2 after VmaAllocator_T definition
    6872 
    6873 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
    6874 {
    6875  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
    6876 }
    6877 
    6878 static void VmaFree(VmaAllocator hAllocator, void* ptr)
    6879 {
    6880  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
    6881 }
    6882 
    6883 template<typename T>
    6884 static T* VmaAllocate(VmaAllocator hAllocator)
    6885 {
    6886  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
    6887 }
    6888 
    6889 template<typename T>
    6890 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
    6891 {
    6892  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
    6893 }
    6894 
    6895 template<typename T>
    6896 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    6897 {
    6898  if(ptr != VMA_NULL)
    6899  {
    6900  ptr->~T();
    6901  VmaFree(hAllocator, ptr);
    6902  }
    6903 }
    6904 
    6905 template<typename T>
    6906 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    6907 {
    6908  if(ptr != VMA_NULL)
    6909  {
    6910  for(size_t i = count; i--; )
    6911  ptr[i].~T();
    6912  VmaFree(hAllocator, ptr);
    6913  }
    6914 }
    6915 
     6916 ////////////////////////////////////////////////////////////////////////////////
     6917 // VmaStringBuilder
    6918 
    6919 #if VMA_STATS_STRING_ENABLED
    6920 
    6921 class VmaStringBuilder
    6922 {
    6923 public:
    6924  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    6925  size_t GetLength() const { return m_Data.size(); }
    6926  const char* GetData() const { return m_Data.data(); }
    6927 
    6928  void Add(char ch) { m_Data.push_back(ch); }
    6929  void Add(const char* pStr);
    6930  void AddNewLine() { Add('\n'); }
    6931  void AddNumber(uint32_t num);
    6932  void AddNumber(uint64_t num);
    6933  void AddPointer(const void* ptr);
    6934 
    6935 private:
    6936  VmaVector< char, VmaStlAllocator<char> > m_Data;
    6937 };
    6938 
    6939 void VmaStringBuilder::Add(const char* pStr)
    6940 {
    6941  const size_t strLen = strlen(pStr);
    6942  if(strLen > 0)
    6943  {
    6944  const size_t oldCount = m_Data.size();
    6945  m_Data.resize(oldCount + strLen);
    6946  memcpy(m_Data.data() + oldCount, pStr, strLen);
    6947  }
    6948 }
    6949 
    6950 void VmaStringBuilder::AddNumber(uint32_t num)
    6951 {
    6952  char buf[11];
    6953  VmaUint32ToStr(buf, sizeof(buf), num);
    6954  Add(buf);
    6955 }
    6956 
    6957 void VmaStringBuilder::AddNumber(uint64_t num)
    6958 {
    6959  char buf[21];
    6960  VmaUint64ToStr(buf, sizeof(buf), num);
    6961  Add(buf);
    6962 }
    6963 
    6964 void VmaStringBuilder::AddPointer(const void* ptr)
    6965 {
    6966  char buf[21];
    6967  VmaPtrToStr(buf, sizeof(buf), ptr);
    6968  Add(buf);
    6969 }
    6970 
    6971 #endif // #if VMA_STATS_STRING_ENABLED
    6972 
     6973 ////////////////////////////////////////////////////////////////////////////////
     6974 // VmaJsonWriter
    6975 
    6976 #if VMA_STATS_STRING_ENABLED
    6977 
    6978 class VmaJsonWriter
    6979 {
    6980  VMA_CLASS_NO_COPY(VmaJsonWriter)
    6981 public:
    6982  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    6983  ~VmaJsonWriter();
    6984 
    6985  void BeginObject(bool singleLine = false);
    6986  void EndObject();
    6987 
    6988  void BeginArray(bool singleLine = false);
    6989  void EndArray();
    6990 
    6991  void WriteString(const char* pStr);
    6992  void BeginString(const char* pStr = VMA_NULL);
    6993  void ContinueString(const char* pStr);
    6994  void ContinueString(uint32_t n);
    6995  void ContinueString(uint64_t n);
    6996  void ContinueString_Pointer(const void* ptr);
    6997  void EndString(const char* pStr = VMA_NULL);
    6998 
    6999  void WriteNumber(uint32_t n);
    7000  void WriteNumber(uint64_t n);
    7001  void WriteBool(bool b);
    7002  void WriteNull();
    7003 
    7004 private:
    7005  static const char* const INDENT;
    7006 
    7007  enum COLLECTION_TYPE
    7008  {
    7009  COLLECTION_TYPE_OBJECT,
    7010  COLLECTION_TYPE_ARRAY,
    7011  };
    7012  struct StackItem
    7013  {
    7014  COLLECTION_TYPE type;
    7015  uint32_t valueCount;
    7016  bool singleLineMode;
    7017  };
    7018 
    7019  VmaStringBuilder& m_SB;
    7020  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    7021  bool m_InsideString;
    7022 
    7023  void BeginValue(bool isString);
    7024  void WriteIndent(bool oneLess = false);
    7025 };
    7026 
    7027 const char* const VmaJsonWriter::INDENT = " ";
    7028 
    7029 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    7030  m_SB(sb),
    7031  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    7032  m_InsideString(false)
    7033 {
    7034 }
    7035 
    7036 VmaJsonWriter::~VmaJsonWriter()
    7037 {
    7038  VMA_ASSERT(!m_InsideString);
    7039  VMA_ASSERT(m_Stack.empty());
    7040 }
    7041 
    7042 void VmaJsonWriter::BeginObject(bool singleLine)
    7043 {
    7044  VMA_ASSERT(!m_InsideString);
    7045 
    7046  BeginValue(false);
    7047  m_SB.Add('{');
    7048 
    7049  StackItem item;
    7050  item.type = COLLECTION_TYPE_OBJECT;
    7051  item.valueCount = 0;
    7052  item.singleLineMode = singleLine;
    7053  m_Stack.push_back(item);
    7054 }
    7055 
    7056 void VmaJsonWriter::EndObject()
    7057 {
    7058  VMA_ASSERT(!m_InsideString);
    7059 
    7060  WriteIndent(true);
    7061  m_SB.Add('}');
    7062 
    7063  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    7064  m_Stack.pop_back();
    7065 }
    7066 
    7067 void VmaJsonWriter::BeginArray(bool singleLine)
    7068 {
    7069  VMA_ASSERT(!m_InsideString);
    7070 
    7071  BeginValue(false);
    7072  m_SB.Add('[');
    7073 
    7074  StackItem item;
    7075  item.type = COLLECTION_TYPE_ARRAY;
    7076  item.valueCount = 0;
    7077  item.singleLineMode = singleLine;
    7078  m_Stack.push_back(item);
    7079 }
    7080 
    7081 void VmaJsonWriter::EndArray()
    7082 {
    7083  VMA_ASSERT(!m_InsideString);
    7084 
    7085  WriteIndent(true);
    7086  m_SB.Add(']');
    7087 
    7088  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    7089  m_Stack.pop_back();
    7090 }
    7091 
    7092 void VmaJsonWriter::WriteString(const char* pStr)
    7093 {
    7094  BeginString(pStr);
    7095  EndString();
    7096 }
    7097 
    7098 void VmaJsonWriter::BeginString(const char* pStr)
    7099 {
    7100  VMA_ASSERT(!m_InsideString);
    7101 
    7102  BeginValue(true);
    7103  m_SB.Add('"');
    7104  m_InsideString = true;
    7105  if(pStr != VMA_NULL && pStr[0] != '\0')
    7106  {
    7107  ContinueString(pStr);
    7108  }
    7109 }
    7110 
    7111 void VmaJsonWriter::ContinueString(const char* pStr)
    7112 {
    7113  VMA_ASSERT(m_InsideString);
    7114 
    7115  const size_t strLen = strlen(pStr);
    7116  for(size_t i = 0; i < strLen; ++i)
    7117  {
    7118  char ch = pStr[i];
    7119  if(ch == '\\')
    7120  {
    7121  m_SB.Add("\\\\");
    7122  }
    7123  else if(ch == '"')
    7124  {
    7125  m_SB.Add("\\\"");
    7126  }
    7127  else if(ch >= 32)
    7128  {
    7129  m_SB.Add(ch);
    7130  }
    7131  else switch(ch)
    7132  {
    7133  case '\b':
    7134  m_SB.Add("\\b");
    7135  break;
    7136  case '\f':
    7137  m_SB.Add("\\f");
    7138  break;
    7139  case '\n':
    7140  m_SB.Add("\\n");
    7141  break;
    7142  case '\r':
    7143  m_SB.Add("\\r");
    7144  break;
    7145  case '\t':
    7146  m_SB.Add("\\t");
    7147  break;
    7148  default:
    7149  VMA_ASSERT(0 && "Character not currently supported.");
    7150  break;
    7151  }
    7152  }
    7153 }
    7154 
    7155 void VmaJsonWriter::ContinueString(uint32_t n)
    7156 {
    7157  VMA_ASSERT(m_InsideString);
    7158  m_SB.AddNumber(n);
    7159 }
    7160 
    7161 void VmaJsonWriter::ContinueString(uint64_t n)
    7162 {
    7163  VMA_ASSERT(m_InsideString);
    7164  m_SB.AddNumber(n);
    7165 }
    7166 
    7167 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
    7168 {
    7169  VMA_ASSERT(m_InsideString);
    7170  m_SB.AddPointer(ptr);
    7171 }
    7172 
    7173 void VmaJsonWriter::EndString(const char* pStr)
    7174 {
    7175  VMA_ASSERT(m_InsideString);
    7176  if(pStr != VMA_NULL && pStr[0] != '\0')
    7177  {
    7178  ContinueString(pStr);
    7179  }
    7180  m_SB.Add('"');
    7181  m_InsideString = false;
    7182 }
    7183 
    7184 void VmaJsonWriter::WriteNumber(uint32_t n)
    7185 {
    7186  VMA_ASSERT(!m_InsideString);
    7187  BeginValue(false);
    7188  m_SB.AddNumber(n);
    7189 }
    7190 
    7191 void VmaJsonWriter::WriteNumber(uint64_t n)
    7192 {
    7193  VMA_ASSERT(!m_InsideString);
    7194  BeginValue(false);
    7195  m_SB.AddNumber(n);
    7196 }
    7197 
    7198 void VmaJsonWriter::WriteBool(bool b)
    7199 {
    7200  VMA_ASSERT(!m_InsideString);
    7201  BeginValue(false);
    7202  m_SB.Add(b ? "true" : "false");
    7203 }
    7204 
    7205 void VmaJsonWriter::WriteNull()
    7206 {
    7207  VMA_ASSERT(!m_InsideString);
    7208  BeginValue(false);
    7209  m_SB.Add("null");
    7210 }
    7211 
    7212 void VmaJsonWriter::BeginValue(bool isString)
    7213 {
    7214  if(!m_Stack.empty())
    7215  {
    7216  StackItem& currItem = m_Stack.back();
    7217  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    7218  currItem.valueCount % 2 == 0)
    7219  {
    7220  VMA_ASSERT(isString);
    7221  }
    7222 
    7223  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    7224  currItem.valueCount % 2 != 0)
    7225  {
    7226  m_SB.Add(": ");
    7227  }
    7228  else if(currItem.valueCount > 0)
    7229  {
    7230  m_SB.Add(", ");
    7231  WriteIndent();
    7232  }
    7233  else
    7234  {
    7235  WriteIndent();
    7236  }
    7237  ++currItem.valueCount;
    7238  }
    7239 }
    7240 
    7241 void VmaJsonWriter::WriteIndent(bool oneLess)
    7242 {
    7243  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    7244  {
    7245  m_SB.AddNewLine();
    7246 
    7247  size_t count = m_Stack.size();
    7248  if(count > 0 && oneLess)
    7249  {
    7250  --count;
    7251  }
    7252  for(size_t i = 0; i < count; ++i)
    7253  {
    7254  m_SB.Add(INDENT);
    7255  }
    7256  }
    7257 }
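// Illustrative usage sketch (not part of the library): the writer is driven by
// balanced Begin*/End* calls and appends JSON to the VmaStringBuilder passed to
// the constructor. Inside an object, values alternate key/value, which
// BeginValue() asserts. Given a VkAllocationCallbacks pointer `allocs` and a
// VmaStringBuilder `sb` already constructed:
//
//     VmaJsonWriter json(allocs, sb);
//     json.BeginObject();
//     json.WriteString("Blocks");   // key - must be a string
//     json.WriteNumber(2u);         // its value
//     json.WriteString("Empty");    // next key
//     json.WriteBool(false);        // its value
//     json.EndObject();
//     // sb now holds, modulo indentation: { "Blocks": 2, "Empty": false }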
    7258 
    7259 #endif // #if VMA_STATS_STRING_ENABLED
    7260 
    7261 ////////////////////////////////////////////////////////////////////////////////
    7262 
    7263 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
    7264 {
    7265  if(IsUserDataString())
    7266  {
    7267  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
    7268 
    7269  FreeUserDataString(hAllocator);
    7270 
    7271  if(pUserData != VMA_NULL)
    7272  {
    7273  const char* const newStrSrc = (char*)pUserData;
    7274  const size_t newStrLen = strlen(newStrSrc);
    7275  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
    7276  memcpy(newStrDst, newStrSrc, newStrLen + 1);
    7277  m_pUserData = newStrDst;
    7278  }
    7279  }
    7280  else
    7281  {
    7282  m_pUserData = pUserData;
    7283  }
    7284 }
    7285 
    7286 void VmaAllocation_T::ChangeBlockAllocation(
    7287  VmaAllocator hAllocator,
    7288  VmaDeviceMemoryBlock* block,
    7289  VkDeviceSize offset)
    7290 {
    7291  VMA_ASSERT(block != VMA_NULL);
    7292  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    7293 
    7294  // Move mapping reference counter from old block to new block.
    7295  if(block != m_BlockAllocation.m_Block)
    7296  {
    7297  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
    7298  if(IsPersistentMap())
    7299  ++mapRefCount;
    7300  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
    7301  block->Map(hAllocator, mapRefCount, VMA_NULL);
    7302  }
    7303 
    7304  m_BlockAllocation.m_Block = block;
    7305  m_BlockAllocation.m_Offset = offset;
    7306 }
    7307 
    7308 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
    7309 {
    7310  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    7311  m_BlockAllocation.m_Offset = newOffset;
    7312 }
    7313 
    7314 VkDeviceSize VmaAllocation_T::GetOffset() const
    7315 {
    7316  switch(m_Type)
    7317  {
    7318  case ALLOCATION_TYPE_BLOCK:
    7319  return m_BlockAllocation.m_Offset;
    7320  case ALLOCATION_TYPE_DEDICATED:
    7321  return 0;
    7322  default:
    7323  VMA_ASSERT(0);
    7324  return 0;
    7325  }
    7326 }
    7327 
    7328 VkDeviceMemory VmaAllocation_T::GetMemory() const
    7329 {
    7330  switch(m_Type)
    7331  {
    7332  case ALLOCATION_TYPE_BLOCK:
    7333  return m_BlockAllocation.m_Block->GetDeviceMemory();
    7334  case ALLOCATION_TYPE_DEDICATED:
    7335  return m_DedicatedAllocation.m_hMemory;
    7336  default:
    7337  VMA_ASSERT(0);
    7338  return VK_NULL_HANDLE;
    7339  }
    7340 }
    7341 
    7342 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    7343 {
    7344  switch(m_Type)
    7345  {
    7346  case ALLOCATION_TYPE_BLOCK:
    7347  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
    7348  case ALLOCATION_TYPE_DEDICATED:
    7349  return m_DedicatedAllocation.m_MemoryTypeIndex;
    7350  default:
    7351  VMA_ASSERT(0);
    7352  return UINT32_MAX;
    7353  }
    7354 }
    7355 
    7356 void* VmaAllocation_T::GetMappedData() const
    7357 {
    7358  switch(m_Type)
    7359  {
    7360  case ALLOCATION_TYPE_BLOCK:
    7361  if(m_MapCount != 0)
    7362  {
    7363  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
    7364  VMA_ASSERT(pBlockData != VMA_NULL);
    7365  return (char*)pBlockData + m_BlockAllocation.m_Offset;
    7366  }
    7367  else
    7368  {
    7369  return VMA_NULL;
    7370  }
    7371  break;
    7372  case ALLOCATION_TYPE_DEDICATED:
    7373  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
    7374  return m_DedicatedAllocation.m_pMappedData;
    7375  default:
    7376  VMA_ASSERT(0);
    7377  return VMA_NULL;
    7378  }
    7379 }
    7380 
    7381 bool VmaAllocation_T::CanBecomeLost() const
    7382 {
    7383  switch(m_Type)
    7384  {
    7385  case ALLOCATION_TYPE_BLOCK:
    7386  return m_BlockAllocation.m_CanBecomeLost;
    7387  case ALLOCATION_TYPE_DEDICATED:
    7388  return false;
    7389  default:
    7390  VMA_ASSERT(0);
    7391  return false;
    7392  }
    7393 }
    7394 
    7395 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    7396 {
    7397  VMA_ASSERT(CanBecomeLost());
    7398 
    7399  /*
    7400  Warning: This is a carefully designed algorithm.
    7401  Do not modify unless you really know what you're doing :)
    7402  */
    7403  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    7404  for(;;)
    7405  {
    7406  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    7407  {
    7408  VMA_ASSERT(0);
    7409  return false;
    7410  }
    7411  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
    7412  {
    7413  return false;
    7414  }
    7415  else // Last use time earlier than current time.
    7416  {
    7417  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
    7418  {
    7419  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
    7420  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
    7421  return true;
    7422  }
    7423  }
    7424  }
    7425 }
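// The loop above is a standard lock-free retry: assuming
// CompareExchangeLastUseFrameIndex() wraps std::atomic<>::compare_exchange_weak
// (which reloads the expected value on failure), a stand-alone equivalent is:
//
//     std::atomic<uint32_t> lastUse{ /* initial frame index */ };
//     uint32_t expected = lastUse.load();
//     for(;;)
//     {
//         if(expected == VMA_FRAME_INDEX_LOST)
//             return false; // another thread already marked it lost
//         if(expected + frameInUseCount >= currentFrameIndex)
//             return false; // still potentially in use
//         if(lastUse.compare_exchange_weak(expected, VMA_FRAME_INDEX_LOST))
//             return true;  // we won the race; on failure, retry with the reloaded value
//     }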
    7426 
    7427 #if VMA_STATS_STRING_ENABLED
    7428 
    7429 // Corresponds to values of enum VmaSuballocationType.
    7430 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    7431  "FREE",
    7432  "UNKNOWN",
    7433  "BUFFER",
    7434  "IMAGE_UNKNOWN",
    7435  "IMAGE_LINEAR",
    7436  "IMAGE_OPTIMAL",
    7437 };
    7438 
    7439 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
    7440 {
    7441  json.WriteString("Type");
    7442  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
    7443 
    7444  json.WriteString("Size");
    7445  json.WriteNumber(m_Size);
    7446 
    7447  if(m_pUserData != VMA_NULL)
    7448  {
    7449  json.WriteString("UserData");
    7450  if(IsUserDataString())
    7451  {
    7452  json.WriteString((const char*)m_pUserData);
    7453  }
    7454  else
    7455  {
    7456  json.BeginString();
    7457  json.ContinueString_Pointer(m_pUserData);
    7458  json.EndString();
    7459  }
    7460  }
    7461 
    7462  json.WriteString("CreationFrameIndex");
    7463  json.WriteNumber(m_CreationFrameIndex);
    7464 
    7465  json.WriteString("LastUseFrameIndex");
    7466  json.WriteNumber(GetLastUseFrameIndex());
    7467 
    7468  if(m_BufferImageUsage != 0)
    7469  {
    7470  json.WriteString("Usage");
    7471  json.WriteNumber(m_BufferImageUsage);
    7472  }
    7473 }
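// The enclosing braces come from the caller (e.g. PrintDetailedMap_Allocation),
// so this function only emits key/value pairs. For example (illustrative output
// only), a named 256-byte buffer allocation serializes roughly as:
//
//     "Type": "BUFFER", "Size": 256, "UserData": "MyBuffer",
//     "CreationFrameIndex": 10, "LastUseFrameIndex": 42, "Usage": 130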
    7474 
    7475 #endif
    7476 
    7477 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
    7478 {
    7479  VMA_ASSERT(IsUserDataString());
    7480  if(m_pUserData != VMA_NULL)
    7481  {
    7482  char* const oldStr = (char*)m_pUserData;
    7483  const size_t oldStrLen = strlen(oldStr);
    7484  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
    7485  m_pUserData = VMA_NULL;
    7486  }
    7487 }
    7488 
    7489 void VmaAllocation_T::BlockAllocMap()
    7490 {
    7491  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    7492 
    7493  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    7494  {
    7495  ++m_MapCount;
    7496  }
    7497  else
    7498  {
    7499  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    7500  }
    7501 }
    7502 
    7503 void VmaAllocation_T::BlockAllocUnmap()
    7504 {
    7505  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    7506 
    7507  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    7508  {
    7509  --m_MapCount;
    7510  }
    7511  else
    7512  {
    7513  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    7514  }
    7515 }
    7516 
    7517 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
    7518 {
    7519  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    7520 
    7521  if(m_MapCount != 0)
    7522  {
    7523  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    7524  {
    7525  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
    7526  *ppData = m_DedicatedAllocation.m_pMappedData;
    7527  ++m_MapCount;
    7528  return VK_SUCCESS;
    7529  }
    7530  else
    7531  {
    7532  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
    7533  return VK_ERROR_MEMORY_MAP_FAILED;
    7534  }
    7535  }
    7536  else
    7537  {
    7538  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    7539  hAllocator->m_hDevice,
    7540  m_DedicatedAllocation.m_hMemory,
    7541  0, // offset
    7542  VK_WHOLE_SIZE,
    7543  0, // flags
    7544  ppData);
    7545  if(result == VK_SUCCESS)
    7546  {
    7547  m_DedicatedAllocation.m_pMappedData = *ppData;
    7548  m_MapCount = 1;
    7549  }
    7550  return result;
    7551  }
    7552 }
    7553 
    7554 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
    7555 {
    7556  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    7557 
    7558  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    7559  {
    7560  --m_MapCount;
    7561  if(m_MapCount == 0)
    7562  {
    7563  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
    7564  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
    7565  hAllocator->m_hDevice,
    7566  m_DedicatedAllocation.m_hMemory);
    7567  }
    7568  }
    7569  else
    7570  {
    7571  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    7572  }
    7573 }
    7574 
    7575 #if VMA_STATS_STRING_ENABLED
    7576 
    7577 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
    7578 {
    7579  json.BeginObject();
    7580 
    7581  json.WriteString("Blocks");
    7582  json.WriteNumber(stat.blockCount);
    7583 
    7584  json.WriteString("Allocations");
    7585  json.WriteNumber(stat.allocationCount);
    7586 
    7587  json.WriteString("UnusedRanges");
    7588  json.WriteNumber(stat.unusedRangeCount);
    7589 
    7590  json.WriteString("UsedBytes");
    7591  json.WriteNumber(stat.usedBytes);
    7592 
    7593  json.WriteString("UnusedBytes");
    7594  json.WriteNumber(stat.unusedBytes);
    7595 
    7596  if(stat.allocationCount > 1)
    7597  {
    7598  json.WriteString("AllocationSize");
    7599  json.BeginObject(true);
    7600  json.WriteString("Min");
    7601  json.WriteNumber(stat.allocationSizeMin);
    7602  json.WriteString("Avg");
    7603  json.WriteNumber(stat.allocationSizeAvg);
    7604  json.WriteString("Max");
    7605  json.WriteNumber(stat.allocationSizeMax);
    7606  json.EndObject();
    7607  }
    7608 
    7609  if(stat.unusedRangeCount > 1)
    7610  {
    7611  json.WriteString("UnusedRangeSize");
    7612  json.BeginObject(true);
    7613  json.WriteString("Min");
    7614  json.WriteNumber(stat.unusedRangeSizeMin);
    7615  json.WriteString("Avg");
    7616  json.WriteNumber(stat.unusedRangeSizeAvg);
    7617  json.WriteString("Max");
    7618  json.WriteNumber(stat.unusedRangeSizeMax);
    7619  json.EndObject();
    7620  }
    7621 
    7622  json.EndObject();
    7623 }
    7624 
    7625 #endif // #if VMA_STATS_STRING_ENABLED
    7626 
    7627 struct VmaSuballocationItemSizeLess
    7628 {
    7629  bool operator()(
    7630  const VmaSuballocationList::iterator lhs,
    7631  const VmaSuballocationList::iterator rhs) const
    7632  {
    7633  return lhs->size < rhs->size;
    7634  }
    7635  bool operator()(
    7636  const VmaSuballocationList::iterator lhs,
    7637  VkDeviceSize rhsSize) const
    7638  {
    7639  return lhs->size < rhsSize;
    7640  }
    7641 };
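// The two overloads make this a heterogeneous comparator: it can order two list
// iterators by size, or an iterator against a raw VkDeviceSize, so a binary
// search over the iterator vector can take a plain size as the key. Equivalent
// sketch with the standard library (requires <algorithm> and <vector>):
//
//     std::vector<VmaSuballocationList::iterator> bySize = /* sorted by size */;
//     VkDeviceSize wantedSize = 1024;
//     auto it = std::lower_bound(bySize.begin(), bySize.end(), wantedSize,
//         VmaSuballocationItemSizeLess());
//     // *it is the first registered free range with size >= wantedSize.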
    7642 
    7643 
    7644 ////////////////////////////////////////////////////////////////////////////////
    7645 // class VmaBlockMetadata
    7646 
    7647 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    7648  m_Size(0),
    7649  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
    7650 {
    7651 }
    7652 
    7653 #if VMA_STATS_STRING_ENABLED
    7654 
    7655 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    7656  VkDeviceSize unusedBytes,
    7657  size_t allocationCount,
    7658  size_t unusedRangeCount) const
    7659 {
    7660  json.BeginObject();
    7661 
    7662  json.WriteString("TotalBytes");
    7663  json.WriteNumber(GetSize());
    7664 
    7665  json.WriteString("UnusedBytes");
    7666  json.WriteNumber(unusedBytes);
    7667 
    7668  json.WriteString("Allocations");
    7669  json.WriteNumber((uint64_t)allocationCount);
    7670 
    7671  json.WriteString("UnusedRanges");
    7672  json.WriteNumber((uint64_t)unusedRangeCount);
    7673 
    7674  json.WriteString("Suballocations");
    7675  json.BeginArray();
    7676 }
    7677 
    7678 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    7679  VkDeviceSize offset,
    7680  VmaAllocation hAllocation) const
    7681 {
    7682  json.BeginObject(true);
    7683 
    7684  json.WriteString("Offset");
    7685  json.WriteNumber(offset);
    7686 
    7687  hAllocation->PrintParameters(json);
    7688 
    7689  json.EndObject();
    7690 }
    7691 
    7692 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    7693  VkDeviceSize offset,
    7694  VkDeviceSize size) const
    7695 {
    7696  json.BeginObject(true);
    7697 
    7698  json.WriteString("Offset");
    7699  json.WriteNumber(offset);
    7700 
    7701  json.WriteString("Type");
    7702  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
    7703 
    7704  json.WriteString("Size");
    7705  json.WriteNumber(size);
    7706 
    7707  json.EndObject();
    7708 }
    7709 
    7710 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
    7711 {
    7712  json.EndArray();
    7713  json.EndObject();
    7714 }
    7715 
    7716 #endif // #if VMA_STATS_STRING_ENABLED
    7717 
    7718 ////////////////////////////////////////////////////////////////////////////////
    7719 // class VmaBlockMetadata_Generic
    7720 
    7721 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    7722  VmaBlockMetadata(hAllocator),
    7723  m_FreeCount(0),
    7724  m_SumFreeSize(0),
    7725  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    7726  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
    7727 {
    7728 }
    7729 
    7730 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
    7731 {
    7732 }
    7733 
    7734 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
    7735 {
    7736  VmaBlockMetadata::Init(size);
    7737 
    7738  m_FreeCount = 1;
    7739  m_SumFreeSize = size;
    7740 
    7741  VmaSuballocation suballoc = {};
    7742  suballoc.offset = 0;
    7743  suballoc.size = size;
    7744  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    7745  suballoc.hAllocation = VK_NULL_HANDLE;
    7746 
    7747  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    7748  m_Suballocations.push_back(suballoc);
    7749  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    7750  --suballocItem;
    7751  m_FreeSuballocationsBySize.push_back(suballocItem);
    7752 }
    7753 
    7754 bool VmaBlockMetadata_Generic::Validate() const
    7755 {
    7756  VMA_VALIDATE(!m_Suballocations.empty());
    7757 
    7758  // Expected offset of new suballocation as calculated from previous ones.
    7759  VkDeviceSize calculatedOffset = 0;
    7760  // Expected number of free suballocations as calculated from traversing their list.
    7761  uint32_t calculatedFreeCount = 0;
    7762  // Expected sum size of free suballocations as calculated from traversing their list.
    7763  VkDeviceSize calculatedSumFreeSize = 0;
    7764  // Expected number of free suballocations that should be registered in
    7765  // m_FreeSuballocationsBySize calculated from traversing their list.
    7766  size_t freeSuballocationsToRegister = 0;
    7767  // True if previous visited suballocation was free.
    7768  bool prevFree = false;
    7769 
    7770  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    7771  suballocItem != m_Suballocations.cend();
    7772  ++suballocItem)
    7773  {
    7774  const VmaSuballocation& subAlloc = *suballocItem;
    7775 
    7776  // Actual offset of this suballocation doesn't match expected one.
    7777  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
    7778 
    7779  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
    7780  // Two adjacent free suballocations are invalid. They should be merged.
    7781  VMA_VALIDATE(!prevFree || !currFree);
    7782 
    7783  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
    7784 
    7785  if(currFree)
    7786  {
    7787  calculatedSumFreeSize += subAlloc.size;
    7788  ++calculatedFreeCount;
    7789  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    7790  {
    7791  ++freeSuballocationsToRegister;
    7792  }
    7793 
    7794  // Margin required between allocations - every free space must be at least that large.
    7795  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
    7796  }
    7797  else
    7798  {
    7799  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
    7800  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
    7801 
    7802  // Margin required between allocations - previous allocation must be free.
    7803  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
    7804  }
    7805 
    7806  calculatedOffset += subAlloc.size;
    7807  prevFree = currFree;
    7808  }
    7809 
    7810  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    7811  // match expected one.
    7812  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
    7813 
    7814  VkDeviceSize lastSize = 0;
    7815  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    7816  {
    7817  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
    7818 
    7819  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
    7820  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    7821  // They must be sorted by size ascending.
    7822  VMA_VALIDATE(suballocItem->size >= lastSize);
    7823 
    7824  lastSize = suballocItem->size;
    7825  }
    7826 
    7827  // Check if totals match calculated values.
    7828  VMA_VALIDATE(ValidateFreeSuballocationList());
    7829  VMA_VALIDATE(calculatedOffset == GetSize());
    7830  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    7831  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
    7832 
    7833  return true;
    7834 }
    7835 
    7836 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
    7837 {
    7838  if(!m_FreeSuballocationsBySize.empty())
    7839  {
    7840  return m_FreeSuballocationsBySize.back()->size;
    7841  }
    7842  else
    7843  {
    7844  return 0;
    7845  }
    7846 }
    7847 
    7848 bool VmaBlockMetadata_Generic::IsEmpty() const
    7849 {
    7850  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
    7851 }
    7852 
    7853 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    7854 {
    7855  outInfo.blockCount = 1;
    7856 
    7857  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    7858  outInfo.allocationCount = rangeCount - m_FreeCount;
    7859  outInfo.unusedRangeCount = m_FreeCount;
    7860 
    7861  outInfo.unusedBytes = m_SumFreeSize;
    7862  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
    7863 
    7864  outInfo.allocationSizeMin = UINT64_MAX;
    7865  outInfo.allocationSizeMax = 0;
    7866  outInfo.unusedRangeSizeMin = UINT64_MAX;
    7867  outInfo.unusedRangeSizeMax = 0;
    7868 
    7869  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    7870  suballocItem != m_Suballocations.cend();
    7871  ++suballocItem)
    7872  {
    7873  const VmaSuballocation& suballoc = *suballocItem;
    7874  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    7875  {
    7876  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    7877  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    7878  }
    7879  else
    7880  {
    7881  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
    7882  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
    7883  }
    7884  }
    7885 }
    7886 
    7887 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
    7888 {
    7889  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    7890 
    7891  inoutStats.size += GetSize();
    7892  inoutStats.unusedSize += m_SumFreeSize;
    7893  inoutStats.allocationCount += rangeCount - m_FreeCount;
    7894  inoutStats.unusedRangeCount += m_FreeCount;
    7895  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    7896 }
    7897 
    7898 #if VMA_STATS_STRING_ENABLED
    7899 
    7900 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
    7901 {
    7902  PrintDetailedMap_Begin(json,
    7903  m_SumFreeSize, // unusedBytes
    7904  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
    7905  m_FreeCount); // unusedRangeCount
    7906 
    7907  size_t i = 0;
    7908  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    7909  suballocItem != m_Suballocations.cend();
    7910  ++suballocItem, ++i)
    7911  {
    7912  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    7913  {
    7914  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
    7915  }
    7916  else
    7917  {
    7918  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
    7919  }
    7920  }
    7921 
    7922  PrintDetailedMap_End(json);
    7923 }
    7924 
    7925 #endif // #if VMA_STATS_STRING_ENABLED
    7926 
    7927 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
    7928  uint32_t currentFrameIndex,
    7929  uint32_t frameInUseCount,
    7930  VkDeviceSize bufferImageGranularity,
    7931  VkDeviceSize allocSize,
    7932  VkDeviceSize allocAlignment,
    7933  bool upperAddress,
    7934  VmaSuballocationType allocType,
    7935  bool canMakeOtherLost,
    7936  uint32_t strategy,
    7937  VmaAllocationRequest* pAllocationRequest)
    7938 {
    7939  VMA_ASSERT(allocSize > 0);
    7940  VMA_ASSERT(!upperAddress);
    7941  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    7942  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    7943  VMA_HEAVY_ASSERT(Validate());
    7944 
    7945  pAllocationRequest->type = VmaAllocationRequestType::Normal;
    7946 
    7947  // There is not enough total free space in this block to fulfill the request: Early return.
    7948  if(canMakeOtherLost == false &&
    7949  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
    7950  {
    7951  return false;
    7952  }
    7953 
    7954  // New algorithm, efficiently searching freeSuballocationsBySize.
    7955  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    7956  if(freeSuballocCount > 0)
    7957  {
    7958  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
    7959  {
    7960  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
    7961  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    7962  m_FreeSuballocationsBySize.data(),
    7963  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    7964  allocSize + 2 * VMA_DEBUG_MARGIN,
    7965  VmaSuballocationItemSizeLess());
    7966  size_t index = it - m_FreeSuballocationsBySize.data();
    7967  for(; index < freeSuballocCount; ++index)
    7968  {
    7969  if(CheckAllocation(
    7970  currentFrameIndex,
    7971  frameInUseCount,
    7972  bufferImageGranularity,
    7973  allocSize,
    7974  allocAlignment,
    7975  allocType,
    7976  m_FreeSuballocationsBySize[index],
    7977  false, // canMakeOtherLost
    7978  &pAllocationRequest->offset,
    7979  &pAllocationRequest->itemsToMakeLostCount,
    7980  &pAllocationRequest->sumFreeSize,
    7981  &pAllocationRequest->sumItemSize))
    7982  {
    7983  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    7984  return true;
    7985  }
    7986  }
    7987  }
    7988  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
    7989  {
    7990  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7991  it != m_Suballocations.end();
    7992  ++it)
    7993  {
    7994  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
    7995  currentFrameIndex,
    7996  frameInUseCount,
    7997  bufferImageGranularity,
    7998  allocSize,
    7999  allocAlignment,
    8000  allocType,
    8001  it,
    8002  false, // canMakeOtherLost
    8003  &pAllocationRequest->offset,
    8004  &pAllocationRequest->itemsToMakeLostCount,
    8005  &pAllocationRequest->sumFreeSize,
    8006  &pAllocationRequest->sumItemSize))
    8007  {
    8008  pAllocationRequest->item = it;
    8009  return true;
    8010  }
    8011  }
    8012  }
    8013  else // WORST_FIT, FIRST_FIT
    8014  {
    8015  // Search starting from the biggest suballocations.
    8016  for(size_t index = freeSuballocCount; index--; )
    8017  {
    8018  if(CheckAllocation(
    8019  currentFrameIndex,
    8020  frameInUseCount,
    8021  bufferImageGranularity,
    8022  allocSize,
    8023  allocAlignment,
    8024  allocType,
    8025  m_FreeSuballocationsBySize[index],
    8026  false, // canMakeOtherLost
    8027  &pAllocationRequest->offset,
    8028  &pAllocationRequest->itemsToMakeLostCount,
    8029  &pAllocationRequest->sumFreeSize,
    8030  &pAllocationRequest->sumItemSize))
    8031  {
    8032  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    8033  return true;
    8034  }
    8035  }
    8036  }
    8037  }
    8038 
    8039  if(canMakeOtherLost)
    8040  {
    8041  // Brute-force algorithm. TODO: Come up with something better.
    8042 
    8043  bool found = false;
    8044  VmaAllocationRequest tmpAllocRequest = {};
    8045  tmpAllocRequest.type = VmaAllocationRequestType::Normal;
    8046  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    8047  suballocIt != m_Suballocations.end();
    8048  ++suballocIt)
    8049  {
    8050  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    8051  suballocIt->hAllocation->CanBecomeLost())
    8052  {
    8053  if(CheckAllocation(
    8054  currentFrameIndex,
    8055  frameInUseCount,
    8056  bufferImageGranularity,
    8057  allocSize,
    8058  allocAlignment,
    8059  allocType,
    8060  suballocIt,
    8061  canMakeOtherLost,
    8062  &tmpAllocRequest.offset,
    8063  &tmpAllocRequest.itemsToMakeLostCount,
    8064  &tmpAllocRequest.sumFreeSize,
    8065  &tmpAllocRequest.sumItemSize))
    8066  {
    8067  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
    8068  {
    8069  *pAllocationRequest = tmpAllocRequest;
    8070  pAllocationRequest->item = suballocIt;
    8071  break;
    8072  }
    8073  if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
    8074  {
    8075  *pAllocationRequest = tmpAllocRequest;
    8076  pAllocationRequest->item = suballocIt;
    8077  found = true;
    8078  }
    8079  }
    8080  }
    8081  }
    8082 
    8083  return found;
    8084  }
    8085 
    8086  return false;
    8087 }
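// Worked example (illustrative): with registered free sizes [64, 128, 128, 512]
// and a request of 100 bytes (VMA_DEBUG_MARGIN == 0), the best-fit path
// binary-searches to the first 128, then scans forward; if alignment or
// granularity padding makes that candidate too small, it tries the second 128,
// then 512. The WORST_FIT/FIRST_FIT path instead scans from the largest size
// downward, and MIN_OFFSET walks the suballocation list in address order.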
    8088 
    8089 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    8090  uint32_t currentFrameIndex,
    8091  uint32_t frameInUseCount,
    8092  VmaAllocationRequest* pAllocationRequest)
    8093 {
    8094  VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);
    8095 
    8096  while(pAllocationRequest->itemsToMakeLostCount > 0)
    8097  {
    8098  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
    8099  {
    8100  ++pAllocationRequest->item;
    8101  }
    8102  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    8103  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
    8104  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
    8105  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8106  {
    8107  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
    8108  --pAllocationRequest->itemsToMakeLostCount;
    8109  }
    8110  else
    8111  {
    8112  return false;
    8113  }
    8114  }
    8115 
    8116  VMA_HEAVY_ASSERT(Validate());
    8117  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    8118  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8119 
    8120  return true;
    8121 }
    8122 
    8123 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    8124 {
    8125  uint32_t lostAllocationCount = 0;
    8126  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    8127  it != m_Suballocations.end();
    8128  ++it)
    8129  {
    8130  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
    8131  it->hAllocation->CanBecomeLost() &&
    8132  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8133  {
    8134  it = FreeSuballocation(it);
    8135  ++lostAllocationCount;
    8136  }
    8137  }
    8138  return lostAllocationCount;
    8139 }
    8140 
    8141 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
    8142 {
    8143  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    8144  it != m_Suballocations.end();
    8145  ++it)
    8146  {
    8147  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    8148  {
    8149  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
    8150  {
    8151  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    8152  return VK_ERROR_VALIDATION_FAILED_EXT;
    8153  }
    8154  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
    8155  {
    8156  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    8157  return VK_ERROR_VALIDATION_FAILED_EXT;
    8158  }
    8159  }
    8160  }
    8161 
    8162  return VK_SUCCESS;
    8163 }
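// Sketch of the underlying idea (assumed helper, not the exact library code):
// when corruption detection is enabled, the VMA_DEBUG_MARGIN bytes around every
// allocation are filled with a known 32-bit pattern at allocation time, and
// VmaValidateMagicValue() re-checks that pattern here:
//
//     static const uint32_t MAGIC = 0x7F000000; // illustrative value only
//     static bool ValidateMagicSketch(const void* pBlockData, VkDeviceSize offset)
//     {
//         const uint32_t* p = (const uint32_t*)((const char*)pBlockData + offset);
//         for(size_t i = 0; i < VMA_DEBUG_MARGIN / sizeof(uint32_t); ++i)
//             if(p[i] != MAGIC)
//                 return false; // someone wrote past an allocation boundary
//         return true;
//     }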
    8164 
    8165 void VmaBlockMetadata_Generic::Alloc(
    8166  const VmaAllocationRequest& request,
    8167  VmaSuballocationType type,
    8168  VkDeviceSize allocSize,
    8169  VmaAllocation hAllocation)
    8170 {
    8171  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
    8172  VMA_ASSERT(request.item != m_Suballocations.end());
    8173  VmaSuballocation& suballoc = *request.item;
    8174  // Given suballocation is a free block.
    8175  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    8176  // Given offset is inside this suballocation.
    8177  VMA_ASSERT(request.offset >= suballoc.offset);
    8178  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    8179  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    8180  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
    8181 
    8182  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    8183  // it to become used.
    8184  UnregisterFreeSuballocation(request.item);
    8185 
    8186  suballoc.offset = request.offset;
    8187  suballoc.size = allocSize;
    8188  suballoc.type = type;
    8189  suballoc.hAllocation = hAllocation;
    8190 
    8191  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    8192  if(paddingEnd)
    8193  {
    8194  VmaSuballocation paddingSuballoc = {};
    8195  paddingSuballoc.offset = request.offset + allocSize;
    8196  paddingSuballoc.size = paddingEnd;
    8197  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8198  VmaSuballocationList::iterator next = request.item;
    8199  ++next;
    8200  const VmaSuballocationList::iterator paddingEndItem =
    8201  m_Suballocations.insert(next, paddingSuballoc);
    8202  RegisterFreeSuballocation(paddingEndItem);
    8203  }
    8204 
    8205  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    8206  if(paddingBegin)
    8207  {
    8208  VmaSuballocation paddingSuballoc = {};
    8209  paddingSuballoc.offset = request.offset - paddingBegin;
    8210  paddingSuballoc.size = paddingBegin;
    8211  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8212  const VmaSuballocationList::iterator paddingBeginItem =
    8213  m_Suballocations.insert(request.item, paddingSuballoc);
    8214  RegisterFreeSuballocation(paddingBeginItem);
    8215  }
    8216 
    8217  // Update totals.
    8218  m_FreeCount = m_FreeCount - 1;
    8219  if(paddingBegin > 0)
    8220  {
    8221  ++m_FreeCount;
    8222  }
    8223  if(paddingEnd > 0)
    8224  {
    8225  ++m_FreeCount;
    8226  }
    8227  m_SumFreeSize -= allocSize;
    8228 }
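// Worked example (illustrative): a 1000-byte free range at offset 0, with an
// allocation of 256 bytes placed at request.offset 128 (after margin and
// alignment), splits into three suballocations:
//
//     [0, 128)     free  - "paddingBegin", inserted before the item
//     [128, 384)   used  - the new allocation
//     [384, 1000)  free  - "paddingEnd", inserted after the item
//
// m_FreeCount loses the consumed range and gains one per non-empty padding;
// m_SumFreeSize drops by exactly allocSize, because the paddings remain free.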
    8229 
    8230 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
    8231 {
    8232  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    8233  suballocItem != m_Suballocations.end();
    8234  ++suballocItem)
    8235  {
    8236  VmaSuballocation& suballoc = *suballocItem;
    8237  if(suballoc.hAllocation == allocation)
    8238  {
    8239  FreeSuballocation(suballocItem);
    8240  VMA_HEAVY_ASSERT(Validate());
    8241  return;
    8242  }
    8243  }
    8244  VMA_ASSERT(0 && "Not found!");
    8245 }
    8246 
    8247 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
    8248 {
    8249  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    8250  suballocItem != m_Suballocations.end();
    8251  ++suballocItem)
    8252  {
    8253  VmaSuballocation& suballoc = *suballocItem;
    8254  if(suballoc.offset == offset)
    8255  {
    8256  FreeSuballocation(suballocItem);
    8257  return;
    8258  }
    8259  }
    8260  VMA_ASSERT(0 && "Not found!");
    8261 }
    8262 
    8263 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
    8264 {
    8265  VkDeviceSize lastSize = 0;
    8266  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    8267  {
    8268  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    8269 
    8270  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
    8271  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    8272  VMA_VALIDATE(it->size >= lastSize);
    8273  lastSize = it->size;
    8274  }
    8275  return true;
    8276 }
    8277 
    8278 bool VmaBlockMetadata_Generic::CheckAllocation(
    8279  uint32_t currentFrameIndex,
    8280  uint32_t frameInUseCount,
    8281  VkDeviceSize bufferImageGranularity,
    8282  VkDeviceSize allocSize,
    8283  VkDeviceSize allocAlignment,
    8284  VmaSuballocationType allocType,
    8285  VmaSuballocationList::const_iterator suballocItem,
    8286  bool canMakeOtherLost,
    8287  VkDeviceSize* pOffset,
    8288  size_t* itemsToMakeLostCount,
    8289  VkDeviceSize* pSumFreeSize,
    8290  VkDeviceSize* pSumItemSize) const
    8291 {
    8292  VMA_ASSERT(allocSize > 0);
    8293  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    8294  VMA_ASSERT(suballocItem != m_Suballocations.cend());
    8295  VMA_ASSERT(pOffset != VMA_NULL);
    8296 
    8297  *itemsToMakeLostCount = 0;
    8298  *pSumFreeSize = 0;
    8299  *pSumItemSize = 0;
    8300 
    8301  if(canMakeOtherLost)
    8302  {
    8303  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    8304  {
    8305  *pSumFreeSize = suballocItem->size;
    8306  }
    8307  else
    8308  {
    8309  if(suballocItem->hAllocation->CanBecomeLost() &&
    8310  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    8311  {
    8312  ++*itemsToMakeLostCount;
    8313  *pSumItemSize = suballocItem->size;
    8314  }
    8315  else
    8316  {
    8317  return false;
    8318  }
    8319  }
    8320 
    8321  // Remaining size is too small for this request: Early return.
    8322  if(GetSize() - suballocItem->offset < allocSize)
    8323  {
    8324  return false;
    8325  }
    8326 
    8327  // Start from offset equal to beginning of this suballocation.
    8328  *pOffset = suballocItem->offset;
    8329 
    8330  // Apply VMA_DEBUG_MARGIN at the beginning.
    8331  if(VMA_DEBUG_MARGIN > 0)
    8332  {
    8333  *pOffset += VMA_DEBUG_MARGIN;
    8334  }
    8335 
    8336  // Apply alignment.
    8337  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
    8338 
    8339  // Check previous suballocations for BufferImageGranularity conflicts.
    8340  // Make bigger alignment if necessary.
    8341  if(bufferImageGranularity > 1)
    8342  {
    8343  bool bufferImageGranularityConflict = false;
    8344  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
    8345  while(prevSuballocItem != m_Suballocations.cbegin())
    8346  {
    8347  --prevSuballocItem;
    8348  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
    8349  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
    8350  {
    8351  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    8352  {
    8353  bufferImageGranularityConflict = true;
    8354  break;
    8355  }
    8356  }
    8357  else
    8358  // Already on previous page.
    8359  break;
    8360  }
    8361  if(bufferImageGranularityConflict)
    8362  {
    8363  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
    8364  }
    8365  }
    8366 
    8367  // Now that we have final *pOffset, check if we are past suballocItem.
    8368  // If yes, return false - this function should be called for another suballocItem as the starting point.
    8369  if(*pOffset >= suballocItem->offset + suballocItem->size)
    8370  {
    8371  return false;
    8372  }
    8373 
    8374  // Calculate padding at the beginning based on current offset.
    8375  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
    8376 
    8377  // Calculate required margin at the end.
    8378  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
    8379 
    8380  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
    8381  // Another early return check.
    8382  if(suballocItem->offset + totalSize > GetSize())
    8383  {
    8384  return false;
    8385  }
    8386 
    8387  // Advance lastSuballocItem until desired size is reached.
    8388  // Update itemsToMakeLostCount.
    8389  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
    8390  if(totalSize > suballocItem->size)
    8391  {
    8392  VkDeviceSize remainingSize = totalSize - suballocItem->size;
    8393  while(remainingSize > 0)
    8394  {
    8395  ++lastSuballocItem;
    8396  if(lastSuballocItem == m_Suballocations.cend())
    8397  {
    8398  return false;
    8399  }
    8400  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    8401  {
    8402  *pSumFreeSize += lastSuballocItem->size;
    8403  }
    8404  else
    8405  {
    8406  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
    8407  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
    8408  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    8409  {
    8410  ++*itemsToMakeLostCount;
    8411  *pSumItemSize += lastSuballocItem->size;
    8412  }
    8413  else
    8414  {
    8415  return false;
    8416  }
    8417  }
    8418  remainingSize = (lastSuballocItem->size < remainingSize) ?
    8419  remainingSize - lastSuballocItem->size : 0;
    8420  }
    8421  }
    8422 
    8423  // Check next suballocations for BufferImageGranularity conflicts.
    8424  // If conflict exists, we must mark more allocations lost or fail.
    8425  if(bufferImageGranularity > 1)
    8426  {
    8427  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
    8428  ++nextSuballocItem;
    8429  while(nextSuballocItem != m_Suballocations.cend())
    8430  {
    8431  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
    8432  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    8433  {
    8434  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    8435  {
    8436  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
    8437  if(nextSuballoc.hAllocation->CanBecomeLost() &&
    8438  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    8439  {
    8440  ++*itemsToMakeLostCount;
    8441  }
    8442  else
    8443  {
    8444  return false;
    8445  }
    8446  }
    8447  }
    8448  else
    8449  {
    8450  // Already on next page.
    8451  break;
    8452  }
    8453  ++nextSuballocItem;
    8454  }
    8455  }
    8456  }
    8457  else
    8458  {
    8459  const VmaSuballocation& suballoc = *suballocItem;
    8460  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    8461 
    8462  *pSumFreeSize = suballoc.size;
    8463 
    8464  // Size of this suballocation is too small for this request: Early return.
    8465  if(suballoc.size < allocSize)
    8466  {
    8467  return false;
    8468  }
    8469 
    8470  // Start from offset equal to beginning of this suballocation.
    8471  *pOffset = suballoc.offset;
    8472 
    8473  // Apply VMA_DEBUG_MARGIN at the beginning.
    8474  if(VMA_DEBUG_MARGIN > 0)
    8475  {
    8476  *pOffset += VMA_DEBUG_MARGIN;
    8477  }
    8478 
    8479  // Apply alignment.
    8480  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
    8481 
    8482  // Check previous suballocations for BufferImageGranularity conflicts.
    8483  // Make bigger alignment if necessary.
    8484  if(bufferImageGranularity > 1)
    8485  {
    8486  bool bufferImageGranularityConflict = false;
    8487  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
    8488  while(prevSuballocItem != m_Suballocations.cbegin())
    8489  {
    8490  --prevSuballocItem;
    8491  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
    8492  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
    8493  {
    8494  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    8495  {
    8496  bufferImageGranularityConflict = true;
    8497  break;
    8498  }
    8499  }
    8500  else
    8501  // Already on previous page.
    8502  break;
    8503  }
    8504  if(bufferImageGranularityConflict)
    8505  {
    8506  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
    8507  }
    8508  }
    8509 
    8510  // Calculate padding at the beginning based on current offset.
    8511  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
    8512 
    8513  // Calculate required margin at the end.
    8514  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
    8515 
    8516  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
    8517  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
    8518  {
    8519  return false;
    8520  }
    8521 
    8522  // Check next suballocations for BufferImageGranularity conflicts.
    8523  // If conflict exists, allocation cannot be made here.
    8524  if(bufferImageGranularity > 1)
    8525  {
    8526  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
    8527  ++nextSuballocItem;
    8528  while(nextSuballocItem != m_Suballocations.cend())
    8529  {
    8530  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
    8531  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    8532  {
    8533  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    8534  {
    8535  return false;
    8536  }
    8537  }
    8538  else
    8539  {
    8540  // Already on next page.
    8541  break;
    8542  }
    8543  ++nextSuballocItem;
    8544  }
    8545  }
    8546  }
    8547 
    8548  // All tests passed: Success. pOffset is already filled.
    8549  return true;
    8550 }
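// The granularity test above treats bufferImageGranularity as a page size:
// linear and optimal resources must not share a page. A sketch in the spirit of
// VmaBlocksOnSamePage(), assuming a power-of-two pageSize:
//
//     static bool OnSamePageSketch(VkDeviceSize resourceAEnd, // last byte + 1
//         VkDeviceSize resourceBStart, VkDeviceSize pageSize)
//     {
//         VkDeviceSize pageA = (resourceAEnd - 1) & ~(pageSize - 1);
//         VkDeviceSize pageB = resourceBStart & ~(pageSize - 1);
//         return pageA == pageB;
//     }
//     // e.g. pageSize 4096: A ending at 4100 and B starting at 4300 both land
//     // on page 4096..8191, so a linear/optimal mix there would conflict.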
    8551 
    8552 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
    8553 {
    8554  VMA_ASSERT(item != m_Suballocations.end());
    8555  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8556 
    8557  VmaSuballocationList::iterator nextItem = item;
    8558  ++nextItem;
    8559  VMA_ASSERT(nextItem != m_Suballocations.end());
    8560  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    8561 
    8562  item->size += nextItem->size;
    8563  --m_FreeCount;
    8564  m_Suballocations.erase(nextItem);
    8565 }
    8566 
    8567 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
    8568 {
    8569  // Change this suballocation to be marked as free.
    8570  VmaSuballocation& suballoc = *suballocItem;
    8571  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8572  suballoc.hAllocation = VK_NULL_HANDLE;
    8573 
    8574  // Update totals.
    8575  ++m_FreeCount;
    8576  m_SumFreeSize += suballoc.size;
    8577 
    8578  // Merge with previous and/or next suballocation if it's also free.
    8579  bool mergeWithNext = false;
    8580  bool mergeWithPrev = false;
    8581 
    8582  VmaSuballocationList::iterator nextItem = suballocItem;
    8583  ++nextItem;
    8584  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    8585  {
    8586  mergeWithNext = true;
    8587  }
    8588 
    8589  VmaSuballocationList::iterator prevItem = suballocItem;
    8590  if(suballocItem != m_Suballocations.begin())
    8591  {
    8592  --prevItem;
    8593  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    8594  {
    8595  mergeWithPrev = true;
    8596  }
    8597  }
    8598 
    8599  if(mergeWithNext)
    8600  {
    8601  UnregisterFreeSuballocation(nextItem);
    8602  MergeFreeWithNext(suballocItem);
    8603  }
    8604 
    8605  if(mergeWithPrev)
    8606  {
    8607  UnregisterFreeSuballocation(prevItem);
    8608  MergeFreeWithNext(prevItem);
    8609  RegisterFreeSuballocation(prevItem);
    8610  return prevItem;
    8611  }
    8612  else
    8613  {
    8614  RegisterFreeSuballocation(suballocItem);
    8615  return suballocItem;
    8616  }
    8617 }
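// Worked example (illustrative): freeing the used range in
// [free 100][used 50][free 200] first merges with the next free range
// (50 + 200 = 250), then with the previous one (100 + 250 = 350). The neighbors
// are unregistered from m_FreeSuballocationsBySize up front so that the single
// surviving suballocation is re-registered exactly once, with its final size.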
    8618 
    8619 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    8620 {
    8621  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8622  VMA_ASSERT(item->size > 0);
    8623 
    8624  // You may want to enable this validation at the beginning or at the end of
    8625  // this function, depending on what you want to check.
    8626  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8627 
    8628  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    8629  {
    8630  if(m_FreeSuballocationsBySize.empty())
    8631  {
    8632  m_FreeSuballocationsBySize.push_back(item);
    8633  }
    8634  else
    8635  {
    8636  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    8637  }
    8638  }
    8639 
    8640  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8641 }
    8642 
    8643 
    8644 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
    8645 {
    8646  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8647  VMA_ASSERT(item->size > 0);
    8648 
    8649  // You may want to enable this validation at the beginning or at the end of
    8650  // this function, depending on what you want to check.
    8651  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8652 
    8653  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    8654  {
    8655  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    8656  m_FreeSuballocationsBySize.data(),
    8657  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
    8658  item,
    8659  VmaSuballocationItemSizeLess());
    8660  for(size_t index = it - m_FreeSuballocationsBySize.data();
    8661  index < m_FreeSuballocationsBySize.size();
    8662  ++index)
    8663  {
    8664  if(m_FreeSuballocationsBySize[index] == item)
    8665  {
    8666  VmaVectorRemove(m_FreeSuballocationsBySize, index);
    8667  return;
    8668  }
    8669  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
    8670  }
    8671  VMA_ASSERT(0 && "Not found.");
    8672  }
    8673 
    8674  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8675 }
    8676 
    8677 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
    8678  VkDeviceSize bufferImageGranularity,
    8679  VmaSuballocationType& inOutPrevSuballocType) const
    8680 {
    8681  if(bufferImageGranularity == 1 || IsEmpty())
    8682  {
    8683  return false;
    8684  }
    8685 
    8686  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
    8687  bool typeConflictFound = false;
    8688  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
    8689  it != m_Suballocations.cend();
    8690  ++it)
    8691  {
    8692  const VmaSuballocationType suballocType = it->type;
    8693  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
    8694  {
    8695  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
    8696  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
    8697  {
    8698  typeConflictFound = true;
    8699  }
    8700  inOutPrevSuballocType = suballocType;
    8701  }
    8702  }
    8703 
    8704  return typeConflictFound || minAlignment >= bufferImageGranularity;
    8705 }
    8706 
    8707 ////////////////////////////////////////////////////////////////////////////////
    8708 // class VmaBlockMetadata_Linear
    8709 
    8710 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    8711  VmaBlockMetadata(hAllocator),
    8712  m_SumFreeSize(0),
    8713  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    8714  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    8715  m_1stVectorIndex(0),
    8716  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    8717  m_1stNullItemsBeginCount(0),
    8718  m_1stNullItemsMiddleCount(0),
    8719  m_2ndNullItemsCount(0)
    8720 {
    8721 }
    8722 
    8723 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
    8724 {
    8725 }
    8726 
    8727 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
    8728 {
    8729  VmaBlockMetadata::Init(size);
    8730  m_SumFreeSize = size;
    8731 }
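// Layout sketch (illustrative): this metadata keeps two suballocation vectors.
// With only the 1st vector in use, allocations grow from the start of the block:
//
//     |aaa bbb ccc ...............................|    1st ->
//
// SECOND_VECTOR_RING_BUFFER: once allocations are freed at the start, new ones
// wrap around, and the 2nd vector grows behind the 1st vector's remaining tail:
//
//     |ddd eee ...............     bbb ccc .......|    2nd ->      1st ->
//
// SECOND_VECTOR_DOUBLE_STACK: upper-address allocations grow from the end of
// the block toward the middle:
//
//     |aaa bbb -> ...............  <- yyy zzz     |    1st ->      <- 2nd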
    8732 
    8733 bool VmaBlockMetadata_Linear::Validate() const
    8734 {
    8735  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8736  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8737 
    8738  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    8739  VMA_VALIDATE(!suballocations1st.empty() ||
    8740  suballocations2nd.empty() ||
    8741  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
    8742 
    8743  if(!suballocations1st.empty())
    8744  {
    8745  // A null item at the beginning should be accounted for in m_1stNullItemsBeginCount.
    8746  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
    8747  // Null item at the end should be just pop_back().
    8748  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    8749  }
    8750  if(!suballocations2nd.empty())
    8751  {
    8752  // Null item at the end should be just pop_back().
    8753  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    8754  }
    8755 
    8756  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    8757  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
    8758 
    8759  VkDeviceSize sumUsedSize = 0;
    8760  const size_t suballoc1stCount = suballocations1st.size();
    8761  VkDeviceSize offset = VMA_DEBUG_MARGIN;
    8762 
    8763  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8764  {
    8765  const size_t suballoc2ndCount = suballocations2nd.size();
    8766  size_t nullItem2ndCount = 0;
    8767  for(size_t i = 0; i < suballoc2ndCount; ++i)
    8768  {
    8769  const VmaSuballocation& suballoc = suballocations2nd[i];
    8770  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    8771 
    8772  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
    8773  VMA_VALIDATE(suballoc.offset >= offset);
    8774 
    8775  if(!currFree)
    8776  {
    8777  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
    8778  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
    8779  sumUsedSize += suballoc.size;
    8780  }
    8781  else
    8782  {
    8783  ++nullItem2ndCount;
    8784  }
    8785 
    8786  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    8787  }
    8788 
    8789  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    8790  }
    8791 
    8792  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    8793  {
    8794  const VmaSuballocation& suballoc = suballocations1st[i];
    8795  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
    8796  suballoc.hAllocation == VK_NULL_HANDLE);
    8797  }
    8798 
    8799  size_t nullItem1stCount = m_1stNullItemsBeginCount;
    8800 
    8801  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    8802  {
    8803  const VmaSuballocation& suballoc = suballocations1st[i];
    8804  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    8805 
    8806  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
    8807  VMA_VALIDATE(suballoc.offset >= offset);
    8808  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
    8809 
    8810  if(!currFree)
    8811  {
    8812  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
    8813  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
    8814  sumUsedSize += suballoc.size;
    8815  }
    8816  else
    8817  {
    8818  ++nullItem1stCount;
    8819  }
    8820 
    8821  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    8822  }
    8823  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
    8824 
    8825  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8826  {
    8827  const size_t suballoc2ndCount = suballocations2nd.size();
    8828  size_t nullItem2ndCount = 0;
    8829  for(size_t i = suballoc2ndCount; i--; )
    8830  {
    8831  const VmaSuballocation& suballoc = suballocations2nd[i];
    8832  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    8833 
    8834  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
    8835  VMA_VALIDATE(suballoc.offset >= offset);
    8836 
    8837  if(!currFree)
    8838  {
    8839  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
    8840  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
    8841  sumUsedSize += suballoc.size;
    8842  }
    8843  else
    8844  {
    8845  ++nullItem2ndCount;
    8846  }
    8847 
    8848  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    8849  }
    8850 
    8851  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    8852  }
    8853 
    8854  VMA_VALIDATE(offset <= GetSize());
    8855  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
    8856 
    8857  return true;
    8858 }
    8859 
    8860 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
    8861 {
    8862  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
    8863  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
    8864 }
    8865 
    8866 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
    8867 {
    8868  const VkDeviceSize size = GetSize();
    8869 
    8870  /*
    8871  We don't consider gaps inside allocation vectors with freed allocations because
    8872  they are not suitable for reuse in a linear allocator. We consider only space that
    8873  is available for new allocations.
    8874  */
    8875  if(IsEmpty())
    8876  {
    8877  return size;
    8878  }
    8879 
    8880  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8881 
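          // Rough sketch of the block layout per mode ('1'/'2' = items of the 1st/2nd
          // vector); only the free ranges shown can serve new allocations:
          //
          //   SECOND_VECTOR_EMPTY:        | free | 1111111111 | free ........... |
          //   SECOND_VECTOR_RING_BUFFER:  | 22222 | free ......... | 11111111111 |
          //   SECOND_VECTOR_DOUBLE_STACK: | 11111111111 | free ......... | 22222 |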
    8882  switch(m_2ndVectorMode)
    8883  {
    8884  case SECOND_VECTOR_EMPTY:
    8885  /*
    8886  Available space is after end of 1st, as well as before beginning of 1st (which
    8887  would make it a ring buffer).
    8888  */
    8889  {
    8890  const size_t suballocations1stCount = suballocations1st.size();
    8891  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
    8892  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
    8893  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
    8894  return VMA_MAX(
    8895  firstSuballoc.offset,
    8896  size - (lastSuballoc.offset + lastSuballoc.size));
    8897  }
    8898  break;
    8899 
    8900  case SECOND_VECTOR_RING_BUFFER:
    8901  /*
    8902  Available space is only between end of 2nd and beginning of 1st.
    8903  */
    8904  {
    8905  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8906  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
    8907  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
    8908  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
    8909  }
    8910  break;
    8911 
    8912  case SECOND_VECTOR_DOUBLE_STACK:
    8913  /*
    8914  Available space is only between end of 1st and top of 2nd.
    8915  */
    8916  {
    8917  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8918  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
    8919  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
    8920  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
    8921  }
    8922  break;
    8923 
    8924  default:
    8925  VMA_ASSERT(0);
    8926  return 0;
    8927  }
    8928 }
    8929 
    8930 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    8931 {
    8932  const VkDeviceSize size = GetSize();
    8933  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8934  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8935  const size_t suballoc1stCount = suballocations1st.size();
    8936  const size_t suballoc2ndCount = suballocations2nd.size();
    8937 
    8938  outInfo.blockCount = 1;
    8939  outInfo.allocationCount = (uint32_t)GetAllocationCount();
    8940  outInfo.unusedRangeCount = 0;
    8941  outInfo.usedBytes = 0;
          outInfo.unusedBytes = 0; // Must be initialized before the += accumulation below.
    8942  outInfo.allocationSizeMin = UINT64_MAX;
    8943  outInfo.allocationSizeMax = 0;
    8944  outInfo.unusedRangeSizeMin = UINT64_MAX;
    8945  outInfo.unusedRangeSizeMax = 0;
    8946 
    8947  VkDeviceSize lastOffset = 0;
    8948 
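          // Sweep the block in address order with lastOffset as the cursor: first the 2nd
          // vector when it forms the low part of a ring buffer, then the 1st vector, then
          // the 2nd vector when it is a double stack at the top of the block. Every gap
          // between the cursor and the next live allocation counts as one unused range.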
    8949  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8950  {
    8951  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8952  size_t nextAlloc2ndIndex = 0;
    8953  while(lastOffset < freeSpace2ndTo1stEnd)
    8954  {
    8955  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8956  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8957  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8958  {
    8959  ++nextAlloc2ndIndex;
    8960  }
    8961 
    8962  // Found non-null allocation.
    8963  if(nextAlloc2ndIndex < suballoc2ndCount)
    8964  {
    8965  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8966 
    8967  // 1. Process free space before this allocation.
    8968  if(lastOffset < suballoc.offset)
    8969  {
    8970  // There is free space from lastOffset to suballoc.offset.
    8971  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8972  ++outInfo.unusedRangeCount;
    8973  outInfo.unusedBytes += unusedRangeSize;
    8974  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8975  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8976  }
    8977 
    8978  // 2. Process this allocation.
    8979  // There is allocation with suballoc.offset, suballoc.size.
    8980  outInfo.usedBytes += suballoc.size;
    8981  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8982  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    8983 
    8984  // 3. Prepare for next iteration.
    8985  lastOffset = suballoc.offset + suballoc.size;
    8986  ++nextAlloc2ndIndex;
    8987  }
    8988  // We are at the end.
    8989  else
    8990  {
    8991  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8992  if(lastOffset < freeSpace2ndTo1stEnd)
    8993  {
    8994  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8995  ++outInfo.unusedRangeCount;
    8996  outInfo.unusedBytes += unusedRangeSize;
    8997  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8998  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8999  }
    9000 
    9001  // End of loop.
    9002  lastOffset = freeSpace2ndTo1stEnd;
    9003  }
    9004  }
    9005  }
    9006 
    9007  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9008  const VkDeviceSize freeSpace1stTo2ndEnd =
    9009  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    9010  while(lastOffset < freeSpace1stTo2ndEnd)
    9011  {
    9012  // Find next non-null allocation or move nextAlloc1stIndex to the end.
    9013  while(nextAlloc1stIndex < suballoc1stCount &&
    9014  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9015  {
    9016  ++nextAlloc1stIndex;
    9017  }
    9018 
    9019  // Found non-null allocation.
    9020  if(nextAlloc1stIndex < suballoc1stCount)
    9021  {
    9022  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9023 
    9024  // 1. Process free space before this allocation.
    9025  if(lastOffset < suballoc.offset)
    9026  {
    9027  // There is free space from lastOffset to suballoc.offset.
    9028  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9029  ++outInfo.unusedRangeCount;
    9030  outInfo.unusedBytes += unusedRangeSize;
    9031  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9032  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9033  }
    9034 
    9035  // 2. Process this allocation.
    9036  // There is allocation with suballoc.offset, suballoc.size.
    9037  outInfo.usedBytes += suballoc.size;
    9038  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    9039  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    9040 
    9041  // 3. Prepare for next iteration.
    9042  lastOffset = suballoc.offset + suballoc.size;
    9043  ++nextAlloc1stIndex;
    9044  }
    9045  // We are at the end.
    9046  else
    9047  {
    9048  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9049  if(lastOffset < freeSpace1stTo2ndEnd)
    9050  {
    9051  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    9052  ++outInfo.unusedRangeCount;
    9053  outInfo.unusedBytes += unusedRangeSize;
    9054  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9055  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9056  }
    9057 
    9058  // End of loop.
    9059  lastOffset = freeSpace1stTo2ndEnd;
    9060  }
    9061  }
    9062 
    9063  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9064  {
    9065  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9066  while(lastOffset < size)
    9067  {
    9068  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9069  while(nextAlloc2ndIndex != SIZE_MAX &&
    9070  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9071  {
    9072  --nextAlloc2ndIndex;
    9073  }
    9074 
    9075  // Found non-null allocation.
    9076  if(nextAlloc2ndIndex != SIZE_MAX)
    9077  {
    9078  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9079 
    9080  // 1. Process free space before this allocation.
    9081  if(lastOffset < suballoc.offset)
    9082  {
    9083  // There is free space from lastOffset to suballoc.offset.
    9084  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9085  ++outInfo.unusedRangeCount;
    9086  outInfo.unusedBytes += unusedRangeSize;
    9087  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9088  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9089  }
    9090 
    9091  // 2. Process this allocation.
    9092  // There is allocation with suballoc.offset, suballoc.size.
    9093  outInfo.usedBytes += suballoc.size;
    9094  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    9095  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    9096 
    9097  // 3. Prepare for next iteration.
    9098  lastOffset = suballoc.offset + suballoc.size;
    9099  --nextAlloc2ndIndex;
    9100  }
    9101  // We are at the end.
    9102  else
    9103  {
    9104  // There is free space from lastOffset to size.
    9105  if(lastOffset < size)
    9106  {
    9107  const VkDeviceSize unusedRangeSize = size - lastOffset;
    9108  ++outInfo.unusedRangeCount;
    9109  outInfo.unusedBytes += unusedRangeSize;
    9110  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9111  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9112  }
    9113 
    9114  // End of loop.
    9115  lastOffset = size;
    9116  }
    9117  }
    9118  }
    9119 
    9120  outInfo.unusedBytes = size - outInfo.usedBytes;
    9121 }
    9122 
    9123 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
    9124 {
    9125  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9126  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9127  const VkDeviceSize size = GetSize();
    9128  const size_t suballoc1stCount = suballocations1st.size();
    9129  const size_t suballoc2ndCount = suballocations2nd.size();
    9130 
    9131  inoutStats.size += size;
    9132 
    9133  VkDeviceSize lastOffset = 0;
    9134 
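          // Same address-order sweep as in CalcAllocationStatInfo() above, but only the
          // allocation count, total unused size, and largest unused range are accumulated
          // into the pool-level statistics.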
    9135  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9136  {
    9137  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    9138  size_t nextAlloc2ndIndex = 0;
    9139  while(lastOffset < freeSpace2ndTo1stEnd)
    9140  {
    9141  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9142  while(nextAlloc2ndIndex < suballoc2ndCount &&
    9143  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9144  {
    9145  ++nextAlloc2ndIndex;
    9146  }
    9147 
    9148  // Found non-null allocation.
    9149  if(nextAlloc2ndIndex < suballoc2ndCount)
    9150  {
    9151  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9152 
    9153  // 1. Process free space before this allocation.
    9154  if(lastOffset < suballoc.offset)
    9155  {
    9156  // There is free space from lastOffset to suballoc.offset.
    9157  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9158  inoutStats.unusedSize += unusedRangeSize;
    9159  ++inoutStats.unusedRangeCount;
    9160  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9161  }
    9162 
    9163  // 2. Process this allocation.
    9164  // There is allocation with suballoc.offset, suballoc.size.
    9165  ++inoutStats.allocationCount;
    9166 
    9167  // 3. Prepare for next iteration.
    9168  lastOffset = suballoc.offset + suballoc.size;
    9169  ++nextAlloc2ndIndex;
    9170  }
    9171  // We are at the end.
    9172  else
    9173  {
    9174  if(lastOffset < freeSpace2ndTo1stEnd)
    9175  {
    9176  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    9177  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    9178  inoutStats.unusedSize += unusedRangeSize;
    9179  ++inoutStats.unusedRangeCount;
    9180  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9181  }
    9182 
    9183  // End of loop.
    9184  lastOffset = freeSpace2ndTo1stEnd;
    9185  }
    9186  }
    9187  }
    9188 
    9189  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9190  const VkDeviceSize freeSpace1stTo2ndEnd =
    9191  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    9192  while(lastOffset < freeSpace1stTo2ndEnd)
    9193  {
    9194  // Find next non-null allocation or move nextAlloc1stIndex to the end.
    9195  while(nextAlloc1stIndex < suballoc1stCount &&
    9196  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9197  {
    9198  ++nextAlloc1stIndex;
    9199  }
    9200 
    9201  // Found non-null allocation.
    9202  if(nextAlloc1stIndex < suballoc1stCount)
    9203  {
    9204  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9205 
    9206  // 1. Process free space before this allocation.
    9207  if(lastOffset < suballoc.offset)
    9208  {
    9209  // There is free space from lastOffset to suballoc.offset.
    9210  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9211  inoutStats.unusedSize += unusedRangeSize;
    9212  ++inoutStats.unusedRangeCount;
    9213  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9214  }
    9215 
    9216  // 2. Process this allocation.
    9217  // There is allocation with suballoc.offset, suballoc.size.
    9218  ++inoutStats.allocationCount;
    9219 
    9220  // 3. Prepare for next iteration.
    9221  lastOffset = suballoc.offset + suballoc.size;
    9222  ++nextAlloc1stIndex;
    9223  }
    9224  // We are at the end.
    9225  else
    9226  {
    9227  if(lastOffset < freeSpace1stTo2ndEnd)
    9228  {
    9229  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9230  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    9231  inoutStats.unusedSize += unusedRangeSize;
    9232  ++inoutStats.unusedRangeCount;
    9233  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9234  }
    9235 
    9236  // End of loop.
    9237  lastOffset = freeSpace1stTo2ndEnd;
    9238  }
    9239  }
    9240 
    9241  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9242  {
    9243  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9244  while(lastOffset < size)
    9245  {
    9246  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9247  while(nextAlloc2ndIndex != SIZE_MAX &&
    9248  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9249  {
    9250  --nextAlloc2ndIndex;
    9251  }
    9252 
    9253  // Found non-null allocation.
    9254  if(nextAlloc2ndIndex != SIZE_MAX)
    9255  {
    9256  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9257 
    9258  // 1. Process free space before this allocation.
    9259  if(lastOffset < suballoc.offset)
    9260  {
    9261  // There is free space from lastOffset to suballoc.offset.
    9262  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9263  inoutStats.unusedSize += unusedRangeSize;
    9264  ++inoutStats.unusedRangeCount;
    9265  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9266  }
    9267 
    9268  // 2. Process this allocation.
    9269  // There is allocation with suballoc.offset, suballoc.size.
    9270  ++inoutStats.allocationCount;
    9271 
    9272  // 3. Prepare for next iteration.
    9273  lastOffset = suballoc.offset + suballoc.size;
    9274  --nextAlloc2ndIndex;
    9275  }
    9276  // We are at the end.
    9277  else
    9278  {
    9279  if(lastOffset < size)
    9280  {
    9281  // There is free space from lastOffset to size.
    9282  const VkDeviceSize unusedRangeSize = size - lastOffset;
    9283  inoutStats.unusedSize += unusedRangeSize;
    9284  ++inoutStats.unusedRangeCount;
    9285  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9286  }
    9287 
    9288  // End of loop.
    9289  lastOffset = size;
    9290  }
    9291  }
    9292  }
    9293 }
    9294 
    9295 #if VMA_STATS_STRING_ENABLED
    9296 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
    9297 {
    9298  const VkDeviceSize size = GetSize();
    9299  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9300  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9301  const size_t suballoc1stCount = suballocations1st.size();
    9302  const size_t suballoc2ndCount = suballocations2nd.size();
    9303 
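          // Two passes over the same layout: the first pass only counts allocations, used
          // bytes, and unused ranges so that PrintDetailedMap_Begin() can emit the summary
          // up front; the second pass writes one JSON entry per allocation and per unused
          // range.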
    9304  // FIRST PASS
    9305 
    9306  size_t unusedRangeCount = 0;
    9307  VkDeviceSize usedBytes = 0;
    9308 
    9309  VkDeviceSize lastOffset = 0;
    9310 
    9311  size_t alloc2ndCount = 0;
    9312  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9313  {
    9314  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    9315  size_t nextAlloc2ndIndex = 0;
    9316  while(lastOffset < freeSpace2ndTo1stEnd)
    9317  {
    9318  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9319  while(nextAlloc2ndIndex < suballoc2ndCount &&
    9320  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9321  {
    9322  ++nextAlloc2ndIndex;
    9323  }
    9324 
    9325  // Found non-null allocation.
    9326  if(nextAlloc2ndIndex < suballoc2ndCount)
    9327  {
    9328  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9329 
    9330  // 1. Process free space before this allocation.
    9331  if(lastOffset < suballoc.offset)
    9332  {
    9333  // There is free space from lastOffset to suballoc.offset.
    9334  ++unusedRangeCount;
    9335  }
    9336 
    9337  // 2. Process this allocation.
    9338  // There is allocation with suballoc.offset, suballoc.size.
    9339  ++alloc2ndCount;
    9340  usedBytes += suballoc.size;
    9341 
    9342  // 3. Prepare for next iteration.
    9343  lastOffset = suballoc.offset + suballoc.size;
    9344  ++nextAlloc2ndIndex;
    9345  }
    9346  // We are at the end.
    9347  else
    9348  {
    9349  if(lastOffset < freeSpace2ndTo1stEnd)
    9350  {
    9351  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    9352  ++unusedRangeCount;
    9353  }
    9354 
    9355  // End of loop.
    9356  lastOffset = freeSpace2ndTo1stEnd;
    9357  }
    9358  }
    9359  }
    9360 
    9361  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9362  size_t alloc1stCount = 0;
    9363  const VkDeviceSize freeSpace1stTo2ndEnd =
    9364  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    9365  while(lastOffset < freeSpace1stTo2ndEnd)
    9366  {
    9367  // Find next non-null allocation or move nextAlloc1stIndex to the end.
    9368  while(nextAlloc1stIndex < suballoc1stCount &&
    9369  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9370  {
    9371  ++nextAlloc1stIndex;
    9372  }
    9373 
    9374  // Found non-null allocation.
    9375  if(nextAlloc1stIndex < suballoc1stCount)
    9376  {
    9377  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9378 
    9379  // 1. Process free space before this allocation.
    9380  if(lastOffset < suballoc.offset)
    9381  {
    9382  // There is free space from lastOffset to suballoc.offset.
    9383  ++unusedRangeCount;
    9384  }
    9385 
    9386  // 2. Process this allocation.
    9387  // There is allocation with suballoc.offset, suballoc.size.
    9388  ++alloc1stCount;
    9389  usedBytes += suballoc.size;
    9390 
    9391  // 3. Prepare for next iteration.
    9392  lastOffset = suballoc.offset + suballoc.size;
    9393  ++nextAlloc1stIndex;
    9394  }
    9395  // We are at the end.
    9396  else
    9397  {
    9398  if(lastOffset < freeSpace1stTo2ndEnd)
    9399  {
    9400  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9401  ++unusedRangeCount;
    9402  }
    9403 
    9404  // End of loop.
    9405  lastOffset = freeSpace1stTo2ndEnd;
    9406  }
    9407  }
    9408 
    9409  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9410  {
    9411  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9412  while(lastOffset < size)
    9413  {
    9414  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9415  while(nextAlloc2ndIndex != SIZE_MAX &&
    9416  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9417  {
    9418  --nextAlloc2ndIndex;
    9419  }
    9420 
    9421  // Found non-null allocation.
    9422  if(nextAlloc2ndIndex != SIZE_MAX)
    9423  {
    9424  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9425 
    9426  // 1. Process free space before this allocation.
    9427  if(lastOffset < suballoc.offset)
    9428  {
    9429  // There is free space from lastOffset to suballoc.offset.
    9430  ++unusedRangeCount;
    9431  }
    9432 
    9433  // 2. Process this allocation.
    9434  // There is allocation with suballoc.offset, suballoc.size.
    9435  ++alloc2ndCount;
    9436  usedBytes += suballoc.size;
    9437 
    9438  // 3. Prepare for next iteration.
    9439  lastOffset = suballoc.offset + suballoc.size;
    9440  --nextAlloc2ndIndex;
    9441  }
    9442  // We are at the end.
    9443  else
    9444  {
    9445  if(lastOffset < size)
    9446  {
    9447  // There is free space from lastOffset to size.
    9448  ++unusedRangeCount;
    9449  }
    9450 
    9451  // End of loop.
    9452  lastOffset = size;
    9453  }
    9454  }
    9455  }
    9456 
    9457  const VkDeviceSize unusedBytes = size - usedBytes;
    9458  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
    9459 
    9460  // SECOND PASS
    9461  lastOffset = 0;
    9462 
    9463  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9464  {
    9465  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    9466  size_t nextAlloc2ndIndex = 0;
    9467  while(lastOffset < freeSpace2ndTo1stEnd)
    9468  {
    9469  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9470  while(nextAlloc2ndIndex < suballoc2ndCount &&
    9471  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9472  {
    9473  ++nextAlloc2ndIndex;
    9474  }
    9475 
    9476  // Found non-null allocation.
    9477  if(nextAlloc2ndIndex < suballoc2ndCount)
    9478  {
    9479  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9480 
    9481  // 1. Process free space before this allocation.
    9482  if(lastOffset < suballoc.offset)
    9483  {
    9484  // There is free space from lastOffset to suballoc.offset.
    9485  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9486  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9487  }
    9488 
    9489  // 2. Process this allocation.
    9490  // There is allocation with suballoc.offset, suballoc.size.
    9491  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    9492 
    9493  // 3. Prepare for next iteration.
    9494  lastOffset = suballoc.offset + suballoc.size;
    9495  ++nextAlloc2ndIndex;
    9496  }
    9497  // We are at the end.
    9498  else
    9499  {
    9500  if(lastOffset < freeSpace2ndTo1stEnd)
    9501  {
    9502  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    9503  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    9504  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9505  }
    9506 
    9507  // End of loop.
    9508  lastOffset = freeSpace2ndTo1stEnd;
    9509  }
    9510  }
    9511  }
    9512 
    9513  nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9514  while(lastOffset < freeSpace1stTo2ndEnd)
    9515  {
    9516  // Find next non-null allocation or move nextAlloc1stIndex to the end.
    9517  while(nextAlloc1stIndex < suballoc1stCount &&
    9518  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9519  {
    9520  ++nextAlloc1stIndex;
    9521  }
    9522 
    9523  // Found non-null allocation.
    9524  if(nextAlloc1stIndex < suballoc1stCount)
    9525  {
    9526  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9527 
    9528  // 1. Process free space before this allocation.
    9529  if(lastOffset < suballoc.offset)
    9530  {
    9531  // There is free space from lastOffset to suballoc.offset.
    9532  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9533  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9534  }
    9535 
    9536  // 2. Process this allocation.
    9537  // There is allocation with suballoc.offset, suballoc.size.
    9538  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    9539 
    9540  // 3. Prepare for next iteration.
    9541  lastOffset = suballoc.offset + suballoc.size;
    9542  ++nextAlloc1stIndex;
    9543  }
    9544  // We are at the end.
    9545  else
    9546  {
    9547  if(lastOffset < freeSpace1stTo2ndEnd)
    9548  {
    9549  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9550  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    9551  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9552  }
    9553 
    9554  // End of loop.
    9555  lastOffset = freeSpace1stTo2ndEnd;
    9556  }
    9557  }
    9558 
    9559  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9560  {
    9561  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9562  while(lastOffset < size)
    9563  {
    9564  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9565  while(nextAlloc2ndIndex != SIZE_MAX &&
    9566  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9567  {
    9568  --nextAlloc2ndIndex;
    9569  }
    9570 
    9571  // Found non-null allocation.
    9572  if(nextAlloc2ndIndex != SIZE_MAX)
    9573  {
    9574  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9575 
    9576  // 1. Process free space before this allocation.
    9577  if(lastOffset < suballoc.offset)
    9578  {
    9579  // There is free space from lastOffset to suballoc.offset.
    9580  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9581  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9582  }
    9583 
    9584  // 2. Process this allocation.
    9585  // There is allocation with suballoc.offset, suballoc.size.
    9586  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    9587 
    9588  // 3. Prepare for next iteration.
    9589  lastOffset = suballoc.offset + suballoc.size;
    9590  --nextAlloc2ndIndex;
    9591  }
    9592  // We are at the end.
    9593  else
    9594  {
    9595  if(lastOffset < size)
    9596  {
    9597  // There is free space from lastOffset to size.
    9598  const VkDeviceSize unusedRangeSize = size - lastOffset;
    9599  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9600  }
    9601 
    9602  // End of loop.
    9603  lastOffset = size;
    9604  }
    9605  }
    9606  }
    9607 
    9608  PrintDetailedMap_End(json);
    9609 }
    9610 #endif // #if VMA_STATS_STRING_ENABLED
    9611 
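         // Entry point of the linear algorithm: requests with upperAddress are placed at
         // the top of the block, turning the 2nd vector into a double stack; all other
         // requests go through the lower-address helper. Both helpers are defined below.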
    9612 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
    9613  uint32_t currentFrameIndex,
    9614  uint32_t frameInUseCount,
    9615  VkDeviceSize bufferImageGranularity,
    9616  VkDeviceSize allocSize,
    9617  VkDeviceSize allocAlignment,
    9618  bool upperAddress,
    9619  VmaSuballocationType allocType,
    9620  bool canMakeOtherLost,
    9621  uint32_t strategy,
    9622  VmaAllocationRequest* pAllocationRequest)
    9623 {
    9624  VMA_ASSERT(allocSize > 0);
    9625  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    9626  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    9627  VMA_HEAVY_ASSERT(Validate());
    9628  return upperAddress ?
    9629  CreateAllocationRequest_UpperAddress(
    9630  currentFrameIndex, frameInUseCount, bufferImageGranularity,
    9631  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) :
    9632  CreateAllocationRequest_LowerAddress(
    9633  currentFrameIndex, frameInUseCount, bufferImageGranularity,
    9634  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest);
    9635 }
    9636 
    9637 bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
    9638  uint32_t currentFrameIndex,
    9639  uint32_t frameInUseCount,
    9640  VkDeviceSize bufferImageGranularity,
    9641  VkDeviceSize allocSize,
    9642  VkDeviceSize allocAlignment,
    9643  VmaSuballocationType allocType,
    9644  bool canMakeOtherLost,
    9645  uint32_t strategy,
    9646  VmaAllocationRequest* pAllocationRequest)
    9647 {
    9648  const VkDeviceSize size = GetSize();
    9649  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9650  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9651 
    9652  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9653  {
    9654  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
    9655  return false;
    9656  }
    9657 
    9658  // Try to allocate just below 2nd.back(), or at the end of the block if 2nd is empty.
    9659  if(allocSize > size)
    9660  {
    9661  return false;
    9662  }
    9663  VkDeviceSize resultBaseOffset = size - allocSize;
    9664  if(!suballocations2nd.empty())
    9665  {
    9666  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
    9667  resultBaseOffset = lastSuballoc.offset - allocSize;
    9668  if(allocSize > lastSuballoc.offset)
    9669  {
    9670  return false;
    9671  }
    9672  }
    9673 
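          // resultBaseOffset is now the highest offset at which the allocation still ends
          // at the bottom of the upper stack (or at the end of the block when the stack is
          // empty). The steps below only move resultOffset further down: debug margin
          // first, then the requested alignment, then bufferImageGranularity if a conflict
          // with a neighbor from the 2nd vector is found.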
    9674  // Start from offset equal to end of free space.
    9675  VkDeviceSize resultOffset = resultBaseOffset;
    9676 
    9677  // Apply VMA_DEBUG_MARGIN at the end.
    9678  if(VMA_DEBUG_MARGIN > 0)
    9679  {
    9680  if(resultOffset < VMA_DEBUG_MARGIN)
    9681  {
    9682  return false;
    9683  }
    9684  resultOffset -= VMA_DEBUG_MARGIN;
    9685  }
    9686 
    9687  // Apply alignment.
    9688  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
    9689 
    9690  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
    9691  // Make bigger alignment if necessary.
    9692  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    9693  {
    9694  bool bufferImageGranularityConflict = false;
    9695  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
    9696  {
    9697  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
    9698  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    9699  {
    9700  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
    9701  {
    9702  bufferImageGranularityConflict = true;
    9703  break;
    9704  }
    9705  }
    9706  else
    9707  // Already on previous page.
    9708  break;
    9709  }
    9710  if(bufferImageGranularityConflict)
    9711  {
    9712  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
    9713  }
    9714  }
    9715 
    9716  // There is enough free space.
    9717  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
    9718  suballocations1st.back().offset + suballocations1st.back().size :
    9719  0;
    9720  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
    9721  {
    9722  // Check previous suballocations for BufferImageGranularity conflicts.
    9723  // If conflict exists, allocation cannot be made here.
    9724  if(bufferImageGranularity > 1)
    9725  {
    9726  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
    9727  {
    9728  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
    9729  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    9730  {
    9731  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
    9732  {
    9733  return false;
    9734  }
    9735  }
    9736  else
    9737  {
    9738  // Already on next page.
    9739  break;
    9740  }
    9741  }
    9742  }
    9743 
    9744  // All tests passed: Success.
    9745  pAllocationRequest->offset = resultOffset;
    9746  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
    9747  pAllocationRequest->sumItemSize = 0;
    9748  // pAllocationRequest->item unused.
    9749  pAllocationRequest->itemsToMakeLostCount = 0;
    9750  pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
    9751  return true;
    9752  }
    9753 
    9754  return false;
    9755 }
    9756 
    9757 bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
    9758  uint32_t currentFrameIndex,
    9759  uint32_t frameInUseCount,
    9760  VkDeviceSize bufferImageGranularity,
    9761  VkDeviceSize allocSize,
    9762  VkDeviceSize allocAlignment,
    9763  VmaSuballocationType allocType,
    9764  bool canMakeOtherLost,
    9765  uint32_t strategy,
    9766  VmaAllocationRequest* pAllocationRequest)
    9767 {
    9768  const VkDeviceSize size = GetSize();
    9769  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9770  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9771 
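          // Two placements are tried in order:
          // 1. Append past the end of the 1st vector (while the 2nd vector is empty or is
          //    a double stack), bounded by the end of the block or the top of the 2nd stack.
          // 2. Otherwise wrap around like a ring buffer: allocate past the end of the 2nd
          //    vector, bounded by the beginning of the 1st vector, optionally making old
          //    allocations at the front of the 1st vector lost.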
    9772  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9773  {
    9774  // Try to allocate at the end of 1st vector.
    9775 
    9776  VkDeviceSize resultBaseOffset = 0;
    9777  if(!suballocations1st.empty())
    9778  {
    9779  const VmaSuballocation& lastSuballoc = suballocations1st.back();
    9780  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
    9781  }
    9782 
    9783  // Start from offset equal to beginning of free space.
    9784  VkDeviceSize resultOffset = resultBaseOffset;
    9785 
    9786  // Apply VMA_DEBUG_MARGIN at the beginning.
    9787  if(VMA_DEBUG_MARGIN > 0)
    9788  {
    9789  resultOffset += VMA_DEBUG_MARGIN;
    9790  }
    9791 
    9792  // Apply alignment.
    9793  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
    9794 
    9795  // Check previous suballocations for BufferImageGranularity conflicts.
    9796  // Make bigger alignment if necessary.
    9797  if(bufferImageGranularity > 1 && !suballocations1st.empty())
    9798  {
    9799  bool bufferImageGranularityConflict = false;
    9800  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
    9801  {
    9802  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
    9803  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    9804  {
    9805  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    9806  {
    9807  bufferImageGranularityConflict = true;
    9808  break;
    9809  }
    9810  }
    9811  else
    9812  // Already on previous page.
    9813  break;
    9814  }
    9815  if(bufferImageGranularityConflict)
    9816  {
    9817  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
    9818  }
    9819  }
    9820 
    9821  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
    9822  suballocations2nd.back().offset : size;
    9823 
    9824  // There is enough free space at the end after alignment.
    9825  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
    9826  {
    9827  // Check next suballocations for BufferImageGranularity conflicts.
    9828  // If conflict exists, allocation cannot be made here.
    9829  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9830  {
    9831  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
    9832  {
    9833  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
    9834  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    9835  {
    9836  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    9837  {
    9838  return false;
    9839  }
    9840  }
    9841  else
    9842  {
    9843  // Already on previous page.
    9844  break;
    9845  }
    9846  }
    9847  }
    9848 
    9849  // All tests passed: Success.
    9850  pAllocationRequest->offset = resultOffset;
    9851  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
    9852  pAllocationRequest->sumItemSize = 0;
    9853  // pAllocationRequest->item, customData unused.
    9854  pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
    9855  pAllocationRequest->itemsToMakeLostCount = 0;
    9856  return true;
    9857  }
    9858  }
    9859 
    9860  // Wrap-around to the end of the 2nd vector. Try to allocate there, treating the
    9861  // beginning of the 1st vector as the end of free space.
    9862  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9863  {
    9864  VMA_ASSERT(!suballocations1st.empty());
    9865 
    9866  VkDeviceSize resultBaseOffset = 0;
    9867  if(!suballocations2nd.empty())
    9868  {
    9869  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
    9870  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
    9871  }
    9872 
    9873  // Start from offset equal to beginning of free space.
    9874  VkDeviceSize resultOffset = resultBaseOffset;
    9875 
    9876  // Apply VMA_DEBUG_MARGIN at the beginning.
    9877  if(VMA_DEBUG_MARGIN > 0)
    9878  {
    9879  resultOffset += VMA_DEBUG_MARGIN;
    9880  }
    9881 
    9882  // Apply alignment.
    9883  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
    9884 
    9885  // Check previous suballocations for BufferImageGranularity conflicts.
    9886  // Make bigger alignment if necessary.
    9887  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    9888  {
    9889  bool bufferImageGranularityConflict = false;
    9890  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
    9891  {
    9892  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
    9893  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    9894  {
    9895  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    9896  {
    9897  bufferImageGranularityConflict = true;
    9898  break;
    9899  }
    9900  }
    9901  else
    9902  // Already on previous page.
    9903  break;
    9904  }
    9905  if(bufferImageGranularityConflict)
    9906  {
    9907  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
    9908  }
    9909  }
    9910 
    9911  pAllocationRequest->itemsToMakeLostCount = 0;
    9912  pAllocationRequest->sumItemSize = 0;
    9913  size_t index1st = m_1stNullItemsBeginCount;
    9914 
    9915  if(canMakeOtherLost)
    9916  {
    9917  while(index1st < suballocations1st.size() &&
    9918  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
    9919  {
    9920  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
    9921  const VmaSuballocation& suballoc = suballocations1st[index1st];
    9922  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
    9923  {
    9924  // No problem.
    9925  }
    9926  else
    9927  {
    9928  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
    9929  if(suballoc.hAllocation->CanBecomeLost() &&
    9930  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    9931  {
    9932  ++pAllocationRequest->itemsToMakeLostCount;
    9933  pAllocationRequest->sumItemSize += suballoc.size;
    9934  }
    9935  else
    9936  {
    9937  return false;
    9938  }
    9939  }
    9940  ++index1st;
    9941  }
    9942 
    9943  // Check next suballocations for BufferImageGranularity conflicts.
    9944  // If conflict exists, we must mark more allocations lost or fail.
    9945  if(bufferImageGranularity > 1)
    9946  {
    9947  while(index1st < suballocations1st.size())
    9948  {
    9949  const VmaSuballocation& suballoc = suballocations1st[index1st];
    9950  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
    9951  {
    9952  if(suballoc.hAllocation != VK_NULL_HANDLE)
    9953  {
    9954  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
    9955  if(suballoc.hAllocation->CanBecomeLost() &&
    9956  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    9957  {
    9958  ++pAllocationRequest->itemsToMakeLostCount;
    9959  pAllocationRequest->sumItemSize += suballoc.size;
    9960  }
    9961  else
    9962  {
    9963  return false;
    9964  }
    9965  }
    9966  }
    9967  else
    9968  {
    9969  // Already on next page.
    9970  break;
    9971  }
    9972  ++index1st;
    9973  }
    9974  }
    9975 
    9976  // Special case: There is not enough room at the end for this allocation, even after making all from the 1st lost.
    9977  if(index1st == suballocations1st.size() &&
    9978  resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
    9979  {
    9980  // TODO: Known limitation: this case is not implemented yet, so the allocation fails.
    9981  VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
    9982  }
    9983  }
    9984 
    9985  // There is enough free space at the end after alignment.
    9986  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
    9987  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
    9988  {
    9989  // Check next suballocations for BufferImageGranularity conflicts.
    9990  // If conflict exists, allocation cannot be made here.
    9991  if(bufferImageGranularity > 1)
    9992  {
    9993  for(size_t nextSuballocIndex = index1st;
    9994  nextSuballocIndex < suballocations1st.size();
    9995  nextSuballocIndex++)
    9996  {
    9997  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
    9998  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    9999  {
    10000  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    10001  {
    10002  return false;
    10003  }
    10004  }
    10005  else
    10006  {
    10007  // Already on next page.
    10008  break;
    10009  }
    10010  }
    10011  }
    10012 
    10013  // All tests passed: Success.
    10014  pAllocationRequest->offset = resultOffset;
    10015  pAllocationRequest->sumFreeSize =
    10016  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
    10017  - resultBaseOffset
    10018  - pAllocationRequest->sumItemSize;
    10019  pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
    10020  // pAllocationRequest->item, customData unused.
    10021  return true;
    10022  }
    10023  }
    10024 
    10025  return false;
    10026 }
    10027 
    10028 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    10029  uint32_t currentFrameIndex,
    10030  uint32_t frameInUseCount,
    10031  VmaAllocationRequest* pAllocationRequest)
    10032 {
    10033  if(pAllocationRequest->itemsToMakeLostCount == 0)
    10034  {
    10035  return true;
    10036  }
    10037 
    10038  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
    10039 
    10040  // We always start from 1st.
    10041  SuballocationVectorType* suballocations = &AccessSuballocations1st();
    10042  size_t index = m_1stNullItemsBeginCount;
    10043  size_t madeLostCount = 0;
    10044  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    10045  {
    10046  if(index == suballocations->size())
    10047  {
    10048  index = 0;
    10049  // If we get to the end of 1st, we wrap around to the beginning of 2nd.
    10050  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    10051  {
    10052  suballocations = &AccessSuballocations2nd();
    10053  }
    10054  // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
    10055  // suballocations continues pointing at AccessSuballocations1st().
    10056  VMA_ASSERT(!suballocations->empty());
    10057  }
    10058  VmaSuballocation& suballoc = (*suballocations)[index];
    10059  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    10060  {
    10061  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
    10062  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
    10063  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    10064  {
    10065  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    10066  suballoc.hAllocation = VK_NULL_HANDLE;
    10067  m_SumFreeSize += suballoc.size;
    10068  if(suballocations == &AccessSuballocations1st())
    10069  {
    10070  ++m_1stNullItemsMiddleCount;
    10071  }
    10072  else
    10073  {
    10074  ++m_2ndNullItemsCount;
    10075  }
    10076  ++madeLostCount;
    10077  }
    10078  else
    10079  {
    10080  return false;
    10081  }
    10082  }
    10083  ++index;
    10084  }
    10085 
    10086  CleanupAfterFree();
    10087  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
    10088 
    10089  return true;
    10090 }
    10091 
    10092 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    10093 {
    10094  uint32_t lostAllocationCount = 0;
    10095 
    10096  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10097  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    10098  {
    10099  VmaSuballocation& suballoc = suballocations1st[i];
    10100  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    10101  suballoc.hAllocation->CanBecomeLost() &&
    10102  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    10103  {
    10104  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    10105  suballoc.hAllocation = VK_NULL_HANDLE;
    10106  ++m_1stNullItemsMiddleCount;
    10107  m_SumFreeSize += suballoc.size;
    10108  ++lostAllocationCount;
    10109  }
    10110  }
    10111 
    10112  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10113  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    10114  {
    10115  VmaSuballocation& suballoc = suballocations2nd[i];
    10116  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    10117  suballoc.hAllocation->CanBecomeLost() &&
    10118  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    10119  {
    10120  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    10121  suballoc.hAllocation = VK_NULL_HANDLE;
    10122  ++m_2ndNullItemsCount;
    10123  m_SumFreeSize += suballoc.size;
    10124  ++lostAllocationCount;
    10125  }
    10126  }
    10127 
    10128  if(lostAllocationCount)
    10129  {
    10130  CleanupAfterFree();
    10131  }
    10132 
    10133  return lostAllocationCount;
    10134 }
    10135 
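         // With VMA_DEBUG_MARGIN and corruption detection enabled, every live
         // suballocation is surrounded by margins filled with a magic number. Both
         // vectors are scanned here; a missing magic value directly before an
         // allocation's offset or directly after its end is reported as corruption.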
    10136 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
    10137 {
    10138  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10139  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    10140  {
    10141  const VmaSuballocation& suballoc = suballocations1st[i];
    10142  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    10143  {
    10144  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    10145  {
    10146  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    10147  return VK_ERROR_VALIDATION_FAILED_EXT;
    10148  }
    10149  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    10150  {
    10151  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    10152  return VK_ERROR_VALIDATION_FAILED_EXT;
    10153  }
    10154  }
    10155  }
    10156 
    10157  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10158  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    10159  {
    10160  const VmaSuballocation& suballoc = suballocations2nd[i];
    10161  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    10162  {
    10163  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    10164  {
    10165  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    10166  return VK_ERROR_VALIDATION_FAILED_EXT;
    10167  }
    10168  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    10169  {
    10170  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    10171  return VK_ERROR_VALIDATION_FAILED_EXT;
    10172  }
    10173  }
    10174  }
    10175 
    10176  return VK_SUCCESS;
    10177 }
    10178 
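         // Commits a previously created request: UpperAddress pushes onto the 2nd vector
         // as a double stack, EndOf1st appends to the 1st vector, and EndOf2nd appends to
         // the 2nd vector as the wrapped part of the ring buffer, switching
         // m_2ndVectorMode accordingly.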
    10179 void VmaBlockMetadata_Linear::Alloc(
    10180  const VmaAllocationRequest& request,
    10181  VmaSuballocationType type,
    10182  VkDeviceSize allocSize,
    10183  VmaAllocation hAllocation)
    10184 {
    10185  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
    10186 
    10187  switch(request.type)
    10188  {
    10189  case VmaAllocationRequestType::UpperAddress:
    10190  {
    10191  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
    10192  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
    10193  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10194  suballocations2nd.push_back(newSuballoc);
    10195  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    10196  }
    10197  break;
    10198  case VmaAllocationRequestType::EndOf1st:
    10199  {
    10200  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10201 
    10202  VMA_ASSERT(suballocations1st.empty() ||
    10203  request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
    10204  // Check if it fits before the end of the block.
    10205  VMA_ASSERT(request.offset + allocSize <= GetSize());
    10206 
    10207  suballocations1st.push_back(newSuballoc);
    10208  }
    10209  break;
    10210  case VmaAllocationRequestType::EndOf2nd:
    10211  {
    10212  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10213  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
    10214  VMA_ASSERT(!suballocations1st.empty() &&
    10215  request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
    10216  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10217 
    10218  switch(m_2ndVectorMode)
    10219  {
    10220  case SECOND_VECTOR_EMPTY:
    10221  // First allocation from second part ring buffer.
    10222  VMA_ASSERT(suballocations2nd.empty());
    10223  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
    10224  break;
    10225  case SECOND_VECTOR_RING_BUFFER:
    10226  // 2-part ring buffer is already started.
    10227  VMA_ASSERT(!suballocations2nd.empty());
    10228  break;
    10229  case SECOND_VECTOR_DOUBLE_STACK:
    10230  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
    10231  break;
    10232  default:
    10233  VMA_ASSERT(0);
    10234  }
    10235 
    10236  suballocations2nd.push_back(newSuballoc);
    10237  }
    10238  break;
    10239  default:
    10240  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
    10241  }
    10242 
    10243  m_SumFreeSize -= newSuballoc.size;
    10244 }
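/*
Illustrative sketch, not part of the library: the UpperAddress branch above is what
backs "double stack" usage of a linear custom pool. Assuming "pool" was created with
VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT and "bufCreateInfo" is a filled
VkBufferCreateInfo:

\code
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.pool = pool;

VkBuffer buf1, buf2;
VmaAllocation alloc1, alloc2;

// Default: placed at the end of the 1st vector (stack growing from the bottom).
vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf1, &alloc1, VMA_NULL);

// Upper address: placed in the 2nd vector, growing down from the top of the block.
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf2, &alloc2, VMA_NULL);
\endcode

Mixing this with ring-buffer usage of the same pool trips the critical-error asserts
above, because SECOND_VECTOR_DOUBLE_STACK and SECOND_VECTOR_RING_BUFFER are mutually
exclusive modes of the 2nd vector.
*/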
    10245 
    10246 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
    10247 {
    10248  FreeAtOffset(allocation->GetOffset());
    10249 }
    10250 
    10251 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
    10252 {
    10253  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10254  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10255 
    10256  if(!suballocations1st.empty())
    10257  {
    10258  // Freed allocation is the first one in the 1st vector: mark it as the next empty item at the beginning.
    10259  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
    10260  if(firstSuballoc.offset == offset)
    10261  {
    10262  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    10263  firstSuballoc.hAllocation = VK_NULL_HANDLE;
    10264  m_SumFreeSize += firstSuballoc.size;
    10265  ++m_1stNullItemsBeginCount;
    10266  CleanupAfterFree();
    10267  return;
    10268  }
    10269  }
    10270 
    10271  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    10272  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
    10273  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    10274  {
    10275  VmaSuballocation& lastSuballoc = suballocations2nd.back();
    10276  if(lastSuballoc.offset == offset)
    10277  {
    10278  m_SumFreeSize += lastSuballoc.size;
    10279  suballocations2nd.pop_back();
    10280  CleanupAfterFree();
    10281  return;
    10282  }
    10283  }
    10284  // Last allocation in 1st vector.
    10285  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    10286  {
    10287  VmaSuballocation& lastSuballoc = suballocations1st.back();
    10288  if(lastSuballoc.offset == offset)
    10289  {
    10290  m_SumFreeSize += lastSuballoc.size;
    10291  suballocations1st.pop_back();
    10292  CleanupAfterFree();
    10293  return;
    10294  }
    10295  }
    10296 
    10297  // Item from the middle of 1st vector.
    10298  {
    10299  VmaSuballocation refSuballoc;
    10300  refSuballoc.offset = offset;
    10301  // The rest of the members intentionally stay uninitialized for better performance.
    10302  SuballocationVectorType::iterator it = VmaBinaryFindSorted(
    10303  suballocations1st.begin() + m_1stNullItemsBeginCount,
    10304  suballocations1st.end(),
    10305  refSuballoc,
    10306  VmaSuballocationOffsetLess());
    10307  if(it != suballocations1st.end())
    10308  {
    10309  it->type = VMA_SUBALLOCATION_TYPE_FREE;
    10310  it->hAllocation = VK_NULL_HANDLE;
    10311  ++m_1stNullItemsMiddleCount;
    10312  m_SumFreeSize += it->size;
    10313  CleanupAfterFree();
    10314  return;
    10315  }
    10316  }
    10317 
    10318  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    10319  {
    10320  // Item from the middle of 2nd vector.
    10321  VmaSuballocation refSuballoc;
    10322  refSuballoc.offset = offset;
    10323  // The rest of the members intentionally stay uninitialized for better performance.
    10324  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
    10325  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
    10326  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
    10327  if(it != suballocations2nd.end())
    10328  {
    10329  it->type = VMA_SUBALLOCATION_TYPE_FREE;
    10330  it->hAllocation = VK_NULL_HANDLE;
    10331  ++m_2ndNullItemsCount;
    10332  m_SumFreeSize += it->size;
    10333  CleanupAfterFree();
    10334  return;
    10335  }
    10336  }
    10337 
    10338  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
    10339 }
    10340 
    10341 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
    10342 {
    10343  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    10344  const size_t suballocCount = AccessSuballocations1st().size();
    10345  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
    10346 }
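/*
Worked example, illustrative: with suballocCount == 40 items in the 1st vector, of
which nullItemCount == 25 are free slots, the test is 25 * 2 == 50 >= (40 - 25) * 3
== 45, so ShouldCompact1st() returns true and CleanupAfterFree() below compacts the
vector down to the 15 live items. With only 10 free slots, 20 < 90, so the sparse
vector is kept and the cost of compaction is avoided.
*/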
    10347 
    10348 void VmaBlockMetadata_Linear::CleanupAfterFree()
    10349 {
    10350  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10351  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10352 
    10353  if(IsEmpty())
    10354  {
    10355  suballocations1st.clear();
    10356  suballocations2nd.clear();
    10357  m_1stNullItemsBeginCount = 0;
    10358  m_1stNullItemsMiddleCount = 0;
    10359  m_2ndNullItemsCount = 0;
    10360  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    10361  }
    10362  else
    10363  {
    10364  const size_t suballoc1stCount = suballocations1st.size();
    10365  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    10366  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
    10367 
    10368  // Find more null items at the beginning of 1st vector.
    10369  while(m_1stNullItemsBeginCount < suballoc1stCount &&
    10370  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
    10371  {
    10372  ++m_1stNullItemsBeginCount;
    10373  --m_1stNullItemsMiddleCount;
    10374  }
    10375 
    10376  // Find more null items at the end of 1st vector.
    10377  while(m_1stNullItemsMiddleCount > 0 &&
    10378  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
    10379  {
    10380  --m_1stNullItemsMiddleCount;
    10381  suballocations1st.pop_back();
    10382  }
    10383 
    10384  // Find more null items at the end of 2nd vector.
    10385  while(m_2ndNullItemsCount > 0 &&
    10386  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
    10387  {
    10388  --m_2ndNullItemsCount;
    10389  suballocations2nd.pop_back();
    10390  }
    10391 
    10392  // Find more null items at the beginning of 2nd vector.
    10393  while(m_2ndNullItemsCount > 0 &&
    10394  suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
    10395  {
    10396  --m_2ndNullItemsCount;
    10397  VmaVectorRemove(suballocations2nd, 0);
    10398  }
    10399 
    10400  if(ShouldCompact1st())
    10401  {
    10402  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
    10403  size_t srcIndex = m_1stNullItemsBeginCount;
    10404  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
    10405  {
    10406  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
    10407  {
    10408  ++srcIndex;
    10409  }
    10410  if(dstIndex != srcIndex)
    10411  {
    10412  suballocations1st[dstIndex] = suballocations1st[srcIndex];
    10413  }
    10414  ++srcIndex;
    10415  }
    10416  suballocations1st.resize(nonNullItemCount);
    10417  m_1stNullItemsBeginCount = 0;
    10418  m_1stNullItemsMiddleCount = 0;
    10419  }
    10420 
    10421  // 2nd vector became empty.
    10422  if(suballocations2nd.empty())
    10423  {
    10424  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    10425  }
    10426 
    10427  // 1st vector became empty.
    10428  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
    10429  {
    10430  suballocations1st.clear();
    10431  m_1stNullItemsBeginCount = 0;
    10432 
    10433  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    10434  {
    10435  // Swap 1st with 2nd. Now 2nd is empty.
    10436  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    10437  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
    10438  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
    10439  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
    10440  {
    10441  ++m_1stNullItemsBeginCount;
    10442  --m_1stNullItemsMiddleCount;
    10443  }
    10444  m_2ndNullItemsCount = 0;
    10445  m_1stVectorIndex ^= 1;
    10446  }
    10447  }
    10448  }
    10449 
    10450  VMA_HEAVY_ASSERT(Validate());
    10451 }
    10452 
    10453 
    10454 ////////////////////////////////////////////////////////////////////////////////
    10455 // class VmaBlockMetadata_Buddy
    10456 
    10457 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    10458  VmaBlockMetadata(hAllocator),
    10459  m_Root(VMA_NULL),
    10460  m_AllocationCount(0),
    10461  m_FreeCount(1),
    10462  m_SumFreeSize(0)
    10463 {
    10464  memset(m_FreeList, 0, sizeof(m_FreeList));
    10465 }
    10466 
    10467 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
    10468 {
    10469  DeleteNode(m_Root);
    10470 }
    10471 
    10472 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
    10473 {
    10474  VmaBlockMetadata::Init(size);
    10475 
    10476  m_UsableSize = VmaPrevPow2(size);
    10477  m_SumFreeSize = m_UsableSize;
    10478 
    10479  // Calculate m_LevelCount.
    10480  m_LevelCount = 1;
    10481  while(m_LevelCount < MAX_LEVELS &&
    10482  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
    10483  {
    10484  ++m_LevelCount;
    10485  }
    10486 
    10487  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
    10488  rootNode->offset = 0;
    10489  rootNode->type = Node::TYPE_FREE;
    10490  rootNode->parent = VMA_NULL;
    10491  rootNode->buddy = VMA_NULL;
    10492 
    10493  m_Root = rootNode;
    10494  AddToFreeListFront(0, rootNode);
    10495 }
    10496 
    10497 bool VmaBlockMetadata_Buddy::Validate() const
    10498 {
    10499  // Validate tree.
    10500  ValidationContext ctx;
    10501  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    10502  {
    10503  VMA_VALIDATE(false && "ValidateNode failed.");
    10504  }
    10505  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    10506  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
    10507 
    10508  // Validate free node lists.
    10509  for(uint32_t level = 0; level < m_LevelCount; ++level)
    10510  {
    10511  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
    10512  m_FreeList[level].front->free.prev == VMA_NULL);
    10513 
    10514  for(Node* node = m_FreeList[level].front;
    10515  node != VMA_NULL;
    10516  node = node->free.next)
    10517  {
    10518  VMA_VALIDATE(node->type == Node::TYPE_FREE);
    10519 
    10520  if(node->free.next == VMA_NULL)
    10521  {
    10522  VMA_VALIDATE(m_FreeList[level].back == node);
    10523  }
    10524  else
    10525  {
    10526  VMA_VALIDATE(node->free.next->free.prev == node);
    10527  }
    10528  }
    10529  }
    10530 
    10531  // Validate that free lists at higher levels are empty.
    10532  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    10533  {
    10534  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    10535  }
    10536 
    10537  return true;
    10538 }
    10539 
    10540 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
    10541 {
    10542  for(uint32_t level = 0; level < m_LevelCount; ++level)
    10543  {
    10544  if(m_FreeList[level].front != VMA_NULL)
    10545  {
    10546  return LevelToNodeSize(level);
    10547  }
    10548  }
    10549  return 0;
    10550 }
    10551 
    10552 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    10553 {
    10554  const VkDeviceSize unusableSize = GetUnusableSize();
    10555 
    10556  outInfo.blockCount = 1;
    10557 
    10558  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
    10559  outInfo.usedBytes = outInfo.unusedBytes = 0;
    10560 
    10561  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
    10562  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
    10563  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
    10564 
    10565  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
    10566 
    10567  if(unusableSize > 0)
    10568  {
    10569  ++outInfo.unusedRangeCount;
    10570  outInfo.unusedBytes += unusableSize;
    10571  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
    10572  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
    10573  }
    10574 }
    10575 
    10576 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
    10577 {
    10578  const VkDeviceSize unusableSize = GetUnusableSize();
    10579 
    10580  inoutStats.size += GetSize();
    10581  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
    10582  inoutStats.allocationCount += m_AllocationCount;
    10583  inoutStats.unusedRangeCount += m_FreeCount;
    10584  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    10585 
    10586  if(unusableSize > 0)
    10587  {
    10588  ++inoutStats.unusedRangeCount;
    10589  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
    10590  }
    10591 }
    10592 
    10593 #if VMA_STATS_STRING_ENABLED
    10594 
    10595 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
    10596 {
    10597  // TODO optimize
    10598  VmaStatInfo stat;
    10599  CalcAllocationStatInfo(stat);
    10600 
    10601  PrintDetailedMap_Begin(
    10602  json,
    10603  stat.unusedBytes,
    10604  stat.allocationCount,
    10605  stat.unusedRangeCount);
    10606 
    10607  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
    10608 
    10609  const VkDeviceSize unusableSize = GetUnusableSize();
    10610  if(unusableSize > 0)
    10611  {
    10612  PrintDetailedMap_UnusedRange(json,
    10613  m_UsableSize, // offset
    10614  unusableSize); // size
    10615  }
    10616 
    10617  PrintDetailedMap_End(json);
    10618 }
    10619 
    10620 #endif // #if VMA_STATS_STRING_ENABLED
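/*
Illustrative sketch, not part of the library: the PrintDetailedMap() implementations
(one per metadata type) feed the public JSON dump, which can be obtained like this
("allocator" assumed valid):

\code
char* statsString = VMA_NULL;
vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map.
// statsString now holds a JSON document describing every block and allocation.
vmaFreeStatsString(allocator, statsString);
\endcode
*/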
    10621 
    10622 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
    10623  uint32_t currentFrameIndex,
    10624  uint32_t frameInUseCount,
    10625  VkDeviceSize bufferImageGranularity,
    10626  VkDeviceSize allocSize,
    10627  VkDeviceSize allocAlignment,
    10628  bool upperAddress,
    10629  VmaSuballocationType allocType,
    10630  bool canMakeOtherLost,
    10631  uint32_t strategy,
    10632  VmaAllocationRequest* pAllocationRequest)
    10633 {
    10634  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
    10635 
    10636  // Simple way to respect bufferImageGranularity. May be optimized some day.
    10637  // Whenever it might be an OPTIMAL image...
    10638  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
    10639  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    10640  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
    10641  {
    10642  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
    10643  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
    10644  }
    10645 
    10646  if(allocSize > m_UsableSize)
    10647  {
    10648  return false;
    10649  }
    10650 
    10651  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    10652  for(uint32_t level = targetLevel + 1; level--; )
    10653  {
    10654  for(Node* freeNode = m_FreeList[level].front;
    10655  freeNode != VMA_NULL;
    10656  freeNode = freeNode->free.next)
    10657  {
    10658  if(freeNode->offset % allocAlignment == 0)
    10659  {
    10660  pAllocationRequest->type = VmaAllocationRequestType::Normal;
    10661  pAllocationRequest->offset = freeNode->offset;
    10662  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
    10663  pAllocationRequest->sumItemSize = 0;
    10664  pAllocationRequest->itemsToMakeLostCount = 0;
    10665  pAllocationRequest->customData = (void*)(uintptr_t)level;
    10666  return true;
    10667  }
    10668  }
    10669  }
    10670 
    10671  return false;
    10672 }
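/*
Illustrative sketch, not part of the library: the buddy metadata is selected by
creating a custom pool with VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT. Because only
VmaPrevPow2(size) of each block is usable (see Init() above), a power-of-2 blockSize
avoids the unusable tail ("memTypeIndex" assumed found via vmaFindMemoryTypeIndex()):

\code
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.flags = VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT;
poolCreateInfo.blockSize = 128ull * 1024 * 1024; // Power of 2 - fully usable.

VmaPool pool = VK_NULL_HANDLE;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
\endcode

Each allocation is rounded up to a power-of-2 node size (AllocSizeToLevel() below),
trading internal fragmentation for O(log N) allocation and free.
*/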
    10673 
    10674 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
    10675  uint32_t currentFrameIndex,
    10676  uint32_t frameInUseCount,
    10677  VmaAllocationRequest* pAllocationRequest)
    10678 {
    10679  /*
    10680  Lost allocations are not supported in buddy allocator at the moment.
    10681  Support might be added in the future.
    10682  */
    10683  return pAllocationRequest->itemsToMakeLostCount == 0;
    10684 }
    10685 
    10686 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    10687 {
    10688  /*
    10689  Lost allocations are not supported in buddy allocator at the moment.
    10690  Support might be added in the future.
    10691  */
    10692  return 0;
    10693 }
    10694 
    10695 void VmaBlockMetadata_Buddy::Alloc(
    10696  const VmaAllocationRequest& request,
    10697  VmaSuballocationType type,
    10698  VkDeviceSize allocSize,
    10699  VmaAllocation hAllocation)
    10700 {
    10701  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
    10702 
    10703  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    10704  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
    10705 
    10706  Node* currNode = m_FreeList[currLevel].front;
    10707  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    10708  while(currNode->offset != request.offset)
    10709  {
    10710  currNode = currNode->free.next;
    10711  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    10712  }
    10713 
    10714  // Go down, splitting free nodes.
    10715  while(currLevel < targetLevel)
    10716  {
    10717  // currNode is already first free node at currLevel.
    10718  // Remove it from list of free nodes at this currLevel.
    10719  RemoveFromFreeList(currLevel, currNode);
    10720 
    10721  const uint32_t childrenLevel = currLevel + 1;
    10722 
    10723  // Create two free sub-nodes.
    10724  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
    10725  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
    10726 
    10727  leftChild->offset = currNode->offset;
    10728  leftChild->type = Node::TYPE_FREE;
    10729  leftChild->parent = currNode;
    10730  leftChild->buddy = rightChild;
    10731 
    10732  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
    10733  rightChild->type = Node::TYPE_FREE;
    10734  rightChild->parent = currNode;
    10735  rightChild->buddy = leftChild;
    10736 
    10737  // Convert current currNode to split type.
    10738  currNode->type = Node::TYPE_SPLIT;
    10739  currNode->split.leftChild = leftChild;
    10740 
    10741  // Add child nodes to free list. Order is important!
    10742  AddToFreeListFront(childrenLevel, rightChild);
    10743  AddToFreeListFront(childrenLevel, leftChild);
    10744 
    10745  ++m_FreeCount;
    10746  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
    10747  ++currLevel;
    10748  currNode = m_FreeList[currLevel].front;
    10749 
    10750  /*
    10751  We can be sure that currNode, as left child of node previously split,
    10752  also fulfills the alignment requirement.
    10753  */
    10754  }
    10755 
    10756  // Remove from free list.
    10757  VMA_ASSERT(currLevel == targetLevel &&
    10758  currNode != VMA_NULL &&
    10759  currNode->type == Node::TYPE_FREE);
    10760  RemoveFromFreeList(currLevel, currNode);
    10761 
    10762  // Convert to allocation node.
    10763  currNode->type = Node::TYPE_ALLOCATION;
    10764  currNode->allocation.alloc = hAllocation;
    10765 
    10766  ++m_AllocationCount;
    10767  --m_FreeCount;
    10768  m_SumFreeSize -= allocSize;
    10769 }
    10770 
    10771 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
    10772 {
    10773  if(node->type == Node::TYPE_SPLIT)
    10774  {
    10775  DeleteNode(node->split.leftChild->buddy);
    10776  DeleteNode(node->split.leftChild);
    10777  }
    10778 
    10779  vma_delete(GetAllocationCallbacks(), node);
    10780 }
    10781 
    10782 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
    10783 {
    10784  VMA_VALIDATE(level < m_LevelCount);
    10785  VMA_VALIDATE(curr->parent == parent);
    10786  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
    10787  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
    10788  switch(curr->type)
    10789  {
    10790  case Node::TYPE_FREE:
    10791  // curr->free.prev, next are validated separately.
    10792  ctx.calculatedSumFreeSize += levelNodeSize;
    10793  ++ctx.calculatedFreeCount;
    10794  break;
    10795  case Node::TYPE_ALLOCATION:
    10796  ++ctx.calculatedAllocationCount;
    10797  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
    10798  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
    10799  break;
    10800  case Node::TYPE_SPLIT:
    10801  {
    10802  const uint32_t childrenLevel = level + 1;
    10803  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
    10804  const Node* const leftChild = curr->split.leftChild;
    10805  VMA_VALIDATE(leftChild != VMA_NULL);
    10806  VMA_VALIDATE(leftChild->offset == curr->offset);
    10807  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
    10808  {
    10809  VMA_VALIDATE(false && "ValidateNode for left child failed.");
    10810  }
    10811  const Node* const rightChild = leftChild->buddy;
    10812  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
    10813  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
    10814  {
    10815  VMA_VALIDATE(false && "ValidateNode for right child failed.");
    10816  }
    10817  }
    10818  break;
    10819  default:
    10820  return false;
    10821  }
    10822 
    10823  return true;
    10824 }
    10825 
    10826 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
    10827 {
    10828  // I know this could be optimized somehow, e.g. by using std::bit_width (formerly std::log2p1) from C++20.
    10829  uint32_t level = 0;
    10830  VkDeviceSize currLevelNodeSize = m_UsableSize;
    10831  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
    10832  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
    10833  {
    10834  ++level;
    10835  currLevelNodeSize = nextLevelNodeSize;
    10836  nextLevelNodeSize = currLevelNodeSize >> 1;
    10837  }
    10838  return level;
    10839 }
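/*
Worked example, illustrative: with m_UsableSize == 256 MB and allocSize == 10 MB,
the loop halves the node size while the allocation still fits in the smaller node:
256 -> 128 -> 64 -> 32 -> 16 MB. It stops at level 4 because the next size, 8 MB,
is too small. The allocation thus occupies a 16 MB node, wasting 6 MB to internal
fragmentation, as accounted for in CalcAllocationStatInfoNode() below.
*/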
    10840 
    10841 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
    10842 {
    10843  // Find node and level.
    10844  Node* node = m_Root;
    10845  VkDeviceSize nodeOffset = 0;
    10846  uint32_t level = 0;
    10847  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
    10848  while(node->type == Node::TYPE_SPLIT)
    10849  {
    10850  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
    10851  if(offset < nodeOffset + nextLevelSize)
    10852  {
    10853  node = node->split.leftChild;
    10854  }
    10855  else
    10856  {
    10857  node = node->split.leftChild->buddy;
    10858  nodeOffset += nextLevelSize;
    10859  }
    10860  ++level;
    10861  levelNodeSize = nextLevelSize;
    10862  }
    10863 
    10864  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
    10865  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
    10866 
    10867  ++m_FreeCount;
    10868  --m_AllocationCount;
    10869  m_SumFreeSize += alloc->GetSize();
    10870 
    10871  node->type = Node::TYPE_FREE;
    10872 
    10873  // Join free nodes if possible.
    10874  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
    10875  {
    10876  RemoveFromFreeList(level, node->buddy);
    10877  Node* const parent = node->parent;
    10878 
    10879  vma_delete(GetAllocationCallbacks(), node->buddy);
    10880  vma_delete(GetAllocationCallbacks(), node);
    10881  parent->type = Node::TYPE_FREE;
    10882 
    10883  node = parent;
    10884  --level;
    10885  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
    10886  --m_FreeCount;
    10887  }
    10888 
    10889  AddToFreeListFront(level, node);
    10890 }
    10891 
    10892 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
    10893 {
    10894  switch(node->type)
    10895  {
    10896  case Node::TYPE_FREE:
    10897  ++outInfo.unusedRangeCount;
    10898  outInfo.unusedBytes += levelNodeSize;
    10899  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
    10900  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
    10901  break;
    10902  case Node::TYPE_ALLOCATION:
    10903  {
    10904  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    10905  ++outInfo.allocationCount;
    10906  outInfo.usedBytes += allocSize;
    10907  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
    10908  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
    10909 
    10910  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
    10911  if(unusedRangeSize > 0)
    10912  {
    10913  ++outInfo.unusedRangeCount;
    10914  outInfo.unusedBytes += unusedRangeSize;
    10915  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    10916  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    10917  }
    10918  }
    10919  break;
    10920  case Node::TYPE_SPLIT:
    10921  {
    10922  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    10923  const Node* const leftChild = node->split.leftChild;
    10924  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
    10925  const Node* const rightChild = leftChild->buddy;
    10926  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
    10927  }
    10928  break;
    10929  default:
    10930  VMA_ASSERT(0);
    10931  }
    10932 }
    10933 
    10934 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
    10935 {
    10936  VMA_ASSERT(node->type == Node::TYPE_FREE);
    10937 
    10938  // List is empty.
    10939  Node* const frontNode = m_FreeList[level].front;
    10940  if(frontNode == VMA_NULL)
    10941  {
    10942  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
    10943  node->free.prev = node->free.next = VMA_NULL;
    10944  m_FreeList[level].front = m_FreeList[level].back = node;
    10945  }
    10946  else
    10947  {
    10948  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
    10949  node->free.prev = VMA_NULL;
    10950  node->free.next = frontNode;
    10951  frontNode->free.prev = node;
    10952  m_FreeList[level].front = node;
    10953  }
    10954 }
    10955 
    10956 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
    10957 {
    10958  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
    10959 
    10960  // It is at the front.
    10961  if(node->free.prev == VMA_NULL)
    10962  {
    10963  VMA_ASSERT(m_FreeList[level].front == node);
    10964  m_FreeList[level].front = node->free.next;
    10965  }
    10966  else
    10967  {
    10968  Node* const prevFreeNode = node->free.prev;
    10969  VMA_ASSERT(prevFreeNode->free.next == node);
    10970  prevFreeNode->free.next = node->free.next;
    10971  }
    10972 
    10973  // It is at the back.
    10974  if(node->free.next == VMA_NULL)
    10975  {
    10976  VMA_ASSERT(m_FreeList[level].back == node);
    10977  m_FreeList[level].back = node->free.prev;
    10978  }
    10979  else
    10980  {
    10981  Node* const nextFreeNode = node->free.next;
    10982  VMA_ASSERT(nextFreeNode->free.prev == node);
    10983  nextFreeNode->free.prev = node->free.prev;
    10984  }
    10985 }
    10986 
    10987 #if VMA_STATS_STRING_ENABLED
    10988 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
    10989 {
    10990  switch(node->type)
    10991  {
    10992  case Node::TYPE_FREE:
    10993  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
    10994  break;
    10995  case Node::TYPE_ALLOCATION:
    10996  {
    10997  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
    10998  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    10999  if(allocSize < levelNodeSize)
    11000  {
    11001  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
    11002  }
    11003  }
    11004  break;
    11005  case Node::TYPE_SPLIT:
    11006  {
    11007  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    11008  const Node* const leftChild = node->split.leftChild;
    11009  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
    11010  const Node* const rightChild = leftChild->buddy;
    11011  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
    11012  }
    11013  break;
    11014  default:
    11015  VMA_ASSERT(0);
    11016  }
    11017 }
    11018 #endif // #if VMA_STATS_STRING_ENABLED
    11019 
    11020 
    11021 ////////////////////////////////////////////////////////////////////////////////
    11022 // class VmaDeviceMemoryBlock
    11023 
    11024 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    11025  m_pMetadata(VMA_NULL),
    11026  m_MemoryTypeIndex(UINT32_MAX),
    11027  m_Id(0),
    11028  m_hMemory(VK_NULL_HANDLE),
    11029  m_MapCount(0),
    11030  m_pMappedData(VMA_NULL)
    11031 {
    11032 }
    11033 
    11034 void VmaDeviceMemoryBlock::Init(
    11035  VmaAllocator hAllocator,
    11036  VmaPool hParentPool,
    11037  uint32_t newMemoryTypeIndex,
    11038  VkDeviceMemory newMemory,
    11039  VkDeviceSize newSize,
    11040  uint32_t id,
    11041  uint32_t algorithm)
    11042 {
    11043  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    11044 
    11045  m_hParentPool = hParentPool;
    11046  m_MemoryTypeIndex = newMemoryTypeIndex;
    11047  m_Id = id;
    11048  m_hMemory = newMemory;
    11049 
    11050  switch(algorithm)
    11051  {
    11052  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
    11053  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
    11054  break;
    11055  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
    11056  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
    11057  break;
    11058  default:
    11059  VMA_ASSERT(0);
    11060  // Fall-through.
    11061  case 0:
    11062  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
    11063  }
    11064  m_pMetadata->Init(newSize);
    11065 }
    11066 
    11067 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
    11068 {
    11069  // This is the most important assert in the entire library.
    11070  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    11071  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
    11072 
    11073  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    11074  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    11075  m_hMemory = VK_NULL_HANDLE;
    11076 
    11077  vma_delete(allocator, m_pMetadata);
    11078  m_pMetadata = VMA_NULL;
    11079 }
    11080 
    11081 bool VmaDeviceMemoryBlock::Validate() const
    11082 {
    11083  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
    11084  (m_pMetadata->GetSize() != 0));
    11085 
    11086  return m_pMetadata->Validate();
    11087 }
    11088 
    11089 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
    11090 {
    11091  void* pData = nullptr;
    11092  VkResult res = Map(hAllocator, 1, &pData);
    11093  if(res != VK_SUCCESS)
    11094  {
    11095  return res;
    11096  }
    11097 
    11098  res = m_pMetadata->CheckCorruption(pData);
    11099 
    11100  Unmap(hAllocator, 1);
    11101 
    11102  return res;
    11103 }
    11104 
    11105 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
    11106 {
    11107  if(count == 0)
    11108  {
    11109  return VK_SUCCESS;
    11110  }
    11111 
    11112  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    11113  if(m_MapCount != 0)
    11114  {
    11115  m_MapCount += count;
    11116  VMA_ASSERT(m_pMappedData != VMA_NULL);
    11117  if(ppData != VMA_NULL)
    11118  {
    11119  *ppData = m_pMappedData;
    11120  }
    11121  return VK_SUCCESS;
    11122  }
    11123  else
    11124  {
    11125  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    11126  hAllocator->m_hDevice,
    11127  m_hMemory,
    11128  0, // offset
    11129  VK_WHOLE_SIZE,
    11130  0, // flags
    11131  &m_pMappedData);
    11132  if(result == VK_SUCCESS)
    11133  {
    11134  if(ppData != VMA_NULL)
    11135  {
    11136  *ppData = m_pMappedData;
    11137  }
    11138  m_MapCount = count;
    11139  }
    11140  return result;
    11141  }
    11142 }
    11143 
    11144 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
    11145 {
    11146  if(count == 0)
    11147  {
    11148  return;
    11149  }
    11150 
    11151  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    11152  if(m_MapCount >= count)
    11153  {
    11154  m_MapCount -= count;
    11155  if(m_MapCount == 0)
    11156  {
    11157  m_pMappedData = VMA_NULL;
    11158  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
    11159  }
    11160  }
    11161  else
    11162  {
    11163  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    11164  }
    11165 }
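/*
Illustrative sketch, not part of the library: Map()/Unmap() reference-count the
mapping of the whole VkDeviceMemory block, so vkMapMemory() is issued only on the
0 -> 1 transition and nested use through the public API is legal:

\code
void* pData1 = VMA_NULL;
void* pData2 = VMA_NULL;
vmaMapMemory(allocator, allocation, &pData1); // Maps the underlying block.
vmaMapMemory(allocator, allocation, &pData2); // Reuses the mapping; pData2 == pData1.
// ... read/write the allocation's bytes ...
vmaUnmapMemory(allocator, allocation);
vmaUnmapMemory(allocator, allocation);        // Count reaches 0 - block is unmapped.
\endcode
*/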
    11166 
    11167 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    11168 {
    11169  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    11170  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    11171 
    11172  void* pData;
    11173  VkResult res = Map(hAllocator, 1, &pData);
    11174  if(res != VK_SUCCESS)
    11175  {
    11176  return res;
    11177  }
    11178 
    11179  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
    11180  VmaWriteMagicValue(pData, allocOffset + allocSize);
    11181 
    11182  Unmap(hAllocator, 1);
    11183 
    11184  return VK_SUCCESS;
    11185 }
    11186 
    11187 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    11188 {
    11189  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    11190  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    11191 
    11192  void* pData;
    11193  VkResult res = Map(hAllocator, 1, &pData);
    11194  if(res != VK_SUCCESS)
    11195  {
    11196  return res;
    11197  }
    11198 
    11199  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
    11200  {
    11201  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
    11202  }
    11203  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
    11204  {
    11205  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
    11206  }
    11207 
    11208  Unmap(hAllocator, 1);
    11209 
    11210  return VK_SUCCESS;
    11211 }
    11212 
    11213 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    11214  const VmaAllocator hAllocator,
    11215  const VmaAllocation hAllocation,
    11216  VkBuffer hBuffer)
    11217 {
    11218  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    11219  hAllocation->GetBlock() == this);
    11220  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    11221  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    11222  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
    11223  hAllocator->m_hDevice,
    11224  hBuffer,
    11225  m_hMemory,
    11226  hAllocation->GetOffset());
    11227 }
    11228 
    11229 VkResult VmaDeviceMemoryBlock::BindImageMemory(
    11230  const VmaAllocator hAllocator,
    11231  const VmaAllocation hAllocation,
    11232  VkImage hImage)
    11233 {
    11234  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    11235  hAllocation->GetBlock() == this);
    11236  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    11237  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    11238  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
    11239  hAllocator->m_hDevice,
    11240  hImage,
    11241  m_hMemory,
    11242  hAllocation->GetOffset());
    11243 }
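/*
Illustrative sketch, not part of the library: BindBufferMemory()/BindImageMemory()
above back the public allocate-then-bind path for resources the client creates
itself ("device" and "imageCreateInfo" assumed):

\code
VkImage image = VK_NULL_HANDLE;
vkCreateImage(device, &imageCreateInfo, VMA_NULL, &image);

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation alloc = VK_NULL_HANDLE;
vmaAllocateMemoryForImage(allocator, image, &allocCreateInfo, &alloc, VMA_NULL);
vmaBindImageMemory(allocator, alloc, image); // Takes m_Mutex, calls vkBindImageMemory().
\endcode

The lock taken above is what prevents vkBindImageMemory() from racing a concurrent
vkMapMemory() on the same VkDeviceMemory from another thread.
*/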
    11244 
    11245 static void InitStatInfo(VmaStatInfo& outInfo)
    11246 {
    11247  memset(&outInfo, 0, sizeof(outInfo));
    11248  outInfo.allocationSizeMin = UINT64_MAX;
    11249  outInfo.unusedRangeSizeMin = UINT64_MAX;
    11250 }
    11251 
    11252 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
    11253 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
    11254 {
    11255  inoutInfo.blockCount += srcInfo.blockCount;
    11256  inoutInfo.allocationCount += srcInfo.allocationCount;
    11257  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    11258  inoutInfo.usedBytes += srcInfo.usedBytes;
    11259  inoutInfo.unusedBytes += srcInfo.unusedBytes;
    11260  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    11261  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    11262  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    11263  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
    11264 }
    11265 
    11266 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    11267 {
    11268  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    11269  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    11270  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    11271  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    11272 }
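/*
Worked example, illustrative: the averages use rounding division -
VmaRoundDiv(x, y) computes (x + y / 2) / y - so with usedBytes == 10 and
allocationCount == 4, allocationSizeAvg == (10 + 2) / 4 == 3 rather than the
truncated 10 / 4 == 2.
*/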
    11273 
    11274 VmaPool_T::VmaPool_T(
    11275  VmaAllocator hAllocator,
    11276  const VmaPoolCreateInfo& createInfo,
    11277  VkDeviceSize preferredBlockSize) :
    11278  m_BlockVector(
    11279  hAllocator,
    11280  this, // hParentPool
    11281  createInfo.memoryTypeIndex,
    11282  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
    11283  createInfo.minBlockCount,
    11284  createInfo.maxBlockCount,
    11285  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
    11286  createInfo.frameInUseCount,
    11287  true, // isCustomPool
    11288  createInfo.blockSize != 0, // explicitBlockSize
    11289  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    11290  m_Id(0)
    11291 {
    11292 }
    11293 
    11294 VmaPool_T::~VmaPool_T()
    11295 {
    11296 }
    11297 
    11298 #if VMA_STATS_STRING_ENABLED
    11299 
    11300 #endif // #if VMA_STATS_STRING_ENABLED
    11301 
    11302 VmaBlockVector::VmaBlockVector(
    11303  VmaAllocator hAllocator,
    11304  VmaPool hParentPool,
    11305  uint32_t memoryTypeIndex,
    11306  VkDeviceSize preferredBlockSize,
    11307  size_t minBlockCount,
    11308  size_t maxBlockCount,
    11309  VkDeviceSize bufferImageGranularity,
    11310  uint32_t frameInUseCount,
    11311  bool isCustomPool,
    11312  bool explicitBlockSize,
    11313  uint32_t algorithm) :
    11314  m_hAllocator(hAllocator),
    11315  m_hParentPool(hParentPool),
    11316  m_MemoryTypeIndex(memoryTypeIndex),
    11317  m_PreferredBlockSize(preferredBlockSize),
    11318  m_MinBlockCount(minBlockCount),
    11319  m_MaxBlockCount(maxBlockCount),
    11320  m_BufferImageGranularity(bufferImageGranularity),
    11321  m_FrameInUseCount(frameInUseCount),
    11322  m_IsCustomPool(isCustomPool),
    11323  m_ExplicitBlockSize(explicitBlockSize),
    11324  m_Algorithm(algorithm),
    11325  m_HasEmptyBlock(false),
    11326  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    11327  m_NextBlockId(0)
    11328 {
    11329 }
    11330 
    11331 VmaBlockVector::~VmaBlockVector()
    11332 {
    11333  for(size_t i = m_Blocks.size(); i--; )
    11334  {
    11335  m_Blocks[i]->Destroy(m_hAllocator);
    11336  vma_delete(m_hAllocator, m_Blocks[i]);
    11337  }
    11338 }
    11339 
    11340 VkResult VmaBlockVector::CreateMinBlocks()
    11341 {
    11342  for(size_t i = 0; i < m_MinBlockCount; ++i)
    11343  {
    11344  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    11345  if(res != VK_SUCCESS)
    11346  {
    11347  return res;
    11348  }
    11349  }
    11350  return VK_SUCCESS;
    11351 }
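/*
Illustrative sketch, not part of the library: CreateMinBlocks() is what makes a
custom pool preallocate memory up front, as part of vmaCreatePool()
("memTypeIndex" assumed found earlier):

\code
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.blockSize = 64ull * 1024 * 1024;
poolCreateInfo.minBlockCount = 1; // This block is created eagerly, inside vmaCreatePool().
poolCreateInfo.maxBlockCount = 4; // The pool never grows beyond 4 blocks.

VmaPool pool = VK_NULL_HANDLE;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
\endcode

Blocks beyond minBlockCount are released again when they become empty (see
VmaBlockVector::Free() below), so minBlockCount is also the floor kept alive.
*/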
    11352 
    11353 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    11354 {
    11355  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    11356 
    11357  const size_t blockCount = m_Blocks.size();
    11358 
    11359  pStats->size = 0;
    11360  pStats->unusedSize = 0;
    11361  pStats->allocationCount = 0;
    11362  pStats->unusedRangeCount = 0;
    11363  pStats->unusedRangeSizeMax = 0;
    11364  pStats->blockCount = blockCount;
    11365 
    11366  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    11367  {
    11368  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11369  VMA_ASSERT(pBlock);
    11370  VMA_HEAVY_ASSERT(pBlock->Validate());
    11371  pBlock->m_pMetadata->AddPoolStats(*pStats);
    11372  }
    11373 }
    11374 
    11375 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
    11376 {
    11377  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    11378  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
    11379  (VMA_DEBUG_MARGIN > 0) &&
    11380  (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
    11381  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
    11382 }
    11383 
    11384 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    11385 
    11386 VkResult VmaBlockVector::Allocate(
    11387  uint32_t currentFrameIndex,
    11388  VkDeviceSize size,
    11389  VkDeviceSize alignment,
    11390  const VmaAllocationCreateInfo& createInfo,
    11391  VmaSuballocationType suballocType,
    11392  size_t allocationCount,
    11393  VmaAllocation* pAllocations)
    11394 {
    11395  size_t allocIndex;
    11396  VkResult res = VK_SUCCESS;
    11397 
    11398  if(IsCorruptionDetectionEnabled())
    11399  {
    11400  size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
    11401  alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
    11402  }
    11403 
    11404  {
    11405  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
    11406  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    11407  {
    11408  res = AllocatePage(
    11409  currentFrameIndex,
    11410  size,
    11411  alignment,
    11412  createInfo,
    11413  suballocType,
    11414  pAllocations + allocIndex);
    11415  if(res != VK_SUCCESS)
    11416  {
    11417  break;
    11418  }
    11419  }
    11420  }
    11421 
    11422  if(res != VK_SUCCESS)
    11423  {
    11424  // Free all already created allocations.
    11425  while(allocIndex--)
    11426  {
    11427  Free(pAllocations[allocIndex]);
    11428  }
    11429  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    11430  }
    11431 
    11432  return res;
    11433 }
    11434 
    11435 VkResult VmaBlockVector::AllocatePage(
    11436  uint32_t currentFrameIndex,
    11437  VkDeviceSize size,
    11438  VkDeviceSize alignment,
    11439  const VmaAllocationCreateInfo& createInfo,
    11440  VmaSuballocationType suballocType,
    11441  VmaAllocation* pAllocation)
    11442 {
    11443  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    11444  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    11445  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    11446  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    11447  const bool canCreateNewBlock =
    11448  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
    11449  (m_Blocks.size() < m_MaxBlockCount);
    11450  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
    11451 
    11452  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
    11453  // Which in turn is available only when maxBlockCount = 1.
    11454  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
    11455  {
    11456  canMakeOtherLost = false;
    11457  }
    11458 
    11459  // Upper address can only be used with linear allocator and within single memory block.
    11460  if(isUpperAddress &&
    11461  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
    11462  {
    11463  return VK_ERROR_FEATURE_NOT_PRESENT;
    11464  }
    11465 
    11466  // Validate strategy.
    11467  switch(strategy)
    11468  {
    11469  case 0:
    11470  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
    11471  break;
    11472  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
    11473  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
    11474  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
    11475  break;
    11476  default:
    11477  return VK_ERROR_FEATURE_NOT_PRESENT;
    11478  }
    11479 
    11480  // Early reject: requested allocation size is larger than the maximum block size for this block vector.
    11481  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
    11482  {
    11483  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    11484  }
    11485 
    11486  /*
    11487  Under certain conditions, this whole section can be skipped for optimization, so
    11488  we move on directly to trying to allocate with canMakeOtherLost. That's the case
    11489  e.g. for custom pools with linear algorithm.
    11490  */
    11491  if(!canMakeOtherLost || canCreateNewBlock)
    11492  {
    11493  // 1. Search existing allocations. Try to allocate without making other allocations lost.
    11494  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
    11495  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
    11496 
    11497  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    11498  {
    11499  // Use only last block.
    11500  if(!m_Blocks.empty())
    11501  {
    11502  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
    11503  VMA_ASSERT(pCurrBlock);
    11504  VkResult res = AllocateFromBlock(
    11505  pCurrBlock,
    11506  currentFrameIndex,
    11507  size,
    11508  alignment,
    11509  allocFlagsCopy,
    11510  createInfo.pUserData,
    11511  suballocType,
    11512  strategy,
    11513  pAllocation);
    11514  if(res == VK_SUCCESS)
    11515  {
    11516  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
    11517  return VK_SUCCESS;
    11518  }
    11519  }
    11520  }
    11521  else
    11522  {
    11523  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
    11524  {
    11525  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    11526  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    11527  {
    11528  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    11529  VMA_ASSERT(pCurrBlock);
    11530  VkResult res = AllocateFromBlock(
    11531  pCurrBlock,
    11532  currentFrameIndex,
    11533  size,
    11534  alignment,
    11535  allocFlagsCopy,
    11536  createInfo.pUserData,
    11537  suballocType,
    11538  strategy,
    11539  pAllocation);
    11540  if(res == VK_SUCCESS)
    11541  {
    11542  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    11543  return VK_SUCCESS;
    11544  }
    11545  }
    11546  }
    11547  else // WORST_FIT, FIRST_FIT
    11548  {
    11549  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    11550  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    11551  {
    11552  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    11553  VMA_ASSERT(pCurrBlock);
    11554  VkResult res = AllocateFromBlock(
    11555  pCurrBlock,
    11556  currentFrameIndex,
    11557  size,
    11558  alignment,
    11559  allocFlagsCopy,
    11560  createInfo.pUserData,
    11561  suballocType,
    11562  strategy,
    11563  pAllocation);
    11564  if(res == VK_SUCCESS)
    11565  {
    11566  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    11567  return VK_SUCCESS;
    11568  }
    11569  }
    11570  }
    11571  }
    11572 
    11573  // 2. Try to create new block.
    11574  if(canCreateNewBlock)
    11575  {
    11576  // Calculate optimal size for new block.
    11577  VkDeviceSize newBlockSize = m_PreferredBlockSize;
    11578  uint32_t newBlockSizeShift = 0;
    11579  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
    11580 
    11581  if(!m_ExplicitBlockSize)
    11582  {
    11583  // Allocate 1/8, 1/4, 1/2 as first blocks.
    11584  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
    11585  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
    11586  {
    11587  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    11588  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
    11589  {
    11590  newBlockSize = smallerNewBlockSize;
    11591  ++newBlockSizeShift;
    11592  }
    11593  else
    11594  {
    11595  break;
    11596  }
    11597  }
    11598  }
    11599 
    11600  size_t newBlockIndex = 0;
    11601  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
    11602  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
    11603  if(!m_ExplicitBlockSize)
    11604  {
    11605  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
    11606  {
    11607  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    11608  if(smallerNewBlockSize >= size)
    11609  {
    11610  newBlockSize = smallerNewBlockSize;
    11611  ++newBlockSizeShift;
    11612  res = CreateBlock(newBlockSize, &newBlockIndex);
    11613  }
    11614  else
    11615  {
    11616  break;
    11617  }
    11618  }
    11619  }
    11620 
    11621  if(res == VK_SUCCESS)
    11622  {
    11623  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
    11624  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
    11625 
    11626  res = AllocateFromBlock(
    11627  pBlock,
    11628  currentFrameIndex,
    11629  size,
    11630  alignment,
    11631  allocFlagsCopy,
    11632  createInfo.pUserData,
    11633  suballocType,
    11634  strategy,
    11635  pAllocation);
    11636  if(res == VK_SUCCESS)
    11637  {
    11638  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
    11639  return VK_SUCCESS;
    11640  }
    11641  else
    11642  {
    11643  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
    11644  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    11645  }
    11646  }
    11647  }
    11648  }
    11649 
    11650  // 3. Try to allocate from existing blocks with making other allocations lost.
    11651  if(canMakeOtherLost)
    11652  {
    11653  uint32_t tryIndex = 0;
    11654  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
    11655  {
    11656  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
    11657  VmaAllocationRequest bestRequest = {};
    11658  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
    11659 
    11660  // 1. Search existing allocations.
    11661  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
    11662  {
    11663  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    11664  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    11665  {
    11666  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    11667  VMA_ASSERT(pCurrBlock);
    11668  VmaAllocationRequest currRequest = {};
    11669  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    11670  currentFrameIndex,
    11671  m_FrameInUseCount,
    11672  m_BufferImageGranularity,
    11673  size,
    11674  alignment,
    11675  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    11676  suballocType,
    11677  canMakeOtherLost,
    11678  strategy,
    11679  &currRequest))
    11680  {
    11681  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    11682  if(pBestRequestBlock == VMA_NULL ||
    11683  currRequestCost < bestRequestCost)
    11684  {
    11685  pBestRequestBlock = pCurrBlock;
    11686  bestRequest = currRequest;
    11687  bestRequestCost = currRequestCost;
    11688 
    11689  if(bestRequestCost == 0)
    11690  {
    11691  break;
    11692  }
    11693  }
    11694  }
    11695  }
    11696  }
    11697  else // WORST_FIT, FIRST_FIT
    11698  {
    11699  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    11700  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    11701  {
    11702  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    11703  VMA_ASSERT(pCurrBlock);
    11704  VmaAllocationRequest currRequest = {};
    11705  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    11706  currentFrameIndex,
    11707  m_FrameInUseCount,
    11708  m_BufferImageGranularity,
    11709  size,
    11710  alignment,
    11711  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    11712  suballocType,
    11713  canMakeOtherLost,
    11714  strategy,
    11715  &currRequest))
    11716  {
    11717  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    11718  if(pBestRequestBlock == VMA_NULL ||
    11719  currRequestCost < bestRequestCost ||
    11720  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
    11721  {
    11722  pBestRequestBlock = pCurrBlock;
    11723  bestRequest = currRequest;
    11724  bestRequestCost = currRequestCost;
    11725 
    11726  if(bestRequestCost == 0 ||
    11727  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
    11728  {
    11729  break;
    11730  }
    11731  }
    11732  }
    11733  }
    11734  }
    11735 
    11736  if(pBestRequestBlock != VMA_NULL)
    11737  {
    11738  if(mapped)
    11739  {
    11740  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
    11741  if(res != VK_SUCCESS)
    11742  {
    11743  return res;
    11744  }
    11745  }
    11746 
    11747  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
    11748  currentFrameIndex,
    11749  m_FrameInUseCount,
    11750  &bestRequest))
    11751  {
    11752  // We no longer have an empty block.
    11753  if(pBestRequestBlock->m_pMetadata->IsEmpty())
    11754  {
    11755  m_HasEmptyBlock = false;
    11756  }
    11757  // Allocate from this pBlock.
    11758  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
    11759  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
    11760  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
    11761  (*pAllocation)->InitBlockAllocation(
    11762  pBestRequestBlock,
    11763  bestRequest.offset,
    11764  alignment,
    11765  size,
    11766  suballocType,
    11767  mapped,
    11768  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    11769  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
    11770  VMA_DEBUG_LOG(" Returned from existing block");
    11771  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
    11772  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    11773  {
    11774  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    11775  }
    11776  if(IsCorruptionDetectionEnabled())
    11777  {
    11778  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
    11779  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    11780  }
    11781  return VK_SUCCESS;
    11782  }
    11783  // else: Some allocations must have been touched while we are here. Next try.
    11784  }
    11785  else
    11786  {
    11787  // Could not find place in any of the blocks - break outer loop.
    11788  break;
    11789  }
    11790  }
    11791  /* Maximum number of tries exceeded - a very unlikely event: many other
    11792  threads were simultaneously touching allocations, making it impossible to make
    11793  them lost at the same time as we tried to allocate. */
    11794  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
    11795  {
    11796  return VK_ERROR_TOO_MANY_OBJECTS;
    11797  }
    11798  }
    11799 
    11800  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    11801 }
    11802 
    11803 void VmaBlockVector::Free(
    11804  VmaAllocation hAllocation)
    11805 {
    11806  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
    11807 
    11808  // Scope for lock.
    11809  {
    11810  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
    11811 
    11812  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    11813 
    11814  if(IsCorruptionDetectionEnabled())
    11815  {
    11816  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
    11817  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
    11818  }
    11819 
    11820  if(hAllocation->IsPersistentMap())
    11821  {
    11822  pBlock->Unmap(m_hAllocator, 1);
    11823  }
    11824 
    11825  pBlock->m_pMetadata->Free(hAllocation);
    11826  VMA_HEAVY_ASSERT(pBlock->Validate());
    11827 
    11828  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
    11829 
    11830  // pBlock became empty after this deallocation.
    11831  if(pBlock->m_pMetadata->IsEmpty())
    11832  {
    11833  // Already has empty Allocation. We don't want to have two, so delete this one.
    11834  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
    11835  {
    11836  pBlockToDelete = pBlock;
    11837  Remove(pBlock);
    11838  }
    11839  // We now have the first empty block.
    11840  else
    11841  {
    11842  m_HasEmptyBlock = true;
    11843  }
    11844  }
    11845  // pBlock didn't become empty, but we have another empty block - find and free that one.
    11846  // (This is optional, heuristics.)
    11847  else if(m_HasEmptyBlock)
    11848  {
    11849  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
    11850  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
    11851  {
    11852  pBlockToDelete = pLastBlock;
    11853  m_Blocks.pop_back();
    11854  m_HasEmptyBlock = false;
    11855  }
    11856  }
    11857 
    11858  IncrementallySortBlocks();
    11859  }
    11860 
    11861  // Destruction of an empty block (its whole VkDeviceMemory allocation).
    11862  // Deferred until this point, outside of the mutex lock, for performance reasons.
    11863  if(pBlockToDelete != VMA_NULL)
    11864  {
    11865  VMA_DEBUG_LOG(" Deleted empty allocation");
    11866  pBlockToDelete->Destroy(m_hAllocator);
    11867  vma_delete(m_hAllocator, pBlockToDelete);
    11868  }
    11869 }
    11870 
    11871 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
    11872 {
    11873  VkDeviceSize result = 0;
    11874  for(size_t i = m_Blocks.size(); i--; )
    11875  {
    11876  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
    11877  if(result >= m_PreferredBlockSize)
    11878  {
    11879  break;
    11880  }
    11881  }
    11882  return result;
    11883 }
    11884 
    11885 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    11886 {
    11887  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11888  {
    11889  if(m_Blocks[blockIndex] == pBlock)
    11890  {
    11891  VmaVectorRemove(m_Blocks, blockIndex);
    11892  return;
    11893  }
    11894  }
    11895  VMA_ASSERT(0);
    11896 }
    11897 
    11898 void VmaBlockVector::IncrementallySortBlocks()
    11899 {
    11900  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    11901  {
    11902  // Bubble sort only until first swap.
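    // Doing at most one swap per call keeps this O(n); over successive calls
    // m_Blocks converges toward ascending order by sum of free size.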
    11903  for(size_t i = 1; i < m_Blocks.size(); ++i)
    11904  {
    11905  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
    11906  {
    11907  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    11908  return;
    11909  }
    11910  }
    11911  }
    11912 }
    11913 
    11914 VkResult VmaBlockVector::AllocateFromBlock(
    11915  VmaDeviceMemoryBlock* pBlock,
    11916  uint32_t currentFrameIndex,
    11917  VkDeviceSize size,
    11918  VkDeviceSize alignment,
    11919  VmaAllocationCreateFlags allocFlags,
    11920  void* pUserData,
    11921  VmaSuballocationType suballocType,
    11922  uint32_t strategy,
    11923  VmaAllocation* pAllocation)
    11924 {
    11925  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    11926  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    11927  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    11928  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    11929 
    11930  VmaAllocationRequest currRequest = {};
    11931  if(pBlock->m_pMetadata->CreateAllocationRequest(
    11932  currentFrameIndex,
    11933  m_FrameInUseCount,
    11934  m_BufferImageGranularity,
    11935  size,
    11936  alignment,
    11937  isUpperAddress,
    11938  suballocType,
    11939  false, // canMakeOtherLost
    11940  strategy,
    11941  &currRequest))
    11942  {
    11943  // Allocate from pBlock.
    11944  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
    11945 
    11946  if(mapped)
    11947  {
    11948  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
    11949  if(res != VK_SUCCESS)
    11950  {
    11951  return res;
    11952  }
    11953  }
    11954 
    11955  // We no longer have an empty Allocation.
    11956  if(pBlock->m_pMetadata->IsEmpty())
    11957  {
    11958  m_HasEmptyBlock = false;
    11959  }
    11960 
    11961  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
    11962  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
    11963  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
    11964  (*pAllocation)->InitBlockAllocation(
    11965  pBlock,
    11966  currRequest.offset,
    11967  alignment,
    11968  size,
    11969  suballocType,
    11970  mapped,
    11971  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    11972  VMA_HEAVY_ASSERT(pBlock->Validate());
    11973  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
    11974  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    11975  {
    11976  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    11977  }
    11978  if(IsCorruptionDetectionEnabled())
    11979  {
    11980  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
    11981  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    11982  }
    11983  return VK_SUCCESS;
    11984  }
    11985  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    11986 }
    11987 
    11988 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    11989 {
    11990  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    11991  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    11992  allocInfo.allocationSize = blockSize;
    11993  VkDeviceMemory mem = VK_NULL_HANDLE;
    11994  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    11995  if(res < 0)
    11996  {
    11997  return res;
    11998  }
    11999 
    12000  // New VkDeviceMemory successfully created.
    12001 
    12002  // Create new Allocation for it.
    12003  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    12004  pBlock->Init(
    12005  m_hAllocator,
    12006  m_hParentPool,
    12007  m_MemoryTypeIndex,
    12008  mem,
    12009  allocInfo.allocationSize,
    12010  m_NextBlockId++,
    12011  m_Algorithm);
    12012 
    12013  m_Blocks.push_back(pBlock);
    12014  if(pNewBlockIndex != VMA_NULL)
    12015  {
    12016  *pNewBlockIndex = m_Blocks.size() - 1;
    12017  }
    12018 
    12019  return VK_SUCCESS;
    12020 }
    12021 
    12022 void VmaBlockVector::ApplyDefragmentationMovesCpu(
    12023  class VmaBlockVectorDefragmentationContext* pDefragCtx,
    12024  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
    12025 {
    12026  const size_t blockCount = m_Blocks.size();
    12027  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
    12028 
    12029  enum BLOCK_FLAG
    12030  {
    12031  BLOCK_FLAG_USED = 0x00000001,
    12032  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
    12033  };
    12034 
    12035  struct BlockInfo
    12036  {
    12037  uint32_t flags;
    12038  void* pMappedData;
    12039  };
    12040  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
    12041  blockInfo(blockCount, VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
    12042  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
    12043 
    12044  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
    12045  const size_t moveCount = moves.size();
    12046  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    12047  {
    12048  const VmaDefragmentationMove& move = moves[moveIndex];
    12049  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
    12050  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
    12051  }
    12052 
    12053  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
    12054 
    12055  // Go over all blocks. Get mapped pointer or map if necessary.
    12056  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
    12057  {
    12058  BlockInfo& currBlockInfo = blockInfo[blockIndex];
    12059  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    12060  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
    12061  {
    12062  currBlockInfo.pMappedData = pBlock->GetMappedData();
    12063  // It is not originally mapped - map it.
    12064  if(currBlockInfo.pMappedData == VMA_NULL)
    12065  {
    12066  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
    12067  if(pDefragCtx->res == VK_SUCCESS)
    12068  {
    12069  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
    12070  }
    12071  }
    12072  }
    12073  }
    12074 
    12075  // Go over all moves. Do actual data transfer.
    12076  if(pDefragCtx->res == VK_SUCCESS)
    12077  {
    12078  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
    12079  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
    12080 
    12081  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    12082  {
    12083  const VmaDefragmentationMove& move = moves[moveIndex];
    12084 
    12085  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
    12086  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
    12087 
    12088  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
    12089 
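    // Note: for non-coherent memory, vkInvalidateMappedMemoryRanges and
    // vkFlushMappedMemoryRanges require ranges aligned to nonCoherentAtomSize,
    // so the offset is aligned down and the size up, clamped to the end of the
    // block. E.g. with a 256-byte atom, offset 300 / size 100 becomes [256, 512).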
    12090  // Invalidate source.
    12091  if(isNonCoherent)
    12092  {
    12093  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
    12094  memRange.memory = pSrcBlock->GetDeviceMemory();
    12095  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
    12096  memRange.size = VMA_MIN(
    12097  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
    12098  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
    12099  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
    12100  }
    12101 
    12102  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
    12103  memmove(
    12104  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
    12105  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
    12106  static_cast<size_t>(move.size));
    12107 
    12108  if(IsCorruptionDetectionEnabled())
    12109  {
    12110  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
    12111  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
    12112  }
    12113 
    12114  // Flush destination.
    12115  if(isNonCoherent)
    12116  {
    12117  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
    12118  memRange.memory = pDstBlock->GetDeviceMemory();
    12119  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
    12120  memRange.size = VMA_MIN(
    12121  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
    12122  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
    12123  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
    12124  }
    12125  }
    12126  }
    12127 
    12128  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
    12129  // This is done regardless of whether pDefragCtx->res == VK_SUCCESS.
    12130  for(size_t blockIndex = blockCount; blockIndex--; )
    12131  {
    12132  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
    12133  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
    12134  {
    12135  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    12136  pBlock->Unmap(m_hAllocator, 1);
    12137  }
    12138  }
    12139 }
    12140 
    12141 void VmaBlockVector::ApplyDefragmentationMovesGpu(
    12142  class VmaBlockVectorDefragmentationContext* pDefragCtx,
    12143  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    12144  VkCommandBuffer commandBuffer)
    12145 {
    12146  const size_t blockCount = m_Blocks.size();
    12147 
    12148  pDefragCtx->blockContexts.resize(blockCount);
    12149  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
    12150 
    12151  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
    12152  const size_t moveCount = moves.size();
    12153  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    12154  {
    12155  const VmaDefragmentationMove& move = moves[moveIndex];
    12156  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
    12157  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
    12158  }
    12159 
    12160  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
    12161 
    12162  // Go over all blocks. Create and bind buffer for whole block if necessary.
    12163  {
    12164  VkBufferCreateInfo bufCreateInfo;
    12165  VmaFillGpuDefragmentationBufferCreateInfo(bufCreateInfo);
    12166 
    12167  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
    12168  {
    12169  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
    12170  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    12171  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
    12172  {
    12173  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
    12174  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
    12175  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
    12176  if(pDefragCtx->res == VK_SUCCESS)
    12177  {
    12178  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
    12179  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
    12180  }
    12181  }
    12182  }
    12183  }
    12184 
    12185  // Go over all moves. Post data transfer commands to command buffer.
    12186  if(pDefragCtx->res == VK_SUCCESS)
    12187  {
    12188  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    12189  {
    12190  const VmaDefragmentationMove& move = moves[moveIndex];
    12191 
    12192  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
    12193  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
    12194 
    12195  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
    12196 
    12197  VkBufferCopy region = {
    12198  move.srcOffset,
    12199  move.dstOffset,
    12200  move.size };
    12201  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
    12202  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
    12203  }
    12204  }
    12205 
    12206  // Save buffers to defrag context for later destruction.
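    // VK_NOT_READY marks this context as having recorded copy commands that still
    // have to be executed; the buffers created above are kept alive until
    // DefragmentationEnd().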
    12207  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
    12208  {
    12209  pDefragCtx->res = VK_NOT_READY;
    12210  }
    12211 }
    12212 
    12213 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
    12214 {
    12215  m_HasEmptyBlock = false;
    12216  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    12217  {
    12218  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    12219  if(pBlock->m_pMetadata->IsEmpty())
    12220  {
    12221  if(m_Blocks.size() > m_MinBlockCount)
    12222  {
    12223  if(pDefragmentationStats != VMA_NULL)
    12224  {
    12225  ++pDefragmentationStats->deviceMemoryBlocksFreed;
    12226  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
    12227  }
    12228 
    12229  VmaVectorRemove(m_Blocks, blockIndex);
    12230  pBlock->Destroy(m_hAllocator);
    12231  vma_delete(m_hAllocator, pBlock);
    12232  }
    12233  else
    12234  {
    12235  m_HasEmptyBlock = true;
    12236  }
    12237  }
    12238  }
    12239 }
    12240 
    12241 #if VMA_STATS_STRING_ENABLED
    12242 
    12243 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
    12244 {
    12245  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    12246 
    12247  json.BeginObject();
    12248 
    12249  if(m_IsCustomPool)
    12250  {
    12251  json.WriteString("MemoryTypeIndex");
    12252  json.WriteNumber(m_MemoryTypeIndex);
    12253 
    12254  json.WriteString("BlockSize");
    12255  json.WriteNumber(m_PreferredBlockSize);
    12256 
    12257  json.WriteString("BlockCount");
    12258  json.BeginObject(true);
    12259  if(m_MinBlockCount > 0)
    12260  {
    12261  json.WriteString("Min");
    12262  json.WriteNumber((uint64_t)m_MinBlockCount);
    12263  }
    12264  if(m_MaxBlockCount < SIZE_MAX)
    12265  {
    12266  json.WriteString("Max");
    12267  json.WriteNumber((uint64_t)m_MaxBlockCount);
    12268  }
    12269  json.WriteString("Cur");
    12270  json.WriteNumber((uint64_t)m_Blocks.size());
    12271  json.EndObject();
    12272 
    12273  if(m_FrameInUseCount > 0)
    12274  {
    12275  json.WriteString("FrameInUseCount");
    12276  json.WriteNumber(m_FrameInUseCount);
    12277  }
    12278 
    12279  if(m_Algorithm != 0)
    12280  {
    12281  json.WriteString("Algorithm");
    12282  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
    12283  }
    12284  }
    12285  else
    12286  {
    12287  json.WriteString("PreferredBlockSize");
    12288  json.WriteNumber(m_PreferredBlockSize);
    12289  }
    12290 
    12291  json.WriteString("Blocks");
    12292  json.BeginObject();
    12293  for(size_t i = 0; i < m_Blocks.size(); ++i)
    12294  {
    12295  json.BeginString();
    12296  json.ContinueString(m_Blocks[i]->GetId());
    12297  json.EndString();
    12298 
    12299  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    12300  }
    12301  json.EndObject();
    12302 
    12303  json.EndObject();
    12304 }
    12305 
    12306 #endif // #if VMA_STATS_STRING_ENABLED
    12307 
    12308 void VmaBlockVector::Defragment(
    12309  class VmaBlockVectorDefragmentationContext* pCtx,
    12310  VmaDefragmentationStats* pStats,
    12311  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
    12312  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
    12313  VkCommandBuffer commandBuffer)
    12314 {
    12315  pCtx->res = VK_SUCCESS;
    12316 
    12317  const VkMemoryPropertyFlags memPropFlags =
    12318  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
    12319  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
    12320 
    12321  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
    12322  isHostVisible;
    12323  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
    12324  !IsCorruptionDetectionEnabled() &&
    12325  ((1u << m_MemoryTypeIndex) & m_hAllocator->GetGpuDefragmentationMemoryTypeBits()) != 0;
    12326 
    12327  // There are options to defragment this memory type.
    12328  if(canDefragmentOnCpu || canDefragmentOnGpu)
    12329  {
    12330  bool defragmentOnGpu;
    12331  // There is only one option to defragment this memory type.
    12332  if(canDefragmentOnGpu != canDefragmentOnCpu)
    12333  {
    12334  defragmentOnGpu = canDefragmentOnGpu;
    12335  }
    12336  // Both options are available: Heuristics to choose the best one.
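    // Heuristic rationale: CPU reads from DEVICE_LOCAL memory through a mapped
    // pointer are typically very slow (write-combined, uncached), so the GPU copy
    // path is preferred for such memory, and likewise on integrated GPUs with
    // unified memory.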
    12337  else
    12338  {
    12339  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
    12340  m_hAllocator->IsIntegratedGpu();
    12341  }
    12342 
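    // vkCmdCopyBuffer forbids overlapping copy regions, while the CPU path uses
    // memmove(), which handles overlap correctly.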
    12343  bool overlappingMoveSupported = !defragmentOnGpu;
    12344 
    12345  if(m_hAllocator->m_UseMutex)
    12346  {
    12347  m_Mutex.LockWrite();
    12348  pCtx->mutexLocked = true;
    12349  }
    12350 
    12351  pCtx->Begin(overlappingMoveSupported);
    12352 
    12353  // Defragment.
    12354 
    12355  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
    12356  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
    12357  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
    12358  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
    12359  pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
    12360 
    12361  // Accumulate statistics.
    12362  if(pStats != VMA_NULL)
    12363  {
    12364  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
    12365  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
    12366  pStats->bytesMoved += bytesMoved;
    12367  pStats->allocationsMoved += allocationsMoved;
    12368  VMA_ASSERT(bytesMoved <= maxBytesToMove);
    12369  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
    12370  if(defragmentOnGpu)
    12371  {
    12372  maxGpuBytesToMove -= bytesMoved;
    12373  maxGpuAllocationsToMove -= allocationsMoved;
    12374  }
    12375  else
    12376  {
    12377  maxCpuBytesToMove -= bytesMoved;
    12378  maxCpuAllocationsToMove -= allocationsMoved;
    12379  }
    12380  }
    12381 
    12382  if(pCtx->res >= VK_SUCCESS)
    12383  {
    12384  if(defragmentOnGpu)
    12385  {
    12386  ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
    12387  }
    12388  else
    12389  {
    12390  ApplyDefragmentationMovesCpu(pCtx, moves);
    12391  }
    12392  }
    12393  }
    12394 }
    12395 
    12396 void VmaBlockVector::DefragmentationEnd(
    12397  class VmaBlockVectorDefragmentationContext* pCtx,
    12398  VmaDefragmentationStats* pStats)
    12399 {
    12400  // Destroy buffers.
    12401  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
    12402  {
    12403  VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
    12404  if(blockCtx.hBuffer)
    12405  {
    12406  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
    12407  m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
    12408  }
    12409  }
    12410 
    12411  if(pCtx->res >= VK_SUCCESS)
    12412  {
    12413  FreeEmptyBlocks(pStats);
    12414  }
    12415 
    12416  if(pCtx->mutexLocked)
    12417  {
    12418  VMA_ASSERT(m_hAllocator->m_UseMutex);
    12419  m_Mutex.UnlockWrite();
    12420  }
    12421 }
    12422 
    12423 size_t VmaBlockVector::CalcAllocationCount() const
    12424 {
    12425  size_t result = 0;
    12426  for(size_t i = 0; i < m_Blocks.size(); ++i)
    12427  {
    12428  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
    12429  }
    12430  return result;
    12431 }
    12432 
    12433 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
    12434 {
    12435  if(m_BufferImageGranularity == 1)
    12436  {
    12437  return false;
    12438  }
    12439  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
    12440  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
    12441  {
    12442  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
    12443  VMA_ASSERT(m_Algorithm == 0);
    12444  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
    12445  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
    12446  {
    12447  return true;
    12448  }
    12449  }
    12450  return false;
    12451 }
    12452 
    12453 void VmaBlockVector::MakePoolAllocationsLost(
    12454  uint32_t currentFrameIndex,
    12455  size_t* pLostAllocationCount)
    12456 {
    12457  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
    12458  size_t lostAllocationCount = 0;
    12459  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    12460  {
    12461  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    12462  VMA_ASSERT(pBlock);
    12463  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    12464  }
    12465  if(pLostAllocationCount != VMA_NULL)
    12466  {
    12467  *pLostAllocationCount = lostAllocationCount;
    12468  }
    12469 }
    12470 
    12471 VkResult VmaBlockVector::CheckCorruption()
    12472 {
    12473  if(!IsCorruptionDetectionEnabled())
    12474  {
    12475  return VK_ERROR_FEATURE_NOT_PRESENT;
    12476  }
    12477 
    12478  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    12479  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    12480  {
    12481  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    12482  VMA_ASSERT(pBlock);
    12483  VkResult res = pBlock->CheckCorruption(m_hAllocator);
    12484  if(res != VK_SUCCESS)
    12485  {
    12486  return res;
    12487  }
    12488  }
    12489  return VK_SUCCESS;
    12490 }
    12491 
    12492 void VmaBlockVector::AddStats(VmaStats* pStats)
    12493 {
    12494  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    12495  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    12496 
    12497  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    12498 
    12499  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    12500  {
    12501  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    12502  VMA_ASSERT(pBlock);
    12503  VMA_HEAVY_ASSERT(pBlock->Validate());
    12504  VmaStatInfo allocationStatInfo;
    12505  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
    12506  VmaAddStatInfo(pStats->total, allocationStatInfo);
    12507  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    12508  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    12509  }
    12510 }
    12511 
    12512 ////////////////////////////////////////////////////////////////////////////////
    12513 // VmaDefragmentationAlgorithm_Generic members definition
    12514 
    12515 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
    12516  VmaAllocator hAllocator,
    12517  VmaBlockVector* pBlockVector,
    12518  uint32_t currentFrameIndex,
    12519  bool overlappingMoveSupported) :
    12520  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
    12521  m_AllocationCount(0),
    12522  m_AllAllocations(false),
    12523  m_BytesMoved(0),
    12524  m_AllocationsMoved(0),
    12525  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
    12526 {
    12527  // Create block info for each block.
    12528  const size_t blockCount = m_pBlockVector->m_Blocks.size();
    12529  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    12530  {
    12531  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
    12532  pBlockInfo->m_OriginalBlockIndex = blockIndex;
    12533  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
    12534  m_Blocks.push_back(pBlockInfo);
    12535  }
    12536 
    12537  // Sort them by m_pBlock pointer value.
    12538  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
    12539 }
    12540 
    12541 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
    12542 {
    12543  for(size_t i = m_Blocks.size(); i--; )
    12544  {
    12545  vma_delete(m_hAllocator, m_Blocks[i]);
    12546  }
    12547 }
    12548 
    12549 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    12550 {
    12551  // Now that we are inside VmaBlockVector::m_Mutex, we can make the final check whether this allocation was not lost.
    12552  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
    12553  {
    12554  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
    12555  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
    12556  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
    12557  {
    12558  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
    12559  (*it)->m_Allocations.push_back(allocInfo);
    12560  }
    12561  else
    12562  {
    12563  VMA_ASSERT(0);
    12564  }
    12565 
    12566  ++m_AllocationCount;
    12567  }
    12568 }
    12569 
    12570 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
    12571  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    12572  VkDeviceSize maxBytesToMove,
    12573  uint32_t maxAllocationsToMove)
    12574 {
    12575  if(m_Blocks.empty())
    12576  {
    12577  return VK_SUCCESS;
    12578  }
    12579 
    12580  // This is a choice based on research.
    12581  // Option 1:
    12582  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
    12583  // Option 2:
    12584  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
    12585  // Option 3:
    12586  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
    12587 
    12588  size_t srcBlockMinIndex = 0;
    12589  // When using FAST_ALGORITHM, move allocations only from the last of the blocks that contain non-movable allocations.
    12590  /*
    12591  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
    12592  {
    12593  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
    12594  if(blocksWithNonMovableCount > 0)
    12595  {
    12596  srcBlockMinIndex = blocksWithNonMovableCount - 1;
    12597  }
    12598  }
    12599  */
    12600 
    12601  size_t srcBlockIndex = m_Blocks.size() - 1;
    12602  size_t srcAllocIndex = SIZE_MAX;
    12603  for(;;)
    12604  {
    12605  // 1. Find next allocation to move.
    12606  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
    12607  // 1.2. Then start from last to first m_Allocations.
    12608  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
    12609  {
    12610  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
    12611  {
    12612  // Finished: no more allocations to process.
    12613  if(srcBlockIndex == srcBlockMinIndex)
    12614  {
    12615  return VK_SUCCESS;
    12616  }
    12617  else
    12618  {
    12619  --srcBlockIndex;
    12620  srcAllocIndex = SIZE_MAX;
    12621  }
    12622  }
    12623  else
    12624  {
    12625  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
    12626  }
    12627  }
    12628 
    12629  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
    12630  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
    12631 
    12632  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
    12633  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
    12634  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
    12635  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
    12636 
    12637  // 2. Try to find new place for this allocation in preceding or current block.
    12638  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
    12639  {
    12640  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
    12641  VmaAllocationRequest dstAllocRequest;
    12642  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
    12643  m_CurrentFrameIndex,
    12644  m_pBlockVector->GetFrameInUseCount(),
    12645  m_pBlockVector->GetBufferImageGranularity(),
    12646  size,
    12647  alignment,
    12648  false, // upperAddress
    12649  suballocType,
    12650  false, // canMakeOtherLost
    12651  strategy,
    12652  &dstAllocRequest) &&
    12653  MoveMakesSense(
    12654  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
    12655  {
    12656  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
    12657 
    12658  // Reached limit on number of allocations or bytes to move.
    12659  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
    12660  (m_BytesMoved + size > maxBytesToMove))
    12661  {
    12662  return VK_SUCCESS;
    12663  }
    12664 
    12665  VmaDefragmentationMove move;
    12666  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
    12667  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
    12668  move.srcOffset = srcOffset;
    12669  move.dstOffset = dstAllocRequest.offset;
    12670  move.size = size;
    12671  moves.push_back(move);
    12672 
    12673  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
    12674  dstAllocRequest,
    12675  suballocType,
    12676  size,
    12677  allocInfo.m_hAllocation);
    12678  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
    12679 
    12680  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
    12681 
    12682  if(allocInfo.m_pChanged != VMA_NULL)
    12683  {
    12684  *allocInfo.m_pChanged = VK_TRUE;
    12685  }
    12686 
    12687  ++m_AllocationsMoved;
    12688  m_BytesMoved += size;
    12689 
    12690  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
    12691 
    12692  break;
    12693  }
    12694  }
    12695 
    12696  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
    12697 
    12698  if(srcAllocIndex > 0)
    12699  {
    12700  --srcAllocIndex;
    12701  }
    12702  else
    12703  {
    12704  if(srcBlockIndex > 0)
    12705  {
    12706  --srcBlockIndex;
    12707  srcAllocIndex = SIZE_MAX;
    12708  }
    12709  else
    12710  {
    12711  return VK_SUCCESS;
    12712  }
    12713  }
    12714  }
    12715 }
    12716 
    12717 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
    12718 {
    12719  size_t result = 0;
    12720  for(size_t i = 0; i < m_Blocks.size(); ++i)
    12721  {
    12722  if(m_Blocks[i]->m_HasNonMovableAllocations)
    12723  {
    12724  ++result;
    12725  }
    12726  }
    12727  return result;
    12728 }
    12729 
    12730 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
    12731  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    12732  VkDeviceSize maxBytesToMove,
    12733  uint32_t maxAllocationsToMove)
    12734 {
    12735  if(!m_AllAllocations && m_AllocationCount == 0)
    12736  {
    12737  return VK_SUCCESS;
    12738  }
    12739 
    12740  const size_t blockCount = m_Blocks.size();
    12741  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    12742  {
    12743  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
    12744 
    12745  if(m_AllAllocations)
    12746  {
    12747  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
    12748  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
    12749  it != pMetadata->m_Suballocations.end();
    12750  ++it)
    12751  {
    12752  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    12753  {
    12754  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
    12755  pBlockInfo->m_Allocations.push_back(allocInfo);
    12756  }
    12757  }
    12758  }
    12759 
    12760  pBlockInfo->CalcHasNonMovableAllocations();
    12761 
    12762  // This is a choice based on research.
    12763  // Option 1:
    12764  pBlockInfo->SortAllocationsByOffsetDescending();
    12765  // Option 2:
    12766  //pBlockInfo->SortAllocationsBySizeDescending();
    12767  }
    12768 
    12769  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
    12770  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
    12771 
    12772  // This is a choice based on research.
    12773  const uint32_t roundCount = 2;
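    // A second round can pack allocations into space freed by the moves of the first one.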
    12774 
    12775  // Execute defragmentation rounds (the main part).
    12776  VkResult result = VK_SUCCESS;
    12777  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
    12778  {
    12779  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
    12780  }
    12781 
    12782  return result;
    12783 }
    12784 
    12785 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
    12786  size_t dstBlockIndex, VkDeviceSize dstOffset,
    12787  size_t srcBlockIndex, VkDeviceSize srcOffset)
    12788 {
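    // A move makes sense only if it takes the allocation strictly "down" in
    // (block index, offset) order, i.e. toward the most "destination" blocks.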
    12789  if(dstBlockIndex < srcBlockIndex)
    12790  {
    12791  return true;
    12792  }
    12793  if(dstBlockIndex > srcBlockIndex)
    12794  {
    12795  return false;
    12796  }
    12797  if(dstOffset < srcOffset)
    12798  {
    12799  return true;
    12800  }
    12801  return false;
    12802 }
    12803 
    12804 ////////////////////////////////////////////////////////////////////////////////
    12805 // VmaDefragmentationAlgorithm_Fast
    12806 
    12807 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
    12808  VmaAllocator hAllocator,
    12809  VmaBlockVector* pBlockVector,
    12810  uint32_t currentFrameIndex,
    12811  bool overlappingMoveSupported) :
    12812  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
    12813  m_OverlappingMoveSupported(overlappingMoveSupported),
    12814  m_AllocationCount(0),
    12815  m_AllAllocations(false),
    12816  m_BytesMoved(0),
    12817  m_AllocationsMoved(0),
    12818  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
    12819 {
    12820  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
    12821 
    12822 }
    12823 
    12824 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
    12825 {
    12826 }
    12827 
    12828 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
    12829  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    12830  VkDeviceSize maxBytesToMove,
    12831  uint32_t maxAllocationsToMove)
    12832 {
    12833  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
    12834 
    12835  const size_t blockCount = m_pBlockVector->GetBlockCount();
    12836  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
    12837  {
    12838  return VK_SUCCESS;
    12839  }
    12840 
    12841  PreprocessMetadata();
    12842 
    12843  // Sort blocks in order from most "destination" to most "source".
    12844 
    12845  m_BlockInfos.resize(blockCount);
    12846  for(size_t i = 0; i < blockCount; ++i)
    12847  {
    12848  m_BlockInfos[i].origBlockIndex = i;
    12849  }
    12850 
    12851  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
    12852  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
    12853  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
    12854  });
    12855 
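    // Blocks with the least free space (the fullest ones) now come first and act
    // as destinations; later, emptier blocks are drained into them.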
    12856  // THE MAIN ALGORITHM
    12857 
    12858  FreeSpaceDatabase freeSpaceDb;
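    // freeSpaceDb collects gaps left behind in destination blocks (tail space when
    // advancing to the next block, or space skipped over below), so that later,
    // smaller allocations can still be placed into them.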
    12859 
    12860  size_t dstBlockInfoIndex = 0;
    12861  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
    12862  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
    12863  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
    12864  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
    12865  VkDeviceSize dstOffset = 0;
    12866 
    12867  bool end = false;
    12868  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
    12869  {
    12870  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
    12871  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
    12872  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
    12873  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
    12874  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
    12875  {
    12876  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
    12877  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
    12878  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
    12879  if(m_AllocationsMoved == maxAllocationsToMove ||
    12880  m_BytesMoved + srcAllocSize > maxBytesToMove)
    12881  {
    12882  end = true;
    12883  break;
    12884  }
    12885  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
    12886 
    12887  // Try to place it in one of free spaces from the database.
    12888  size_t freeSpaceInfoIndex;
    12889  VkDeviceSize dstAllocOffset;
    12890  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
    12891  freeSpaceInfoIndex, dstAllocOffset))
    12892  {
    12893  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
    12894  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
    12895  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
    12896 
    12897  // Same block
    12898  if(freeSpaceInfoIndex == srcBlockInfoIndex)
    12899  {
    12900  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
    12901 
    12902  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
    12903 
    12904  VmaSuballocation suballoc = *srcSuballocIt;
    12905  suballoc.offset = dstAllocOffset;
    12906  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
    12907  m_BytesMoved += srcAllocSize;
    12908  ++m_AllocationsMoved;
    12909 
    12910  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
    12911  ++nextSuballocIt;
    12912  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
    12913  srcSuballocIt = nextSuballocIt;
    12914 
    12915  InsertSuballoc(pFreeSpaceMetadata, suballoc);
    12916 
    12917  VmaDefragmentationMove move = {
    12918  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
    12919  srcAllocOffset, dstAllocOffset,
    12920  srcAllocSize };
    12921  moves.push_back(move);
    12922  }
    12923  // Different block
    12924  else
    12925  {
    12926  // MOVE OPTION 2: Move the allocation to a different block.
    12927 
    12928  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
    12929 
    12930  VmaSuballocation suballoc = *srcSuballocIt;
    12931  suballoc.offset = dstAllocOffset;
    12932  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
    12933  m_BytesMoved += srcAllocSize;
    12934  ++m_AllocationsMoved;
    12935 
    12936  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
    12937  ++nextSuballocIt;
    12938  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
    12939  srcSuballocIt = nextSuballocIt;
    12940 
    12941  InsertSuballoc(pFreeSpaceMetadata, suballoc);
    12942 
    12943  VmaDefragmentationMove move = {
    12944  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
    12945  srcAllocOffset, dstAllocOffset,
    12946  srcAllocSize };
    12947  moves.push_back(move);
    12948  }
    12949  }
    12950  else
    12951  {
    12952  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
    12953 
    12954  // If the allocation doesn't fit before the end of dstBlock, forward to next block.
    12955  while(dstBlockInfoIndex < srcBlockInfoIndex &&
    12956  dstAllocOffset + srcAllocSize > dstBlockSize)
    12957  {
    12958  // But before that, register remaining free space at the end of dst block.
    12959  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
    12960 
    12961  ++dstBlockInfoIndex;
    12962  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
    12963  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
    12964  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
    12965  dstBlockSize = pDstMetadata->GetSize();
    12966  dstOffset = 0;
    12967  dstAllocOffset = 0;
    12968  }
    12969 
    12970  // Same block
    12971  if(dstBlockInfoIndex == srcBlockInfoIndex)
    12972  {
    12973  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
    12974 
    12975  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
    12976 
    12977  bool skipOver = overlap;
    12978  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
    12979  {
    12980  // If destination and source place overlap, skip if it would move it
    12981  // by only < 1/64 of its size.
    12982  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
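    // E.g. a 64 KiB allocation that would move by less than 1 KiB stays put -
    // such a short overlapping move is not worth the copy.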
    12983  }
    12984 
    12985  if(skipOver)
    12986  {
    12987  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
    12988 
    12989  dstOffset = srcAllocOffset + srcAllocSize;
    12990  ++srcSuballocIt;
    12991  }
    12992  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
    12993  else
    12994  {
    12995  srcSuballocIt->offset = dstAllocOffset;
    12996  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
    12997  dstOffset = dstAllocOffset + srcAllocSize;
    12998  m_BytesMoved += srcAllocSize;
    12999  ++m_AllocationsMoved;
    13000  ++srcSuballocIt;
    13001  VmaDefragmentationMove move = {
    13002  srcOrigBlockIndex, dstOrigBlockIndex,
    13003  srcAllocOffset, dstAllocOffset,
    13004  srcAllocSize };
    13005  moves.push_back(move);
    13006  }
    13007  }
    13008  // Different block
    13009  else
    13010  {
    13011  // MOVE OPTION 2: Move the allocation to a different block.
    13012 
    13013  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
    13014  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
    13015 
    13016  VmaSuballocation suballoc = *srcSuballocIt;
    13017  suballoc.offset = dstAllocOffset;
    13018  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
    13019  dstOffset = dstAllocOffset + srcAllocSize;
    13020  m_BytesMoved += srcAllocSize;
    13021  ++m_AllocationsMoved;
    13022 
    13023  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
    13024  ++nextSuballocIt;
    13025  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
    13026  srcSuballocIt = nextSuballocIt;
    13027 
    13028  pDstMetadata->m_Suballocations.push_back(suballoc);
    13029 
    13030  VmaDefragmentationMove move = {
    13031  srcOrigBlockIndex, dstOrigBlockIndex,
    13032  srcAllocOffset, dstAllocOffset,
    13033  srcAllocSize };
    13034  moves.push_back(move);
    13035  }
    13036  }
    13037  }
    13038  }
    13039 
    13040  m_BlockInfos.clear();
    13041 
    13042  PostprocessMetadata();
    13043 
    13044  return VK_SUCCESS;
    13045 }
    13046 
    13047 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
    13048 {
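    // Strip all FREE suballocations from every block, leaving only real allocations;
    // PostprocessMetadata() rebuilds the free gaps, counters and
    // m_FreeSuballocationsBySize afterwards.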
    13049  const size_t blockCount = m_pBlockVector->GetBlockCount();
    13050  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    13051  {
    13052  VmaBlockMetadata_Generic* const pMetadata =
    13053  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
    13054  pMetadata->m_FreeCount = 0;
    13055  pMetadata->m_SumFreeSize = pMetadata->GetSize();
    13056  pMetadata->m_FreeSuballocationsBySize.clear();
    13057  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
    13058  it != pMetadata->m_Suballocations.end(); )
    13059  {
    13060  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
    13061  {
    13062  VmaSuballocationList::iterator nextIt = it;
    13063  ++nextIt;
    13064  pMetadata->m_Suballocations.erase(it);
    13065  it = nextIt;
    13066  }
    13067  else
    13068  {
    13069  ++it;
    13070  }
    13071  }
    13072  }
    13073 }
    13074 
    13075 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
    13076 {
    13077  const size_t blockCount = m_pBlockVector->GetBlockCount();
    13078  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    13079  {
    13080  VmaBlockMetadata_Generic* const pMetadata =
    13081  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
    13082  const VkDeviceSize blockSize = pMetadata->GetSize();
    13083 
    13084  // No allocations in this block - entire area is free.
    13085  if(pMetadata->m_Suballocations.empty())
    13086  {
    13087  pMetadata->m_FreeCount = 1;
    13088  //pMetadata->m_SumFreeSize is already set to blockSize.
    13089  VmaSuballocation suballoc = {
    13090  0, // offset
    13091  blockSize, // size
    13092  VMA_NULL, // hAllocation
    13093  VMA_SUBALLOCATION_TYPE_FREE };
    13094  pMetadata->m_Suballocations.push_back(suballoc);
    13095  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
    13096  }
    13097  // There are some allocations in this block.
    13098  else
    13099  {
    13100  VkDeviceSize offset = 0;
    13101  VmaSuballocationList::iterator it;
    13102  for(it = pMetadata->m_Suballocations.begin();
    13103  it != pMetadata->m_Suballocations.end();
    13104  ++it)
    13105  {
    13106  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
    13107  VMA_ASSERT(it->offset >= offset);
    13108 
    13109  // Need to insert preceding free space.
    13110  if(it->offset > offset)
    13111  {
    13112  ++pMetadata->m_FreeCount;
    13113  const VkDeviceSize freeSize = it->offset - offset;
    13114  VmaSuballocation suballoc = {
    13115  offset, // offset
    13116  freeSize, // size
    13117  VMA_NULL, // hAllocation
    13118  VMA_SUBALLOCATION_TYPE_FREE };
    13119  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
    13120  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    13121  {
    13122  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
    13123  }
    13124  }
    13125 
    13126  pMetadata->m_SumFreeSize -= it->size;
    13127  offset = it->offset + it->size;
    13128  }
    13129 
    13130  // Need to insert trailing free space.
    13131  if(offset < blockSize)
    13132  {
    13133  ++pMetadata->m_FreeCount;
    13134  const VkDeviceSize freeSize = blockSize - offset;
    13135  VmaSuballocation suballoc = {
    13136  offset, // offset
    13137  freeSize, // size
    13138  VMA_NULL, // hAllocation
    13139  VMA_SUBALLOCATION_TYPE_FREE };
    13140  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
    13141  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
    13142  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    13143  {
    13144  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
    13145  }
    13146  }
    13147 
    13148  VMA_SORT(
    13149  pMetadata->m_FreeSuballocationsBySize.begin(),
    13150  pMetadata->m_FreeSuballocationsBySize.end(),
    13151  VmaSuballocationItemSizeLess());
    13152  }
    13153 
    13154  VMA_HEAVY_ASSERT(pMetadata->Validate());
    13155  }
    13156 }
    13157 
    13158 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
    13159 {
    13160  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
    13161  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
    13162  while(it != pMetadata->m_Suballocations.end() &&
    13163  it->offset < suballoc.offset)
    13164  {
    13165  ++it;
    13166  }
    13169  pMetadata->m_Suballocations.insert(it, suballoc);
    13170 }
    13171 
    13172 ////////////////////////////////////////////////////////////////////////////////
    13173 // VmaBlockVectorDefragmentationContext
    13174 
    13175 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
    13176  VmaAllocator hAllocator,
    13177  VmaPool hCustomPool,
    13178  VmaBlockVector* pBlockVector,
    13179  uint32_t currFrameIndex) :
    13180  res(VK_SUCCESS),
    13181  mutexLocked(false),
    13182  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
    13183  m_hAllocator(hAllocator),
    13184  m_hCustomPool(hCustomPool),
    13185  m_pBlockVector(pBlockVector),
    13186  m_CurrFrameIndex(currFrameIndex),
    13187  m_pAlgorithm(VMA_NULL),
    13188  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
    13189  m_AllAllocations(false)
    13190 {
    13191 }
    13192 
    13193 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
    13194 {
    13195  vma_delete(m_hAllocator, m_pAlgorithm);
    13196 }
    13197 
    13198 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    13199 {
    13200  AllocInfo info = { hAlloc, pChanged };
    13201  m_Allocations.push_back(info);
    13202 }
    13203 
    13204 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
    13205 {
    13206  const bool allAllocations = m_AllAllocations ||
    13207  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
    13208 
    13209  /********************************
    13210  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
    13211  ********************************/
    13212 
    13213  /*
    13214  Fast algorithm is supported only when certain criteria are met:
    13215  - VMA_DEBUG_MARGIN is 0.
    13216  - All allocations in this block vector are moveable.
    13217  - There is no possibility of image/buffer granularity conflict.
    13218  */
    13219  if(VMA_DEBUG_MARGIN == 0 &&
    13220  allAllocations &&
    13221  !m_pBlockVector->IsBufferImageGranularityConflictPossible())
    13222  {
    13223  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
    13224  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
    13225  }
    13226  else
    13227  {
    13228  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
    13229  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
    13230  }
    13231 
    13232  if(allAllocations)
    13233  {
    13234  m_pAlgorithm->AddAll();
    13235  }
    13236  else
    13237  {
    13238  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
    13239  {
    13240  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
    13241  }
    13242  }
    13243 }
    13244 
    13245 ////////////////////////////////////////////////////////////////////////////////
    13246 // VmaDefragmentationContext
    13247 
    13248 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
    13249  VmaAllocator hAllocator,
    13250  uint32_t currFrameIndex,
    13251  uint32_t flags,
    13252  VmaDefragmentationStats* pStats) :
    13253  m_hAllocator(hAllocator),
    13254  m_CurrFrameIndex(currFrameIndex),
    13255  m_Flags(flags),
    13256  m_pStats(pStats),
    13257  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
    13258 {
    13259  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
    13260 }
    13261 
    13262 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
    13263 {
    13264  for(size_t i = m_CustomPoolContexts.size(); i--; )
    13265  {
    13266  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
    13267  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
    13268  vma_delete(m_hAllocator, pBlockVectorCtx);
    13269  }
    13270  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
    13271  {
    13272  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
    13273  if(pBlockVectorCtx)
    13274  {
    13275  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
    13276  vma_delete(m_hAllocator, pBlockVectorCtx);
    13277  }
    13278  }
    13279 }
    13280 
    13281 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
    13282 {
    13283  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
    13284  {
    13285  VmaPool pool = pPools[poolIndex];
    13286  VMA_ASSERT(pool);
    13287  // Pools with algorithm other than default are not defragmented.
    13288  if(pool->m_BlockVector.GetAlgorithm() == 0)
    13289  {
    13290  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
    13291 
    13292  for(size_t i = m_CustomPoolContexts.size(); i--; )
    13293  {
    13294  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
    13295  {
    13296  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
    13297  break;
    13298  }
    13299  }
    13300 
    13301  if(!pBlockVectorDefragCtx)
    13302  {
    13303  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
    13304  m_hAllocator,
    13305  pool,
    13306  &pool->m_BlockVector,
    13307  m_CurrFrameIndex);
    13308  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
    13309  }
    13310 
    13311  pBlockVectorDefragCtx->AddAll();
    13312  }
    13313  }
    13314 }
    13315 
    13316 void VmaDefragmentationContext_T::AddAllocations(
    13317  uint32_t allocationCount,
    13318  VmaAllocation* pAllocations,
    13319  VkBool32* pAllocationsChanged)
    13320 {
    13321  // Dispatch pAllocations among defragmentators. Create them when necessary.
    13322  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    13323  {
    13324  const VmaAllocation hAlloc = pAllocations[allocIndex];
    13325  VMA_ASSERT(hAlloc);
    13326  // DedicatedAlloc cannot be defragmented.
    13327  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
    13328  // Lost allocation cannot be defragmented.
    13329  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
    13330  {
    13331  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
    13332 
    13333  const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
    13334  // This allocation belongs to custom pool.
    13335  if(hAllocPool != VK_NULL_HANDLE)
    13336  {
    13337  // Pools with algorithm other than default are not defragmented.
    13338  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
    13339  {
    13340  for(size_t i = m_CustomPoolContexts.size(); i--; )
    13341  {
    13342  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
    13343  {
    13344  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
    13345  break;
    13346  }
    13347  }
    13348  if(!pBlockVectorDefragCtx)
    13349  {
    13350  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
    13351  m_hAllocator,
    13352  hAllocPool,
    13353  &hAllocPool->m_BlockVector,
    13354  m_CurrFrameIndex);
    13355  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
    13356  }
    13357  }
    13358  }
    13359  // This allocation belongs to default pool.
    13360  else
    13361  {
    13362  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
    13363  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
    13364  if(!pBlockVectorDefragCtx)
    13365  {
    13366  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
    13367  m_hAllocator,
    13368  VMA_NULL, // hCustomPool
    13369  m_hAllocator->m_pBlockVectors[memTypeIndex],
    13370  m_CurrFrameIndex);
    13371  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
    13372  }
    13373  }
    13374 
    13375  if(pBlockVectorDefragCtx)
    13376  {
    13377  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
    13378  &pAllocationsChanged[allocIndex] : VMA_NULL;
    13379  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
    13380  }
    13381  }
    13382  }
    13383 }
    13384 
    13385 VkResult VmaDefragmentationContext_T::Defragment(
    13386  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
    13387  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
    13388  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
    13389 {
    13390  if(pStats)
    13391  {
    13392  memset(pStats, 0, sizeof(VmaDefragmentationStats));
    13393  }
    13394 
    13395  if(commandBuffer == VK_NULL_HANDLE)
    13396  {
    13397  maxGpuBytesToMove = 0;
    13398  maxGpuAllocationsToMove = 0;
    13399  }
    13400 
    13401  VkResult res = VK_SUCCESS;
    13402 
    13403  // Process default pools.
    13404  for(uint32_t memTypeIndex = 0;
    13405  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
    13406  ++memTypeIndex)
    13407  {
    13408  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
    13409  if(pBlockVectorCtx)
    13410  {
    13411  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
    13412  pBlockVectorCtx->GetBlockVector()->Defragment(
    13413  pBlockVectorCtx,
    13414  pStats,
    13415  maxCpuBytesToMove, maxCpuAllocationsToMove,
    13416  maxGpuBytesToMove, maxGpuAllocationsToMove,
    13417  commandBuffer);
    13418  if(pBlockVectorCtx->res != VK_SUCCESS)
    13419  {
    13420  res = pBlockVectorCtx->res;
    13421  }
    13422  }
    13423  }
    13424 
    13425  // Process custom pools.
    13426  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
    13427  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
    13428  ++customCtxIndex)
    13429  {
    13430  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
    13431  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
    13432  pBlockVectorCtx->GetBlockVector()->Defragment(
    13433  pBlockVectorCtx,
    13434  pStats,
    13435  maxCpuBytesToMove, maxCpuAllocationsToMove,
    13436  maxGpuBytesToMove, maxGpuAllocationsToMove,
    13437  commandBuffer);
    13438  if(pBlockVectorCtx->res != VK_SUCCESS)
    13439  {
    13440  res = pBlockVectorCtx->res;
    13441  }
    13442  }
    13443 
    13444  return res;
    13445 }
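// A minimal caller-side sketch (not part of the library source) of driving the
// CPU-only path of the Defragment() call above through the public API, assuming
// `allocations` holds `allocationCount` defragmentable VmaAllocation handles:
//
//   VmaDefragmentationInfo2 defragInfo = {};
//   defragInfo.allocationCount = allocationCount;
//   defragInfo.pAllocations = allocations;
//   defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
//   defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
//   // commandBuffer stays VK_NULL_HANDLE, so GPU limits are zeroed above.
//
//   VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
//   vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL /*pStats*/, &defragCtx);
//   vmaDefragmentationEnd(allocator, defragCtx);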
    13446 
    13447 ////////////////////////////////////////////////////////////////////////////////
    13448 // VmaRecorder
    13449 
    13450 #if VMA_RECORDING_ENABLED
    13451 
    13452 VmaRecorder::VmaRecorder() :
    13453  m_UseMutex(true),
    13454  m_Flags(0),
    13455  m_File(VMA_NULL),
    13456  m_Freq(INT64_MAX),
    13457  m_StartCounter(INT64_MAX)
    13458 {
    13459 }
    13460 
    13461 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
    13462 {
    13463  m_UseMutex = useMutex;
    13464  m_Flags = settings.flags;
    13465 
    13466  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    13467  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
    13468 
    13469  // Open file for writing.
    13470  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    13471  if(err != 0)
    13472  {
    13473  return VK_ERROR_INITIALIZATION_FAILED;
    13474  }
    13475 
    13476  // Write header.
    13477  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    13478  fprintf(m_File, "%s\n", "1,6");
    13479 
    13480  return VK_SUCCESS;
    13481 }
    13482 
    13483 VmaRecorder::~VmaRecorder()
    13484 {
    13485  if(m_File != VMA_NULL)
    13486  {
    13487  fclose(m_File);
    13488  }
    13489 }
    13490 
    13491 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
    13492 {
    13493  CallParams callParams;
    13494  GetBasicParams(callParams);
    13495 
    13496  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13497  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
    13498  Flush();
    13499 }
    13500 
    13501 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
    13502 {
    13503  CallParams callParams;
    13504  GetBasicParams(callParams);
    13505 
    13506  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13507  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
    13508  Flush();
    13509 }
    13510 
    13511 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
    13512 {
    13513  CallParams callParams;
    13514  GetBasicParams(callParams);
    13515 
    13516  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13517  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
    13518  createInfo.memoryTypeIndex,
    13519  createInfo.flags,
    13520  createInfo.blockSize,
    13521  (uint64_t)createInfo.minBlockCount,
    13522  (uint64_t)createInfo.maxBlockCount,
    13523  createInfo.frameInUseCount,
    13524  pool);
    13525  Flush();
    13526 }
    13527 
    13528 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
    13529 {
    13530  CallParams callParams;
    13531  GetBasicParams(callParams);
    13532 
    13533  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13534  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
    13535  pool);
    13536  Flush();
    13537 }
    13538 
    13539 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    13540  const VkMemoryRequirements& vkMemReq,
    13541  const VmaAllocationCreateInfo& createInfo,
    13542  VmaAllocation allocation)
    13543 {
    13544  CallParams callParams;
    13545  GetBasicParams(callParams);
    13546 
    13547  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13548  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    13549  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13550  vkMemReq.size,
    13551  vkMemReq.alignment,
    13552  vkMemReq.memoryTypeBits,
    13553  createInfo.flags,
    13554  createInfo.usage,
    13555  createInfo.requiredFlags,
    13556  createInfo.preferredFlags,
    13557  createInfo.memoryTypeBits,
    13558  createInfo.pool,
    13559  allocation,
    13560  userDataStr.GetString());
    13561  Flush();
    13562 }
    13563 
    13564 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
    13565  const VkMemoryRequirements& vkMemReq,
    13566  const VmaAllocationCreateInfo& createInfo,
    13567  uint64_t allocationCount,
    13568  const VmaAllocation* pAllocations)
    13569 {
    13570  CallParams callParams;
    13571  GetBasicParams(callParams);
    13572 
    13573  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13574  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    13575  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
    13576  vkMemReq.size,
    13577  vkMemReq.alignment,
    13578  vkMemReq.memoryTypeBits,
    13579  createInfo.flags,
    13580  createInfo.usage,
    13581  createInfo.requiredFlags,
    13582  createInfo.preferredFlags,
    13583  createInfo.memoryTypeBits,
    13584  createInfo.pool);
    13585  PrintPointerList(allocationCount, pAllocations);
    13586  fprintf(m_File, ",%s\n", userDataStr.GetString());
    13587  Flush();
    13588 }
    13589 
    13590 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    13591  const VkMemoryRequirements& vkMemReq,
    13592  bool requiresDedicatedAllocation,
    13593  bool prefersDedicatedAllocation,
    13594  const VmaAllocationCreateInfo& createInfo,
    13595  VmaAllocation allocation)
    13596 {
    13597  CallParams callParams;
    13598  GetBasicParams(callParams);
    13599 
    13600  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13601  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    13602  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13603  vkMemReq.size,
    13604  vkMemReq.alignment,
    13605  vkMemReq.memoryTypeBits,
    13606  requiresDedicatedAllocation ? 1 : 0,
    13607  prefersDedicatedAllocation ? 1 : 0,
    13608  createInfo.flags,
    13609  createInfo.usage,
    13610  createInfo.requiredFlags,
    13611  createInfo.preferredFlags,
    13612  createInfo.memoryTypeBits,
    13613  createInfo.pool,
    13614  allocation,
    13615  userDataStr.GetString());
    13616  Flush();
    13617 }
    13618 
    13619 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    13620  const VkMemoryRequirements& vkMemReq,
    13621  bool requiresDedicatedAllocation,
    13622  bool prefersDedicatedAllocation,
    13623  const VmaAllocationCreateInfo& createInfo,
    13624  VmaAllocation allocation)
    13625 {
    13626  CallParams callParams;
    13627  GetBasicParams(callParams);
    13628 
    13629  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13630  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    13631  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13632  vkMemReq.size,
    13633  vkMemReq.alignment,
    13634  vkMemReq.memoryTypeBits,
    13635  requiresDedicatedAllocation ? 1 : 0,
    13636  prefersDedicatedAllocation ? 1 : 0,
    13637  createInfo.flags,
    13638  createInfo.usage,
    13639  createInfo.requiredFlags,
    13640  createInfo.preferredFlags,
    13641  createInfo.memoryTypeBits,
    13642  createInfo.pool,
    13643  allocation,
    13644  userDataStr.GetString());
    13645  Flush();
    13646 }
    13647 
    13648 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
    13649  VmaAllocation allocation)
    13650 {
    13651  CallParams callParams;
    13652  GetBasicParams(callParams);
    13653 
    13654  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13655  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    13656  allocation);
    13657  Flush();
    13658 }
    13659 
    13660 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
    13661  uint64_t allocationCount,
    13662  const VmaAllocation* pAllocations)
    13663 {
    13664  CallParams callParams;
    13665  GetBasicParams(callParams);
    13666 
    13667  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13668  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
    13669  PrintPointerList(allocationCount, pAllocations);
    13670  fprintf(m_File, "\n");
    13671  Flush();
    13672 }
    13673 
    13674 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    13675  VmaAllocation allocation,
    13676  const void* pUserData)
    13677 {
    13678  CallParams callParams;
    13679  GetBasicParams(callParams);
    13680 
    13681  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13682  UserDataString userDataStr(
    13683  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
    13684  pUserData);
    13685  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13686  allocation,
    13687  userDataStr.GetString());
    13688  Flush();
    13689 }
    13690 
    13691 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
    13692  VmaAllocation allocation)
    13693 {
    13694  CallParams callParams;
    13695  GetBasicParams(callParams);
    13696 
    13697  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13698  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    13699  allocation);
    13700  Flush();
    13701 }
    13702 
    13703 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
    13704  VmaAllocation allocation)
    13705 {
    13706  CallParams callParams;
    13707  GetBasicParams(callParams);
    13708 
    13709  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13710  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    13711  allocation);
    13712  Flush();
    13713 }
    13714 
    13715 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
    13716  VmaAllocation allocation)
    13717 {
    13718  CallParams callParams;
    13719  GetBasicParams(callParams);
    13720 
    13721  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13722  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    13723  allocation);
    13724  Flush();
    13725 }
    13726 
    13727 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
    13728  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    13729 {
    13730  CallParams callParams;
    13731  GetBasicParams(callParams);
    13732 
    13733  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13734  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    13735  allocation,
    13736  offset,
    13737  size);
    13738  Flush();
    13739 }
    13740 
    13741 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
    13742  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    13743 {
    13744  CallParams callParams;
    13745  GetBasicParams(callParams);
    13746 
    13747  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13748  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    13749  allocation,
    13750  offset,
    13751  size);
    13752  Flush();
    13753 }
    13754 
    13755 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
    13756  const VkBufferCreateInfo& bufCreateInfo,
    13757  const VmaAllocationCreateInfo& allocCreateInfo,
    13758  VmaAllocation allocation)
    13759 {
    13760  CallParams callParams;
    13761  GetBasicParams(callParams);
    13762 
    13763  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13764  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    13765  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13766  bufCreateInfo.flags,
    13767  bufCreateInfo.size,
    13768  bufCreateInfo.usage,
    13769  bufCreateInfo.sharingMode,
    13770  allocCreateInfo.flags,
    13771  allocCreateInfo.usage,
    13772  allocCreateInfo.requiredFlags,
    13773  allocCreateInfo.preferredFlags,
    13774  allocCreateInfo.memoryTypeBits,
    13775  allocCreateInfo.pool,
    13776  allocation,
    13777  userDataStr.GetString());
    13778  Flush();
    13779 }
    13780 
    13781 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    13782  const VkImageCreateInfo& imageCreateInfo,
    13783  const VmaAllocationCreateInfo& allocCreateInfo,
    13784  VmaAllocation allocation)
    13785 {
    13786  CallParams callParams;
    13787  GetBasicParams(callParams);
    13788 
    13789  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13790  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    13791  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13792  imageCreateInfo.flags,
    13793  imageCreateInfo.imageType,
    13794  imageCreateInfo.format,
    13795  imageCreateInfo.extent.width,
    13796  imageCreateInfo.extent.height,
    13797  imageCreateInfo.extent.depth,
    13798  imageCreateInfo.mipLevels,
    13799  imageCreateInfo.arrayLayers,
    13800  imageCreateInfo.samples,
    13801  imageCreateInfo.tiling,
    13802  imageCreateInfo.usage,
    13803  imageCreateInfo.sharingMode,
    13804  imageCreateInfo.initialLayout,
    13805  allocCreateInfo.flags,
    13806  allocCreateInfo.usage,
    13807  allocCreateInfo.requiredFlags,
    13808  allocCreateInfo.preferredFlags,
    13809  allocCreateInfo.memoryTypeBits,
    13810  allocCreateInfo.pool,
    13811  allocation,
    13812  userDataStr.GetString());
    13813  Flush();
    13814 }
    13815 
    13816 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
    13817  VmaAllocation allocation)
    13818 {
    13819  CallParams callParams;
    13820  GetBasicParams(callParams);
    13821 
    13822  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13823  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
    13824  allocation);
    13825  Flush();
    13826 }
    13827 
    13828 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
    13829  VmaAllocation allocation)
    13830 {
    13831  CallParams callParams;
    13832  GetBasicParams(callParams);
    13833 
    13834  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13835  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
    13836  allocation);
    13837  Flush();
    13838 }
    13839 
    13840 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
    13841  VmaAllocation allocation)
    13842 {
    13843  CallParams callParams;
    13844  GetBasicParams(callParams);
    13845 
    13846  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13847  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    13848  allocation);
    13849  Flush();
    13850 }
    13851 
    13852 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
    13853  VmaAllocation allocation)
    13854 {
    13855  CallParams callParams;
    13856  GetBasicParams(callParams);
    13857 
    13858  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13859  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
    13860  allocation);
    13861  Flush();
    13862 }
    13863 
    13864 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
    13865  VmaPool pool)
    13866 {
    13867  CallParams callParams;
    13868  GetBasicParams(callParams);
    13869 
    13870  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13871  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
    13872  pool);
    13873  Flush();
    13874 }
    13875 
    13876 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
    13877  const VmaDefragmentationInfo2& info,
    13878  VmaDefragmentationContext ctx)
    13879 {
    13880  CallParams callParams;
    13881  GetBasicParams(callParams);
    13882 
    13883  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13884  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
    13885  info.flags);
    13886  PrintPointerList(info.allocationCount, info.pAllocations);
    13887  fprintf(m_File, ",");
    13888  PrintPointerList(info.poolCount, info.pPools);
    13889  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
    13890  info.maxCpuBytesToMove,
    13891  info.maxCpuAllocationsToMove,
    13892  info.maxGpuBytesToMove,
    13893  info.maxGpuAllocationsToMove,
    13894  info.commandBuffer,
    13895  ctx);
    13896  Flush();
    13897 }
    13898 
    13899 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
    13900  VmaDefragmentationContext ctx)
    13901 {
    13902  CallParams callParams;
    13903  GetBasicParams(callParams);
    13904 
    13905  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13906  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
    13907  ctx);
    13908  Flush();
    13909 }
    13910 
    13911 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
    13912 {
    13913  if(pUserData != VMA_NULL)
    13914  {
    13915  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
    13916  {
    13917  m_Str = (const char*)pUserData;
    13918  }
    13919  else
    13920  {
    13921  sprintf_s(m_PtrStr, "%p", pUserData);
    13922  m_Str = m_PtrStr;
    13923  }
    13924  }
    13925  else
    13926  {
    13927  m_Str = "";
    13928  }
    13929 }
    13930 
    13931 void VmaRecorder::WriteConfiguration(
    13932  const VkPhysicalDeviceProperties& devProps,
    13933  const VkPhysicalDeviceMemoryProperties& memProps,
    13934  bool dedicatedAllocationExtensionEnabled, bool bindMemory2ExtensionEnabled)
    13935 {
    13936  fprintf(m_File, "Config,Begin\n");
    13937 
    13938  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    13939  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    13940  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    13941  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    13942  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    13943  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
    13944 
    13945  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    13946  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    13947  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
    13948 
    13949  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    13950  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    13951  {
    13952  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
    13953  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    13954  }
    13955  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    13956  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    13957  {
    13958  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
    13959  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    13960  }
    13961 
    13962  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
    13963 
    13964  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    13965  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    13966  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    13967  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    13968  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    13969  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    13970  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    13971  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    13972  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    13973 
    13974  fprintf(m_File, "Config,End\n");
    13975 }
    13976 
    13977 void VmaRecorder::GetBasicParams(CallParams& outParams)
    13978 {
    13979  outParams.threadId = GetCurrentThreadId();
    13980 
    13981  LARGE_INTEGER counter;
    13982  QueryPerformanceCounter(&counter);
    13983  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
    13984 }
    13985 
    13986 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
    13987 {
    13988  if(count)
    13989  {
    13990  fprintf(m_File, "%p", pItems[0]);
    13991  for(uint64_t i = 1; i < count; ++i)
    13992  {
    13993  fprintf(m_File, " %p", pItems[i]);
    13994  }
    13995  }
    13996 }
    13997 
    13998 void VmaRecorder::Flush()
    13999 {
    14000  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
    14001  {
    14002  fflush(m_File);
    14003  }
    14004 }
    14005 
    14006 #endif // #if VMA_RECORDING_ENABLED
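// A minimal usage sketch (not part of the library source) of enabling this
// recorder, assuming VMA_RECORDING_ENABLED is defined to 1 on Windows:
//
//   VmaRecordSettings recordSettings = {};
//   recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT; // optional: fflush after each call
//   recordSettings.pFilePath = "vma_calls.csv"; // CSV file, format version 1,6
//
//   VmaAllocatorCreateInfo allocatorInfo = {};
//   allocatorInfo.physicalDevice = physicalDevice;
//   allocatorInfo.device = device;
//   allocatorInfo.pRecordSettings = &recordSettings;
//
//   VmaAllocator allocator;
//   vmaCreateAllocator(&allocatorInfo, &allocator);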
    14007 
    14008 ////////////////////////////////////////////////////////////////////////////////
    14009 // VmaAllocationObjectAllocator
    14010 
    14011 VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
    14012  m_Allocator(pAllocationCallbacks, 1024)
    14013 {
    14014 }
    14015 
    14016 VmaAllocation VmaAllocationObjectAllocator::Allocate()
    14017 {
    14018  VmaMutexLock mutexLock(m_Mutex);
    14019  return m_Allocator.Alloc();
    14020 }
    14021 
    14022 void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
    14023 {
    14024  VmaMutexLock mutexLock(m_Mutex);
    14025  m_Allocator.Free(hAlloc);
    14026 }
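// Note: VmaAllocation_T objects are pooled in blocks (1024 is passed to the
// pool allocator in the constructor above) instead of being heap-allocated
// individually, and the mutex makes Allocate()/Free() thread-safe.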
    14027 
    14028 ////////////////////////////////////////////////////////////////////////////////
    14029 // VmaAllocator_T
    14030 
    14031 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    14032  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    14033  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    14034  m_hDevice(pCreateInfo->device),
    14035  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    14036  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
    14037  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    14038  m_AllocationObjectAllocator(&m_AllocationCallbacks),
    14039  m_PreferredLargeHeapBlockSize(0),
    14040  m_PhysicalDevice(pCreateInfo->physicalDevice),
    14041  m_CurrentFrameIndex(0),
    14042  m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
    14043  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
    14044  m_NextPoolId(0)
    14045 #if VMA_RECORDING_ENABLED
    14046  ,m_pRecorder(VMA_NULL)
    14047 #endif
    14048 {
    14049  if(VMA_DEBUG_DETECT_CORRUPTION)
    14050  {
    14051  // Margin needs to be a multiple of sizeof(uint32_t) because VMA_CORRUPTION_DETECTION_MAGIC_VALUE is written to it.
    14052  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
    14053  }
    14054 
    14055  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
    14056 
    14057 #if !(VMA_DEDICATED_ALLOCATION)
    14058  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
    14059  {
    14060  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
    14061  }
    14062 #endif
    14063 
    14064  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
    14065  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    14066  memset(&m_MemProps, 0, sizeof(m_MemProps));
    14067 
    14068  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    14069  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
    14070 
    14071  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    14072  {
    14073  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    14074  }
    14075 
    14076  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    14077  {
    14078  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
    14079  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    14080  }
    14081 
    14082  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
    14083 
    14084  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    14085  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
    14086 
    14087  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
    14088  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
    14089  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
    14090  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
    14091 
    14092  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
    14093  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    14094 
    14095  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    14096  {
    14097  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
    14098  {
    14099  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
    14100  if(limit != VK_WHOLE_SIZE)
    14101  {
    14102  m_HeapSizeLimit[heapIndex] = limit;
    14103  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
    14104  {
    14105  m_MemProps.memoryHeaps[heapIndex].size = limit;
    14106  }
    14107  }
    14108  }
    14109  }
    14110 
    14111  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    14112  {
    14113  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
    14114 
    14115  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
    14116  this,
    14117  VK_NULL_HANDLE, // hParentPool
    14118  memTypeIndex,
    14119  preferredBlockSize,
    14120  0,
    14121  SIZE_MAX,
    14122  GetBufferImageGranularity(),
    14123  pCreateInfo->frameInUseCount,
    14124  false, // isCustomPool
    14125  false, // explicitBlockSize
    14126  false); // linearAlgorithm
    14127  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
    14128  // because minBlockCount is 0.
    14129  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    14130 
    14131  }
    14132 }
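// Note on the constructor above: one default VmaBlockVector plus one vector of
// dedicated allocations is created per Vulkan memory type, so each memory type
// is managed independently.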
    14133 
    14134 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
    14135 {
    14136  VkResult res = VK_SUCCESS;
    14137 
    14138  if(pCreateInfo->pRecordSettings != VMA_NULL &&
    14139  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    14140  {
    14141 #if VMA_RECORDING_ENABLED
    14142  m_pRecorder = vma_new(this, VmaRecorder)();
    14143  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
    14144  if(res != VK_SUCCESS)
    14145  {
    14146  return res;
    14147  }
    14148  m_pRecorder->WriteConfiguration(
    14149  m_PhysicalDeviceProperties,
    14150  m_MemProps,
    14151  m_UseKhrDedicatedAllocation,
           (pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0);
    14152  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
    14153 #else
    14154  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
    14155  return VK_ERROR_FEATURE_NOT_PRESENT;
    14156 #endif
    14157  }
    14158 
    14159  return res;
    14160 }
    14161 
    14162 VmaAllocator_T::~VmaAllocator_T()
    14163 {
    14164 #if VMA_RECORDING_ENABLED
    14165  if(m_pRecorder != VMA_NULL)
    14166  {
    14167  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
    14168  vma_delete(this, m_pRecorder);
    14169  }
    14170 #endif
    14171 
    14172  VMA_ASSERT(m_Pools.empty());
    14173 
    14174  for(size_t i = GetMemoryTypeCount(); i--; )
    14175  {
    14176  if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty())
    14177  {
    14178  VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
    14179  }
    14180 
    14181  vma_delete(this, m_pDedicatedAllocations[i]);
    14182  vma_delete(this, m_pBlockVectors[i]);
    14183  }
    14184 }
    14185 
    14186 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
    14187 {
    14188 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
    14189  m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
    14190  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
    14191  m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
    14192  m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
    14193  m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
    14194  m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
    14195  m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
    14196  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
    14197  m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
    14198  m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
    14199  m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
    14200  m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
    14201  m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
    14202  m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
    14203  m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
    14204  m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
    14205  m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
    14206 #if VMA_DEDICATED_ALLOCATION
    14207  if(m_UseKhrDedicatedAllocation)
    14208  {
    14209  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
    14210  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
    14211  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
    14212  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    14213  }
    14214 #endif // #if VMA_DEDICATED_ALLOCATION
    14215 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
    14216 
    14217 #define VMA_COPY_IF_NOT_NULL(funcName) \
    14218  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
    14219 
    14220  if(pVulkanFunctions != VMA_NULL)
    14221  {
    14222  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
    14223  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
    14224  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
    14225  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
    14226  VMA_COPY_IF_NOT_NULL(vkMapMemory);
    14227  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
    14228  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
    14229  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
    14230  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
    14231  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
    14232  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
    14233  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
    14234  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
    14235  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
    14236  VMA_COPY_IF_NOT_NULL(vkCreateImage);
    14237  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
    14238  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
    14239 #if VMA_DEDICATED_ALLOCATION
    14240  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
    14241  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
    14242 #endif
    14243  }
    14244 
    14245 #undef VMA_COPY_IF_NOT_NULL
    14246 
    14247  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    14248  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    14249  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    14250  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    14251  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    14252  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    14253  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    14254  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    14255  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    14256  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    14257  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    14258  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    14259  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    14260  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    14261  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    14262  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    14263  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    14264  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
    14265  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
    14266 #if VMA_DEDICATED_ALLOCATION
    14267  if(m_UseKhrDedicatedAllocation)
    14268  {
    14269  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
    14270  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    14271  }
    14272 #endif
    14273 }
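// A minimal caller-side sketch (not part of the library source) of providing
// these pointers manually, e.g. when VMA_STATIC_VULKAN_FUNCTIONS is 0 and a
// dynamic loader is used:
//
//   VmaVulkanFunctions vulkanFunctions = {};
//   vulkanFunctions.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
//   vulkanFunctions.vkGetPhysicalDeviceMemoryProperties = vkGetPhysicalDeviceMemoryProperties;
//   vulkanFunctions.vkAllocateMemory = vkAllocateMemory;
//   // ... assign every remaining member asserted above ...
//
//   VmaAllocatorCreateInfo allocatorInfo = {};
//   allocatorInfo.pVulkanFunctions = &vulkanFunctions;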
    14274 
    14275 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    14276 {
    14277  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    14278  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    14279  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
    14280  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
    14281 }
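// Worked example for CalcPreferredBlockSize(), assuming the default macro
// values (VMA_SMALL_HEAP_MAX_SIZE = 1 GiB, VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE =
// 256 MiB): a 256 MiB heap counts as small and gets 256 MiB / 8 = 32 MiB
// blocks, while an 8 GiB heap gets the large-heap block size of 256 MiB.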
    14282 
    14283 VkResult VmaAllocator_T::AllocateMemoryOfType(
    14284  VkDeviceSize size,
    14285  VkDeviceSize alignment,
    14286  bool dedicatedAllocation,
    14287  VkBuffer dedicatedBuffer,
    14288  VkImage dedicatedImage,
    14289  const VmaAllocationCreateInfo& createInfo,
    14290  uint32_t memTypeIndex,
    14291  VmaSuballocationType suballocType,
    14292  size_t allocationCount,
    14293  VmaAllocation* pAllocations)
    14294 {
    14295  VMA_ASSERT(pAllocations != VMA_NULL);
    14296  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
    14297 
    14298  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    14299 
    14300  // If memory type is not HOST_VISIBLE, disable MAPPED.
    14301  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    14302  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    14303  {
    14304  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    14305  }
    14306 
    14307  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    14308  VMA_ASSERT(blockVector);
    14309 
    14310  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    14311  bool preferDedicatedMemory =
    14312  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    14313  dedicatedAllocation ||
    14314  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
    14315  size > preferredBlockSize / 2;
    14316 
    14317  if(preferDedicatedMemory &&
    14318  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    14319  finalCreateInfo.pool == VK_NULL_HANDLE)
    14320  {
    14321  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
    14322  }
    14323 
    14324  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    14325  {
    14326  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    14327  {
    14328  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14329  }
    14330  else
    14331  {
    14332  return AllocateDedicatedMemory(
    14333  size,
    14334  suballocType,
    14335  memTypeIndex,
    14336  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    14337  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    14338  finalCreateInfo.pUserData,
    14339  dedicatedBuffer,
    14340  dedicatedImage,
    14341  allocationCount,
    14342  pAllocations);
    14343  }
    14344  }
    14345  else
    14346  {
    14347  VkResult res = blockVector->Allocate(
    14348  m_CurrentFrameIndex.load(),
    14349  size,
    14350  alignment,
    14351  finalCreateInfo,
    14352  suballocType,
    14353  allocationCount,
    14354  pAllocations);
    14355  if(res == VK_SUCCESS)
    14356  {
    14357  return res;
    14358  }
    14359 
    14360  // Block vector allocation failed. Try dedicated memory as a fallback.
    14361  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    14362  {
    14363  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14364  }
    14365  else
    14366  {
    14367  res = AllocateDedicatedMemory(
    14368  size,
    14369  suballocType,
    14370  memTypeIndex,
    14371  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    14372  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    14373  finalCreateInfo.pUserData,
    14374  dedicatedBuffer,
    14375  dedicatedImage,
    14376  allocationCount,
    14377  pAllocations);
    14378  if(res == VK_SUCCESS)
    14379  {
    14380  // Succeeded: AllocateDedicatedMemory function already filled pAllocations, nothing more to do here.
    14381  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    14382  return VK_SUCCESS;
    14383  }
    14384  else
    14385  {
    14386  // Everything failed: Return error code.
    14387  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    14388  return res;
    14389  }
    14390  }
    14391  }
    14392 }
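// Summary of the decision flow in AllocateMemoryOfType() above: dedicated
// memory is chosen up front when forced by flags or by the size heuristic;
// otherwise the per-memory-type block vector is tried first and dedicated
// memory serves as the fallback, unless
// VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT forbids it.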
    14393 
    14394 VkResult VmaAllocator_T::AllocateDedicatedMemory(
    14395  VkDeviceSize size,
    14396  VmaSuballocationType suballocType,
    14397  uint32_t memTypeIndex,
    14398  bool map,
    14399  bool isUserDataString,
    14400  void* pUserData,
    14401  VkBuffer dedicatedBuffer,
    14402  VkImage dedicatedImage,
    14403  size_t allocationCount,
    14404  VmaAllocation* pAllocations)
    14405 {
    14406  VMA_ASSERT(allocationCount > 0 && pAllocations);
    14407 
    14408  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    14409  allocInfo.memoryTypeIndex = memTypeIndex;
    14410  allocInfo.allocationSize = size;
    14411 
    14412 #if VMA_DEDICATED_ALLOCATION
    14413  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    14414  if(m_UseKhrDedicatedAllocation)
    14415  {
    14416  if(dedicatedBuffer != VK_NULL_HANDLE)
    14417  {
    14418  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
    14419  dedicatedAllocInfo.buffer = dedicatedBuffer;
    14420  allocInfo.pNext = &dedicatedAllocInfo;
    14421  }
    14422  else if(dedicatedImage != VK_NULL_HANDLE)
    14423  {
    14424  dedicatedAllocInfo.image = dedicatedImage;
    14425  allocInfo.pNext = &dedicatedAllocInfo;
    14426  }
    14427  }
    14428 #endif // #if VMA_DEDICATED_ALLOCATION
    14429 
    14430  size_t allocIndex;
    14431  VkResult res = VK_SUCCESS;
    14432  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    14433  {
    14434  res = AllocateDedicatedMemoryPage(
    14435  size,
    14436  suballocType,
    14437  memTypeIndex,
    14438  allocInfo,
    14439  map,
    14440  isUserDataString,
    14441  pUserData,
    14442  pAllocations + allocIndex);
    14443  if(res != VK_SUCCESS)
    14444  {
    14445  break;
    14446  }
    14447  }
    14448 
    14449  if(res == VK_SUCCESS)
    14450  {
    14451  // Register them in m_pDedicatedAllocations.
    14452  {
    14453  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    14454  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    14455  VMA_ASSERT(pDedicatedAllocations);
    14456  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    14457  {
    14458  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
    14459  }
    14460  }
    14461 
    14462  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
    14463  }
    14464  else
    14465  {
    14466  // Free all already created allocations.
    14467  while(allocIndex--)
    14468  {
    14469  VmaAllocation currAlloc = pAllocations[allocIndex];
    14470  VkDeviceMemory hMemory = currAlloc->GetMemory();
    14471 
    14472  /*
    14473  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
    14474  before vkFreeMemory.
    14475 
    14476  if(currAlloc->GetMappedData() != VMA_NULL)
    14477  {
    14478  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    14479  }
    14480  */
    14481 
    14482  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
    14483 
    14484  currAlloc->SetUserData(this, VMA_NULL);
    14485  currAlloc->Dtor();
    14486  m_AllocationObjectAllocator.Free(currAlloc);
    14487  }
    14488 
    14489  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    14490  }
    14491 
    14492  return res;
    14493 }
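// Note: the error path above makes AllocateDedicatedMemory() all-or-nothing.
// If any page fails, every page allocated so far is freed and pAllocations is
// zeroed before the error code is returned.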
    14494 
    14495 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
    14496  VkDeviceSize size,
    14497  VmaSuballocationType suballocType,
    14498  uint32_t memTypeIndex,
    14499  const VkMemoryAllocateInfo& allocInfo,
    14500  bool map,
    14501  bool isUserDataString,
    14502  void* pUserData,
    14503  VmaAllocation* pAllocation)
    14504 {
    14505  VkDeviceMemory hMemory = VK_NULL_HANDLE;
    14506  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    14507  if(res < 0)
    14508  {
    14509  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    14510  return res;
    14511  }
    14512 
    14513  void* pMappedData = VMA_NULL;
    14514  if(map)
    14515  {
    14516  res = (*m_VulkanFunctions.vkMapMemory)(
    14517  m_hDevice,
    14518  hMemory,
    14519  0,
    14520  VK_WHOLE_SIZE,
    14521  0,
    14522  &pMappedData);
    14523  if(res < 0)
    14524  {
    14525  VMA_DEBUG_LOG(" vkMapMemory FAILED");
    14526  FreeVulkanMemory(memTypeIndex, size, hMemory);
    14527  return res;
    14528  }
    14529  }
    14530 
    14531  *pAllocation = m_AllocationObjectAllocator.Allocate();
    14532  (*pAllocation)->Ctor(m_CurrentFrameIndex.load(), isUserDataString);
    14533  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    14534  (*pAllocation)->SetUserData(this, pUserData);
    14535  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    14536  {
    14537  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    14538  }
    14539 
    14540  return VK_SUCCESS;
    14541 }
    14542 
    14543 void VmaAllocator_T::GetBufferMemoryRequirements(
    14544  VkBuffer hBuffer,
    14545  VkMemoryRequirements& memReq,
    14546  bool& requiresDedicatedAllocation,
    14547  bool& prefersDedicatedAllocation) const
    14548 {
    14549 #if VMA_DEDICATED_ALLOCATION
    14550  if(m_UseKhrDedicatedAllocation)
    14551  {
    14552  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
    14553  memReqInfo.buffer = hBuffer;
    14554 
    14555  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    14556 
    14557  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    14558  memReq2.pNext = &memDedicatedReq;
    14559 
    14560  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    14561 
    14562  memReq = memReq2.memoryRequirements;
    14563  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    14564  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    14565  }
    14566  else
    14567 #endif // #if VMA_DEDICATED_ALLOCATION
    14568  {
    14569  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
    14570  requiresDedicatedAllocation = false;
    14571  prefersDedicatedAllocation = false;
    14572  }
    14573 }
    14574 
    14575 void VmaAllocator_T::GetImageMemoryRequirements(
    14576  VkImage hImage,
    14577  VkMemoryRequirements& memReq,
    14578  bool& requiresDedicatedAllocation,
    14579  bool& prefersDedicatedAllocation) const
    14580 {
    14581 #if VMA_DEDICATED_ALLOCATION
    14582  if(m_UseKhrDedicatedAllocation)
    14583  {
    14584  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
    14585  memReqInfo.image = hImage;
    14586 
    14587  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    14588 
    14589  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    14590  memReq2.pNext = &memDedicatedReq;
    14591 
    14592  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    14593 
    14594  memReq = memReq2.memoryRequirements;
    14595  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    14596  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    14597  }
    14598  else
    14599 #endif // #if VMA_DEDICATED_ALLOCATION
    14600  {
    14601  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
    14602  requiresDedicatedAllocation = false;
    14603  prefersDedicatedAllocation = false;
    14604  }
    14605 }
    14606 
    14607 VkResult VmaAllocator_T::AllocateMemory(
    14608  const VkMemoryRequirements& vkMemReq,
    14609  bool requiresDedicatedAllocation,
    14610  bool prefersDedicatedAllocation,
    14611  VkBuffer dedicatedBuffer,
    14612  VkImage dedicatedImage,
    14613  const VmaAllocationCreateInfo& createInfo,
    14614  VmaSuballocationType suballocType,
    14615  size_t allocationCount,
    14616  VmaAllocation* pAllocations)
    14617 {
    14618  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    14619 
    14620  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
    14621 
    14622  if(vkMemReq.size == 0)
    14623  {
    14624  return VK_ERROR_VALIDATION_FAILED_EXT;
    14625  }
    14626  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
    14627  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    14628  {
    14629  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    14630  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14631  }
    14632  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    14633  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
    14634  {
    14635  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
    14636  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14637  }
    14638  if(requiresDedicatedAllocation)
    14639  {
    14640  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    14641  {
    14642  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
    14643  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14644  }
    14645  if(createInfo.pool != VK_NULL_HANDLE)
    14646  {
    14647  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
    14648  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14649  }
    14650  }
    14651  if((createInfo.pool != VK_NULL_HANDLE) &&
    14652  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    14653  {
    14654  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
    14655  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14656  }
    14657 
    14658  if(createInfo.pool != VK_NULL_HANDLE)
    14659  {
    14660  const VkDeviceSize alignmentForPool = VMA_MAX(
    14661  vkMemReq.alignment,
    14662  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
    14663 
    14664  VmaAllocationCreateInfo createInfoForPool = createInfo;
    14665  // If memory type is not HOST_VISIBLE, disable MAPPED.
    14666  if((createInfoForPool.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    14667  (m_MemProps.memoryTypes[createInfo.pool->m_BlockVector.GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    14668  {
    14669  createInfoForPool.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    14670  }
    14671 
    14672  return createInfo.pool->m_BlockVector.Allocate(
    14673  m_CurrentFrameIndex.load(),
    14674  vkMemReq.size,
    14675  alignmentForPool,
    14676  createInfoForPool,
    14677  suballocType,
    14678  allocationCount,
    14679  pAllocations);
    14680  }
    14681  else
    14682  {
    14683  // Bit mask of Vulkan memory types acceptable for this allocation.
    14684  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    14685  uint32_t memTypeIndex = UINT32_MAX;
    14686  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    14687  if(res == VK_SUCCESS)
    14688  {
    14689  VkDeviceSize alignmentForMemType = VMA_MAX(
    14690  vkMemReq.alignment,
    14691  GetMemoryTypeMinAlignment(memTypeIndex));
    14692 
    14693  res = AllocateMemoryOfType(
    14694  vkMemReq.size,
    14695  alignmentForMemType,
    14696  requiresDedicatedAllocation || prefersDedicatedAllocation,
    14697  dedicatedBuffer,
    14698  dedicatedImage,
    14699  createInfo,
    14700  memTypeIndex,
    14701  suballocType,
    14702  allocationCount,
    14703  pAllocations);
    14704  // Succeeded on first try.
    14705  if(res == VK_SUCCESS)
    14706  {
    14707  return res;
    14708  }
    14709  // Allocation from this memory type failed. Try other compatible memory types.
    14710  else
    14711  {
    14712  for(;;)
    14713  {
    14714  // Remove old memTypeIndex from list of possibilities.
    14715  memoryTypeBits &= ~(1u << memTypeIndex);
    14716  // Find alternative memTypeIndex.
    14717  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    14718  if(res == VK_SUCCESS)
    14719  {
    14720  alignmentForMemType = VMA_MAX(
    14721  vkMemReq.alignment,
    14722  GetMemoryTypeMinAlignment(memTypeIndex));
    14723 
    14724  res = AllocateMemoryOfType(
    14725  vkMemReq.size,
    14726  alignmentForMemType,
    14727  requiresDedicatedAllocation || prefersDedicatedAllocation,
    14728  dedicatedBuffer,
    14729  dedicatedImage,
    14730  createInfo,
    14731  memTypeIndex,
    14732  suballocType,
    14733  allocationCount,
    14734  pAllocations);
    14735  // Allocation from this alternative memory type succeeded.
    14736  if(res == VK_SUCCESS)
    14737  {
    14738  return res;
    14739  }
    14740  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    14741  }
    14742  // No other matching memory type index could be found.
    14743  else
    14744  {
    14745  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    14746  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14747  }
    14748  }
    14749  }
    14750  }
    14751  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    14752  else
    14753  return res;
    14754  }
    14755 }
    14756 
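/*
Note on the fallback loop in AllocateMemory above: when allocation from the best
matching memory type fails, that type is removed from the candidate mask and the
search repeats. A minimal sketch of the masking arithmetic only (illustrative values):

    uint32_t memoryTypeBits = vkMemReq.memoryTypeBits; // e.g. 0b1011
    uint32_t memTypeIndex = 1;                         // type that just failed
    memoryTypeBits &= ~(1u << memTypeIndex);           // 0b1001 - type 1 excluded
    // vmaFindMemoryTypeIndex() is then called again with the reduced mask,
    // until it returns VK_ERROR_FEATURE_NOT_PRESENT.
*/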
    14757 void VmaAllocator_T::FreeMemory(
    14758  size_t allocationCount,
    14759  const VmaAllocation* pAllocations)
    14760 {
    14761  VMA_ASSERT(pAllocations);
    14762 
    14763  for(size_t allocIndex = allocationCount; allocIndex--; )
    14764  {
    14765  VmaAllocation allocation = pAllocations[allocIndex];
    14766 
    14767  if(allocation != VK_NULL_HANDLE)
    14768  {
    14769  if(TouchAllocation(allocation))
    14770  {
    14771  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    14772  {
    14773  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
    14774  }
    14775 
    14776  switch(allocation->GetType())
    14777  {
    14778  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    14779  {
    14780  VmaBlockVector* pBlockVector = VMA_NULL;
    14781  VmaPool hPool = allocation->GetBlock()->GetParentPool();
    14782  if(hPool != VK_NULL_HANDLE)
    14783  {
    14784  pBlockVector = &hPool->m_BlockVector;
    14785  }
    14786  else
    14787  {
    14788  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    14789  pBlockVector = m_pBlockVectors[memTypeIndex];
    14790  }
    14791  pBlockVector->Free(allocation);
    14792  }
    14793  break;
    14794  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    14795  FreeDedicatedMemory(allocation);
    14796  break;
    14797  default:
    14798  VMA_ASSERT(0);
    14799  }
    14800  }
    14801 
    14802  allocation->SetUserData(this, VMA_NULL);
    14803  allocation->Dtor();
    14804  m_AllocationObjectAllocator.Free(allocation);
    14805  }
    14806  }
    14807 }
    14808 
    14809 VkResult VmaAllocator_T::ResizeAllocation(
    14810  const VmaAllocation alloc,
    14811  VkDeviceSize newSize)
    14812 {
    14813  // This function is deprecated and intentionally does nothing. It is kept only for backward compatibility.
    14814  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
    14815  {
    14816  return VK_ERROR_VALIDATION_FAILED_EXT;
    14817  }
    14818  if(newSize == alloc->GetSize())
    14819  {
    14820  return VK_SUCCESS;
    14821  }
    14822  return VK_ERROR_OUT_OF_POOL_MEMORY;
    14823 }
    14824 
    14825 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
    14826 {
    14827  // Initialize.
    14828  InitStatInfo(pStats->total);
    14829  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
    14830  InitStatInfo(pStats->memoryType[i]);
    14831  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    14832  InitStatInfo(pStats->memoryHeap[i]);
    14833 
    14834  // Process default pools.
    14835  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    14836  {
    14837  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    14838  VMA_ASSERT(pBlockVector);
    14839  pBlockVector->AddStats(pStats);
    14840  }
    14841 
    14842  // Process custom pools.
    14843  {
    14844  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
    14845  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    14846  {
    14847  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
    14848  }
    14849  }
    14850 
    14851  // Process dedicated allocations.
    14852  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    14853  {
    14854  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    14855  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    14856  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    14857  VMA_ASSERT(pDedicatedAllocVector);
    14858  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
    14859  {
    14860  VmaStatInfo allocationStatInfo;
    14861  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
    14862  VmaAddStatInfo(pStats->total, allocationStatInfo);
    14863  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    14864  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    14865  }
    14866  }
    14867 
    14868  // Postprocess.
    14869  VmaPostprocessCalcStatInfo(pStats->total);
    14870  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    14871  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    14872  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
    14873  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
    14874 }
    14875 
    14876 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    14877 
    14878 VkResult VmaAllocator_T::DefragmentationBegin(
    14879  const VmaDefragmentationInfo2& info,
    14880  VmaDefragmentationStats* pStats,
    14881  VmaDefragmentationContext* pContext)
    14882 {
    14883  if(info.pAllocationsChanged != VMA_NULL)
    14884  {
    14885  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
    14886  }
    14887 
    14888  *pContext = vma_new(this, VmaDefragmentationContext_T)(
    14889  this, m_CurrentFrameIndex.load(), info.flags, pStats);
    14890 
    14891  (*pContext)->AddPools(info.poolCount, info.pPools);
    14892  (*pContext)->AddAllocations(
    14893  info.allocationCount, info.pAllocations, info.pAllocationsChanged);
    14894 
    14895  VkResult res = (*pContext)->Defragment(
    14896  info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
    14897  info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
    14898  info.commandBuffer, pStats);
    14899 
    14900  if(res != VK_NOT_READY)
    14901  {
    14902  vma_delete(this, *pContext);
    14903  *pContext = VMA_NULL;
    14904  }
    14905 
    14906  return res;
    14907 }
    14908 
    14909 VkResult VmaAllocator_T::DefragmentationEnd(
    14910  VmaDefragmentationContext context)
    14911 {
    14912  vma_delete(this, context);
    14913  return VK_SUCCESS;
    14914 }
    14915 
    14916 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
    14917 {
    14918  if(hAllocation->CanBecomeLost())
    14919  {
    14920  /*
    14921  Warning: This is a carefully designed algorithm.
    14922  Do not modify unless you really know what you're doing :)
    14923  */
    14924  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    14925  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    14926  for(;;)
    14927  {
    14928  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    14929  {
    14930  pAllocationInfo->memoryType = UINT32_MAX;
    14931  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
    14932  pAllocationInfo->offset = 0;
    14933  pAllocationInfo->size = hAllocation->GetSize();
    14934  pAllocationInfo->pMappedData = VMA_NULL;
    14935  pAllocationInfo->pUserData = hAllocation->GetUserData();
    14936  return;
    14937  }
    14938  else if(localLastUseFrameIndex == localCurrFrameIndex)
    14939  {
    14940  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    14941  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    14942  pAllocationInfo->offset = hAllocation->GetOffset();
    14943  pAllocationInfo->size = hAllocation->GetSize();
    14944  pAllocationInfo->pMappedData = VMA_NULL;
    14945  pAllocationInfo->pUserData = hAllocation->GetUserData();
    14946  return;
    14947  }
    14948  else // Last use time earlier than current time.
    14949  {
    14950  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    14951  {
    14952  localLastUseFrameIndex = localCurrFrameIndex;
    14953  }
    14954  }
    14955  }
    14956  }
    14957  else
    14958  {
    14959 #if VMA_STATS_STRING_ENABLED
    14960  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    14961  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    14962  for(;;)
    14963  {
    14964  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
    14965  if(localLastUseFrameIndex == localCurrFrameIndex)
    14966  {
    14967  break;
    14968  }
    14969  else // Last use time earlier than current time.
    14970  {
    14971  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    14972  {
    14973  localLastUseFrameIndex = localCurrFrameIndex;
    14974  }
    14975  }
    14976  }
    14977 #endif
    14978 
    14979  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    14980  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    14981  pAllocationInfo->offset = hAllocation->GetOffset();
    14982  pAllocationInfo->size = hAllocation->GetSize();
    14983  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
    14984  pAllocationInfo->pUserData = hAllocation->GetUserData();
    14985  }
    14986 }
    14987 
    14988 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
    14989 {
    14990  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    14991  if(hAllocation->CanBecomeLost())
    14992  {
    14993  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    14994  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    14995  for(;;)
    14996  {
    14997  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    14998  {
    14999  return false;
    15000  }
    15001  else if(localLastUseFrameIndex == localCurrFrameIndex)
    15002  {
    15003  return true;
    15004  }
    15005  else // Last use time earlier than current time.
    15006  {
    15007  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    15008  {
    15009  localLastUseFrameIndex = localCurrFrameIndex;
    15010  }
    15011  }
    15012  }
    15013  }
    15014  else
    15015  {
    15016 #if VMA_STATS_STRING_ENABLED
    15017  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    15018  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    15019  for(;;)
    15020  {
    15021  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
    15022  if(localLastUseFrameIndex == localCurrFrameIndex)
    15023  {
    15024  break;
    15025  }
    15026  else // Last use time earlier than current time.
    15027  {
    15028  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    15029  {
    15030  localLastUseFrameIndex = localCurrFrameIndex;
    15031  }
    15032  }
    15033  }
    15034 #endif
    15035 
    15036  return true;
    15037  }
    15038 }
    15039 
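/*
Note on GetAllocationInfo/TouchAllocation above: the for(;;) loops are a classic
compare-and-swap retry, bumping the allocation's last-use frame index exactly once
per frame even under contention. A minimal sketch of the same idiom on std::atomic
(hypothetical variable names, not the library's own members):

    std::atomic<uint32_t> lastUse;
    uint32_t expected = lastUse.load();
    while(expected != currFrame &&
        !lastUse.compare_exchange_weak(expected, currFrame))
    {
        // compare_exchange_weak reloaded `expected`; retry.
    }
*/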
    15040 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    15041 {
    15042  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
    15043 
    15044  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    15045 
    15046  if(newCreateInfo.maxBlockCount == 0)
    15047  {
    15048  newCreateInfo.maxBlockCount = SIZE_MAX;
    15049  }
    15050  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
    15051  {
    15052  return VK_ERROR_INITIALIZATION_FAILED;
    15053  }
    15054 
    15055  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    15056 
    15057  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
    15058 
    15059  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    15060  if(res != VK_SUCCESS)
    15061  {
    15062  vma_delete(this, *pPool);
    15063  *pPool = VMA_NULL;
    15064  return res;
    15065  }
    15066 
    15067  // Add to m_Pools.
    15068  {
    15069  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
    15070  (*pPool)->SetId(m_NextPoolId++);
    15071  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    15072  }
    15073 
    15074  return VK_SUCCESS;
    15075 }
    15076 
    15077 void VmaAllocator_T::DestroyPool(VmaPool pool)
    15078 {
    15079  // Remove from m_Pools.
    15080  {
    15081  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
    15082  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    15083  VMA_ASSERT(success && "Pool not found in Allocator.");
    15084  }
    15085 
    15086  vma_delete(this, pool);
    15087 }
    15088 
    15089 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
    15090 {
    15091  pool->m_BlockVector.GetPoolStats(pPoolStats);
    15092 }
    15093 
    15094 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
    15095 {
    15096  m_CurrentFrameIndex.store(frameIndex);
    15097 }
    15098 
    15099 void VmaAllocator_T::MakePoolAllocationsLost(
    15100  VmaPool hPool,
    15101  size_t* pLostAllocationCount)
    15102 {
    15103  hPool->m_BlockVector.MakePoolAllocationsLost(
    15104  m_CurrentFrameIndex.load(),
    15105  pLostAllocationCount);
    15106 }
    15107 
    15108 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
    15109 {
    15110  return hPool->m_BlockVector.CheckCorruption();
    15111 }
    15112 
    15113 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
    15114 {
    15115  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
    15116 
    15117  // Process default pools.
    15118  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    15119  {
    15120  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
    15121  {
    15122  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    15123  VMA_ASSERT(pBlockVector);
    15124  VkResult localRes = pBlockVector->CheckCorruption();
    15125  switch(localRes)
    15126  {
    15127  case VK_ERROR_FEATURE_NOT_PRESENT:
    15128  break;
    15129  case VK_SUCCESS:
    15130  finalRes = VK_SUCCESS;
    15131  break;
    15132  default:
    15133  return localRes;
    15134  }
    15135  }
    15136  }
    15137 
    15138  // Process custom pools.
    15139  {
    15140  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
    15141  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    15142  {
    15143  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
    15144  {
    15145  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
    15146  switch(localRes)
    15147  {
    15148  case VK_ERROR_FEATURE_NOT_PRESENT:
    15149  break;
    15150  case VK_SUCCESS:
    15151  finalRes = VK_SUCCESS;
    15152  break;
    15153  default:
    15154  return localRes;
    15155  }
    15156  }
    15157  }
    15158  }
    15159 
    15160  return finalRes;
    15161 }
    15162 
    15163 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
    15164 {
    15165  *pAllocation = m_AllocationObjectAllocator.Allocate();
    15166  (*pAllocation)->Ctor(VMA_FRAME_INDEX_LOST, false);
    15167  (*pAllocation)->InitLost();
    15168 }
    15169 
    15170 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
    15171 {
    15172  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
    15173 
    15174  VkResult res;
    15175  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    15176  {
    15177  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    15178  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
    15179  {
    15180  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    15181  if(res == VK_SUCCESS)
    15182  {
    15183  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
    15184  }
    15185  }
    15186  else
    15187  {
    15188  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
    15189  }
    15190  }
    15191  else
    15192  {
    15193  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    15194  }
    15195 
    15196  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    15197  {
    15198  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    15199  }
    15200 
    15201  return res;
    15202 }
    15203 
    15204 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
    15205 {
    15206  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    15207  {
    15208  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    15209  }
    15210 
    15211  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
    15212 
    15213  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    15214  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    15215  {
    15216  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    15217  m_HeapSizeLimit[heapIndex] += size;
    15218  }
    15219 }
    15220 
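/*
Note on AllocateVulkanMemory/FreeVulkanMemory above: when the user configured
pHeapSizeLimit for a heap, m_HeapSizeLimit[heapIndex] tracks the remaining budget
under m_HeapSizeLimitMutex, so the two functions must stay symmetric. Conceptually:

    // allocate: if(limit >= size) limit -= size; else return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    // free:     limit += size;
*/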
    15221 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
    15222 {
    15223  if(hAllocation->CanBecomeLost())
    15224  {
    15225  return VK_ERROR_MEMORY_MAP_FAILED;
    15226  }
    15227 
    15228  switch(hAllocation->GetType())
    15229  {
    15230  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15231  {
    15232  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    15233  char *pBytes = VMA_NULL;
    15234  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
    15235  if(res == VK_SUCCESS)
    15236  {
    15237  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
    15238  hAllocation->BlockAllocMap();
    15239  }
    15240  return res;
    15241  }
    15242  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15243  return hAllocation->DedicatedAllocMap(this, ppData);
    15244  default:
    15245  VMA_ASSERT(0);
    15246  return VK_ERROR_MEMORY_MAP_FAILED;
    15247  }
    15248 }
    15249 
    15250 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
    15251 {
    15252  switch(hAllocation->GetType())
    15253  {
    15254  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15255  {
    15256  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    15257  hAllocation->BlockAllocUnmap();
    15258  pBlock->Unmap(this, 1);
    15259  }
    15260  break;
    15261  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15262  hAllocation->DedicatedAllocUnmap(this);
    15263  break;
    15264  default:
    15265  VMA_ASSERT(0);
    15266  }
    15267 }
    15268 
    15269 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
    15270 {
    15271  VkResult res = VK_SUCCESS;
    15272  switch(hAllocation->GetType())
    15273  {
    15274  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15275  res = GetVulkanFunctions().vkBindBufferMemory(
    15276  m_hDevice,
    15277  hBuffer,
    15278  hAllocation->GetMemory(),
    15279  0); //memoryOffset
    15280  break;
    15281  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15282  {
    15283  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    15284  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
    15285  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
    15286  break;
    15287  }
    15288  default:
    15289  VMA_ASSERT(0);
    15290  }
    15291  return res;
    15292 }
    15293 
    15294 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
    15295 {
    15296  VkResult res = VK_SUCCESS;
    15297  switch(hAllocation->GetType())
    15298  {
    15299  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15300  res = GetVulkanFunctions().vkBindImageMemory(
    15301  m_hDevice,
    15302  hImage,
    15303  hAllocation->GetMemory(),
    15304  0); //memoryOffset
    15305  break;
    15306  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15307  {
    15308  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    15309  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
    15310  res = pBlock->BindImageMemory(this, hAllocation, hImage);
    15311  break;
    15312  }
    15313  default:
    15314  VMA_ASSERT(0);
    15315  }
    15316  return res;
    15317 }
    15318 
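/*
Note on BindBufferMemory/BindImageMemory above: a dedicated allocation owns its
whole VkDeviceMemory, so it binds at offset 0, while a block suballocation must
bind through its VmaDeviceMemoryBlock, which applies the allocation's offset and
serializes against other operations on the same block. Sketch of the raw-Vulkan
equivalent for the block case (hypothetical handles):

    vkBindBufferMemory(device, buffer, blockMemory, allocationOffset);
*/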
    15319 void VmaAllocator_T::FlushOrInvalidateAllocation(
    15320  VmaAllocation hAllocation,
    15321  VkDeviceSize offset, VkDeviceSize size,
    15322  VMA_CACHE_OPERATION op)
    15323 {
    15324  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    15325  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    15326  {
    15327  const VkDeviceSize allocationSize = hAllocation->GetSize();
    15328  VMA_ASSERT(offset <= allocationSize);
    15329 
    15330  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
    15331 
    15332  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
    15333  memRange.memory = hAllocation->GetMemory();
    15334 
    15335  switch(hAllocation->GetType())
    15336  {
    15337  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15338  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
    15339  if(size == VK_WHOLE_SIZE)
    15340  {
    15341  memRange.size = allocationSize - memRange.offset;
    15342  }
    15343  else
    15344  {
    15345  VMA_ASSERT(offset + size <= allocationSize);
    15346  memRange.size = VMA_MIN(
    15347  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
    15348  allocationSize - memRange.offset);
    15349  }
    15350  break;
    15351 
    15352  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15353  {
    15354  // 1. Still within this allocation.
    15355  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
    15356  if(size == VK_WHOLE_SIZE)
    15357  {
    15358  size = allocationSize - offset;
    15359  }
    15360  else
    15361  {
    15362  VMA_ASSERT(offset + size <= allocationSize);
    15363  }
    15364  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
    15365 
    15366  // 2. Adjust to whole block.
    15367  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
    15368  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
    15369  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
    15370  memRange.offset += allocationOffset;
    15371  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
    15372 
    15373  break;
    15374  }
    15375 
    15376  default:
    15377  VMA_ASSERT(0);
    15378  }
    15379 
    15380  switch(op)
    15381  {
    15382  case VMA_CACHE_FLUSH:
    15383  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
    15384  break;
    15385  case VMA_CACHE_INVALIDATE:
    15386  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
    15387  break;
    15388  default:
    15389  VMA_ASSERT(0);
    15390  }
    15391  }
    15392  // else: Just ignore this call.
    15393 }
    15394 
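/*
Worked example for FlushOrInvalidateAllocation above, assuming
nonCoherentAtomSize = 64: a request with offset = 100, size = 40 becomes
memRange.offset = VmaAlignDown(100, 64) = 64 and
memRange.size = VmaAlignUp(40 + (100 - 64), 64) = VmaAlignUp(76, 64) = 128.
For a block allocation the range is then shifted by the allocation's offset and
clamped to the block size, satisfying the alignment rules of
vkFlushMappedMemoryRanges/vkInvalidateMappedMemoryRanges.
*/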
    15395 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
    15396 {
    15397  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
    15398 
    15399  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    15400  {
    15401  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    15402  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    15403  VMA_ASSERT(pDedicatedAllocations);
    15404  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
    15405  VMA_ASSERT(success);
    15406  }
    15407 
    15408  VkDeviceMemory hMemory = allocation->GetMemory();
    15409 
    15410  /*
    15411  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
    15412  before vkFreeMemory.
    15413 
    15414  if(allocation->GetMappedData() != VMA_NULL)
    15415  {
    15416  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    15417  }
    15418  */
    15419 
    15420  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
    15421 
    15422  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
    15423 }
    15424 
    15425 uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
    15426 {
    15427  VkBufferCreateInfo dummyBufCreateInfo;
    15428  VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
    15429 
    15430  uint32_t memoryTypeBits = 0;
    15431 
    15432  // Create buffer.
    15433  VkBuffer buf = VK_NULL_HANDLE;
    15434  VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
    15435  m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
    15436  if(res == VK_SUCCESS)
    15437  {
    15438  // Query for supported memory types.
    15439  VkMemoryRequirements memReq;
    15440  (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
    15441  memoryTypeBits = memReq.memoryTypeBits;
    15442 
    15443  // Destroy buffer.
    15444  (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
    15445  }
    15446 
    15447  return memoryTypeBits;
    15448 }
    15449 
    15450 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
    15451 {
    15452  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
    15453  !hAllocation->CanBecomeLost() &&
    15454  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    15455  {
    15456  void* pData = VMA_NULL;
    15457  VkResult res = Map(hAllocation, &pData);
    15458  if(res == VK_SUCCESS)
    15459  {
    15460  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
    15461  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
    15462  Unmap(hAllocation);
    15463  }
    15464  else
    15465  {
    15466  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
    15467  }
    15468  }
    15469 }
    15470 
    15471 uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
    15472 {
    15473  uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
    15474  if(memoryTypeBits == UINT32_MAX)
    15475  {
    15476  memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
    15477  m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
    15478  }
    15479  return memoryTypeBits;
    15480 }
    15481 
    15482 #if VMA_STATS_STRING_ENABLED
    15483 
    15484 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
    15485 {
    15486  bool dedicatedAllocationsStarted = false;
    15487  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    15488  {
    15489  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    15490  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    15491  VMA_ASSERT(pDedicatedAllocVector);
    15492  if(pDedicatedAllocVector->empty() == false)
    15493  {
    15494  if(dedicatedAllocationsStarted == false)
    15495  {
    15496  dedicatedAllocationsStarted = true;
    15497  json.WriteString("DedicatedAllocations");
    15498  json.BeginObject();
    15499  }
    15500 
    15501  json.BeginString("Type ");
    15502  json.ContinueString(memTypeIndex);
    15503  json.EndString();
    15504 
    15505  json.BeginArray();
    15506 
    15507  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
    15508  {
    15509  json.BeginObject(true);
    15510  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
    15511  hAlloc->PrintParameters(json);
    15512  json.EndObject();
    15513  }
    15514 
    15515  json.EndArray();
    15516  }
    15517  }
    15518  if(dedicatedAllocationsStarted)
    15519  {
    15520  json.EndObject();
    15521  }
    15522 
    15523  {
    15524  bool allocationsStarted = false;
    15525  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    15526  {
    15527  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
    15528  {
    15529  if(allocationsStarted == false)
    15530  {
    15531  allocationsStarted = true;
    15532  json.WriteString("DefaultPools");
    15533  json.BeginObject();
    15534  }
    15535 
    15536  json.BeginString("Type ");
    15537  json.ContinueString(memTypeIndex);
    15538  json.EndString();
    15539 
    15540  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
    15541  }
    15542  }
    15543  if(allocationsStarted)
    15544  {
    15545  json.EndObject();
    15546  }
    15547  }
    15548 
    15549  // Custom pools
    15550  {
    15551  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
    15552  const size_t poolCount = m_Pools.size();
    15553  if(poolCount > 0)
    15554  {
    15555  json.WriteString("Pools");
    15556  json.BeginObject();
    15557  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
    15558  {
    15559  json.BeginString();
    15560  json.ContinueString(m_Pools[poolIndex]->GetId());
    15561  json.EndString();
    15562 
    15563  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
    15564  }
    15565  json.EndObject();
    15566  }
    15567  }
    15568 }
    15569 
    15570 #endif // #if VMA_STATS_STRING_ENABLED
    15571 
    15572 ////////////////////////////////////////////////////////////////////////////////
    15573 // Public interface
    15574 
    15575 VkResult vmaCreateAllocator(
    15576  const VmaAllocatorCreateInfo* pCreateInfo,
    15577  VmaAllocator* pAllocator)
    15578 {
    15579  VMA_ASSERT(pCreateInfo && pAllocator);
    15580  VMA_DEBUG_LOG("vmaCreateAllocator");
    15581  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    15582  return (*pAllocator)->Init(pCreateInfo);
    15583 }
    15584 
    15585 void vmaDestroyAllocator(
    15586  VmaAllocator allocator)
    15587 {
    15588  if(allocator != VK_NULL_HANDLE)
    15589  {
    15590  VMA_DEBUG_LOG("vmaDestroyAllocator");
    15591  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
    15592  vma_delete(&allocationCallbacks, allocator);
    15593  }
    15594 }
    15595 
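/*
Minimal usage sketch for the create/destroy pair above (hypothetical
physicalDevice/device handles obtained from regular Vulkan initialization):

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... use the allocator ...
    vmaDestroyAllocator(allocator);
*/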
    15596 void vmaGetPhysicalDeviceProperties(
    15597  VmaAllocator allocator,
    15598  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    15599 {
    15600  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    15601  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    15602 }
    15603 
    15604 void vmaGetMemoryProperties(
    15605  VmaAllocator allocator,
    15606  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    15607 {
    15608  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    15609  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    15610 }
    15611 
    15612 void vmaGetMemoryTypeProperties(
    15613  VmaAllocator allocator,
    15614  uint32_t memoryTypeIndex,
    15615  VkMemoryPropertyFlags* pFlags)
    15616 {
    15617  VMA_ASSERT(allocator && pFlags);
    15618  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    15619  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    15620 }
    15621 
    15622 void vmaSetCurrentFrameIndex(
    15623  VmaAllocator allocator,
    15624  uint32_t frameIndex)
    15625 {
    15626  VMA_ASSERT(allocator);
    15627  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    15628 
    15629  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15630 
    15631  allocator->SetCurrentFrameIndex(frameIndex);
    15632 }
    15633 
    15634 void vmaCalculateStats(
    15635  VmaAllocator allocator,
    15636  VmaStats* pStats)
    15637 {
    15638  VMA_ASSERT(allocator && pStats);
    15639  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15640  allocator->CalculateStats(pStats);
    15641 }
    15642 
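/*
Usage sketch for vmaCalculateStats above:

    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    // stats.total, stats.memoryType[i] and stats.memoryHeap[h] are now filled,
    // e.g. stats.total.usedBytes is the total size of all live allocations.
*/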
    15643 #if VMA_STATS_STRING_ENABLED
    15644 
    15645 void vmaBuildStatsString(
    15646  VmaAllocator allocator,
    15647  char** ppStatsString,
    15648  VkBool32 detailedMap)
    15649 {
    15650  VMA_ASSERT(allocator && ppStatsString);
    15651  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15652 
    15653  VmaStringBuilder sb(allocator);
    15654  {
    15655  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
    15656  json.BeginObject();
    15657 
    15658  VmaStats stats;
    15659  allocator->CalculateStats(&stats);
    15660 
    15661  json.WriteString("Total");
    15662  VmaPrintStatInfo(json, stats.total);
    15663 
    15664  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
    15665  {
    15666  json.BeginString("Heap ");
    15667  json.ContinueString(heapIndex);
    15668  json.EndString();
    15669  json.BeginObject();
    15670 
    15671  json.WriteString("Size");
    15672  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
    15673 
    15674  json.WriteString("Flags");
    15675  json.BeginArray(true);
    15676  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
    15677  {
    15678  json.WriteString("DEVICE_LOCAL");
    15679  }
    15680  json.EndArray();
    15681 
    15682  if(stats.memoryHeap[heapIndex].blockCount > 0)
    15683  {
    15684  json.WriteString("Stats");
    15685  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
    15686  }
    15687 
    15688  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
    15689  {
    15690  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
    15691  {
    15692  json.BeginString("Type ");
    15693  json.ContinueString(typeIndex);
    15694  json.EndString();
    15695 
    15696  json.BeginObject();
    15697 
    15698  json.WriteString("Flags");
    15699  json.BeginArray(true);
    15700  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
    15701  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
    15702  {
    15703  json.WriteString("DEVICE_LOCAL");
    15704  }
    15705  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    15706  {
    15707  json.WriteString("HOST_VISIBLE");
    15708  }
    15709  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
    15710  {
    15711  json.WriteString("HOST_COHERENT");
    15712  }
    15713  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
    15714  {
    15715  json.WriteString("HOST_CACHED");
    15716  }
    15717  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
    15718  {
    15719  json.WriteString("LAZILY_ALLOCATED");
    15720  }
    15721  json.EndArray();
    15722 
    15723  if(stats.memoryType[typeIndex].blockCount > 0)
    15724  {
    15725  json.WriteString("Stats");
    15726  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
    15727  }
    15728 
    15729  json.EndObject();
    15730  }
    15731  }
    15732 
    15733  json.EndObject();
    15734  }
    15735  if(detailedMap == VK_TRUE)
    15736  {
    15737  allocator->PrintDetailedMap(json);
    15738  }
    15739 
    15740  json.EndObject();
    15741  }
    15742 
    15743  const size_t len = sb.GetLength();
    15744  char* const pChars = vma_new_array(allocator, char, len + 1);
    15745  if(len > 0)
    15746  {
    15747  memcpy(pChars, sb.GetData(), len);
    15748  }
    15749  pChars[len] = '\0';
    15750  *ppStatsString = pChars;
    15751 }
    15752 
    15753 void vmaFreeStatsString(
    15754  VmaAllocator allocator,
    15755  char* pStatsString)
    15756 {
    15757  if(pStatsString != VMA_NULL)
    15758  {
    15759  VMA_ASSERT(allocator);
    15760  size_t len = strlen(pStatsString);
    15761  vma_delete_array(allocator, pStatsString, len + 1);
    15762  }
    15763 }
    15764 
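/*
Usage sketch for the stats-string pair above. The returned string is JSON and
must be released with vmaFreeStatsString:

    char* statsJson = VMA_NULL;
    vmaBuildStatsString(allocator, &statsJson, VK_TRUE); // VK_TRUE = include detailed map
    printf("%s\n", statsJson);
    vmaFreeStatsString(allocator, statsJson);
*/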
    15765 #endif // #if VMA_STATS_STRING_ENABLED
    15766 
    15767 /*
    15768 This function is not protected by any mutex because it just reads immutable data.
    15769 */
    15770 VkResult vmaFindMemoryTypeIndex(
    15771  VmaAllocator allocator,
    15772  uint32_t memoryTypeBits,
    15773  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    15774  uint32_t* pMemoryTypeIndex)
    15775 {
    15776  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    15777  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    15778  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    15779 
    15780  if(pAllocationCreateInfo->memoryTypeBits != 0)
    15781  {
    15782  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
    15783  }
    15784 
    15785  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    15786  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    15787 
    15788  // Convert usage to requiredFlags and preferredFlags.
    15789  switch(pAllocationCreateInfo->usage)
    15790  {
    15791  case VMA_MEMORY_USAGE_UNKNOWN:
    15792  break;
    15793  case VMA_MEMORY_USAGE_GPU_ONLY:
    15794  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    15795  {
    15796  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    15797  }
    15798  break;
    15799  case VMA_MEMORY_USAGE_CPU_ONLY:
    15800  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    15801  break;
    15802  case VMA_MEMORY_USAGE_CPU_TO_GPU:
    15803  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    15804  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    15805  {
    15806  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    15807  }
    15808  break;
    15809  case VMA_MEMORY_USAGE_GPU_TO_CPU:
    15810  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    15811  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    15812  break;
    15813  default:
    15814  break;
    15815  }
    15816 
    15817  *pMemoryTypeIndex = UINT32_MAX;
    15818  uint32_t minCost = UINT32_MAX;
    15819  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    15820  memTypeIndex < allocator->GetMemoryTypeCount();
    15821  ++memTypeIndex, memTypeBit <<= 1)
    15822  {
    15823  // This memory type is acceptable according to memoryTypeBits bitmask.
    15824  if((memTypeBit & memoryTypeBits) != 0)
    15825  {
    15826  const VkMemoryPropertyFlags currFlags =
    15827  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    15828  // This memory type contains requiredFlags.
    15829  if((requiredFlags & ~currFlags) == 0)
    15830  {
    15831  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    15832  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
    15833  // Remember memory type with lowest cost.
    15834  if(currCost < minCost)
    15835  {
    15836  *pMemoryTypeIndex = memTypeIndex;
    15837  if(currCost == 0)
    15838  {
    15839  return VK_SUCCESS;
    15840  }
    15841  minCost = currCost;
    15842  }
    15843  }
    15844  }
    15845  }
    15846  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    15847 }
    15848 
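/*
Usage sketch for vmaFindMemoryTypeIndex above. memoryTypeBits would normally come
from vkGetBufferMemoryRequirements or vkGetImageMemoryRequirements (hypothetical
`memReq` variable):

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(
        allocator, memReq.memoryTypeBits, &allocCreateInfo, &memTypeIndex);
    // VK_ERROR_FEATURE_NOT_PRESENT means no memory type satisfied requiredFlags.
*/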
    15849 VkResult vmaFindMemoryTypeIndexForBufferInfo(
    15850  VmaAllocator allocator,
    15851  const VkBufferCreateInfo* pBufferCreateInfo,
    15852  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    15853  uint32_t* pMemoryTypeIndex)
    15854 {
    15855  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    15856  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    15857  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    15858  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    15859 
    15860  const VkDevice hDev = allocator->m_hDevice;
    15861  VkBuffer hBuffer = VK_NULL_HANDLE;
    15862  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
    15863  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
    15864  if(res == VK_SUCCESS)
    15865  {
    15866  VkMemoryRequirements memReq = {};
    15867  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
    15868  hDev, hBuffer, &memReq);
    15869 
    15870  res = vmaFindMemoryTypeIndex(
    15871  allocator,
    15872  memReq.memoryTypeBits,
    15873  pAllocationCreateInfo,
    15874  pMemoryTypeIndex);
    15875 
    15876  allocator->GetVulkanFunctions().vkDestroyBuffer(
    15877  hDev, hBuffer, allocator->GetAllocationCallbacks());
    15878  }
    15879  return res;
    15880 }
    15881 
    15882 VkResult vmaFindMemoryTypeIndexForImageInfo(
    15883  VmaAllocator allocator,
    15884  const VkImageCreateInfo* pImageCreateInfo,
    15885  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    15886  uint32_t* pMemoryTypeIndex)
    15887 {
    15888  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    15889  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    15890  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    15891  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    15892 
    15893  const VkDevice hDev = allocator->m_hDevice;
    15894  VkImage hImage = VK_NULL_HANDLE;
    15895  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
    15896  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
    15897  if(res == VK_SUCCESS)
    15898  {
    15899  VkMemoryRequirements memReq = {};
    15900  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
    15901  hDev, hImage, &memReq);
    15902 
    15903  res = vmaFindMemoryTypeIndex(
    15904  allocator,
    15905  memReq.memoryTypeBits,
    15906  pAllocationCreateInfo,
    15907  pMemoryTypeIndex);
    15908 
    15909  allocator->GetVulkanFunctions().vkDestroyImage(
    15910  hDev, hImage, allocator->GetAllocationCallbacks());
    15911  }
    15912  return res;
    15913 }
    15914 
    15915 VkResult vmaCreatePool(
    15916  VmaAllocator allocator,
    15917  const VmaPoolCreateInfo* pCreateInfo,
    15918  VmaPool* pPool)
    15919 {
    15920  VMA_ASSERT(allocator && pCreateInfo && pPool);
    15921 
    15922  VMA_DEBUG_LOG("vmaCreatePool");
    15923 
    15924  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15925 
    15926  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
    15927 
    15928 #if VMA_RECORDING_ENABLED
    15929  if(allocator->GetRecorder() != VMA_NULL)
    15930  {
    15931  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
    15932  }
    15933 #endif
    15934 
    15935  return res;
    15936 }
    15937 
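/*
Usage sketch for vmaCreatePool above (memTypeIndex found e.g. with
vmaFindMemoryTypeIndex; block size and count are illustrative):

    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memTypeIndex;
    poolInfo.blockSize = 128ull * 1024 * 1024; // 128 MiB per block
    poolInfo.maxBlockCount = 2;

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);
    // ... allocate with VmaAllocationCreateInfo::pool = pool ...
    vmaDestroyPool(allocator, pool);
*/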
    15938 void vmaDestroyPool(
    15939  VmaAllocator allocator,
    15940  VmaPool pool)
    15941 {
    15942  VMA_ASSERT(allocator);
    15943 
    15944  if(pool == VK_NULL_HANDLE)
    15945  {
    15946  return;
    15947  }
    15948 
    15949  VMA_DEBUG_LOG("vmaDestroyPool");
    15950 
    15951  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15952 
    15953 #if VMA_RECORDING_ENABLED
    15954  if(allocator->GetRecorder() != VMA_NULL)
    15955  {
    15956  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
    15957  }
    15958 #endif
    15959 
    15960  allocator->DestroyPool(pool);
    15961 }
    15962 
    15963 void vmaGetPoolStats(
    15964  VmaAllocator allocator,
    15965  VmaPool pool,
    15966  VmaPoolStats* pPoolStats)
    15967 {
    15968  VMA_ASSERT(allocator && pool && pPoolStats);
    15969 
    15970  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15971 
    15972  allocator->GetPoolStats(pool, pPoolStats);
    15973 }
    15974 
    15975 void vmaMakePoolAllocationsLost(
    15976  VmaAllocator allocator,
    15977  VmaPool pool,
    15978  size_t* pLostAllocationCount)
    15979 {
    15980  VMA_ASSERT(allocator && pool);
    15981 
    15982  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15983 
    15984 #if VMA_RECORDING_ENABLED
    15985  if(allocator->GetRecorder() != VMA_NULL)
    15986  {
    15987  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
    15988  }
    15989 #endif
    15990 
    15991  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    15992 }
    15993 
    15994 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
    15995 {
    15996  VMA_ASSERT(allocator && pool);
    15997 
    15998  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15999 
    16000  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
    16001 
    16002  return allocator->CheckPoolCorruption(pool);
    16003 }
    16004 
    16005 VkResult vmaAllocateMemory(
    16006  VmaAllocator allocator,
    16007  const VkMemoryRequirements* pVkMemoryRequirements,
    16008  const VmaAllocationCreateInfo* pCreateInfo,
    16009  VmaAllocation* pAllocation,
    16010  VmaAllocationInfo* pAllocationInfo)
    16011 {
    16012  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
    16013 
    16014  VMA_DEBUG_LOG("vmaAllocateMemory");
    16015 
    16016  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16017 
    16018  VkResult result = allocator->AllocateMemory(
    16019  *pVkMemoryRequirements,
    16020  false, // requiresDedicatedAllocation
    16021  false, // prefersDedicatedAllocation
    16022  VK_NULL_HANDLE, // dedicatedBuffer
    16023  VK_NULL_HANDLE, // dedicatedImage
    16024  *pCreateInfo,
    16025  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    16026  1, // allocationCount
    16027  pAllocation);
    16028 
    16029 #if VMA_RECORDING_ENABLED
    16030  if(allocator->GetRecorder() != VMA_NULL)
    16031  {
    16032  allocator->GetRecorder()->RecordAllocateMemory(
    16033  allocator->GetCurrentFrameIndex(),
    16034  *pVkMemoryRequirements,
    16035  *pCreateInfo,
    16036  *pAllocation);
    16037  }
    16038 #endif
    16039 
    16040  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    16041  {
    16042  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16043  }
    16044 
    16045  return result;
    16046 }
    16047 
    16048 VkResult vmaAllocateMemoryPages(
    16049  VmaAllocator allocator,
    16050  const VkMemoryRequirements* pVkMemoryRequirements,
    16051  const VmaAllocationCreateInfo* pCreateInfo,
    16052  size_t allocationCount,
    16053  VmaAllocation* pAllocations,
    16054  VmaAllocationInfo* pAllocationInfo)
    16055 {
    16056  if(allocationCount == 0)
    16057  {
    16058  return VK_SUCCESS;
    16059  }
    16060 
    16061  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
    16062 
    16063  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
    16064 
    16065  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16066 
    16067  VkResult result = allocator->AllocateMemory(
    16068  *pVkMemoryRequirements,
    16069  false, // requiresDedicatedAllocation
    16070  false, // prefersDedicatedAllocation
    16071  VK_NULL_HANDLE, // dedicatedBuffer
    16072  VK_NULL_HANDLE, // dedicatedImage
    16073  *pCreateInfo,
    16074  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    16075  allocationCount,
    16076  pAllocations);
    16077 
    16078 #if VMA_RECORDING_ENABLED
    16079  if(allocator->GetRecorder() != VMA_NULL)
    16080  {
    16081  allocator->GetRecorder()->RecordAllocateMemoryPages(
    16082  allocator->GetCurrentFrameIndex(),
    16083  *pVkMemoryRequirements,
    16084  *pCreateInfo,
    16085  (uint64_t)allocationCount,
    16086  pAllocations);
    16087  }
    16088 #endif
    16089 
    16090  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    16091  {
    16092  for(size_t i = 0; i < allocationCount; ++i)
    16093  {
    16094  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
    16095  }
    16096  }
    16097 
    16098  return result;
    16099 }
    16100 
    16101 VkResult vmaAllocateMemoryForBuffer(
    16102  VmaAllocator allocator,
    16103  VkBuffer buffer,
    16104  const VmaAllocationCreateInfo* pCreateInfo,
    16105  VmaAllocation* pAllocation,
    16106  VmaAllocationInfo* pAllocationInfo)
    16107 {
    16108  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    16109 
    16110  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    16111 
    16112  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16113 
    16114  VkMemoryRequirements vkMemReq = {};
    16115  bool requiresDedicatedAllocation = false;
    16116  bool prefersDedicatedAllocation = false;
    16117  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
    16118  requiresDedicatedAllocation,
    16119  prefersDedicatedAllocation);
    16120 
    16121  VkResult result = allocator->AllocateMemory(
    16122  vkMemReq,
    16123  requiresDedicatedAllocation,
    16124  prefersDedicatedAllocation,
    16125  buffer, // dedicatedBuffer
    16126  VK_NULL_HANDLE, // dedicatedImage
    16127  *pCreateInfo,
    16128  VMA_SUBALLOCATION_TYPE_BUFFER,
    16129  1, // allocationCount
    16130  pAllocation);
    16131 
    16132 #if VMA_RECORDING_ENABLED
    16133  if(allocator->GetRecorder() != VMA_NULL)
    16134  {
    16135  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
    16136  allocator->GetCurrentFrameIndex(),
    16137  vkMemReq,
    16138  requiresDedicatedAllocation,
    16139  prefersDedicatedAllocation,
    16140  *pCreateInfo,
    16141  *pAllocation);
    16142  }
    16143 #endif
    16144 
    16145  if(pAllocationInfo && result == VK_SUCCESS)
    16146  {
    16147  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16148  }
    16149 
    16150  return result;
    16151 }
    16152 
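/*
Usage sketch for vmaAllocateMemoryForBuffer above: the allocation is created for
an existing VkBuffer but binding is left to the caller (hypothetical `buffer`):

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocation alloc;
    VkResult res = vmaAllocateMemoryForBuffer(allocator, buffer, &allocCreateInfo, &alloc, VMA_NULL);
    if(res == VK_SUCCESS)
    {
        res = vmaBindBufferMemory(allocator, alloc, buffer);
    }
*/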
    16153 VkResult vmaAllocateMemoryForImage(
    16154  VmaAllocator allocator,
    16155  VkImage image,
    16156  const VmaAllocationCreateInfo* pCreateInfo,
    16157  VmaAllocation* pAllocation,
    16158  VmaAllocationInfo* pAllocationInfo)
    16159 {
    16160  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    16161 
    16162  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
    16163 
    16164  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16165 
    16166  VkMemoryRequirements vkMemReq = {};
    16167  bool requiresDedicatedAllocation = false;
    16168  bool prefersDedicatedAllocation = false;
    16169  allocator->GetImageMemoryRequirements(image, vkMemReq,
    16170  requiresDedicatedAllocation, prefersDedicatedAllocation);
    16171 
    16172  VkResult result = allocator->AllocateMemory(
    16173  vkMemReq,
    16174  requiresDedicatedAllocation,
    16175  prefersDedicatedAllocation,
    16176  VK_NULL_HANDLE, // dedicatedBuffer
    16177  image, // dedicatedImage
    16178  *pCreateInfo,
    16179  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
    16180  1, // allocationCount
    16181  pAllocation);
    16182 
    16183 #if VMA_RECORDING_ENABLED
    16184  if(allocator->GetRecorder() != VMA_NULL)
    16185  {
    16186  allocator->GetRecorder()->RecordAllocateMemoryForImage(
    16187  allocator->GetCurrentFrameIndex(),
    16188  vkMemReq,
    16189  requiresDedicatedAllocation,
    16190  prefersDedicatedAllocation,
    16191  *pCreateInfo,
    16192  *pAllocation);
    16193  }
    16194 #endif
    16195 
    16196  if(pAllocationInfo && result == VK_SUCCESS)
    16197  {
    16198  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16199  }
    16200 
    16201  return result;
    16202 }
    16203 
    16204 void vmaFreeMemory(
    16205  VmaAllocator allocator,
    16206  VmaAllocation allocation)
    16207 {
    16208  VMA_ASSERT(allocator);
    16209 
    16210  if(allocation == VK_NULL_HANDLE)
    16211  {
    16212  return;
    16213  }
    16214 
    16215  VMA_DEBUG_LOG("vmaFreeMemory");
    16216 
    16217  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16218 
    16219 #if VMA_RECORDING_ENABLED
    16220  if(allocator->GetRecorder() != VMA_NULL)
    16221  {
    16222  allocator->GetRecorder()->RecordFreeMemory(
    16223  allocator->GetCurrentFrameIndex(),
    16224  allocation);
    16225  }
    16226 #endif
    16227 
    16228  allocator->FreeMemory(
    16229  1, // allocationCount
    16230  &allocation);
    16231 }
    16232 
    16233 void vmaFreeMemoryPages(
    16234  VmaAllocator allocator,
    16235  size_t allocationCount,
    16236  VmaAllocation* pAllocations)
    16237 {
    16238  if(allocationCount == 0)
    16239  {
    16240  return;
    16241  }
    16242 
    16243  VMA_ASSERT(allocator);
    16244 
    16245  VMA_DEBUG_LOG("vmaFreeMemoryPages");
    16246 
    16247  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16248 
    16249 #if VMA_RECORDING_ENABLED
    16250  if(allocator->GetRecorder() != VMA_NULL)
    16251  {
    16252  allocator->GetRecorder()->RecordFreeMemoryPages(
    16253  allocator->GetCurrentFrameIndex(),
    16254  (uint64_t)allocationCount,
    16255  pAllocations);
    16256  }
    16257 #endif
    16258 
    16259  allocator->FreeMemory(allocationCount, pAllocations);
    16260 }
    16261 
    16262 VkResult vmaResizeAllocation(
    16263  VmaAllocator allocator,
    16264  VmaAllocation allocation,
    16265  VkDeviceSize newSize)
    16266 {
    16267  VMA_ASSERT(allocator && allocation);
    16268 
    16269  VMA_DEBUG_LOG("vmaResizeAllocation");
    16270 
    16271  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16272 
    16273  return allocator->ResizeAllocation(allocation, newSize);
    16274 }
    16275 
    16276 void vmaGetAllocationInfo(
    16277  VmaAllocator allocator,
    16278  VmaAllocation allocation,
    16279  VmaAllocationInfo* pAllocationInfo)
    16280 {
    16281  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    16282 
    16283  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16284 
    16285 #if VMA_RECORDING_ENABLED
    16286  if(allocator->GetRecorder() != VMA_NULL)
    16287  {
    16288  allocator->GetRecorder()->RecordGetAllocationInfo(
    16289  allocator->GetCurrentFrameIndex(),
    16290  allocation);
    16291  }
    16292 #endif
    16293 
    16294  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    16295 }
    16296 
    16297 VkBool32 vmaTouchAllocation(
    16298  VmaAllocator allocator,
    16299  VmaAllocation allocation)
    16300 {
    16301  VMA_ASSERT(allocator && allocation);
    16302 
    16303  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16304 
    16305 #if VMA_RECORDING_ENABLED
    16306  if(allocator->GetRecorder() != VMA_NULL)
    16307  {
    16308  allocator->GetRecorder()->RecordTouchAllocation(
    16309  allocator->GetCurrentFrameIndex(),
    16310  allocation);
    16311  }
    16312 #endif
    16313 
    16314  return allocator->TouchAllocation(allocation);
    16315 }
    16316 
    16317 void vmaSetAllocationUserData(
    16318  VmaAllocator allocator,
    16319  VmaAllocation allocation,
    16320  void* pUserData)
    16321 {
    16322  VMA_ASSERT(allocator && allocation);
    16323 
    16324  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16325 
    16326  allocation->SetUserData(allocator, pUserData);
    16327 
    16328 #if VMA_RECORDING_ENABLED
    16329  if(allocator->GetRecorder() != VMA_NULL)
    16330  {
    16331  allocator->GetRecorder()->RecordSetAllocationUserData(
    16332  allocator->GetCurrentFrameIndex(),
    16333  allocation,
    16334  pUserData);
    16335  }
    16336 #endif
    16337 }
    16338 
    16339 void vmaCreateLostAllocation(
    16340  VmaAllocator allocator,
    16341  VmaAllocation* pAllocation)
    16342 {
    16343  VMA_ASSERT(allocator && pAllocation);
    16344 
    16345  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
    16346 
    16347  allocator->CreateLostAllocation(pAllocation);
    16348 
    16349 #if VMA_RECORDING_ENABLED
    16350  if(allocator->GetRecorder() != VMA_NULL)
    16351  {
    16352  allocator->GetRecorder()->RecordCreateLostAllocation(
    16353  allocator->GetCurrentFrameIndex(),
    16354  *pAllocation);
    16355  }
    16356 #endif
    16357 }
    16358 
    16359 VkResult vmaMapMemory(
    16360  VmaAllocator allocator,
    16361  VmaAllocation allocation,
    16362  void** ppData)
    16363 {
    16364  VMA_ASSERT(allocator && allocation && ppData);
    16365 
    16366  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16367 
    16368  VkResult res = allocator->Map(allocation, ppData);
    16369 
    16370 #if VMA_RECORDING_ENABLED
    16371  if(allocator->GetRecorder() != VMA_NULL)
    16372  {
    16373  allocator->GetRecorder()->RecordMapMemory(
    16374  allocator->GetCurrentFrameIndex(),
    16375  allocation);
    16376  }
    16377 #endif
    16378 
    16379  return res;
    16380 }
    16381 
    16382 void vmaUnmapMemory(
    16383  VmaAllocator allocator,
    16384  VmaAllocation allocation)
    16385 {
    16386  VMA_ASSERT(allocator && allocation);
    16387 
    16388  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16389 
    16390 #if VMA_RECORDING_ENABLED
    16391  if(allocator->GetRecorder() != VMA_NULL)
    16392  {
    16393  allocator->GetRecorder()->RecordUnmapMemory(
    16394  allocator->GetCurrentFrameIndex(),
    16395  allocation);
    16396  }
    16397 #endif
    16398 
    16399  allocator->Unmap(allocation);
    16400 }
    16401 
    16402 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    16403 {
    16404  VMA_ASSERT(allocator && allocation);
    16405 
    16406  VMA_DEBUG_LOG("vmaFlushAllocation");
    16407 
    16408  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16409 
    16410  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
    16411 
    16412 #if VMA_RECORDING_ENABLED
    16413  if(allocator->GetRecorder() != VMA_NULL)
    16414  {
    16415  allocator->GetRecorder()->RecordFlushAllocation(
    16416  allocator->GetCurrentFrameIndex(),
    16417  allocation, offset, size);
    16418  }
    16419 #endif
    16420 }
    16421 
    16422 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    16423 {
    16424  VMA_ASSERT(allocator && allocation);
    16425 
    16426  VMA_DEBUG_LOG("vmaInvalidateAllocation");
    16427 
    16428  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16429 
    16430  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
    16431 
    16432 #if VMA_RECORDING_ENABLED
    16433  if(allocator->GetRecorder() != VMA_NULL)
    16434  {
    16435  allocator->GetRecorder()->RecordInvalidateAllocation(
    16436  allocator->GetCurrentFrameIndex(),
    16437  allocation, offset, size);
    16438  }
    16439 #endif
    16440 }
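
// Example (an illustrative sketch, not part of the library): writing through a
// mapped pointer to memory that is HOST_VISIBLE but not HOST_COHERENT requires
// an explicit flush. `allocator`, `allocation`, `myData`, and `myDataSize` are
// assumed to exist.
//
//   void* mapped = nullptr;
//   if(vmaMapMemory(allocator, allocation, &mapped) == VK_SUCCESS)
//   {
//       memcpy(mapped, myData, myDataSize);
//       vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE); // Make the write visible to the device.
//       vmaUnmapMemory(allocator, allocation);
//   }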
    16441 
    16442 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
    16443 {
    16444  VMA_ASSERT(allocator);
    16445 
    16446  VMA_DEBUG_LOG("vmaCheckCorruption");
    16447 
    16448  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16449 
    16450  return allocator->CheckCorruption(memoryTypeBits);
    16451 }
    16452 
    16453 VkResult vmaDefragment(
    16454  VmaAllocator allocator,
    16455  VmaAllocation* pAllocations,
    16456  size_t allocationCount,
    16457  VkBool32* pAllocationsChanged,
    16458  const VmaDefragmentationInfo *pDefragmentationInfo,
    16459  VmaDefragmentationStats* pDefragmentationStats)
    16460 {
    16461  // Deprecated interface, reimplemented using the new one.
    16462 
    16463  VmaDefragmentationInfo2 info2 = {};
    16464  info2.allocationCount = (uint32_t)allocationCount;
    16465  info2.pAllocations = pAllocations;
    16466  info2.pAllocationsChanged = pAllocationsChanged;
    16467  if(pDefragmentationInfo != VMA_NULL)
    16468  {
    16469  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    16470  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
    16471  }
    16472  else
    16473  {
    16474  info2.maxCpuAllocationsToMove = UINT32_MAX;
    16475  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
    16476  }
    16477  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
    16478 
    16479  VmaDefragmentationContext ctx = VK_NULL_HANDLE;
    16480  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
    16481  if(res == VK_NOT_READY)
    16482  {
    16483  res = vmaDefragmentationEnd( allocator, ctx);
    16484  }
    16485  return res;
    16486 }
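
// Usage sketch of the replacement API (illustrative; `allocator` and a
// std::vector<VmaAllocation> named `allocs` are assumed to exist). This is the
// CPU-side path that the deprecated wrapper above takes:
//
//   VmaDefragmentationInfo2 info = {};
//   info.allocationCount = (uint32_t)allocs.size();
//   info.pAllocations = allocs.data();
//   info.maxCpuAllocationsToMove = UINT32_MAX;
//   info.maxCpuBytesToMove = VK_WHOLE_SIZE;
//   VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
//   VmaDefragmentationStats stats = {};
//   VkResult res = vmaDefragmentationBegin(allocator, &info, &stats, &defragCtx);
//   if(res == VK_NOT_READY)
//   {
//       res = vmaDefragmentationEnd(allocator, defragCtx); // CPU-only defragmentation finishes synchronously here.
//   }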
    16487 
    16488 VkResult vmaDefragmentationBegin(
    16489  VmaAllocator allocator,
    16490  const VmaDefragmentationInfo2* pInfo,
    16491  VmaDefragmentationStats* pStats,
    16492  VmaDefragmentationContext *pContext)
    16493 {
    16494  VMA_ASSERT(allocator && pInfo && pContext);
    16495 
    16496  // Degenerate case: Nothing to defragment.
    16497  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
    16498  {
    16499  return VK_SUCCESS;
    16500  }
    16501 
    16502  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
    16503  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
    16504  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
    16505  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
    16506 
    16507  VMA_DEBUG_LOG("vmaDefragmentationBegin");
    16508 
    16509  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16510 
    16511  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
    16512 
    16513 #if VMA_RECORDING_ENABLED
    16514  if(allocator->GetRecorder() != VMA_NULL)
    16515  {
    16516  allocator->GetRecorder()->RecordDefragmentationBegin(
    16517  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
    16518  }
    16519 #endif
    16520 
    16521  return res;
    16522 }
    16523 
    16524 VkResult vmaDefragmentationEnd(
    16525  VmaAllocator allocator,
    16526  VmaDefragmentationContext context)
    16527 {
    16528  VMA_ASSERT(allocator);
    16529 
    16530  VMA_DEBUG_LOG("vmaDefragmentationEnd");
    16531 
    16532  if(context != VK_NULL_HANDLE)
    16533  {
    16534  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16535 
    16536 #if VMA_RECORDING_ENABLED
    16537  if(allocator->GetRecorder() != VMA_NULL)
    16538  {
    16539  allocator->GetRecorder()->RecordDefragmentationEnd(
    16540  allocator->GetCurrentFrameIndex(), context);
    16541  }
    16542 #endif
    16543 
    16544  return allocator->DefragmentationEnd(context);
    16545  }
    16546  else
    16547  {
    16548  return VK_SUCCESS;
    16549  }
    16550 }
    16551 
    16552 VkResult vmaBindBufferMemory(
    16553  VmaAllocator allocator,
    16554  VmaAllocation allocation,
    16555  VkBuffer buffer)
    16556 {
    16557  VMA_ASSERT(allocator && allocation && buffer);
    16558 
    16559  VMA_DEBUG_LOG("vmaBindBufferMemory");
    16560 
    16561  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16562 
    16563  return allocator->BindBufferMemory(allocation, buffer);
    16564 }
    16565 
    16566 VkResult vmaBindImageMemory(
    16567  VmaAllocator allocator,
    16568  VmaAllocation allocation,
    16569  VkImage image)
    16570 {
    16571  VMA_ASSERT(allocator && allocation && image);
    16572 
    16573  VMA_DEBUG_LOG("vmaBindImageMemory");
    16574 
    16575  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16576 
    16577  return allocator->BindImageMemory(allocation, image);
    16578 }
    16579 
    16580 VkResult vmaCreateBuffer(
    16581  VmaAllocator allocator,
    16582  const VkBufferCreateInfo* pBufferCreateInfo,
    16583  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    16584  VkBuffer* pBuffer,
    16585  VmaAllocation* pAllocation,
    16586  VmaAllocationInfo* pAllocationInfo)
    16587 {
    16588  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
    16589 
    16590  if(pBufferCreateInfo->size == 0)
    16591  {
    16592  return VK_ERROR_VALIDATION_FAILED_EXT;
    16593  }
    16594 
    16595  VMA_DEBUG_LOG("vmaCreateBuffer");
    16596 
    16597  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16598 
    16599  *pBuffer = VK_NULL_HANDLE;
    16600  *pAllocation = VK_NULL_HANDLE;
    16601 
    16602  // 1. Create VkBuffer.
    16603  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
    16604  allocator->m_hDevice,
    16605  pBufferCreateInfo,
    16606  allocator->GetAllocationCallbacks(),
    16607  pBuffer);
    16608  if(res >= 0)
    16609  {
    16610  // 2. vkGetBufferMemoryRequirements.
    16611  VkMemoryRequirements vkMemReq = {};
    16612  bool requiresDedicatedAllocation = false;
    16613  bool prefersDedicatedAllocation = false;
    16614  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
    16615  requiresDedicatedAllocation, prefersDedicatedAllocation);
    16616 
    16617  // Make sure alignment requirements for specific buffer usages reported
    16618  // in Physical Device Properties are included in the alignment reported by memory requirements.
    16619  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
    16620  {
    16621  VMA_ASSERT(vkMemReq.alignment %
    16622  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
    16623  }
    16624  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
    16625  {
    16626  VMA_ASSERT(vkMemReq.alignment %
    16627  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
    16628  }
    16629  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
    16630  {
    16631  VMA_ASSERT(vkMemReq.alignment %
    16632  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
    16633  }
    16634 
    16635  // 3. Allocate memory using allocator.
    16636  res = allocator->AllocateMemory(
    16637  vkMemReq,
    16638  requiresDedicatedAllocation,
    16639  prefersDedicatedAllocation,
    16640  *pBuffer, // dedicatedBuffer
    16641  VK_NULL_HANDLE, // dedicatedImage
    16642  *pAllocationCreateInfo,
    16643  VMA_SUBALLOCATION_TYPE_BUFFER,
    16644  1, // allocationCount
    16645  pAllocation);
    16646 
    16647 #if VMA_RECORDING_ENABLED
    16648  if(allocator->GetRecorder() != VMA_NULL)
    16649  {
    16650  allocator->GetRecorder()->RecordCreateBuffer(
    16651  allocator->GetCurrentFrameIndex(),
    16652  *pBufferCreateInfo,
    16653  *pAllocationCreateInfo,
    16654  *pAllocation);
    16655  }
    16656 #endif
    16657 
    16658  if(res >= 0)
    16659  {
    16660  // 4. Bind buffer with memory.
    16661  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
    16662  {
    16663  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
    16664  }
    16665  if(res >= 0)
    16666  {
    16667  // All steps succeeded.
    16668  #if VMA_STATS_STRING_ENABLED
    16669  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
    16670  #endif
    16671  if(pAllocationInfo != VMA_NULL)
    16672  {
    16673  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16674  }
    16675 
    16676  return VK_SUCCESS;
    16677  }
    16678  allocator->FreeMemory(
    16679  1, // allocationCount
    16680  pAllocation);
    16681  *pAllocation = VK_NULL_HANDLE;
    16682  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    16683  *pBuffer = VK_NULL_HANDLE;
    16684  return res;
    16685  }
    16686  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    16687  *pBuffer = VK_NULL_HANDLE;
    16688  return res;
    16689  }
    16690  return res;
    16691 }
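
// Example usage (a minimal sketch of the recommended one-call path; the
// `allocator` handle is assumed to exist):
//
//   VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
//   bufInfo.size = 65536;
//   bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
//   VmaAllocationCreateInfo allocCreateInfo = {};
//   allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//   VkBuffer buf = VK_NULL_HANDLE;
//   VmaAllocation alloc = VK_NULL_HANDLE;
//   VkResult res = vmaCreateBuffer(allocator, &bufInfo, &allocCreateInfo, &buf, &alloc, nullptr);
//   // ... use the buffer, then destroy buffer and free its memory together:
//   vmaDestroyBuffer(allocator, buf, alloc);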
    16692 
    16693 void vmaDestroyBuffer(
    16694  VmaAllocator allocator,
    16695  VkBuffer buffer,
    16696  VmaAllocation allocation)
    16697 {
    16698  VMA_ASSERT(allocator);
    16699 
    16700  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    16701  {
    16702  return;
    16703  }
    16704 
    16705  VMA_DEBUG_LOG("vmaDestroyBuffer");
    16706 
    16707  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16708 
    16709 #if VMA_RECORDING_ENABLED
    16710  if(allocator->GetRecorder() != VMA_NULL)
    16711  {
    16712  allocator->GetRecorder()->RecordDestroyBuffer(
    16713  allocator->GetCurrentFrameIndex(),
    16714  allocation);
    16715  }
    16716 #endif
    16717 
    16718  if(buffer != VK_NULL_HANDLE)
    16719  {
    16720  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    16721  }
    16722 
    16723  if(allocation != VK_NULL_HANDLE)
    16724  {
    16725  allocator->FreeMemory(
    16726  1, // allocationCount
    16727  &allocation);
    16728  }
    16729 }
    16730 
    16731 VkResult vmaCreateImage(
    16732  VmaAllocator allocator,
    16733  const VkImageCreateInfo* pImageCreateInfo,
    16734  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    16735  VkImage* pImage,
    16736  VmaAllocation* pAllocation,
    16737  VmaAllocationInfo* pAllocationInfo)
    16738 {
    16739  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
    16740 
    16741  if(pImageCreateInfo->extent.width == 0 ||
    16742  pImageCreateInfo->extent.height == 0 ||
    16743  pImageCreateInfo->extent.depth == 0 ||
    16744  pImageCreateInfo->mipLevels == 0 ||
    16745  pImageCreateInfo->arrayLayers == 0)
    16746  {
    16747  return VK_ERROR_VALIDATION_FAILED_EXT;
    16748  }
    16749 
    16750  VMA_DEBUG_LOG("vmaCreateImage");
    16751 
    16752  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16753 
    16754  *pImage = VK_NULL_HANDLE;
    16755  *pAllocation = VK_NULL_HANDLE;
    16756 
    16757  // 1. Create VkImage.
    16758  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
    16759  allocator->m_hDevice,
    16760  pImageCreateInfo,
    16761  allocator->GetAllocationCallbacks(),
    16762  pImage);
    16763  if(res >= 0)
    16764  {
    16765  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
    16766  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
    16767  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
    16768 
    16769  // 2. Allocate memory using allocator.
    16770  VkMemoryRequirements vkMemReq = {};
    16771  bool requiresDedicatedAllocation = false;
    16772  bool prefersDedicatedAllocation = false;
    16773  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
    16774  requiresDedicatedAllocation, prefersDedicatedAllocation);
    16775 
    16776  res = allocator->AllocateMemory(
    16777  vkMemReq,
    16778  requiresDedicatedAllocation,
    16779  prefersDedicatedAllocation,
    16780  VK_NULL_HANDLE, // dedicatedBuffer
    16781  *pImage, // dedicatedImage
    16782  *pAllocationCreateInfo,
    16783  suballocType,
    16784  1, // allocationCount
    16785  pAllocation);
    16786 
    16787 #if VMA_RECORDING_ENABLED
    16788  if(allocator->GetRecorder() != VMA_NULL)
    16789  {
    16790  allocator->GetRecorder()->RecordCreateImage(
    16791  allocator->GetCurrentFrameIndex(),
    16792  *pImageCreateInfo,
    16793  *pAllocationCreateInfo,
    16794  *pAllocation);
    16795  }
    16796 #endif
    16797 
    16798  if(res >= 0)
    16799  {
    16800  // 3. Bind image with memory.
    16801  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
    16802  {
    16803  res = allocator->BindImageMemory(*pAllocation, *pImage);
    16804  }
    16805  if(res >= 0)
    16806  {
    16807  // All steps succeeded.
    16808  #if VMA_STATS_STRING_ENABLED
    16809  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
    16810  #endif
    16811  if(pAllocationInfo != VMA_NULL)
    16812  {
    16813  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16814  }
    16815 
    16816  return VK_SUCCESS;
    16817  }
    16818  allocator->FreeMemory(
    16819  1, // allocationCount
    16820  pAllocation);
    16821  *pAllocation = VK_NULL_HANDLE;
    16822  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    16823  *pImage = VK_NULL_HANDLE;
    16824  return res;
    16825  }
    16826  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    16827  *pImage = VK_NULL_HANDLE;
    16828  return res;
    16829  }
    16830  return res;
    16831 }
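
// Example usage (an analogous sketch for images; `allocator` is assumed to exist):
//
//   VkImageCreateInfo imgInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
//   imgInfo.imageType = VK_IMAGE_TYPE_2D;
//   imgInfo.extent = { 1024, 1024, 1 };
//   imgInfo.mipLevels = 1;
//   imgInfo.arrayLayers = 1;
//   imgInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
//   imgInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
//   imgInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
//   imgInfo.samples = VK_SAMPLE_COUNT_1_BIT;
//   VmaAllocationCreateInfo allocCreateInfo = {};
//   allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//   VkImage image = VK_NULL_HANDLE;
//   VmaAllocation alloc = VK_NULL_HANDLE;
//   VkResult res = vmaCreateImage(allocator, &imgInfo, &allocCreateInfo, &image, &alloc, nullptr);
//   // ... later:
//   vmaDestroyImage(allocator, image, alloc);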
    16832 
    16833 void vmaDestroyImage(
    16834  VmaAllocator allocator,
    16835  VkImage image,
    16836  VmaAllocation allocation)
    16837 {
    16838  VMA_ASSERT(allocator);
    16839 
    16840  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    16841  {
    16842  return;
    16843  }
    16844 
    16845  VMA_DEBUG_LOG("vmaDestroyImage");
    16846 
    16847  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16848 
    16849 #if VMA_RECORDING_ENABLED
    16850  if(allocator->GetRecorder() != VMA_NULL)
    16851  {
    16852  allocator->GetRecorder()->RecordDestroyImage(
    16853  allocator->GetCurrentFrameIndex(),
    16854  allocation);
    16855  }
    16856 #endif
    16857 
    16858  if(image != VK_NULL_HANDLE)
    16859  {
    16860  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    16861  }
    16862  if(allocation != VK_NULL_HANDLE)
    16863  {
    16864  allocator->FreeMemory(
    16865  1, // allocationCount
    16866  &allocation);
    16867  }
    16868 }
    16869 
    16870 #endif // #ifdef VMA_IMPLEMENTATION
    1 //
    2 // Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    1678 /*
    1679 Define this macro to 0/1 to disable/enable support for recording functionality,
    1680 available through VmaAllocatorCreateInfo::pRecordSettings.
    1681 */
    1682 #ifndef VMA_RECORDING_ENABLED
    1683  #ifdef _WIN32
    1684  #define VMA_RECORDING_ENABLED 1
    1685  #else
    1686  #define VMA_RECORDING_ENABLED 0
    1687  #endif
    1688 #endif
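
// Example (an illustrative sketch; recording is Windows-only in this version
// because the implementation uses windows.h):
//
//   VmaRecordSettings recordSettings = {};
//   recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT; // Flush the file after every call.
//   recordSettings.pFilePath = "vma_replay.csv";
//   VmaAllocatorCreateInfo allocatorInfo = {};
//   // ... fill physicalDevice, device, etc. ...
//   allocatorInfo.pRecordSettings = &recordSettings;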
    1689 
    1690 #ifndef NOMINMAX
    1691  #define NOMINMAX // For windows.h
    1692 #endif
    1693 
    1694 #ifndef VULKAN_H_
    1695  #include <vulkan/vulkan.h>
    1696 #endif
    1697 
    1698 #if VMA_RECORDING_ENABLED
    1699  #include <windows.h>
    1700 #endif
    1701 
    1702 #if !defined(VMA_DEDICATED_ALLOCATION)
    1703  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
    1704  #define VMA_DEDICATED_ALLOCATION 1
    1705  #else
    1706  #define VMA_DEDICATED_ALLOCATION 0
    1707  #endif
    1708 #endif
    1709 
    1710 #if !defined(VMA_BIND_MEMORY2)
    1711  #if VK_KHR_bind_memory2
    1712  #define VMA_BIND_MEMORY2 1
    1713  #else
    1714  #define VMA_BIND_MEMORY2 0
    1715  #endif
    1716 #endif
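
// Example (a sketch, assuming the VkDevice was created with the
// VK_KHR_bind_memory2 extension enabled): opt the allocator in to the extended
// bind entry points, so vmaBindBufferMemory2()/vmaBindImageMemory2() can pass
// a pNext chain to the driver.
//
//   VmaAllocatorCreateInfo allocatorInfo = {};
//   allocatorInfo.flags |= VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT;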
    1717 
    1727 VK_DEFINE_HANDLE(VmaAllocator)
    1728 
    1729 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    1731  VmaAllocator allocator,
    1732  uint32_t memoryType,
    1733  VkDeviceMemory memory,
    1734  VkDeviceSize size);
    1736 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    1737  VmaAllocator allocator,
    1738  uint32_t memoryType,
    1739  VkDeviceMemory memory,
    1740  VkDeviceSize size);
    1741 
    1755 
    1797 
    1800 typedef VkFlags VmaAllocatorCreateFlags;
    1801 
    1806 typedef struct VmaVulkanFunctions {
    1807  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    1808  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    1809  PFN_vkAllocateMemory vkAllocateMemory;
    1810  PFN_vkFreeMemory vkFreeMemory;
    1811  PFN_vkMapMemory vkMapMemory;
    1812  PFN_vkUnmapMemory vkUnmapMemory;
    1813  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    1814  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    1815  PFN_vkBindBufferMemory vkBindBufferMemory;
    1816  PFN_vkBindImageMemory vkBindImageMemory;
    1817  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    1818  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    1819  PFN_vkCreateBuffer vkCreateBuffer;
    1820  PFN_vkDestroyBuffer vkDestroyBuffer;
    1821  PFN_vkCreateImage vkCreateImage;
    1822  PFN_vkDestroyImage vkDestroyImage;
    1823  PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
    1824 #if VMA_DEDICATED_ALLOCATION
    1825  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    1826  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
    1827 #endif
    1828 #if VMA_BIND_MEMORY2
    1829  PFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR;
    1830  PFN_vkBindImageMemory2KHR vkBindImageMemory2KHR;
    1831 #endif
    1832 } VmaVulkanFunctions;
    1833 
    1835 typedef enum VmaRecordFlagBits {
    1842 
    1845 typedef VkFlags VmaRecordFlags;
    1846 
    1848 typedef struct VmaRecordSettings
    1849 {
    1859  const char* pFilePath;
    1860 } VmaRecordSettings;
    1861 
    1863 typedef struct VmaAllocatorCreateInfo
    1864 {
    1868 
    1869  VkPhysicalDevice physicalDevice;
    1871 
    1872  VkDevice device;
    1874 
    1877 
    1878  const VkAllocationCallbacks* pAllocationCallbacks;
    1880 
    1920  const VkDeviceSize* pHeapSizeLimit;
    1940 } VmaAllocatorCreateInfo;
    1941 
    1943 VkResult vmaCreateAllocator(
    1944  const VmaAllocatorCreateInfo* pCreateInfo,
    1945  VmaAllocator* pAllocator);
    1946 
    1948 void vmaDestroyAllocator(
    1949  VmaAllocator allocator);
    1950 
    1955 void vmaGetPhysicalDeviceProperties(
    1956  VmaAllocator allocator,
    1957  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    1958 
    1963 void vmaGetMemoryProperties(
    1964  VmaAllocator allocator,
    1965  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    1966 
    1973 void vmaGetMemoryTypeProperties(
    1974  VmaAllocator allocator,
    1975  uint32_t memoryTypeIndex,
    1976  VkMemoryPropertyFlags* pFlags);
    1977 
    1986 void vmaSetCurrentFrameIndex(
    1987  VmaAllocator allocator,
    1988  uint32_t frameIndex);
    1989 
    1992 typedef struct VmaStatInfo
    1993 {
    1995  uint32_t blockCount;
    2001  VkDeviceSize usedBytes;
    2003  VkDeviceSize unusedBytes;
    2006 } VmaStatInfo;
    2007 
    2009 typedef struct VmaStats
    2010 {
    2011  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    2012  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    2013  VmaStatInfo total;
    2014 } VmaStats;
    2015 
    2017 void vmaCalculateStats(
    2018  VmaAllocator allocator,
    2019  VmaStats* pStats);
    2020 
    2021 #ifndef VMA_STATS_STRING_ENABLED
    2022 #define VMA_STATS_STRING_ENABLED 1
    2023 #endif
    2024 
    2025 #if VMA_STATS_STRING_ENABLED
    2026 
    2028 
    2030 void vmaBuildStatsString(
    2031  VmaAllocator allocator,
    2032  char** ppStatsString,
    2033  VkBool32 detailedMap);
    2034 
    2035 void vmaFreeStatsString(
    2036  VmaAllocator allocator,
    2037  char* pStatsString);
    2038 
    2039 #endif // #if VMA_STATS_STRING_ENABLED
    2040 
    2049 VK_DEFINE_HANDLE(VmaPool)
    2050 
    2051 typedef enum VmaMemoryUsage
    2052 {
    2101 } VmaMemoryUsage;
    2102 
    2112 
    2173 
    2189 
    2199 
    2206 
    2210 
    2211 typedef struct VmaAllocationCreateInfo
    2212 {
    2225  VkMemoryPropertyFlags requiredFlags;
    2230  VkMemoryPropertyFlags preferredFlags;
    2238  uint32_t memoryTypeBits;
    2251  void* pUserData;
    2252 } VmaAllocationCreateInfo;
    2253 
    2270 VkResult vmaFindMemoryTypeIndex(
    2271  VmaAllocator allocator,
    2272  uint32_t memoryTypeBits,
    2273  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2274  uint32_t* pMemoryTypeIndex);
    2275 
    2288 VkResult vmaFindMemoryTypeIndexForBufferInfo(
    2289  VmaAllocator allocator,
    2290  const VkBufferCreateInfo* pBufferCreateInfo,
    2291  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2292  uint32_t* pMemoryTypeIndex);
    2293 
    2306 VkResult vmaFindMemoryTypeIndexForImageInfo(
    2307  VmaAllocator allocator,
    2308  const VkImageCreateInfo* pImageCreateInfo,
    2309  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2310  uint32_t* pMemoryTypeIndex);
    2311 
    2332 
    2349 
    2360 
    2366 
    2369 typedef VkFlags VmaPoolCreateFlags;
    2370 
    2373 typedef struct VmaPoolCreateInfo {
    2388  VkDeviceSize blockSize;
    2416 } VmaPoolCreateInfo;
    2417 
    2420 typedef struct VmaPoolStats {
    2423  VkDeviceSize size;
    2426  VkDeviceSize unusedSize;
    2439  VkDeviceSize unusedRangeSizeMax;
    2442  size_t blockCount;
    2443 } VmaPoolStats;
    2444 
    2451 VkResult vmaCreatePool(
    2452  VmaAllocator allocator,
    2453  const VmaPoolCreateInfo* pCreateInfo,
    2454  VmaPool* pPool);
    2455 
    2458 void vmaDestroyPool(
    2459  VmaAllocator allocator,
    2460  VmaPool pool);
    2461 
    2468 void vmaGetPoolStats(
    2469  VmaAllocator allocator,
    2470  VmaPool pool,
    2471  VmaPoolStats* pPoolStats);
    2472 
    2479 void vmaMakePoolAllocationsLost(
    2480  VmaAllocator allocator,
    2481  VmaPool pool,
    2482  size_t* pLostAllocationCount);
    2483 
    2498 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
    2499 
    2524 VK_DEFINE_HANDLE(VmaAllocation)
    2525 
    2526 
    2528 typedef struct VmaAllocationInfo {
    2533  uint32_t memoryType;
    2542  VkDeviceMemory deviceMemory;
    2547  VkDeviceSize offset;
    2552  VkDeviceSize size;
    2566  void* pUserData;
    2567 } VmaAllocationInfo;
    2568 
    2579 VkResult vmaAllocateMemory(
    2580  VmaAllocator allocator,
    2581  const VkMemoryRequirements* pVkMemoryRequirements,
    2582  const VmaAllocationCreateInfo* pCreateInfo,
    2583  VmaAllocation* pAllocation,
    2584  VmaAllocationInfo* pAllocationInfo);
    2585 
    2605 VkResult vmaAllocateMemoryPages(
    2606  VmaAllocator allocator,
    2607  const VkMemoryRequirements* pVkMemoryRequirements,
    2608  const VmaAllocationCreateInfo* pCreateInfo,
    2609  size_t allocationCount,
    2610  VmaAllocation* pAllocations,
    2611  VmaAllocationInfo* pAllocationInfo);
    2612 
    2619 VkResult vmaAllocateMemoryForBuffer(
    2620  VmaAllocator allocator,
    2621  VkBuffer buffer,
    2622  const VmaAllocationCreateInfo* pCreateInfo,
    2623  VmaAllocation* pAllocation,
    2624  VmaAllocationInfo* pAllocationInfo);
    2625 
    2627 VkResult vmaAllocateMemoryForImage(
    2628  VmaAllocator allocator,
    2629  VkImage image,
    2630  const VmaAllocationCreateInfo* pCreateInfo,
    2631  VmaAllocation* pAllocation,
    2632  VmaAllocationInfo* pAllocationInfo);
    2633 
    2638 void vmaFreeMemory(
    2639  VmaAllocator allocator,
    2640  VmaAllocation allocation);
    2641 
    2652 void vmaFreeMemoryPages(
    2653  VmaAllocator allocator,
    2654  size_t allocationCount,
    2655  VmaAllocation* pAllocations);
    2656 
    2663 VkResult vmaResizeAllocation(
    2664  VmaAllocator allocator,
    2665  VmaAllocation allocation,
    2666  VkDeviceSize newSize);
    2667 
    2684 void vmaGetAllocationInfo(
    2685  VmaAllocator allocator,
    2686  VmaAllocation allocation,
    2687  VmaAllocationInfo* pAllocationInfo);
    2688 
    2703 VkBool32 vmaTouchAllocation(
    2704  VmaAllocator allocator,
    2705  VmaAllocation allocation);
    2706 
    2720 void vmaSetAllocationUserData(
    2721  VmaAllocator allocator,
    2722  VmaAllocation allocation,
    2723  void* pUserData);
    2724 
    2735 void vmaCreateLostAllocation(
    2736  VmaAllocator allocator,
    2737  VmaAllocation* pAllocation);
    2738 
    2773 VkResult vmaMapMemory(
    2774  VmaAllocator allocator,
    2775  VmaAllocation allocation,
    2776  void** ppData);
    2777 
    2782 void vmaUnmapMemory(
    2783  VmaAllocator allocator,
    2784  VmaAllocation allocation);
    2785 
    2802 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2803 
    2820 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2821 
    2838 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
    2839 
    2846 VK_DEFINE_HANDLE(VmaDefragmentationContext)
    2847 
    2848 typedef enum VmaDefragmentationFlagBits {
    2850  VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
    2851 } VmaDefragmentationFlagBits;
    2852 typedef VkFlags VmaDefragmentationFlags;
    2853 
    2858 typedef struct VmaDefragmentationInfo2 {
    2882  uint32_t poolCount;
    2903  VkDeviceSize maxCpuBytesToMove;
    2913  VkDeviceSize maxGpuBytesToMove;
    2927  VkCommandBuffer commandBuffer;
    2928 } VmaDefragmentationInfo2;
    2929 
    2934 typedef struct VmaDefragmentationInfo {
    2939  VkDeviceSize maxBytesToMove;
    2945 } VmaDefragmentationInfo;
    2946 
    2948 typedef struct VmaDefragmentationStats {
    2950  VkDeviceSize bytesMoved;
    2952  VkDeviceSize bytesFreed;
    2957 } VmaDefragmentationStats;
    2958 
    2988 VkResult vmaDefragmentationBegin(
    2989  VmaAllocator allocator,
    2990  const VmaDefragmentationInfo2* pInfo,
    2991  VmaDefragmentationStats* pStats,
    2992  VmaDefragmentationContext *pContext);
    2993 
    2999 VkResult vmaDefragmentationEnd(
    3000  VmaAllocator allocator,
    3001  VmaDefragmentationContext context);
    3002 
    3043 VkResult vmaDefragment(
    3044  VmaAllocator allocator,
    3045  VmaAllocation* pAllocations,
    3046  size_t allocationCount,
    3047  VkBool32* pAllocationsChanged,
    3048  const VmaDefragmentationInfo *pDefragmentationInfo,
    3049  VmaDefragmentationStats* pDefragmentationStats);
    3050 
    3063 VkResult vmaBindBufferMemory(
    3064  VmaAllocator allocator,
    3065  VmaAllocation allocation,
    3066  VkBuffer buffer);
    3067 
    3078 VkResult vmaBindBufferMemory2(
    3079  VmaAllocator allocator,
    3080  VmaAllocation allocation,
    3081  VkDeviceSize allocationLocalOffset,
    3082  VkBuffer buffer,
    3083  const void* pNext);
    3084 
    3097 VkResult vmaBindImageMemory(
    3098  VmaAllocator allocator,
    3099  VmaAllocation allocation,
    3100  VkImage image);
    3101 
    3112 VkResult vmaBindImageMemory2(
    3113  VmaAllocator allocator,
    3114  VmaAllocation allocation,
    3115  VkDeviceSize allocationLocalOffset,
    3116  VkImage image,
    3117  const void* pNext);
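
// Usage sketch (illustrative): with allocationLocalOffset = 0 and pNext = null
// this is equivalent to vmaBindImageMemory(), but an extension struct such as
// VkBindImageMemoryDeviceGroupInfo can be chained through pNext when the
// allocator was created with VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT.
//
//   VkResult res = vmaBindImageMemory2(allocator, allocation, 0, image, nullptr);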
    3118 
    3145 VkResult vmaCreateBuffer(
    3146  VmaAllocator allocator,
    3147  const VkBufferCreateInfo* pBufferCreateInfo,
    3148  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    3149  VkBuffer* pBuffer,
    3150  VmaAllocation* pAllocation,
    3151  VmaAllocationInfo* pAllocationInfo);
    3152 
    3164 void vmaDestroyBuffer(
    3165  VmaAllocator allocator,
    3166  VkBuffer buffer,
    3167  VmaAllocation allocation);
    3168 
    3170 VkResult vmaCreateImage(
    3171  VmaAllocator allocator,
    3172  const VkImageCreateInfo* pImageCreateInfo,
    3173  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    3174  VkImage* pImage,
    3175  VmaAllocation* pAllocation,
    3176  VmaAllocationInfo* pAllocationInfo);
    3177 
    3189 void vmaDestroyImage(
    3190  VmaAllocator allocator,
    3191  VkImage image,
    3192  VmaAllocation allocation);
    3193 
    3194 #ifdef __cplusplus
    3195 }
    3196 #endif
    3197 
    3198 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    3199 
    3200 // For Visual Studio IntelliSense.
    3201 #if defined(__cplusplus) && defined(__INTELLISENSE__)
    3202 #define VMA_IMPLEMENTATION
    3203 #endif
    3204 
    3205 #ifdef VMA_IMPLEMENTATION
    3206 #undef VMA_IMPLEMENTATION
    3207 
    3208 #include <cstdint>
    3209 #include <cstdlib>
    3210 #include <cstring>
    3211 
    3212 /*******************************************************************************
    3213 CONFIGURATION SECTION
    3214 
    3215 Define some of these macros before each #include of this header, or change them
    3216 here if you need behavior other than the default for your environment.
    3217 */
    3218 
    3219 /*
    3220 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    3221 internally, like:
    3222 
    3223  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    3224 
    3225 Define to 0 if you are going to provide your own pointers to Vulkan functions via
    3226 VmaAllocatorCreateInfo::pVulkanFunctions.
    3227 */
    3228 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    3229 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    3230 #endif
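
// Example (a minimal sketch for VMA_STATIC_VULKAN_FUNCTIONS defined to 0; the
// pointers are assumed to come from your own loader, e.g. vkGetDeviceProcAddr
// or a library such as volk):
//
//   VmaVulkanFunctions funcs = {};
//   funcs.vkGetPhysicalDeviceProperties = myLoadedVkGetPhysicalDeviceProperties;
//   funcs.vkAllocateMemory = myLoadedVkAllocateMemory;
//   // ... fill every remaining member the same way ...
//   VmaAllocatorCreateInfo allocatorInfo = {};
//   allocatorInfo.pVulkanFunctions = &funcs;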
    3231 
    3232 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    3233 //#define VMA_USE_STL_CONTAINERS 1
    3234 
    3235 /* Set this macro to 1 to make the library include and use STL containers:
    3236 std::pair, std::vector, std::list, std::unordered_map.
    3237 
    3238 Set it to 0 or leave it undefined to make the library use its own implementation
    3239 of the containers.
    3240 */
    3241 #if VMA_USE_STL_CONTAINERS
    3242  #define VMA_USE_STL_VECTOR 1
    3243  #define VMA_USE_STL_UNORDERED_MAP 1
    3244  #define VMA_USE_STL_LIST 1
    3245 #endif
    3246 
    3247 #ifndef VMA_USE_STL_SHARED_MUTEX
    3248  // Compiler conforms to C++17.
    3249  #if __cplusplus >= 201703L
    3250  #define VMA_USE_STL_SHARED_MUTEX 1
    3251  // Visual Studio defines __cplusplus properly only when passed the additional parameter /Zc:__cplusplus.
    3252  // Otherwise it's always 199711L, even though shared_mutex has worked since Visual Studio 2015 Update 2.
    3253  // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
    3254  #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
    3255  #define VMA_USE_STL_SHARED_MUTEX 1
    3256  #else
    3257  #define VMA_USE_STL_SHARED_MUTEX 0
    3258  #endif
    3259 #endif
    3260 
    3261 /*
    3262 THESE INCLUDES ARE NOT ENABLED BY DEFAULT.
    3263 The library has its own container implementation.
    3264 */
    3265 #if VMA_USE_STL_VECTOR
    3266  #include <vector>
    3267 #endif
    3268 
    3269 #if VMA_USE_STL_UNORDERED_MAP
    3270  #include <unordered_map>
    3271 #endif
    3272 
    3273 #if VMA_USE_STL_LIST
    3274  #include <list>
    3275 #endif
    3276 
    3277 /*
    3278 The following headers are used in this CONFIGURATION section only, so feel free
    3279 to remove them if not needed.
    3280 */
    3281 #include <cassert> // for assert
    3282 #include <algorithm> // for min, max
    3283 #include <mutex>
    3284 
    3285 #ifndef VMA_NULL
    3286  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    3287  #define VMA_NULL nullptr
    3288 #endif
    3289 
    3290 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
    3291 #include <cstdlib>
    3292 void *aligned_alloc(size_t alignment, size_t size)
    3293 {
    3294  // alignment must be >= sizeof(void*)
    3295  if(alignment < sizeof(void*))
    3296  {
    3297  alignment = sizeof(void*);
    3298  }
    3299 
    3300  return memalign(alignment, size);
    3301 }
    3302 #elif defined(__APPLE__) || defined(__ANDROID__)
    3303 #include <cstdlib>
    3304 void *aligned_alloc(size_t alignment, size_t size)
    3305 {
    3306  // alignment must be >= sizeof(void*)
    3307  if(alignment < sizeof(void*))
    3308  {
    3309  alignment = sizeof(void*);
    3310  }
    3311 
    3312  void *pointer;
    3313  if(posix_memalign(&pointer, alignment, size) == 0)
    3314  return pointer;
    3315  return VMA_NULL;
    3316 }
    3317 #endif
    3318 
    3319 // If your compiler is not compatible with C++11 and definition of
    3320 // aligned_alloc() function is missing, uncommenting the following line may help:
    3321 
    3322 //#include <malloc.h>
    3323 
    3324 // Normal assert to check for programmer's errors, especially in Debug configuration.
    3325 #ifndef VMA_ASSERT
    3326  #ifdef _DEBUG
    3327  #define VMA_ASSERT(expr) assert(expr)
    3328  #else
    3329  #define VMA_ASSERT(expr)
    3330  #endif
    3331 #endif
    3332 
    3333 // Assert that will be called very often, like inside data structures e.g. operator[].
    3334 // Making it non-empty can make program slow.
    3335 #ifndef VMA_HEAVY_ASSERT
    3336  #ifdef _DEBUG
    3337  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    3338  #else
    3339  #define VMA_HEAVY_ASSERT(expr)
    3340  #endif
    3341 #endif
    3342 
    3343 #ifndef VMA_ALIGN_OF
    3344  #define VMA_ALIGN_OF(type) (__alignof(type))
    3345 #endif
    3346 
    3347 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    3348  #if defined(_WIN32)
    3349  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    3350  #else
    3351  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    3352  #endif
    3353 #endif
    3354 
    3355 #ifndef VMA_SYSTEM_FREE
    3356  #if defined(_WIN32)
    3357  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    3358  #else
    3359  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    3360  #endif
    3361 #endif
    3362 
    3363 #ifndef VMA_MIN
    3364  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    3365 #endif
    3366 
    3367 #ifndef VMA_MAX
    3368  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    3369 #endif
    3370 
    3371 #ifndef VMA_SWAP
    3372  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    3373 #endif
    3374 
    3375 #ifndef VMA_SORT
    3376  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    3377 #endif
    3378 
    3379 #ifndef VMA_DEBUG_LOG
    3380  #define VMA_DEBUG_LOG(format, ...)
    3381  /*
    3382  #define VMA_DEBUG_LOG(format, ...) do { \
    3383  printf(format, __VA_ARGS__); \
    3384  printf("\n"); \
    3385  } while(false)
    3386  */
    3387 #endif
    3388 
    3389 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    3390 #if VMA_STATS_STRING_ENABLED
    3391  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    3392  {
    3393  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    3394  }
    3395  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    3396  {
    3397  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    3398  }
    3399  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    3400  {
    3401  snprintf(outStr, strLen, "%p", ptr);
    3402  }
    3403 #endif
    3404 
    3405 #ifndef VMA_MUTEX
    3406  class VmaMutex
    3407  {
    3408  public:
    3409  void Lock() { m_Mutex.lock(); }
    3410  void Unlock() { m_Mutex.unlock(); }
    3411  private:
    3412  std::mutex m_Mutex;
    3413  };
    3414  #define VMA_MUTEX VmaMutex
    3415 #endif
    3416 
    3417 // Read-write mutex, where "read" is shared access, "write" is exclusive access.
    3418 #ifndef VMA_RW_MUTEX
    3419  #if VMA_USE_STL_SHARED_MUTEX
    3420  // Use std::shared_mutex from C++17.
    3421  #include <shared_mutex>
    3422  class VmaRWMutex
    3423  {
    3424  public:
    3425  void LockRead() { m_Mutex.lock_shared(); }
    3426  void UnlockRead() { m_Mutex.unlock_shared(); }
    3427  void LockWrite() { m_Mutex.lock(); }
    3428  void UnlockWrite() { m_Mutex.unlock(); }
    3429  private:
    3430  std::shared_mutex m_Mutex;
    3431  };
    3432  #define VMA_RW_MUTEX VmaRWMutex
    3433  #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
    3434  // Use SRWLOCK from WinAPI.
    3435  // Minimum supported client = Windows Vista, server = Windows Server 2008.
    3436  class VmaRWMutex
    3437  {
    3438  public:
    3439  VmaRWMutex() { InitializeSRWLock(&m_Lock); }
    3440  void LockRead() { AcquireSRWLockShared(&m_Lock); }
    3441  void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
    3442  void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
    3443  void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
    3444  private:
    3445  SRWLOCK m_Lock;
    3446  };
    3447  #define VMA_RW_MUTEX VmaRWMutex
    3448  #else
    3449  // Less efficient fallback: Use normal mutex.
    3450  class VmaRWMutex
    3451  {
    3452  public:
    3453  void LockRead() { m_Mutex.Lock(); }
    3454  void UnlockRead() { m_Mutex.Unlock(); }
    3455  void LockWrite() { m_Mutex.Lock(); }
    3456  void UnlockWrite() { m_Mutex.Unlock(); }
    3457  private:
    3458  VMA_MUTEX m_Mutex;
    3459  };
    3460  #define VMA_RW_MUTEX VmaRWMutex
    3461  #endif // #if VMA_USE_STL_SHARED_MUTEX
    3462 #endif // #ifndef VMA_RW_MUTEX
    3463 
    3464 /*
    3465 If providing your own implementation, you need to implement a subset of std::atomic:
    3466 
    3467 - Constructor(uint32_t desired)
    3468 - uint32_t load() const
    3469 - void store(uint32_t desired)
    3470 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    3471 */
    3472 #ifndef VMA_ATOMIC_UINT32
    3473  #include <atomic>
    3474  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    3475 #endif
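
// Example (a sketch of a conforming replacement built on GCC/Clang atomic
// builtins; define VMA_ATOMIC_UINT32 to this type before the implementation
// is compiled):
//
//   class MyAtomicUint32
//   {
//   public:
//       MyAtomicUint32(uint32_t desired) : m_Value(desired) {}
//       uint32_t load() const { return __atomic_load_n(&m_Value, __ATOMIC_SEQ_CST); }
//       void store(uint32_t desired) { __atomic_store_n(&m_Value, desired, __ATOMIC_SEQ_CST); }
//       bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
//       {
//           return __atomic_compare_exchange_n(&m_Value, &expected, desired,
//               /*weak=*/true, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
//       }
//   private:
//       uint32_t m_Value;
//   };
//   #define VMA_ATOMIC_UINT32 MyAtomicUint32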
    3476 
    3477 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    3478 
    3482  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    3483 #endif
    3484 
    3485 #ifndef VMA_DEBUG_ALIGNMENT
    3486 
    3490  #define VMA_DEBUG_ALIGNMENT (1)
    3491 #endif
    3492 
    3493 #ifndef VMA_DEBUG_MARGIN
    3494 
    3498  #define VMA_DEBUG_MARGIN (0)
    3499 #endif
    3500 
    3501 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    3502 
    3506  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
    3507 #endif
    3508 
    3509 #ifndef VMA_DEBUG_DETECT_CORRUPTION
    3510 
    3515  #define VMA_DEBUG_DETECT_CORRUPTION (0)
    3516 #endif
    3517 
    3518 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    3519 
    3523  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    3524 #endif
    3525 
    3526 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    3527 
    3531  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    3532 #endif
    3533 
    3534 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    3535  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
    3537 #endif
    3538 
    3539 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    3540  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
    3542 #endif
    3543 
    3544 #ifndef VMA_CLASS_NO_COPY
    3545  #define VMA_CLASS_NO_COPY(className) \
    3546  private: \
    3547  className(const className&) = delete; \
    3548  className& operator=(const className&) = delete;
    3549 #endif
    3550 
    3551 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    3552 
    3553 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
    3554 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
    3555 
    3556 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
    3557 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
    3558 
    3559 /*******************************************************************************
    3560 END OF CONFIGURATION
    3561 */
    3562 
    3563 static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
    3564 
    3565 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    3566  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    3567 
    3568 // Returns number of bits set to 1 in (v).
    3569 static inline uint32_t VmaCountBitsSet(uint32_t v)
    3570 {
    3571  uint32_t c = v - ((v >> 1) & 0x55555555);
    3572  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    3573  c = ((c >> 4) + c) & 0x0F0F0F0F;
    3574  c = ((c >> 8) + c) & 0x00FF00FF;
    3575  c = ((c >> 16) + c) & 0x0000FFFF;
    3576  return c;
    3577 }
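
// Worked example (illustrative): VmaCountBitsSet(0x0000000Bu) == 3, since 0xB is binary 1011.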
    3578 
    3579 // Aligns given value up to the nearest multiple of align. For example: VmaAlignUp(11, 8) = 16.
    3580 // Use types like uint32_t, uint64_t as T.
    3581 template <typename T>
    3582 static inline T VmaAlignUp(T val, T align)
    3583 {
    3584  return (val + align - 1) / align * align;
    3585 }
    3586 // Aligns given value down to the nearest multiple of align. For example: VmaAlignDown(11, 8) = 8.
    3587 // Use types like uint32_t, uint64_t as T.
    3588 template <typename T>
    3589 static inline T VmaAlignDown(T val, T align)
    3590 {
    3591  return val / align * align;
    3592 }
    3593 
    3594 // Division with mathematical rounding to nearest number.
    3595 template <typename T>
    3596 static inline T VmaRoundDiv(T x, T y)
    3597 {
    3598  return (x + (y / (T)2)) / y;
    3599 }
    3600 
    3601 /*
    3602 Returns true if given number is a power of two.
    3603 T must be an unsigned integer type, or a signed integer type holding a nonnegative value.
    3604 For 0 returns true.
    3605 */
    3606 template <typename T>
    3607 inline bool VmaIsPow2(T x)
    3608 {
    3609  return (x & (x-1)) == 0;
    3610 }
    3611 
    3612 // Returns smallest power of 2 greater or equal to v.
    3613 static inline uint32_t VmaNextPow2(uint32_t v)
    3614 {
    3615  v--;
    3616  v |= v >> 1;
    3617  v |= v >> 2;
    3618  v |= v >> 4;
    3619  v |= v >> 8;
    3620  v |= v >> 16;
    3621  v++;
    3622  return v;
    3623 }
    3624 static inline uint64_t VmaNextPow2(uint64_t v)
    3625 {
    3626  v--;
    3627  v |= v >> 1;
    3628  v |= v >> 2;
    3629  v |= v >> 4;
    3630  v |= v >> 8;
    3631  v |= v >> 16;
    3632  v |= v >> 32;
    3633  v++;
    3634  return v;
    3635 }
    3636 
    3637 // Returns largest power of 2 less or equal to v.
    3638 static inline uint32_t VmaPrevPow2(uint32_t v)
    3639 {
    3640  v |= v >> 1;
    3641  v |= v >> 2;
    3642  v |= v >> 4;
    3643  v |= v >> 8;
    3644  v |= v >> 16;
    3645  v = v ^ (v >> 1);
    3646  return v;
    3647 }
    3648 static inline uint64_t VmaPrevPow2(uint64_t v)
    3649 {
    3650  v |= v >> 1;
    3651  v |= v >> 2;
    3652  v |= v >> 4;
    3653  v |= v >> 8;
    3654  v |= v >> 16;
    3655  v |= v >> 32;
    3656  v = v ^ (v >> 1);
    3657  return v;
    3658 }
    3659 
    3660 static inline bool VmaStrIsEmpty(const char* pStr)
    3661 {
    3662  return pStr == VMA_NULL || *pStr == '\0';
    3663 }
    3664 
    3665 #if VMA_STATS_STRING_ENABLED
    3666 
    3667 static const char* VmaAlgorithmToStr(uint32_t algorithm)
    3668 {
    3669  switch(algorithm)
    3670  {
    3671  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
    3672  return "Linear";
    3673  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
    3674  return "Buddy";
    3675  case 0:
    3676  return "Default";
    3677  default:
    3678  VMA_ASSERT(0);
    3679  return "";
    3680  }
    3681 }
    3682 
    3683 #endif // #if VMA_STATS_STRING_ENABLED
    3684 
    3685 #ifndef VMA_SORT
    3686 
    3687 template<typename Iterator, typename Compare>
    3688 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
    3689 {
    3690  Iterator centerValue = end; --centerValue;
    3691  Iterator insertIndex = beg;
    3692  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    3693  {
    3694  if(cmp(*memTypeIndex, *centerValue))
    3695  {
    3696  if(insertIndex != memTypeIndex)
    3697  {
    3698  VMA_SWAP(*memTypeIndex, *insertIndex);
    3699  }
    3700  ++insertIndex;
    3701  }
    3702  }
    3703  if(insertIndex != centerValue)
    3704  {
    3705  VMA_SWAP(*insertIndex, *centerValue);
    3706  }
    3707  return insertIndex;
    3708 }
    3709 
    3710 template<typename Iterator, typename Compare>
    3711 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
    3712 {
    3713  if(beg < end)
    3714  {
    3715  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
    3716  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
    3717  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    3718  }
    3719 }
    3720 
    3721 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    3722 
    3723 #endif // #ifndef VMA_SORT
    3724 
    3725 /*
    3726 Returns true if two memory blocks occupy overlapping pages.
    3727 ResourceA must be at a lower memory offset than ResourceB.
    3728 
    3729 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
    3730 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
    3731 */
    3732 static inline bool VmaBlocksOnSamePage(
    3733  VkDeviceSize resourceAOffset,
    3734  VkDeviceSize resourceASize,
    3735  VkDeviceSize resourceBOffset,
    3736  VkDeviceSize pageSize)
    3737 {
    3738  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    3739  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    3740  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    3741  VkDeviceSize resourceBStart = resourceBOffset;
    3742  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    3743  return resourceAEndPage == resourceBStartPage;
    3744 }
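
// Worked example (illustrative): with pageSize = 4096, a resource at offset 0
// and size 4000 ends inside page 0, while a resource starting at offset 4096
// starts in page 1, so the function returns false. Had the second resource
// started at offset 4000, both would touch page 0 and it would return true.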
    3745 
    3746 enum VmaSuballocationType
    3747 {
    3748  VMA_SUBALLOCATION_TYPE_FREE = 0,
    3749  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    3750  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    3751  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    3752  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    3753  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    3754  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
    3755 };
    3756 
    3757 /*
    3758 Returns true if given suballocation types could conflict and must respect
    3759 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
    3760 or linear image and another one is optimal image. If type is unknown, behave
    3761 conservatively.
    3762 */
    3763 static inline bool VmaIsBufferImageGranularityConflict(
    3764  VmaSuballocationType suballocType1,
    3765  VmaSuballocationType suballocType2)
    3766 {
    3767  if(suballocType1 > suballocType2)
    3768  {
    3769  VMA_SWAP(suballocType1, suballocType2);
    3770  }
    3771 
    3772  switch(suballocType1)
    3773  {
    3774  case VMA_SUBALLOCATION_TYPE_FREE:
    3775  return false;
    3776  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
    3777  return true;
    3778  case VMA_SUBALLOCATION_TYPE_BUFFER:
    3779  return
    3780  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3781  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3782  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
    3783  return
    3784  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3785  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
    3786  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3787  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
    3788  return
    3789  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3790  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
    3791  return false;
    3792  default:
    3793  VMA_ASSERT(0);
    3794  return true;
    3795  }
    3796 }
    3797 
    3798 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
    3799 {
    3800 #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
    3801  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    3802  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3803  for(size_t i = 0; i < numberCount; ++i, ++pDst)
    3804  {
    3805  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    3806  }
    3807 #else
    3808  // no-op
    3809 #endif
    3810 }
    3811 
    3812 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
    3813 {
    3814 #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
    3815  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    3816  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3817  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    3818  {
    3819  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
    3820  {
    3821  return false;
    3822  }
    3823  }
    3824 #endif
    3825  return true;
    3826 }
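
// Example (a sketch): enabling the margin-based corruption detection that the
// two functions above implement requires defining both macros before the
// implementation is compiled, and then checking periodically:
//
//   #define VMA_DEBUG_MARGIN 16
//   #define VMA_DEBUG_DETECT_CORRUPTION 1
//   // ... later, at runtime:
//   VkResult res = vmaCheckCorruption(allocator, UINT32_MAX); // Check all memory types.
//   // VK_ERROR_VALIDATION_FAILED_EXT indicates an overwritten margin.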
    3827 
    3828 /*
    3829 Fills structure with parameters of an example buffer to be used for transfers
    3830 during GPU memory defragmentation.
    3831 */
    3832 static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
    3833 {
    3834  memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
    3835  outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    3836  outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    3837  outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
    3838 }
    3839 
    3840 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
    3841 struct VmaMutexLock
    3842 {
    3843  VMA_CLASS_NO_COPY(VmaMutexLock)
    3844 public:
    3845  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
    3846  m_pMutex(useMutex ? &mutex : VMA_NULL)
    3847  { if(m_pMutex) { m_pMutex->Lock(); } }
    3848  ~VmaMutexLock()
    3849  { if(m_pMutex) { m_pMutex->Unlock(); } }
    3850 private:
    3851  VMA_MUTEX* m_pMutex;
    3852 };
    3853 
    3854 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
    3855 struct VmaMutexLockRead
    3856 {
    3857  VMA_CLASS_NO_COPY(VmaMutexLockRead)
    3858 public:
    3859  VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
    3860  m_pMutex(useMutex ? &mutex : VMA_NULL)
    3861  { if(m_pMutex) { m_pMutex->LockRead(); } }
    3862  ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
    3863 private:
    3864  VMA_RW_MUTEX* m_pMutex;
    3865 };
    3866 
    3867 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
    3868 struct VmaMutexLockWrite
    3869 {
    3870  VMA_CLASS_NO_COPY(VmaMutexLockWrite)
    3871 public:
    3872  VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
    3873  m_pMutex(useMutex ? &mutex : VMA_NULL)
    3874  { if(m_pMutex) { m_pMutex->LockWrite(); } }
    3875  ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
    3876 private:
    3877  VMA_RW_MUTEX* m_pMutex;
    3878 };
    3879 
    3880 #if VMA_DEBUG_GLOBAL_MUTEX
    3881  static VMA_MUTEX gDebugGlobalMutex;
    3882  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    3883 #else
    3884  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    3885 #endif
    3886 
    3887 // Minimum size of a free suballocation to register it in the free suballocation collection.
    3888 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    3889 
    3890 /*
    3891 Performs binary search and returns iterator to the first element that is greater
    3892 than or equal to (key), according to comparison (cmp).
    3893 
    3894 Cmp should return true if its first argument is less than its second argument.
    3895 
    3896 Returned iterator points to the found element if it is present in the collection,
    3897 or to the place where a new element with value (key) should be inserted.
    3898 */
    3899 template <typename CmpLess, typename IterT, typename KeyT>
    3900 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, const CmpLess& cmp)
    3901 {
    3902  size_t down = 0, up = (end - beg);
    3903  while(down < up)
    3904  {
    3905  const size_t mid = (down + up) / 2;
    3906  if(cmp(*(beg+mid), key))
    3907  {
    3908  down = mid + 1;
    3909  }
    3910  else
    3911  {
    3912  up = mid;
    3913  }
    3914  }
    3915  return beg + down;
    3916 }
    3917 
    3918 template<typename CmpLess, typename IterT, typename KeyT>
    3919 IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
    3920 {
    3921  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
    3922  beg, end, value, cmp);
    3923  if(it == end ||
    3924  (!cmp(*it, value) && !cmp(value, *it)))
    3925  {
    3926  return it;
    3927  }
    3928  return end;
    3929 }
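// Behavior of the two helpers above on an example (illustrative only): for a
// sorted array {1, 3, 3, 7} and cmp implementing operator<,
//
//     VmaBinaryFindFirstNotLess(beg, end, 3, cmp) // -> beg + 1, first element >= 3
//     VmaBinaryFindFirstNotLess(beg, end, 4, cmp) // -> beg + 3, the insertion point for 4
//     VmaBinaryFindSorted(beg, end, 3, cmp)       // -> beg + 1, exact match found
//     VmaBinaryFindSorted(beg, end, 4, cmp)       // -> end, 4 is absent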
    3930 
    3931 /*
    3932 Returns true if all pointers in the array are non-null and unique.
    3933 Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
    3934 T must be a pointer type, e.g. VmaAllocation, VmaPool.
    3935 */
    3936 template<typename T>
    3937 static bool VmaValidatePointerArray(uint32_t count, const T* arr)
    3938 {
    3939  for(uint32_t i = 0; i < count; ++i)
    3940  {
    3941  const T iPtr = arr[i];
    3942  if(iPtr == VMA_NULL)
    3943  {
    3944  return false;
    3945  }
    3946  for(uint32_t j = i + 1; j < count; ++j)
    3947  {
    3948  if(iPtr == arr[j])
    3949  {
    3950  return false;
    3951  }
    3952  }
    3953  }
    3954  return true;
    3955 }
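// Typical use (illustrative only): validating an array of user-provided handles
// in debug configurations, where the O(n^2) cost is acceptable:
//
//     VMA_HEAVY_ASSERT(VmaValidatePointerArray(allocationCount, pAllocations));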
    3956 
    3957 ////////////////////////////////////////////////////////////////////////////////
    3958 // Memory allocation
    3959 
    3960 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
    3961 {
    3962  if((pAllocationCallbacks != VMA_NULL) &&
    3963  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    3964  {
    3965  return (*pAllocationCallbacks->pfnAllocation)(
    3966  pAllocationCallbacks->pUserData,
    3967  size,
    3968  alignment,
    3969  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    3970  }
    3971  else
    3972  {
    3973  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    3974  }
    3975 }
    3976 
    3977 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
    3978 {
    3979  if((pAllocationCallbacks != VMA_NULL) &&
    3980  (pAllocationCallbacks->pfnFree != VMA_NULL))
    3981  {
    3982  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    3983  }
    3984  else
    3985  {
    3986  VMA_SYSTEM_FREE(ptr);
    3987  }
    3988 }
    3989 
    3990 template<typename T>
    3991 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
    3992 {
    3993  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
    3994 }
    3995 
    3996 template<typename T>
    3997 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
    3998 {
    3999  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
    4000 }
    4001 
    4002 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
    4003 
    4004 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    4005 
    4006 template<typename T>
    4007 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
    4008 {
    4009  ptr->~T();
    4010  VmaFree(pAllocationCallbacks, ptr);
    4011 }
    4012 
    4013 template<typename T>
    4014 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    4015 {
    4016  if(ptr != VMA_NULL)
    4017  {
    4018  for(size_t i = count; i--; )
    4019  {
    4020  ptr[i].~T();
    4021  }
    4022  VmaFree(pAllocationCallbacks, ptr);
    4023  }
    4024 }
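// Note on the helpers above (editorial sketch): vma_new placement-constructs one
// object in memory obtained through the VkAllocationCallbacks, while
// vma_new_array only allocates room for (count) elements and placement-constructs
// the first one - acceptable here because these macros are used with POD-like
// types. vma_delete_array, in contrast, destroys all (count) elements. Allocation
// and deletion must always be paired with the same callbacks and count:
//
//     MyPodType* p = vma_new(pAllocationCallbacks, MyPodType)(); // hypothetical type
//     vma_delete(pAllocationCallbacks, p);                       // ~MyPodType() + VmaFree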
    4025 
    4026 // STL-compatible allocator.
    4027 template<typename T>
    4028 class VmaStlAllocator
    4029 {
    4030 public:
    4031  const VkAllocationCallbacks* const m_pCallbacks;
    4032  typedef T value_type;
    4033 
    4034  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    4035  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
    4036 
    4037  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    4038  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
    4039 
    4040  template<typename U>
    4041  bool operator==(const VmaStlAllocator<U>& rhs) const
    4042  {
    4043  return m_pCallbacks == rhs.m_pCallbacks;
    4044  }
    4045  template<typename U>
    4046  bool operator!=(const VmaStlAllocator<U>& rhs) const
    4047  {
    4048  return m_pCallbacks != rhs.m_pCallbacks;
    4049  }
    4050 
    4051  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
    4052 };
    4053 
    4054 #if VMA_USE_STL_VECTOR
    4055 
    4056 #define VmaVector std::vector
    4057 
    4058 template<typename T, typename allocatorT>
    4059 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
    4060 {
    4061  vec.insert(vec.begin() + index, item);
    4062 }
    4063 
    4064 template<typename T, typename allocatorT>
    4065 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
    4066 {
    4067  vec.erase(vec.begin() + index);
    4068 }
    4069 
    4070 #else // #if VMA_USE_STL_VECTOR
    4071 
    4072 /* Class with an interface compatible with a subset of std::vector.
    4073 T must be POD because constructors and destructors are not called and memcpy is
    4074 used for these objects. */
    4075 template<typename T, typename AllocatorT>
    4076 class VmaVector
    4077 {
    4078 public:
    4079  typedef T value_type;
    4080 
    4081  VmaVector(const AllocatorT& allocator) :
    4082  m_Allocator(allocator),
    4083  m_pArray(VMA_NULL),
    4084  m_Count(0),
    4085  m_Capacity(0)
    4086  {
    4087  }
    4088 
    4089  VmaVector(size_t count, const AllocatorT& allocator) :
    4090  m_Allocator(allocator),
    4091  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    4092  m_Count(count),
    4093  m_Capacity(count)
    4094  {
    4095  }
    4096 
    4097  VmaVector(const VmaVector<T, AllocatorT>& src) :
    4098  m_Allocator(src.m_Allocator),
    4099  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    4100  m_Count(src.m_Count),
    4101  m_Capacity(src.m_Count)
    4102  {
    4103  if(m_Count != 0)
    4104  {
    4105  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    4106  }
    4107  }
    4108 
    4109  ~VmaVector()
    4110  {
    4111  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    4112  }
    4113 
    4114  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    4115  {
    4116  if(&rhs != this)
    4117  {
    4118  resize(rhs.m_Count);
    4119  if(m_Count != 0)
    4120  {
    4121  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    4122  }
    4123  }
    4124  return *this;
    4125  }
    4126 
    4127  bool empty() const { return m_Count == 0; }
    4128  size_t size() const { return m_Count; }
    4129  T* data() { return m_pArray; }
    4130  const T* data() const { return m_pArray; }
    4131 
    4132  T& operator[](size_t index)
    4133  {
    4134  VMA_HEAVY_ASSERT(index < m_Count);
    4135  return m_pArray[index];
    4136  }
    4137  const T& operator[](size_t index) const
    4138  {
    4139  VMA_HEAVY_ASSERT(index < m_Count);
    4140  return m_pArray[index];
    4141  }
    4142 
    4143  T& front()
    4144  {
    4145  VMA_HEAVY_ASSERT(m_Count > 0);
    4146  return m_pArray[0];
    4147  }
    4148  const T& front() const
    4149  {
    4150  VMA_HEAVY_ASSERT(m_Count > 0);
    4151  return m_pArray[0];
    4152  }
    4153  T& back()
    4154  {
    4155  VMA_HEAVY_ASSERT(m_Count > 0);
    4156  return m_pArray[m_Count - 1];
    4157  }
    4158  const T& back() const
    4159  {
    4160  VMA_HEAVY_ASSERT(m_Count > 0);
    4161  return m_pArray[m_Count - 1];
    4162  }
    4163 
    4164  void reserve(size_t newCapacity, bool freeMemory = false)
    4165  {
    4166  newCapacity = VMA_MAX(newCapacity, m_Count);
    4167 
    4168  if((newCapacity < m_Capacity) && !freeMemory)
    4169  {
    4170  newCapacity = m_Capacity;
    4171  }
    4172 
    4173  if(newCapacity != m_Capacity)
    4174  {
    4175  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    4176  if(m_Count != 0)
    4177  {
    4178  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    4179  }
    4180  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    4181  m_Capacity = newCapacity;
    4182  m_pArray = newArray;
    4183  }
    4184  }
    4185 
    4186  void resize(size_t newCount, bool freeMemory = false)
    4187  {
    4188  size_t newCapacity = m_Capacity;
    4189  if(newCount > m_Capacity)
    4190  {
    4191  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    4192  }
    4193  else if(freeMemory)
    4194  {
    4195  newCapacity = newCount;
    4196  }
    4197 
    4198  if(newCapacity != m_Capacity)
    4199  {
    4200  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    4201  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    4202  if(elementsToCopy != 0)
    4203  {
    4204  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    4205  }
    4206  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    4207  m_Capacity = newCapacity;
    4208  m_pArray = newArray;
    4209  }
    4210 
    4211  m_Count = newCount;
    4212  }
    4213 
    4214  void clear(bool freeMemory = false)
    4215  {
    4216  resize(0, freeMemory);
    4217  }
    4218 
    4219  void insert(size_t index, const T& src)
    4220  {
    4221  VMA_HEAVY_ASSERT(index <= m_Count);
    4222  const size_t oldCount = size();
    4223  resize(oldCount + 1);
    4224  if(index < oldCount)
    4225  {
    4226  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    4227  }
    4228  m_pArray[index] = src;
    4229  }
    4230 
    4231  void remove(size_t index)
    4232  {
    4233  VMA_HEAVY_ASSERT(index < m_Count);
    4234  const size_t oldCount = size();
    4235  if(index < oldCount - 1)
    4236  {
    4237  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    4238  }
    4239  resize(oldCount - 1);
    4240  }
    4241 
    4242  void push_back(const T& src)
    4243  {
    4244  const size_t newIndex = size();
    4245  resize(newIndex + 1);
    4246  m_pArray[newIndex] = src;
    4247  }
    4248 
    4249  void pop_back()
    4250  {
    4251  VMA_HEAVY_ASSERT(m_Count > 0);
    4252  resize(size() - 1);
    4253  }
    4254 
    4255  void push_front(const T& src)
    4256  {
    4257  insert(0, src);
    4258  }
    4259 
    4260  void pop_front()
    4261  {
    4262  VMA_HEAVY_ASSERT(m_Count > 0);
    4263  remove(0);
    4264  }
    4265 
    4266  typedef T* iterator;
    4267 
    4268  iterator begin() { return m_pArray; }
    4269  iterator end() { return m_pArray + m_Count; }
    4270 
    4271 private:
    4272  AllocatorT m_Allocator;
    4273  T* m_pArray;
    4274  size_t m_Count;
    4275  size_t m_Capacity;
    4276 };
    4277 
    4278 template<typename T, typename allocatorT>
    4279 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
    4280 {
    4281  vec.insert(index, item);
    4282 }
    4283 
    4284 template<typename T, typename allocatorT>
    4285 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
    4286 {
    4287  vec.remove(index);
    4288 }
    4289 
    4290 #endif // #if VMA_USE_STL_VECTOR
    4291 
    4292 template<typename CmpLess, typename VectorT>
    4293 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
    4294 {
    4295  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    4296  vector.data(),
    4297  vector.data() + vector.size(),
    4298  value,
    4299  CmpLess()) - vector.data();
    4300  VmaVectorInsert(vector, indexToInsert, value);
    4301  return indexToInsert;
    4302 }
    4303 
    4304 template<typename CmpLess, typename VectorT>
    4305 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
    4306 {
    4307  CmpLess comparator;
    4308  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
    4309  vector.begin(),
    4310  vector.end(),
    4311  value,
    4312  comparator);
    4313  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    4314  {
    4315  size_t indexToRemove = it - vector.begin();
    4316  VmaVectorRemove(vector, indexToRemove);
    4317  return true;
    4318  }
    4319  return false;
    4320 }
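// Usage sketch (illustrative only): these helpers keep a VmaVector ordered by a
// comparator so membership tests and removals can use binary search:
//
//     struct CmpLessSize { bool operator()(VkDeviceSize a, VkDeviceSize b) const { return a < b; } };
//     VmaStlAllocator<VkDeviceSize> alloc(pAllocationCallbacks);
//     VmaVector< VkDeviceSize, VmaStlAllocator<VkDeviceSize> > vec(alloc);
//     VmaVectorInsertSorted<CmpLessSize>(vec, (VkDeviceSize)42); // vector stays sorted
//     VmaVectorRemoveSorted<CmpLessSize>(vec, (VkDeviceSize)42); // true if found and removed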
    4321 
    4322 ////////////////////////////////////////////////////////////////////////////////
    4323 // class VmaPoolAllocator
    4324 
    4325 /*
    4326 Allocator for objects of type T using a list of arrays (pools) to speed up
    4327 allocation. The number of elements that can be allocated is not bounded, because
    4328 the allocator can create multiple blocks.
    4329 */
    4330 template<typename T>
    4331 class VmaPoolAllocator
    4332 {
    4333  VMA_CLASS_NO_COPY(VmaPoolAllocator)
    4334 public:
    4335  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
    4336  ~VmaPoolAllocator();
    4337  void Clear();
    4338  T* Alloc();
    4339  void Free(T* ptr);
    4340 
    4341 private:
    4342  union Item
    4343  {
    4344  uint32_t NextFreeIndex;
    4345  T Value;
    4346  };
    4347 
    4348  struct ItemBlock
    4349  {
    4350  Item* pItems;
    4351  uint32_t Capacity;
    4352  uint32_t FirstFreeIndex;
    4353  };
    4354 
    4355  const VkAllocationCallbacks* m_pAllocationCallbacks;
    4356  const uint32_t m_FirstBlockCapacity;
    4357  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
    4358 
    4359  ItemBlock& CreateNewBlock();
    4360 };
    4361 
    4362 template<typename T>
    4363 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
    4364  m_pAllocationCallbacks(pAllocationCallbacks),
    4365  m_FirstBlockCapacity(firstBlockCapacity),
    4366  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
    4367 {
    4368  VMA_ASSERT(m_FirstBlockCapacity > 1);
    4369 }
    4370 
    4371 template<typename T>
    4372 VmaPoolAllocator<T>::~VmaPoolAllocator()
    4373 {
    4374  Clear();
    4375 }
    4376 
    4377 template<typename T>
    4378 void VmaPoolAllocator<T>::Clear()
    4379 {
    4380  for(size_t i = m_ItemBlocks.size(); i--; )
    4381  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
    4382  m_ItemBlocks.clear();
    4383 }
    4384 
    4385 template<typename T>
    4386 T* VmaPoolAllocator<T>::Alloc()
    4387 {
    4388  for(size_t i = m_ItemBlocks.size(); i--; )
    4389  {
    4390  ItemBlock& block = m_ItemBlocks[i];
    4391  // This block has some free items: use the first one.
    4392  if(block.FirstFreeIndex != UINT32_MAX)
    4393  {
    4394  Item* const pItem = &block.pItems[block.FirstFreeIndex];
    4395  block.FirstFreeIndex = pItem->NextFreeIndex;
    4396  return &pItem->Value;
    4397  }
    4398  }
    4399 
    4400  // No block has a free item: create a new block and use it.
    4401  ItemBlock& newBlock = CreateNewBlock();
    4402  Item* const pItem = &newBlock.pItems[0];
    4403  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    4404  return &pItem->Value;
    4405 }
    4406 
    4407 template<typename T>
    4408 void VmaPoolAllocator<T>::Free(T* ptr)
    4409 {
    4410  // Search all memory blocks to find ptr.
    4411  for(size_t i = m_ItemBlocks.size(); i--; )
    4412  {
    4413  ItemBlock& block = m_ItemBlocks[i];
    4414 
    4415  // Casting to union.
    4416  Item* pItemPtr;
    4417  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
    4418 
    4419  // Check if pItemPtr is in address range of this block.
    4420  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
    4421  {
    4422  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
    4423  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
    4424  block.FirstFreeIndex = index;
    4425  return;
    4426  }
    4427  }
    4428  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
    4429 }
    4430 
    4431 template<typename T>
    4432 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
    4433 {
    4434  const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
    4435  m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;
    4436 
    4437  const ItemBlock newBlock = {
    4438  vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
    4439  newBlockCapacity,
    4440  0 };
    4441 
    4442  m_ItemBlocks.push_back(newBlock);
    4443 
    4444  // Set up the singly-linked list of all free items in this block.
    4445  for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
    4446  newBlock.pItems[i].NextFreeIndex = i + 1;
    4447  newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
    4448  return m_ItemBlocks.back();
    4449 }
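// Growth example for the block capacities above (illustrative only): with
// firstBlockCapacity = 32, successive blocks hold 32, 48, 72, 108, ... items
// (each 3/2 of the previous), so total capacity grows geometrically while
// already-allocated items never move:
//
//     VmaPoolAllocator<MyItem> pool(pAllocationCallbacks, 32); // MyItem: hypothetical POD
//     MyItem* item = pool.Alloc(); // returns raw storage, no constructor is run
//     pool.Free(item);             // returns the slot to its block's free list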
    4450 
    4451 ////////////////////////////////////////////////////////////////////////////////
    4452 // class VmaRawList, VmaList
    4453 
    4454 #if VMA_USE_STL_LIST
    4455 
    4456 #define VmaList std::list
    4457 
    4458 #else // #if VMA_USE_STL_LIST
    4459 
    4460 template<typename T>
    4461 struct VmaListItem
    4462 {
    4463  VmaListItem* pPrev;
    4464  VmaListItem* pNext;
    4465  T Value;
    4466 };
    4467 
    4468 // Doubly linked list.
    4469 template<typename T>
    4470 class VmaRawList
    4471 {
    4472  VMA_CLASS_NO_COPY(VmaRawList)
    4473 public:
    4474  typedef VmaListItem<T> ItemType;
    4475 
    4476  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    4477  ~VmaRawList();
    4478  void Clear();
    4479 
    4480  size_t GetCount() const { return m_Count; }
    4481  bool IsEmpty() const { return m_Count == 0; }
    4482 
    4483  ItemType* Front() { return m_pFront; }
    4484  const ItemType* Front() const { return m_pFront; }
    4485  ItemType* Back() { return m_pBack; }
    4486  const ItemType* Back() const { return m_pBack; }
    4487 
    4488  ItemType* PushBack();
    4489  ItemType* PushFront();
    4490  ItemType* PushBack(const T& value);
    4491  ItemType* PushFront(const T& value);
    4492  void PopBack();
    4493  void PopFront();
    4494 
    4495  // Item can be null - it means PushBack.
    4496  ItemType* InsertBefore(ItemType* pItem);
    4497  // Item can be null - it means PushFront.
    4498  ItemType* InsertAfter(ItemType* pItem);
    4499 
    4500  ItemType* InsertBefore(ItemType* pItem, const T& value);
    4501  ItemType* InsertAfter(ItemType* pItem, const T& value);
    4502 
    4503  void Remove(ItemType* pItem);
    4504 
    4505 private:
    4506  const VkAllocationCallbacks* const m_pAllocationCallbacks;
    4507  VmaPoolAllocator<ItemType> m_ItemAllocator;
    4508  ItemType* m_pFront;
    4509  ItemType* m_pBack;
    4510  size_t m_Count;
    4511 };
    4512 
    4513 template<typename T>
    4514 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    4515  m_pAllocationCallbacks(pAllocationCallbacks),
    4516  m_ItemAllocator(pAllocationCallbacks, 128),
    4517  m_pFront(VMA_NULL),
    4518  m_pBack(VMA_NULL),
    4519  m_Count(0)
    4520 {
    4521 }
    4522 
    4523 template<typename T>
    4524 VmaRawList<T>::~VmaRawList()
    4525 {
    4526  // Intentionally not calling Clear, because that would do unnecessary
    4527  // work returning all items to m_ItemAllocator as free.
    4528 }
    4529 
    4530 template<typename T>
    4531 void VmaRawList<T>::Clear()
    4532 {
    4533  if(IsEmpty() == false)
    4534  {
    4535  ItemType* pItem = m_pBack;
    4536  while(pItem != VMA_NULL)
    4537  {
    4538  ItemType* const pPrevItem = pItem->pPrev;
    4539  m_ItemAllocator.Free(pItem);
    4540  pItem = pPrevItem;
    4541  }
    4542  m_pFront = VMA_NULL;
    4543  m_pBack = VMA_NULL;
    4544  m_Count = 0;
    4545  }
    4546 }
    4547 
    4548 template<typename T>
    4549 VmaListItem<T>* VmaRawList<T>::PushBack()
    4550 {
    4551  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    4552  pNewItem->pNext = VMA_NULL;
    4553  if(IsEmpty())
    4554  {
    4555  pNewItem->pPrev = VMA_NULL;
    4556  m_pFront = pNewItem;
    4557  m_pBack = pNewItem;
    4558  m_Count = 1;
    4559  }
    4560  else
    4561  {
    4562  pNewItem->pPrev = m_pBack;
    4563  m_pBack->pNext = pNewItem;
    4564  m_pBack = pNewItem;
    4565  ++m_Count;
    4566  }
    4567  return pNewItem;
    4568 }
    4569 
    4570 template<typename T>
    4571 VmaListItem<T>* VmaRawList<T>::PushFront()
    4572 {
    4573  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    4574  pNewItem->pPrev = VMA_NULL;
    4575  if(IsEmpty())
    4576  {
    4577  pNewItem->pNext = VMA_NULL;
    4578  m_pFront = pNewItem;
    4579  m_pBack = pNewItem;
    4580  m_Count = 1;
    4581  }
    4582  else
    4583  {
    4584  pNewItem->pNext = m_pFront;
    4585  m_pFront->pPrev = pNewItem;
    4586  m_pFront = pNewItem;
    4587  ++m_Count;
    4588  }
    4589  return pNewItem;
    4590 }
    4591 
    4592 template<typename T>
    4593 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
    4594 {
    4595  ItemType* const pNewItem = PushBack();
    4596  pNewItem->Value = value;
    4597  return pNewItem;
    4598 }
    4599 
    4600 template<typename T>
    4601 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
    4602 {
    4603  ItemType* const pNewItem = PushFront();
    4604  pNewItem->Value = value;
    4605  return pNewItem;
    4606 }
    4607 
    4608 template<typename T>
    4609 void VmaRawList<T>::PopBack()
    4610 {
    4611  VMA_HEAVY_ASSERT(m_Count > 0);
    4612  ItemType* const pBackItem = m_pBack;
    4613  ItemType* const pPrevItem = pBackItem->pPrev;
    4614  if(pPrevItem != VMA_NULL)
    4615  {
    4616  pPrevItem->pNext = VMA_NULL;
    4617  }
    4618  m_pBack = pPrevItem;
    4619  m_ItemAllocator.Free(pBackItem);
    4620  --m_Count;
    4621 }
    4622 
    4623 template<typename T>
    4624 void VmaRawList<T>::PopFront()
    4625 {
    4626  VMA_HEAVY_ASSERT(m_Count > 0);
    4627  ItemType* const pFrontItem = m_pFront;
    4628  ItemType* const pNextItem = pFrontItem->pNext;
    4629  if(pNextItem != VMA_NULL)
    4630  {
    4631  pNextItem->pPrev = VMA_NULL;
    4632  }
    4633  m_pFront = pNextItem;
    4634  m_ItemAllocator.Free(pFrontItem);
    4635  --m_Count;
    4636 }
    4637 
    4638 template<typename T>
    4639 void VmaRawList<T>::Remove(ItemType* pItem)
    4640 {
    4641  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    4642  VMA_HEAVY_ASSERT(m_Count > 0);
    4643 
    4644  if(pItem->pPrev != VMA_NULL)
    4645  {
    4646  pItem->pPrev->pNext = pItem->pNext;
    4647  }
    4648  else
    4649  {
    4650  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4651  m_pFront = pItem->pNext;
    4652  }
    4653 
    4654  if(pItem->pNext != VMA_NULL)
    4655  {
    4656  pItem->pNext->pPrev = pItem->pPrev;
    4657  }
    4658  else
    4659  {
    4660  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4661  m_pBack = pItem->pPrev;
    4662  }
    4663 
    4664  m_ItemAllocator.Free(pItem);
    4665  --m_Count;
    4666 }
    4667 
    4668 template<typename T>
    4669 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
    4670 {
    4671  if(pItem != VMA_NULL)
    4672  {
    4673  ItemType* const prevItem = pItem->pPrev;
    4674  ItemType* const newItem = m_ItemAllocator.Alloc();
    4675  newItem->pPrev = prevItem;
    4676  newItem->pNext = pItem;
    4677  pItem->pPrev = newItem;
    4678  if(prevItem != VMA_NULL)
    4679  {
    4680  prevItem->pNext = newItem;
    4681  }
    4682  else
    4683  {
    4684  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4685  m_pFront = newItem;
    4686  }
    4687  ++m_Count;
    4688  return newItem;
    4689  }
    4690  else
    4691  return PushBack();
    4692 }
    4693 
    4694 template<typename T>
    4695 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
    4696 {
    4697  if(pItem != VMA_NULL)
    4698  {
    4699  ItemType* const nextItem = pItem->pNext;
    4700  ItemType* const newItem = m_ItemAllocator.Alloc();
    4701  newItem->pNext = nextItem;
    4702  newItem->pPrev = pItem;
    4703  pItem->pNext = newItem;
    4704  if(nextItem != VMA_NULL)
    4705  {
    4706  nextItem->pPrev = newItem;
    4707  }
    4708  else
    4709  {
    4710  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4711  m_pBack = newItem;
    4712  }
    4713  ++m_Count;
    4714  return newItem;
    4715  }
    4716  else
    4717  return PushFront();
    4718 }
    4719 
    4720 template<typename T>
    4721 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
    4722 {
    4723  ItemType* const newItem = InsertBefore(pItem);
    4724  newItem->Value = value;
    4725  return newItem;
    4726 }
    4727 
    4728 template<typename T>
    4729 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
    4730 {
    4731  ItemType* const newItem = InsertAfter(pItem);
    4732  newItem->Value = value;
    4733  return newItem;
    4734 }
    4735 
    4736 template<typename T, typename AllocatorT>
    4737 class VmaList
    4738 {
    4739  VMA_CLASS_NO_COPY(VmaList)
    4740 public:
    4741  class iterator
    4742  {
    4743  public:
    4744  iterator() :
    4745  m_pList(VMA_NULL),
    4746  m_pItem(VMA_NULL)
    4747  {
    4748  }
    4749 
    4750  T& operator*() const
    4751  {
    4752  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4753  return m_pItem->Value;
    4754  }
    4755  T* operator->() const
    4756  {
    4757  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4758  return &m_pItem->Value;
    4759  }
    4760 
    4761  iterator& operator++()
    4762  {
    4763  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4764  m_pItem = m_pItem->pNext;
    4765  return *this;
    4766  }
    4767  iterator& operator--()
    4768  {
    4769  if(m_pItem != VMA_NULL)
    4770  {
    4771  m_pItem = m_pItem->pPrev;
    4772  }
    4773  else
    4774  {
    4775  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
    4776  m_pItem = m_pList->Back();
    4777  }
    4778  return *this;
    4779  }
    4780 
    4781  iterator operator++(int)
    4782  {
    4783  iterator result = *this;
    4784  ++*this;
    4785  return result;
    4786  }
    4787  iterator operator--(int)
    4788  {
    4789  iterator result = *this;
    4790  --*this;
    4791  return result;
    4792  }
    4793 
    4794  bool operator==(const iterator& rhs) const
    4795  {
    4796  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    4797  return m_pItem == rhs.m_pItem;
    4798  }
    4799  bool operator!=(const iterator& rhs) const
    4800  {
    4801  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    4802  return m_pItem != rhs.m_pItem;
    4803  }
    4804 
    4805  private:
    4806  VmaRawList<T>* m_pList;
    4807  VmaListItem<T>* m_pItem;
    4808 
    4809  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
    4810  m_pList(pList),
    4811  m_pItem(pItem)
    4812  {
    4813  }
    4814 
    4815  friend class VmaList<T, AllocatorT>;
    4816  };
    4817 
    4818  class const_iterator
    4819  {
    4820  public:
    4821  const_iterator() :
    4822  m_pList(VMA_NULL),
    4823  m_pItem(VMA_NULL)
    4824  {
    4825  }
    4826 
    4827  const_iterator(const iterator& src) :
    4828  m_pList(src.m_pList),
    4829  m_pItem(src.m_pItem)
    4830  {
    4831  }
    4832 
    4833  const T& operator*() const
    4834  {
    4835  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4836  return m_pItem->Value;
    4837  }
    4838  const T* operator->() const
    4839  {
    4840  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4841  return &m_pItem->Value;
    4842  }
    4843 
    4844  const_iterator& operator++()
    4845  {
    4846  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4847  m_pItem = m_pItem->pNext;
    4848  return *this;
    4849  }
    4850  const_iterator& operator--()
    4851  {
    4852  if(m_pItem != VMA_NULL)
    4853  {
    4854  m_pItem = m_pItem->pPrev;
    4855  }
    4856  else
    4857  {
    4858  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
    4859  m_pItem = m_pList->Back();
    4860  }
    4861  return *this;
    4862  }
    4863 
    4864  const_iterator operator++(int)
    4865  {
    4866  const_iterator result = *this;
    4867  ++*this;
    4868  return result;
    4869  }
    4870  const_iterator operator--(int)
    4871  {
    4872  const_iterator result = *this;
    4873  --*this;
    4874  return result;
    4875  }
    4876 
    4877  bool operator==(const const_iterator& rhs) const
    4878  {
    4879  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    4880  return m_pItem == rhs.m_pItem;
    4881  }
    4882  bool operator!=(const const_iterator& rhs) const
    4883  {
    4884  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    4885  return m_pItem != rhs.m_pItem;
    4886  }
    4887 
    4888  private:
    4889  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
    4890  m_pList(pList),
    4891  m_pItem(pItem)
    4892  {
    4893  }
    4894 
    4895  const VmaRawList<T>* m_pList;
    4896  const VmaListItem<T>* m_pItem;
    4897 
    4898  friend class VmaList<T, AllocatorT>;
    4899  };
    4900 
    4901  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
    4902 
    4903  bool empty() const { return m_RawList.IsEmpty(); }
    4904  size_t size() const { return m_RawList.GetCount(); }
    4905 
    4906  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    4907  iterator end() { return iterator(&m_RawList, VMA_NULL); }
    4908 
    4909  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    4910  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
    4911 
    4912  void clear() { m_RawList.Clear(); }
    4913  void push_back(const T& value) { m_RawList.PushBack(value); }
    4914  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    4915  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
    4916 
    4917 private:
    4918  VmaRawList<T> m_RawList;
    4919 };
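// Usage sketch (illustrative only): VmaList mirrors the subset of std::list that
// the library needs - iteration, insertion, and erasure work as with the
// standard container:
//
//     VmaStlAllocator<int> alloc(pAllocationCallbacks);
//     VmaList< int, VmaStlAllocator<int> > list(alloc);
//     list.push_back(1);
//     list.push_back(2);
//     for(VmaList< int, VmaStlAllocator<int> >::iterator it = list.begin(); it != list.end(); ++it)
//     {
//         // *it visits 1, then 2.
//     }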
    4920 
    4921 #endif // #if VMA_USE_STL_LIST
    4922 
    4923 ////////////////////////////////////////////////////////////////////////////////
    4924 // class VmaMap
    4925 
    4926 // Unused in this version.
    4927 #if 0
    4928 
    4929 #if VMA_USE_STL_UNORDERED_MAP
    4930 
    4931 #define VmaPair std::pair
    4932 
    4933 #define VMA_MAP_TYPE(KeyT, ValueT) \
    4934  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    4935 
    4936 #else // #if VMA_USE_STL_UNORDERED_MAP
    4937 
    4938 template<typename T1, typename T2>
    4939 struct VmaPair
    4940 {
    4941  T1 first;
    4942  T2 second;
    4943 
    4944  VmaPair() : first(), second() { }
    4945  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
    4946 };
    4947 
    4948 /* Class compatible with subset of interface of std::unordered_map.
    4949 KeyT, ValueT must be POD because they will be stored in VmaVector.
    4950 */
    4951 template<typename KeyT, typename ValueT>
    4952 class VmaMap
    4953 {
    4954 public:
    4955  typedef VmaPair<KeyT, ValueT> PairType;
    4956  typedef PairType* iterator;
    4957 
    4958  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
    4959 
    4960  iterator begin() { return m_Vector.begin(); }
    4961  iterator end() { return m_Vector.end(); }
    4962 
    4963  void insert(const PairType& pair);
    4964  iterator find(const KeyT& key);
    4965  void erase(iterator it);
    4966 
    4967 private:
    4968  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
    4969 };
    4970 
    4971 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    4972 
    4973 template<typename FirstT, typename SecondT>
    4974 struct VmaPairFirstLess
    4975 {
    4976  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    4977  {
    4978  return lhs.first < rhs.first;
    4979  }
    4980  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    4981  {
    4982  return lhs.first < rhsFirst;
    4983  }
    4984 };
    4985 
    4986 template<typename KeyT, typename ValueT>
    4987 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
    4988 {
    4989  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    4990  m_Vector.data(),
    4991  m_Vector.data() + m_Vector.size(),
    4992  pair,
    4993  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    4994  VmaVectorInsert(m_Vector, indexToInsert, pair);
    4995 }
    4996 
    4997 template<typename KeyT, typename ValueT>
    4998 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
    4999 {
    5000  PairType* it = VmaBinaryFindFirstNotLess(
    5001  m_Vector.data(),
    5002  m_Vector.data() + m_Vector.size(),
    5003  key,
    5004  VmaPairFirstLess<KeyT, ValueT>());
    5005  if((it != m_Vector.end()) && (it->first == key))
    5006  {
    5007  return it;
    5008  }
    5009  else
    5010  {
    5011  return m_Vector.end();
    5012  }
    5013 }
    5014 
    5015 template<typename KeyT, typename ValueT>
    5016 void VmaMap<KeyT, ValueT>::erase(iterator it)
    5017 {
    5018  VmaVectorRemove(m_Vector, it - m_Vector.begin());
    5019 }
    5020 
    5021 #endif // #if VMA_USE_STL_UNORDERED_MAP
    5022 
    5023 #endif // #if 0
    5024 
    5025 ////////////////////////////////////////////////////////////////////////////////
    5026 
    5027 class VmaDeviceMemoryBlock;
    5028 
    5029 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
    5030 
    5031 struct VmaAllocation_T
    5032 {
    5033 private:
    5034  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
    5035 
    5036  enum FLAGS
    5037  {
    5038  FLAG_USER_DATA_STRING = 0x01,
    5039  };
    5040 
    5041 public:
    5042  enum ALLOCATION_TYPE
    5043  {
    5044  ALLOCATION_TYPE_NONE,
    5045  ALLOCATION_TYPE_BLOCK,
    5046  ALLOCATION_TYPE_DEDICATED,
    5047  };
    5048 
    5049  /*
    5050  This struct cannot have a constructor or destructor. It must be POD because it is
    5051  allocated using VmaPoolAllocator.
    5052  */
    5053 
    5054  void Ctor(uint32_t currentFrameIndex, bool userDataString)
    5055  {
    5056  m_Alignment = 1;
    5057  m_Size = 0;
    5058  m_pUserData = VMA_NULL;
    5059  m_LastUseFrameIndex = currentFrameIndex;
    5060  m_Type = (uint8_t)ALLOCATION_TYPE_NONE;
    5061  m_SuballocationType = (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN;
    5062  m_MapCount = 0;
    5063  m_Flags = userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0;
    5064 
    5065 #if VMA_STATS_STRING_ENABLED
    5066  m_CreationFrameIndex = currentFrameIndex;
    5067  m_BufferImageUsage = 0;
    5068 #endif
    5069  }
    5070 
    5071  void Dtor()
    5072  {
    5073  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
    5074 
    5075  // Check if owned string was freed.
    5076  VMA_ASSERT(m_pUserData == VMA_NULL);
    5077  }
    5078 
    5079  void InitBlockAllocation(
    5080  VmaDeviceMemoryBlock* block,
    5081  VkDeviceSize offset,
    5082  VkDeviceSize alignment,
    5083  VkDeviceSize size,
    5084  VmaSuballocationType suballocationType,
    5085  bool mapped,
    5086  bool canBecomeLost)
    5087  {
    5088  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    5089  VMA_ASSERT(block != VMA_NULL);
    5090  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
    5091  m_Alignment = alignment;
    5092  m_Size = size;
    5093  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
    5094  m_SuballocationType = (uint8_t)suballocationType;
    5095  m_BlockAllocation.m_Block = block;
    5096  m_BlockAllocation.m_Offset = offset;
    5097  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    5098  }
    5099 
    5100  void InitLost()
    5101  {
    5102  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    5103  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
    5104  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
    5105  m_BlockAllocation.m_Block = VMA_NULL;
    5106  m_BlockAllocation.m_Offset = 0;
    5107  m_BlockAllocation.m_CanBecomeLost = true;
    5108  }
    5109 
    5110  void ChangeBlockAllocation(
    5111  VmaAllocator hAllocator,
    5112  VmaDeviceMemoryBlock* block,
    5113  VkDeviceSize offset);
    5114 
    5115  void ChangeOffset(VkDeviceSize newOffset);
    5116 
    5117  // Non-null pMappedData means the allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    5118  void InitDedicatedAllocation(
    5119  uint32_t memoryTypeIndex,
    5120  VkDeviceMemory hMemory,
    5121  VmaSuballocationType suballocationType,
    5122  void* pMappedData,
    5123  VkDeviceSize size)
    5124  {
    5125  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    5126  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
    5127  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
    5128  m_Alignment = 0;
    5129  m_Size = size;
    5130  m_SuballocationType = (uint8_t)suballocationType;
    5131  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
    5132  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
    5133  m_DedicatedAllocation.m_hMemory = hMemory;
    5134  m_DedicatedAllocation.m_pMappedData = pMappedData;
    5135  }
    5136 
    5137  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    5138  VkDeviceSize GetAlignment() const { return m_Alignment; }
    5139  VkDeviceSize GetSize() const { return m_Size; }
    5140  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    5141  void* GetUserData() const { return m_pUserData; }
    5142  void SetUserData(VmaAllocator hAllocator, void* pUserData);
    5143  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
    5144 
    5145  VmaDeviceMemoryBlock* GetBlock() const
    5146  {
    5147  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    5148  return m_BlockAllocation.m_Block;
    5149  }
    5150  VkDeviceSize GetOffset() const;
    5151  VkDeviceMemory GetMemory() const;
    5152  uint32_t GetMemoryTypeIndex() const;
    5153  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    5154  void* GetMappedData() const;
    5155  bool CanBecomeLost() const;
    5156 
    5157  uint32_t GetLastUseFrameIndex() const
    5158  {
    5159  return m_LastUseFrameIndex.load();
    5160  }
    5161  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    5162  {
    5163  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    5164  }
    5165  /*
    5166  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
    5167  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    5168  - Else, returns false.
    5169 
    5170  If hAllocation is already lost, assert - you should not call it then.
    5171  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    5172  */
    5173  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    5174 
    5175  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    5176  {
    5177  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
    5178  outInfo.blockCount = 1;
    5179  outInfo.allocationCount = 1;
    5180  outInfo.unusedRangeCount = 0;
    5181  outInfo.usedBytes = m_Size;
    5182  outInfo.unusedBytes = 0;
    5183  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
    5184  outInfo.unusedRangeSizeMin = UINT64_MAX;
    5185  outInfo.unusedRangeSizeMax = 0;
    5186  }
    5187 
    5188  void BlockAllocMap();
    5189  void BlockAllocUnmap();
    5190  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    5191  void DedicatedAllocUnmap(VmaAllocator hAllocator);
    5192 
    5193 #if VMA_STATS_STRING_ENABLED
    5194  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    5195  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
    5196 
    5197  void InitBufferImageUsage(uint32_t bufferImageUsage)
    5198  {
    5199  VMA_ASSERT(m_BufferImageUsage == 0);
    5200  m_BufferImageUsage = bufferImageUsage;
    5201  }
    5202 
    5203  void PrintParameters(class VmaJsonWriter& json) const;
    5204 #endif
    5205 
    5206 private:
    5207  VkDeviceSize m_Alignment;
    5208  VkDeviceSize m_Size;
    5209  void* m_pUserData;
    5210  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    5211  uint8_t m_Type; // ALLOCATION_TYPE
    5212  uint8_t m_SuballocationType; // VmaSuballocationType
    5213  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    5214  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    5215  uint8_t m_MapCount;
    5216  uint8_t m_Flags; // enum FLAGS
    5217 
    5218  // Allocation out of VmaDeviceMemoryBlock.
    5219  struct BlockAllocation
    5220  {
    5221  VmaDeviceMemoryBlock* m_Block;
    5222  VkDeviceSize m_Offset;
    5223  bool m_CanBecomeLost;
    5224  };
    5225 
    5226  // Allocation for an object that has its own private VkDeviceMemory.
    5227  struct DedicatedAllocation
    5228  {
    5229  uint32_t m_MemoryTypeIndex;
    5230  VkDeviceMemory m_hMemory;
    5231  void* m_pMappedData; // Not null means memory is mapped.
    5232  };
    5233 
    5234  union
    5235  {
    5236  // Allocation out of VmaDeviceMemoryBlock.
    5237  BlockAllocation m_BlockAllocation;
    5238  // Allocation for an object that has its own private VkDeviceMemory.
    5239  DedicatedAllocation m_DedicatedAllocation;
    5240  };
    5241 
    5242 #if VMA_STATS_STRING_ENABLED
    5243  uint32_t m_CreationFrameIndex;
    5244  uint32_t m_BufferImageUsage; // 0 if unknown.
    5245 #endif
    5246 
    5247  void FreeUserDataString(VmaAllocator hAllocator);
    5248 };
    5249 
    5250 /*
    5251 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned
    5252 as an allocated memory block, or free.
    5253 */
    5254 struct VmaSuballocation
    5255 {
    5256  VkDeviceSize offset;
    5257  VkDeviceSize size;
    5258  VmaAllocation hAllocation;
    5259  VmaSuballocationType type;
    5260 };
    5261 
    5262 // Comparator for offsets.
    5263 struct VmaSuballocationOffsetLess
    5264 {
    5265  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    5266  {
    5267  return lhs.offset < rhs.offset;
    5268  }
    5269 };
    5270 struct VmaSuballocationOffsetGreater
    5271 {
    5272  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    5273  {
    5274  return lhs.offset > rhs.offset;
    5275  }
    5276 };
    5277 
    5278 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
    5279 
    5280 // Cost of making one additional allocation lost, expressed as an equivalent number of bytes.
    5281 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    5282 
    5283 enum class VmaAllocationRequestType
    5284 {
    5285  Normal,
    5286  // Used by "Linear" algorithm.
    5287  UpperAddress,
    5288  EndOf1st,
    5289  EndOf2nd,
    5290 };
    5291 
    5292 /*
    5293 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
    5294 
    5295 If canMakeOtherLost was false:
    5296 - item points to a FREE suballocation.
    5297 - itemsToMakeLostCount is 0.
    5298 
    5299 If canMakeOtherLost was true:
    5300 - item points to first of sequence of suballocations, which are either FREE,
    5301  or point to VmaAllocations that can become lost.
    5302 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
    5303  the requested allocation to succeed.
    5304 */
    5305 struct VmaAllocationRequest
    5306 {
    5307  VkDeviceSize offset;
    5308  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    5309  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    5310  VmaSuballocationList::iterator item;
    5311  size_t itemsToMakeLostCount;
    5312  void* customData;
    5313  VmaAllocationRequestType type;
    5314 
    5315  VkDeviceSize CalcCost() const
    5316  {
    5317  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    5318  }
    5319 };
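// Worked cost example (illustrative only): consider two candidate requests for
// the same allocation. Request A overlaps 1 MiB of allocations that would have
// to be made lost (sumItemSize = 1048576, itemsToMakeLostCount = 1); request B
// overlaps only free space (sumItemSize = 0, itemsToMakeLostCount = 0). Then:
//
//     A.CalcCost() == 1048576 + 1 * VMA_LOST_ALLOCATION_COST == 2097152
//     B.CalcCost() == 0       + 0 * VMA_LOST_ALLOCATION_COST == 0
//
// so a strategy comparing candidates by cost prefers the non-destructive request B.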
    5320 
    5321 /*
    5322 Data structure used for bookkeeping of allocations and unused ranges of memory
    5323 in a single VkDeviceMemory block.
    5324 */
    5325 class VmaBlockMetadata
    5326 {
    5327 public:
    5328  VmaBlockMetadata(VmaAllocator hAllocator);
    5329  virtual ~VmaBlockMetadata() { }
    5330  virtual void Init(VkDeviceSize size) { m_Size = size; }
    5331 
    5332  // Validates all data structures inside this object. If not valid, returns false.
    5333  virtual bool Validate() const = 0;
    5334  VkDeviceSize GetSize() const { return m_Size; }
    5335  virtual size_t GetAllocationCount() const = 0;
    5336  virtual VkDeviceSize GetSumFreeSize() const = 0;
    5337  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    5338  // Returns true if this block is empty - contains only a single free suballocation.
    5339  virtual bool IsEmpty() const = 0;
    5340 
    5341  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    5342  // Shouldn't modify blockCount.
    5343  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
    5344 
    5345 #if VMA_STATS_STRING_ENABLED
    5346  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
    5347 #endif
    5348 
    5349  // Tries to find a place for suballocation with given parameters inside this block.
    5350  // If it succeeds, fills pAllocationRequest and returns true.
    5351  // If it fails, returns false.
    5352  virtual bool CreateAllocationRequest(
    5353  uint32_t currentFrameIndex,
    5354  uint32_t frameInUseCount,
    5355  VkDeviceSize bufferImageGranularity,
    5356  VkDeviceSize allocSize,
    5357  VkDeviceSize allocAlignment,
    5358  bool upperAddress,
    5359  VmaSuballocationType allocType,
    5360  bool canMakeOtherLost,
    5361  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
    5362  uint32_t strategy,
    5363  VmaAllocationRequest* pAllocationRequest) = 0;
    5364 
    5365  virtual bool MakeRequestedAllocationsLost(
    5366  uint32_t currentFrameIndex,
    5367  uint32_t frameInUseCount,
    5368  VmaAllocationRequest* pAllocationRequest) = 0;
    5369 
    5370  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
    5371 
    5372  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
    5373 
    5374  // Makes actual allocation based on request. Request must already be checked and valid.
    5375  virtual void Alloc(
    5376  const VmaAllocationRequest& request,
    5377  VmaSuballocationType type,
    5378  VkDeviceSize allocSize,
    5379  VmaAllocation hAllocation) = 0;
    5380 
    5381  // Frees suballocation assigned to given memory region.
    5382  virtual void Free(const VmaAllocation allocation) = 0;
    5383  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
    5384 
    5385 protected:
    5386  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
    5387 
    5388 #if VMA_STATS_STRING_ENABLED
    5389  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
    5390  VkDeviceSize unusedBytes,
    5391  size_t allocationCount,
    5392  size_t unusedRangeCount) const;
    5393  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    5394  VkDeviceSize offset,
    5395  VmaAllocation hAllocation) const;
    5396  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    5397  VkDeviceSize offset,
    5398  VkDeviceSize size) const;
    5399  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
    5400 #endif
    5401 
    5402 private:
    5403  VkDeviceSize m_Size;
    5404  const VkAllocationCallbacks* m_pAllocationCallbacks;
    5405 };
    5406 
    5407 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
    5408  VMA_ASSERT(0 && "Validation failed: " #cond); \
    5409  return false; \
    5410  } } while(false)
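// Usage sketch (illustrative only): inside a Validate() implementation,
// VMA_VALIDATE both asserts in debug builds and makes the function return false:
//
//     bool Validate() const
//     {
//         VMA_VALIDATE(m_SumFreeSize <= GetSize()); // fails -> assert + return false
//         // ... further invariants ...
//         return true;
//     }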
    5411 
    5412 class VmaBlockMetadata_Generic : public VmaBlockMetadata
    5413 {
    5414  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
    5415 public:
    5416  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    5417  virtual ~VmaBlockMetadata_Generic();
    5418  virtual void Init(VkDeviceSize size);
    5419 
    5420  virtual bool Validate() const;
    5421  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    5422  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    5423  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    5424  virtual bool IsEmpty() const;
    5425 
    5426  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    5427  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
    5428 
    5429 #if VMA_STATS_STRING_ENABLED
    5430  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
    5431 #endif
    5432 
    5433  virtual bool CreateAllocationRequest(
    5434  uint32_t currentFrameIndex,
    5435  uint32_t frameInUseCount,
    5436  VkDeviceSize bufferImageGranularity,
    5437  VkDeviceSize allocSize,
    5438  VkDeviceSize allocAlignment,
    5439  bool upperAddress,
    5440  VmaSuballocationType allocType,
    5441  bool canMakeOtherLost,
    5442  uint32_t strategy,
    5443  VmaAllocationRequest* pAllocationRequest);
    5444 
    5445  virtual bool MakeRequestedAllocationsLost(
    5446  uint32_t currentFrameIndex,
    5447  uint32_t frameInUseCount,
    5448  VmaAllocationRequest* pAllocationRequest);
    5449 
    5450  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    5451 
    5452  virtual VkResult CheckCorruption(const void* pBlockData);
    5453 
    5454  virtual void Alloc(
    5455  const VmaAllocationRequest& request,
    5456  VmaSuballocationType type,
    5457  VkDeviceSize allocSize,
    5458  VmaAllocation hAllocation);
    5459 
    5460  virtual void Free(const VmaAllocation allocation);
    5461  virtual void FreeAtOffset(VkDeviceSize offset);
    5462 
    5463  ////////////////////////////////////////////////////////////////////////////////
    5464  // For defragmentation
    5465 
    5466  bool IsBufferImageGranularityConflictPossible(
    5467  VkDeviceSize bufferImageGranularity,
    5468  VmaSuballocationType& inOutPrevSuballocType) const;
    5469 
    5470 private:
    5471  friend class VmaDefragmentationAlgorithm_Generic;
    5472  friend class VmaDefragmentationAlgorithm_Fast;
    5473 
    5474  uint32_t m_FreeCount;
    5475  VkDeviceSize m_SumFreeSize;
    5476  VmaSuballocationList m_Suballocations;
    5477  // Suballocations that are free and have size greater than a certain threshold.
    5478  // Sorted by size, ascending.
    5479  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
    5480 
    5481  bool ValidateFreeSuballocationList() const;
    5482 
    5483  // Checks if requested suballocation with given parameters can be placed in given suballocItem.
    5484  // If yes, fills pOffset and returns true. If no, returns false.
    5485  bool CheckAllocation(
    5486  uint32_t currentFrameIndex,
    5487  uint32_t frameInUseCount,
    5488  VkDeviceSize bufferImageGranularity,
    5489  VkDeviceSize allocSize,
    5490  VkDeviceSize allocAlignment,
    5491  VmaSuballocationType allocType,
    5492  VmaSuballocationList::const_iterator suballocItem,
    5493  bool canMakeOtherLost,
    5494  VkDeviceSize* pOffset,
    5495  size_t* itemsToMakeLostCount,
    5496  VkDeviceSize* pSumFreeSize,
    5497  VkDeviceSize* pSumItemSize) const;
    5498  // Given a free suballocation, merges it with the following one, which must also be free.
    5499  void MergeFreeWithNext(VmaSuballocationList::iterator item);
    5500  // Releases given suballocation, making it free.
    5501  // Merges it with adjacent free suballocations if applicable.
    5502  // Returns iterator to new free suballocation at this place.
    5503  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    5504  // Given a free suballocation, inserts it into the sorted list
    5505  // m_FreeSuballocationsBySize if it is suitable.
    5506  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    5507  // Given a free suballocation, removes it from the sorted list
    5508  // m_FreeSuballocationsBySize if it is suitable.
    5509  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
    5510 };
    5511 
    5512 /*
    5513 Allocations and their references in internal data structure look like this:
    5514 
    5515 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
    5516 
    5517  0 +-------+
    5518  | |
    5519  | |
    5520  | |
    5521  +-------+
    5522  | Alloc | 1st[m_1stNullItemsBeginCount]
    5523  +-------+
    5524  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    5525  +-------+
    5526  | ... |
    5527  +-------+
    5528  | Alloc | 1st[1st.size() - 1]
    5529  +-------+
    5530  | |
    5531  | |
    5532  | |
    5533 GetSize() +-------+
    5534 
    5535 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
    5536 
    5537  0 +-------+
    5538  | Alloc | 2nd[0]
    5539  +-------+
    5540  | Alloc | 2nd[1]
    5541  +-------+
    5542  | ... |
    5543  +-------+
    5544  | Alloc | 2nd[2nd.size() - 1]
    5545  +-------+
    5546  | |
    5547  | |
    5548  | |
    5549  +-------+
    5550  | Alloc | 1st[m_1stNullItemsBeginCount]
    5551  +-------+
    5552  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    5553  +-------+
    5554  | ... |
    5555  +-------+
    5556  | Alloc | 1st[1st.size() - 1]
    5557  +-------+
    5558  | |
    5559 GetSize() +-------+
    5560 
    5561 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
    5562 
    5563  0 +-------+
    5564  | |
    5565  | |
    5566  | |
    5567  +-------+
    5568  | Alloc | 1st[m_1stNullItemsBeginCount]
    5569  +-------+
    5570  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    5571  +-------+
    5572  | ... |
    5573  +-------+
    5574  | Alloc | 1st[1st.size() - 1]
    5575  +-------+
    5576  | |
    5577  | |
    5578  | |
    5579  +-------+
    5580  | Alloc | 2nd[2nd.size() - 1]
    5581  +-------+
    5582  | ... |
    5583  +-------+
    5584  | Alloc | 2nd[1]
    5585  +-------+
    5586  | Alloc | 2nd[0]
    5587 GetSize() +-------+
    5588 
    5589 */
    5590 class VmaBlockMetadata_Linear : public VmaBlockMetadata
    5591 {
    5592  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
    5593 public:
    5594  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    5595  virtual ~VmaBlockMetadata_Linear();
    5596  virtual void Init(VkDeviceSize size);
    5597 
    5598  virtual bool Validate() const;
    5599  virtual size_t GetAllocationCount() const;
    5600  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    5601  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    5602  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
    5603 
    5604  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    5605  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
    5606 
    5607 #if VMA_STATS_STRING_ENABLED
    5608  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
    5609 #endif
    5610 
    5611  virtual bool CreateAllocationRequest(
    5612  uint32_t currentFrameIndex,
    5613  uint32_t frameInUseCount,
    5614  VkDeviceSize bufferImageGranularity,
    5615  VkDeviceSize allocSize,
    5616  VkDeviceSize allocAlignment,
    5617  bool upperAddress,
    5618  VmaSuballocationType allocType,
    5619  bool canMakeOtherLost,
    5620  uint32_t strategy,
    5621  VmaAllocationRequest* pAllocationRequest);
    5622 
    5623  virtual bool MakeRequestedAllocationsLost(
    5624  uint32_t currentFrameIndex,
    5625  uint32_t frameInUseCount,
    5626  VmaAllocationRequest* pAllocationRequest);
    5627 
    5628  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    5629 
    5630  virtual VkResult CheckCorruption(const void* pBlockData);
    5631 
    5632  virtual void Alloc(
    5633  const VmaAllocationRequest& request,
    5634  VmaSuballocationType type,
    5635  VkDeviceSize allocSize,
    5636  VmaAllocation hAllocation);
    5637 
    5638  virtual void Free(const VmaAllocation allocation);
    5639  virtual void FreeAtOffset(VkDeviceSize offset);
    5640 
    5641 private:
    5642  /*
    5643  There are two suballocation vectors, used in ping-pong fashion.
    5644  The one with index m_1stVectorIndex is called 1st.
    5645  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    5646  2nd can be non-empty only when 1st is not empty.
    5647  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    5648  */
    5649  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
    5650 
    5651  enum SECOND_VECTOR_MODE
    5652  {
    5653  SECOND_VECTOR_EMPTY,
    5654  /*
    5655  Suballocations in the 2nd vector are created later than the ones in the 1st,
    5656  but they all have smaller offsets.
    5657  */
    5658  SECOND_VECTOR_RING_BUFFER,
    5659  /*
    5660  Suballocations in the 2nd vector form the upper side of a double stack.
    5661  They all have offsets higher than those in the 1st vector.
    5662  The top of this stack means smaller offsets, but higher indices in this vector.
    5663  */
    5664  SECOND_VECTOR_DOUBLE_STACK,
    5665  };
    5666 
    5667  VkDeviceSize m_SumFreeSize;
    5668  SuballocationVectorType m_Suballocations0, m_Suballocations1;
    5669  uint32_t m_1stVectorIndex;
    5670  SECOND_VECTOR_MODE m_2ndVectorMode;
    5671 
    5672  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    5673  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    5674  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    5675  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    5676 
    5677  // Number of items in 1st vector with hAllocation = null at the beginning.
    5678  size_t m_1stNullItemsBeginCount;
    5679  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    5680  size_t m_1stNullItemsMiddleCount;
    5681  // Number of items in 2nd vector with hAllocation = null.
    5682  size_t m_2ndNullItemsCount;
    5683 
    5684  bool ShouldCompact1st() const;
    5685  void CleanupAfterFree();
    5686 
    5687  bool CreateAllocationRequest_LowerAddress(
    5688  uint32_t currentFrameIndex,
    5689  uint32_t frameInUseCount,
    5690  VkDeviceSize bufferImageGranularity,
    5691  VkDeviceSize allocSize,
    5692  VkDeviceSize allocAlignment,
    5693  VmaSuballocationType allocType,
    5694  bool canMakeOtherLost,
    5695  uint32_t strategy,
    5696  VmaAllocationRequest* pAllocationRequest);
    5697  bool CreateAllocationRequest_UpperAddress(
    5698  uint32_t currentFrameIndex,
    5699  uint32_t frameInUseCount,
    5700  VkDeviceSize bufferImageGranularity,
    5701  VkDeviceSize allocSize,
    5702  VkDeviceSize allocAlignment,
    5703  VmaSuballocationType allocType,
    5704  bool canMakeOtherLost,
    5705  uint32_t strategy,
    5706  VmaAllocationRequest* pAllocationRequest);
    5707 };
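/*
Illustrative sketch (not part of the library): the three m_2ndVectorMode states
diagrammed above can be reached through the public API with a custom pool that
uses the linear algorithm. Assuming a valid `allocator` and a filled-in
poolCreateInfo (memoryTypeIndex, block size etc.), roughly:

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
    VmaPool pool;
    vmaCreatePool(allocator, &poolCreateInfo, &pool);

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.pool = pool;
    // Plain allocations land in the 1st vector (SECOND_VECTOR_EMPTY).
    // Freeing the oldest allocations and allocating again can wrap around to
    // the beginning of the block, switching to SECOND_VECTOR_RING_BUFFER.

    // Allocating from the opposite end switches to SECOND_VECTOR_DOUBLE_STACK:
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
*/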
    5708 
    5709 /*
    5710 - GetSize() is the original size of the allocated memory block.
    5711 - m_UsableSize is this size aligned down to a power of two.
    5712  All allocations and calculations happen relative to m_UsableSize.
    5713 - GetUnusableSize() is the difference between them.
    5714  It is reported as a separate, unused range, not available for allocations.
    5715 
    5716 Node at level 0 has size = m_UsableSize.
    5717 Each successive level contains nodes with half the size of the previous level.
    5718 m_LevelCount is the maximum number of levels to use in the current object.
    5719 */
    5720 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
    5721 {
    5722  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
    5723 public:
    5724  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    5725  virtual ~VmaBlockMetadata_Buddy();
    5726  virtual void Init(VkDeviceSize size);
    5727 
    5728  virtual bool Validate() const;
    5729  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
    5730  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    5731  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    5732  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
    5733 
    5734  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    5735  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
    5736 
    5737 #if VMA_STATS_STRING_ENABLED
    5738  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
    5739 #endif
    5740 
    5741  virtual bool CreateAllocationRequest(
    5742  uint32_t currentFrameIndex,
    5743  uint32_t frameInUseCount,
    5744  VkDeviceSize bufferImageGranularity,
    5745  VkDeviceSize allocSize,
    5746  VkDeviceSize allocAlignment,
    5747  bool upperAddress,
    5748  VmaSuballocationType allocType,
    5749  bool canMakeOtherLost,
    5750  uint32_t strategy,
    5751  VmaAllocationRequest* pAllocationRequest);
    5752 
    5753  virtual bool MakeRequestedAllocationsLost(
    5754  uint32_t currentFrameIndex,
    5755  uint32_t frameInUseCount,
    5756  VmaAllocationRequest* pAllocationRequest);
    5757 
    5758  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    5759 
    5760  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
    5761 
    5762  virtual void Alloc(
    5763  const VmaAllocationRequest& request,
    5764  VmaSuballocationType type,
    5765  VkDeviceSize allocSize,
    5766  VmaAllocation hAllocation);
    5767 
    5768  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    5769  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
    5770 
    5771 private:
    5772  static const VkDeviceSize MIN_NODE_SIZE = 32;
    5773  static const size_t MAX_LEVELS = 30;
    5774 
    5775  struct ValidationContext
    5776  {
    5777  size_t calculatedAllocationCount;
    5778  size_t calculatedFreeCount;
    5779  VkDeviceSize calculatedSumFreeSize;
    5780 
    5781  ValidationContext() :
    5782  calculatedAllocationCount(0),
    5783  calculatedFreeCount(0),
    5784  calculatedSumFreeSize(0) { }
    5785  };
    5786 
    5787  struct Node
    5788  {
    5789  VkDeviceSize offset;
    5790  enum TYPE
    5791  {
    5792  TYPE_FREE,
    5793  TYPE_ALLOCATION,
    5794  TYPE_SPLIT,
    5795  TYPE_COUNT
    5796  } type;
    5797  Node* parent;
    5798  Node* buddy;
    5799 
    5800  union
    5801  {
    5802  struct
    5803  {
    5804  Node* prev;
    5805  Node* next;
    5806  } free;
    5807  struct
    5808  {
    5809  VmaAllocation alloc;
    5810  } allocation;
    5811  struct
    5812  {
    5813  Node* leftChild;
    5814  } split;
    5815  };
    5816  };
    5817 
    5818  // Size of the memory block aligned down to a power of two.
    5819  VkDeviceSize m_UsableSize;
    5820  uint32_t m_LevelCount;
    5821 
    5822  Node* m_Root;
    5823  struct {
    5824  Node* front;
    5825  Node* back;
    5826  } m_FreeList[MAX_LEVELS];
    5827  // Number of nodes in the tree with type == TYPE_ALLOCATION.
    5828  size_t m_AllocationCount;
    5829  // Number of nodes in the tree with type == TYPE_FREE.
    5830  size_t m_FreeCount;
    5831  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
    5832  VkDeviceSize m_SumFreeSize;
    5833 
    5834  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    5835  void DeleteNode(Node* node);
    5836  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    5837  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    5838  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    5839  // Alloc passed just for validation. Can be null.
    5840  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    5841  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    5842  // Adds node to the front of FreeList at given level.
    5843  // node->type must be FREE.
    5844  // node->free.prev, next can be undefined.
    5845  void AddToFreeListFront(uint32_t level, Node* node);
    5846  // Removes node from FreeList at given level.
    5847  // node->type must be FREE.
    5848  // node->free.prev, next stay untouched.
    5849  void RemoveFromFreeList(uint32_t level, Node* node);
    5850 
    5851 #if VMA_STATS_STRING_ENABLED
    5852  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
    5853 #endif
    5854 };
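/*
Worked example (illustrative numbers): for a buddy block created with
size = 1000000 bytes, m_UsableSize is that size aligned down to a power of two,
i.e. 524288 (2^19), and GetUnusableSize() = 1000000 - 524288 = 475712, reported
as a single unused range. Node sizes per level follow
LevelToNodeSize(level) = m_UsableSize >> level:

    level 0 -> 524288
    level 1 -> 262144
    level 2 -> 131072
    ...

An allocation request is served from the deepest level whose node size still
fits it; e.g. a 100000-byte request lands at level 2 (131072-byte node), and
the 31072 wasted bytes count as internal fragmentation inside m_SumFreeSize.
*/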
    5855 
    5856 /*
    5857 Represents a single block of device memory (`VkDeviceMemory`) with all the
    5858 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
    5859 
    5860 Thread-safety: This class must be externally synchronized.
    5861 */
    5862 class VmaDeviceMemoryBlock
    5863 {
    5864  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
    5865 public:
    5866  VmaBlockMetadata* m_pMetadata;
    5867 
    5868  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
    5869 
    5870  ~VmaDeviceMemoryBlock()
    5871  {
    5872  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
    5873  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    5874  }
    5875 
    5876  // Always call after construction.
    5877  void Init(
    5878  VmaAllocator hAllocator,
    5879  VmaPool hParentPool,
    5880  uint32_t newMemoryTypeIndex,
    5881  VkDeviceMemory newMemory,
    5882  VkDeviceSize newSize,
    5883  uint32_t id,
    5884  uint32_t algorithm);
    5885  // Always call before destruction.
    5886  void Destroy(VmaAllocator allocator);
    5887 
    5888  VmaPool GetParentPool() const { return m_hParentPool; }
    5889  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    5890  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    5891  uint32_t GetId() const { return m_Id; }
    5892  void* GetMappedData() const { return m_pMappedData; }
    5893 
    5894  // Validates all data structures inside this object. If not valid, returns false.
    5895  bool Validate() const;
    5896 
    5897  VkResult CheckCorruption(VmaAllocator hAllocator);
    5898 
    5899  // ppData can be null.
    5900  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    5901  void Unmap(VmaAllocator hAllocator, uint32_t count);
    5902 
    5903  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    5904  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    5905 
    5906  VkResult BindBufferMemory(
    5907  const VmaAllocator hAllocator,
    5908  const VmaAllocation hAllocation,
    5909  VkDeviceSize allocationLocalOffset,
    5910  VkBuffer hBuffer,
    5911  const void* pNext);
    5912  VkResult BindImageMemory(
    5913  const VmaAllocator hAllocator,
    5914  const VmaAllocation hAllocation,
    5915  VkDeviceSize allocationLocalOffset,
    5916  VkImage hImage,
    5917  const void* pNext);
    5918 
    5919 private:
    5920  VmaPool m_hParentPool; // VK_NULL_HANDLE if this block doesn't belong to a custom pool.
    5921  uint32_t m_MemoryTypeIndex;
    5922  uint32_t m_Id;
    5923  VkDeviceMemory m_hMemory;
    5924 
    5925  /*
    5926  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    5927  Also protects m_MapCount, m_pMappedData.
    5928  Allocations, deallocations, and any change in m_pMetadata are protected by the parent's VmaBlockVector::m_Mutex.
    5929  */
    5930  VMA_MUTEX m_Mutex;
    5931  uint32_t m_MapCount;
    5932  void* m_pMappedData;
    5933 };
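/*
Illustrative sketch (not part of the library): Map()/Unmap() are reference-
counted through m_MapCount, so vkMapMemory runs only on the 0 -> 1 transition
and vkUnmapMemory only on 1 -> 0. Assuming a valid block and hAllocator:

    void* p1 = VMA_NULL;
    void* p2 = VMA_NULL;
    block.Map(hAllocator, 1, &p1); // calls vkMapMemory, m_MapCount == 1
    block.Map(hAllocator, 1, &p2); // reuses m_pMappedData, m_MapCount == 2
    // p1 == p2: both point at the same persistent mapping.
    block.Unmap(hAllocator, 1);    // m_MapCount == 1, memory stays mapped
    block.Unmap(hAllocator, 1);    // calls vkUnmapMemory, m_MapCount == 0
*/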
    5934 
    5935 struct VmaPointerLess
    5936 {
    5937  bool operator()(const void* lhs, const void* rhs) const
    5938  {
    5939  return lhs < rhs;
    5940  }
    5941 };
    5942 
    5943 struct VmaDefragmentationMove
    5944 {
    5945  size_t srcBlockIndex;
    5946  size_t dstBlockIndex;
    5947  VkDeviceSize srcOffset;
    5948  VkDeviceSize dstOffset;
    5949  VkDeviceSize size;
    5950 };
    5951 
    5952 class VmaDefragmentationAlgorithm;
    5953 
    5954 /*
    5955 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
    5956 Vulkan memory type.
    5957 
    5958 Synchronized internally with a mutex.
    5959 */
    5960 struct VmaBlockVector
    5961 {
    5962  VMA_CLASS_NO_COPY(VmaBlockVector)
    5963 public:
    5964  VmaBlockVector(
    5965  VmaAllocator hAllocator,
    5966  VmaPool hParentPool,
    5967  uint32_t memoryTypeIndex,
    5968  VkDeviceSize preferredBlockSize,
    5969  size_t minBlockCount,
    5970  size_t maxBlockCount,
    5971  VkDeviceSize bufferImageGranularity,
    5972  uint32_t frameInUseCount,
    5973  bool isCustomPool,
    5974  bool explicitBlockSize,
    5975  uint32_t algorithm);
    5976  ~VmaBlockVector();
    5977 
    5978  VkResult CreateMinBlocks();
    5979 
    5980  VmaPool GetParentPool() const { return m_hParentPool; }
    5981  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    5982  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    5983  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    5984  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    5985  uint32_t GetAlgorithm() const { return m_Algorithm; }
    5986 
    5987  void GetPoolStats(VmaPoolStats* pStats);
    5988 
    5989  bool IsEmpty() const { return m_Blocks.empty(); }
    5990  bool IsCorruptionDetectionEnabled() const;
    5991 
    5992  VkResult Allocate(
    5993  uint32_t currentFrameIndex,
    5994  VkDeviceSize size,
    5995  VkDeviceSize alignment,
    5996  const VmaAllocationCreateInfo& createInfo,
    5997  VmaSuballocationType suballocType,
    5998  size_t allocationCount,
    5999  VmaAllocation* pAllocations);
    6000 
    6001  void Free(
    6002  VmaAllocation hAllocation);
    6003 
    6004  // Adds statistics of this BlockVector to pStats.
    6005  void AddStats(VmaStats* pStats);
    6006 
    6007 #if VMA_STATS_STRING_ENABLED
    6008  void PrintDetailedMap(class VmaJsonWriter& json);
    6009 #endif
    6010 
    6011  void MakePoolAllocationsLost(
    6012  uint32_t currentFrameIndex,
    6013  size_t* pLostAllocationCount);
    6014  VkResult CheckCorruption();
    6015 
    6016  // Saves results in pCtx->res.
    6017  void Defragment(
    6018  class VmaBlockVectorDefragmentationContext* pCtx,
    6019  VmaDefragmentationStats* pStats,
    6020  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
    6021  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
    6022  VkCommandBuffer commandBuffer);
    6023  void DefragmentationEnd(
    6024  class VmaBlockVectorDefragmentationContext* pCtx,
    6025  VmaDefragmentationStats* pStats);
    6026 
    6027  ////////////////////////////////////////////////////////////////////////////////
    6028  // To be used only while the m_Mutex is locked. Used during defragmentation.
    6029 
    6030  size_t GetBlockCount() const { return m_Blocks.size(); }
    6031  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
    6032  size_t CalcAllocationCount() const;
    6033  bool IsBufferImageGranularityConflictPossible() const;
    6034 
    6035 private:
    6036  friend class VmaDefragmentationAlgorithm_Generic;
    6037 
    6038  const VmaAllocator m_hAllocator;
    6039  const VmaPool m_hParentPool;
    6040  const uint32_t m_MemoryTypeIndex;
    6041  const VkDeviceSize m_PreferredBlockSize;
    6042  const size_t m_MinBlockCount;
    6043  const size_t m_MaxBlockCount;
    6044  const VkDeviceSize m_BufferImageGranularity;
    6045  const uint32_t m_FrameInUseCount;
    6046  const bool m_IsCustomPool;
    6047  const bool m_ExplicitBlockSize;
    6048  const uint32_t m_Algorithm;
    6049  /* There can be at most one memory block that is completely empty - a
    6050  hysteresis to avoid the pessimistic case of alternating creation and destruction
    6051  of a VkDeviceMemory. */
    6052  bool m_HasEmptyBlock;
    6053  VMA_RW_MUTEX m_Mutex;
    6054  // Incrementally sorted by sumFreeSize, ascending.
    6055  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    6056  uint32_t m_NextBlockId;
    6057 
    6058  VkDeviceSize CalcMaxBlockSize() const;
    6059 
    6060  // Finds and removes given block from vector.
    6061  void Remove(VmaDeviceMemoryBlock* pBlock);
    6062 
    6063  // Performs a single step in sorting m_Blocks. They may not be fully sorted
    6064  // after this call.
    6065  void IncrementallySortBlocks();
    6066 
    6067  VkResult AllocatePage(
    6068  uint32_t currentFrameIndex,
    6069  VkDeviceSize size,
    6070  VkDeviceSize alignment,
    6071  const VmaAllocationCreateInfo& createInfo,
    6072  VmaSuballocationType suballocType,
    6073  VmaAllocation* pAllocation);
    6074 
    6075  // To be used only without CAN_MAKE_OTHER_LOST flag.
    6076  VkResult AllocateFromBlock(
    6077  VmaDeviceMemoryBlock* pBlock,
    6078  uint32_t currentFrameIndex,
    6079  VkDeviceSize size,
    6080  VkDeviceSize alignment,
    6081  VmaAllocationCreateFlags allocFlags,
    6082  void* pUserData,
    6083  VmaSuballocationType suballocType,
    6084  uint32_t strategy,
    6085  VmaAllocation* pAllocation);
    6086 
    6087  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
    6088 
    6089  // Saves result to pCtx->res.
    6090  void ApplyDefragmentationMovesCpu(
    6091  class VmaBlockVectorDefragmentationContext* pDefragCtx,
    6092  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
    6093  // Saves result to pCtx->res.
    6094  void ApplyDefragmentationMovesGpu(
    6095  class VmaBlockVectorDefragmentationContext* pDefragCtx,
    6096  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    6097  VkCommandBuffer commandBuffer);
    6098 
    6099  /*
    6100  Used during defragmentation. pDefragmentationStats is optional. It's in/out
    6101  - updated with new data.
    6102  */
    6103  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
    6104 };
    6105 
    6106 struct VmaPool_T
    6107 {
    6108  VMA_CLASS_NO_COPY(VmaPool_T)
    6109 public:
    6110  VmaBlockVector m_BlockVector;
    6111 
    6112  VmaPool_T(
    6113  VmaAllocator hAllocator,
    6114  const VmaPoolCreateInfo& createInfo,
    6115  VkDeviceSize preferredBlockSize);
    6116  ~VmaPool_T();
    6117 
    6118  uint32_t GetId() const { return m_Id; }
    6119  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
    6120 
    6121 #if VMA_STATS_STRING_ENABLED
    6122  //void PrintDetailedMap(class VmaStringBuilder& sb);
    6123 #endif
    6124 
    6125 private:
    6126  uint32_t m_Id;
    6127 };
    6128 
    6129 /*
    6130 Performs defragmentation:
    6131 
    6132 - Updates `pBlockVector->m_pMetadata`.
    6133 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
    6134 - Does not move actual data, only returns requested moves as `moves`.
    6135 */
    6136 class VmaDefragmentationAlgorithm
    6137 {
    6138  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
    6139 public:
    6140  VmaDefragmentationAlgorithm(
    6141  VmaAllocator hAllocator,
    6142  VmaBlockVector* pBlockVector,
    6143  uint32_t currentFrameIndex) :
    6144  m_hAllocator(hAllocator),
    6145  m_pBlockVector(pBlockVector),
    6146  m_CurrentFrameIndex(currentFrameIndex)
    6147  {
    6148  }
    6149  virtual ~VmaDefragmentationAlgorithm()
    6150  {
    6151  }
    6152 
    6153  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
    6154  virtual void AddAll() = 0;
    6155 
    6156  virtual VkResult Defragment(
    6157  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    6158  VkDeviceSize maxBytesToMove,
    6159  uint32_t maxAllocationsToMove) = 0;
    6160 
    6161  virtual VkDeviceSize GetBytesMoved() const = 0;
    6162  virtual uint32_t GetAllocationsMoved() const = 0;
    6163 
    6164 protected:
    6165  VmaAllocator const m_hAllocator;
    6166  VmaBlockVector* const m_pBlockVector;
    6167  const uint32_t m_CurrentFrameIndex;
    6168 
    6169  struct AllocationInfo
    6170  {
    6171  VmaAllocation m_hAllocation;
    6172  VkBool32* m_pChanged;
    6173 
    6174  AllocationInfo() :
    6175  m_hAllocation(VK_NULL_HANDLE),
    6176  m_pChanged(VMA_NULL)
    6177  {
    6178  }
    6179  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
    6180  m_hAllocation(hAlloc),
    6181  m_pChanged(pChanged)
    6182  {
    6183  }
    6184  };
    6185 };
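/*
Illustrative sketch (not part of the library): the intended calling sequence
against this interface, assuming valid hAllocator, pBlockVector, frameIndex and
allocationCallbacks:

    VmaDefragmentationAlgorithm_Generic algo(
        hAllocator, pBlockVector, frameIndex, false); // overlappingMoveSupported
    algo.AddAll(); // or algo.AddAllocation(hAlloc, &changed) per allocation

    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >
        moves((VmaStlAllocator<VmaDefragmentationMove>(allocationCallbacks)));
    VkResult res = algo.Defragment(moves, maxBytesToMove, maxAllocationsToMove);
    // On success `moves` lists src -> dst copies for the caller to perform;
    // metadata and allocation objects are already updated, as described in the
    // comment above class VmaDefragmentationAlgorithm.
*/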
    6186 
    6187 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
    6188 {
    6189  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
    6190 public:
    6191  VmaDefragmentationAlgorithm_Generic(
    6192  VmaAllocator hAllocator,
    6193  VmaBlockVector* pBlockVector,
    6194  uint32_t currentFrameIndex,
    6195  bool overlappingMoveSupported);
    6196  virtual ~VmaDefragmentationAlgorithm_Generic();
    6197 
    6198  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    6199  virtual void AddAll() { m_AllAllocations = true; }
    6200 
    6201  virtual VkResult Defragment(
    6202  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    6203  VkDeviceSize maxBytesToMove,
    6204  uint32_t maxAllocationsToMove);
    6205 
    6206  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    6207  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
    6208 
    6209 private:
    6210  uint32_t m_AllocationCount;
    6211  bool m_AllAllocations;
    6212 
    6213  VkDeviceSize m_BytesMoved;
    6214  uint32_t m_AllocationsMoved;
    6215 
    6216  struct AllocationInfoSizeGreater
    6217  {
    6218  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
    6219  {
    6220  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
    6221  }
    6222  };
    6223 
    6224  struct AllocationInfoOffsetGreater
    6225  {
    6226  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
    6227  {
    6228  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
    6229  }
    6230  };
    6231 
    6232  struct BlockInfo
    6233  {
    6234  size_t m_OriginalBlockIndex;
    6235  VmaDeviceMemoryBlock* m_pBlock;
    6236  bool m_HasNonMovableAllocations;
    6237  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
    6238 
    6239  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
    6240  m_OriginalBlockIndex(SIZE_MAX),
    6241  m_pBlock(VMA_NULL),
    6242  m_HasNonMovableAllocations(true),
    6243  m_Allocations(pAllocationCallbacks)
    6244  {
    6245  }
    6246 
    6247  void CalcHasNonMovableAllocations()
    6248  {
    6249  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
    6250  const size_t defragmentAllocCount = m_Allocations.size();
    6251  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
    6252  }
    6253 
    6254  void SortAllocationsBySizeDescending()
    6255  {
    6256  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
    6257  }
    6258 
    6259  void SortAllocationsByOffsetDescending()
    6260  {
    6261  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
    6262  }
    6263  };
    6264 
    6265  struct BlockPointerLess
    6266  {
    6267  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
    6268  {
    6269  return pLhsBlockInfo->m_pBlock < pRhsBlock;
    6270  }
    6271  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
    6272  {
    6273  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
    6274  }
    6275  };
    6276 
    6277  // 1. Blocks with some non-movable allocations go first.
    6278  // 2. Blocks with smaller sumFreeSize go first.
    6279  struct BlockInfoCompareMoveDestination
    6280  {
    6281  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
    6282  {
    6283  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
    6284  {
    6285  return true;
    6286  }
    6287  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
    6288  {
    6289  return false;
    6290  }
    6291  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
    6292  {
    6293  return true;
    6294  }
    6295  return false;
    6296  }
    6297  };
    6298 
    6299  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    6300  BlockInfoVector m_Blocks;
    6301 
    6302  VkResult DefragmentRound(
    6303  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    6304  VkDeviceSize maxBytesToMove,
    6305  uint32_t maxAllocationsToMove);
    6306 
    6307  size_t CalcBlocksWithNonMovableCount() const;
    6308 
    6309  static bool MoveMakesSense(
    6310  size_t dstBlockIndex, VkDeviceSize dstOffset,
    6311  size_t srcBlockIndex, VkDeviceSize srcOffset);
    6312 };
    6313 
    6314 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
    6315 {
    6316  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
    6317 public:
    6318  VmaDefragmentationAlgorithm_Fast(
    6319  VmaAllocator hAllocator,
    6320  VmaBlockVector* pBlockVector,
    6321  uint32_t currentFrameIndex,
    6322  bool overlappingMoveSupported);
    6323  virtual ~VmaDefragmentationAlgorithm_Fast();
    6324 
    6325  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
    6326  virtual void AddAll() { m_AllAllocations = true; }
    6327 
    6328  virtual VkResult Defragment(
    6329  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    6330  VkDeviceSize maxBytesToMove,
    6331  uint32_t maxAllocationsToMove);
    6332 
    6333  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    6334  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
    6335 
    6336 private:
    6337  struct BlockInfo
    6338  {
    6339  size_t origBlockIndex;
    6340  };
    6341 
    6342  class FreeSpaceDatabase
    6343  {
    6344  public:
    6345  FreeSpaceDatabase()
    6346  {
    6347  FreeSpace s = {};
    6348  s.blockInfoIndex = SIZE_MAX;
    6349  for(size_t i = 0; i < MAX_COUNT; ++i)
    6350  {
    6351  m_FreeSpaces[i] = s;
    6352  }
    6353  }
    6354 
    6355  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
    6356  {
    6357  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    6358  {
    6359  return;
    6360  }
    6361 
    6362  // Find the first empty (invalid) slot, or else the smallest structure to replace.
    6363  size_t bestIndex = SIZE_MAX;
    6364  for(size_t i = 0; i < MAX_COUNT; ++i)
    6365  {
    6366  // Empty structure.
    6367  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
    6368  {
    6369  bestIndex = i;
    6370  break;
    6371  }
    6372  if(m_FreeSpaces[i].size < size &&
    6373  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
    6374  {
    6375  bestIndex = i;
    6376  }
    6377  }
    6378 
    6379  if(bestIndex != SIZE_MAX)
    6380  {
    6381  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
    6382  m_FreeSpaces[bestIndex].offset = offset;
    6383  m_FreeSpaces[bestIndex].size = size;
    6384  }
    6385  }
    6386 
    6387  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
    6388  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
    6389  {
    6390  size_t bestIndex = SIZE_MAX;
    6391  VkDeviceSize bestFreeSpaceAfter = 0;
    6392  for(size_t i = 0; i < MAX_COUNT; ++i)
    6393  {
    6394  // Structure is valid.
    6395  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
    6396  {
    6397  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
    6398  // Allocation fits into this structure.
    6399  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
    6400  {
    6401  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
    6402  (dstOffset + size);
    6403  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
    6404  {
    6405  bestIndex = i;
    6406  bestFreeSpaceAfter = freeSpaceAfter;
    6407  }
    6408  }
    6409  }
    6410  }
    6411 
    6412  if(bestIndex != SIZE_MAX)
    6413  {
    6414  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
    6415  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
    6416 
    6417  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    6418  {
    6419  // Leave this structure for remaining empty space.
    6420  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
    6421  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
    6422  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
    6423  }
    6424  else
    6425  {
    6426  // This structure becomes invalid.
    6427  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
    6428  }
    6429 
    6430  return true;
    6431  }
    6432 
    6433  return false;
    6434  }
    6435 
    6436  private:
    6437  static const size_t MAX_COUNT = 4;
    6438 
    6439  struct FreeSpace
    6440  {
    6441  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
    6442  VkDeviceSize offset;
    6443  VkDeviceSize size;
    6444  } m_FreeSpaces[MAX_COUNT];
    6445  };
    6446 
    6447  const bool m_OverlappingMoveSupported;
    6448 
    6449  uint32_t m_AllocationCount;
    6450  bool m_AllAllocations;
    6451 
    6452  VkDeviceSize m_BytesMoved;
    6453  uint32_t m_AllocationsMoved;
    6454 
    6455  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
    6456 
    6457  void PreprocessMetadata();
    6458  void PostprocessMetadata();
    6459  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
    6460 };
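/*
Worked example for FreeSpaceDatabase::Fetch (illustrative numbers): suppose a
registered free space has offset = 100, size = 200, and Fetch is called with
alignment = 64, size = 120. Then:

    dstOffset      = VmaAlignUp(100, 64) = 128
    end of request = 128 + 120 = 248 <= 100 + 200 = 300, so it fits
    freeSpaceAfter = 300 - 248 = 52

Among all fitting slots, Fetch prefers the one leaving the LARGEST
freeSpaceAfter, keeping big remainders usable for later moves. Afterwards, if
52 >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER the slot shrinks to
offset = 248, size = 52; otherwise it is invalidated.
*/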
    6461 
    6462 struct VmaBlockDefragmentationContext
    6463 {
    6464  enum BLOCK_FLAG
    6465  {
    6466  BLOCK_FLAG_USED = 0x00000001,
    6467  };
    6468  uint32_t flags;
    6469  VkBuffer hBuffer;
    6470 };
    6471 
    6472 class VmaBlockVectorDefragmentationContext
    6473 {
    6474  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
    6475 public:
    6476  VkResult res;
    6477  bool mutexLocked;
    6478  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
    6479 
    6480  VmaBlockVectorDefragmentationContext(
    6481  VmaAllocator hAllocator,
    6482  VmaPool hCustomPool, // Optional.
    6483  VmaBlockVector* pBlockVector,
    6484  uint32_t currFrameIndex);
    6485  ~VmaBlockVectorDefragmentationContext();
    6486 
    6487  VmaPool GetCustomPool() const { return m_hCustomPool; }
    6488  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
    6489  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
    6490 
    6491  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    6492  void AddAll() { m_AllAllocations = true; }
    6493 
    6494  void Begin(bool overlappingMoveSupported);
    6495 
    6496 private:
    6497  const VmaAllocator m_hAllocator;
    6498  // Null if not from custom pool.
    6499  const VmaPool m_hCustomPool;
    6500  // Redundant, kept for convenience so it doesn't have to be fetched from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
    6501  VmaBlockVector* const m_pBlockVector;
    6502  const uint32_t m_CurrFrameIndex;
    6503  // Owner of this object.
    6504  VmaDefragmentationAlgorithm* m_pAlgorithm;
    6505 
    6506  struct AllocInfo
    6507  {
    6508  VmaAllocation hAlloc;
    6509  VkBool32* pChanged;
    6510  };
    6511  // Used between constructor and Begin.
    6512  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
    6513  bool m_AllAllocations;
    6514 };
    6515 
    6516 struct VmaDefragmentationContext_T
    6517 {
    6518 private:
    6519  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
    6520 public:
    6521  VmaDefragmentationContext_T(
    6522  VmaAllocator hAllocator,
    6523  uint32_t currFrameIndex,
    6524  uint32_t flags,
    6525  VmaDefragmentationStats* pStats);
    6526  ~VmaDefragmentationContext_T();
    6527 
    6528  void AddPools(uint32_t poolCount, VmaPool* pPools);
    6529  void AddAllocations(
    6530  uint32_t allocationCount,
    6531  VmaAllocation* pAllocations,
    6532  VkBool32* pAllocationsChanged);
    6533 
    6534  /*
    6535  Returns:
    6536  - `VK_SUCCESS` if succeeded and object can be destroyed immediately.
    6537  - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
    6538  - Negative value if an error occurred and the object can be destroyed immediately.
    6539  */
    6540  VkResult Defragment(
    6541  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
    6542  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
    6543  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats);
    6544 
    6545 private:
    6546  const VmaAllocator m_hAllocator;
    6547  const uint32_t m_CurrFrameIndex;
    6548  const uint32_t m_Flags;
    6549  VmaDefragmentationStats* const m_pStats;
    6550  // Owner of these objects.
    6551  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
    6552  // Owner of these objects.
    6553  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
    6554 };
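/*
Illustrative sketch (not part of the library): how the return values of
Defragment() above surface through the public API, assuming a filled-in
VmaDefragmentationInfo2 `info`:

    VmaDefragmentationStats stats = {};
    VmaDefragmentationContext ctx = VK_NULL_HANDLE;
    VkResult res = vmaDefragmentationBegin(allocator, &info, &stats, &ctx);
    if(res == VK_NOT_READY)
    {
        // GPU moves were recorded into info.commandBuffer: submit it and wait
        // for completion before touching the moved allocations.
    }
    if(res >= 0)
    {
        // Required after VK_SUCCESS as well as VK_NOT_READY.
        res = vmaDefragmentationEnd(allocator, ctx);
    }
    // Negative res: an error occurred; the context can be destroyed immediately.
*/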
    6555 
    6556 #if VMA_RECORDING_ENABLED
    6557 
    6558 class VmaRecorder
    6559 {
    6560 public:
    6561  VmaRecorder();
    6562  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
    6563  void WriteConfiguration(
    6564  const VkPhysicalDeviceProperties& devProps,
    6565  const VkPhysicalDeviceMemoryProperties& memProps,
    6566  bool dedicatedAllocationExtensionEnabled,
    6567  bool bindMemory2ExtensionEnabled);
    6568  ~VmaRecorder();
    6569 
    6570  void RecordCreateAllocator(uint32_t frameIndex);
    6571  void RecordDestroyAllocator(uint32_t frameIndex);
    6572  void RecordCreatePool(uint32_t frameIndex,
    6573  const VmaPoolCreateInfo& createInfo,
    6574  VmaPool pool);
    6575  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
    6576  void RecordAllocateMemory(uint32_t frameIndex,
    6577  const VkMemoryRequirements& vkMemReq,
    6578  const VmaAllocationCreateInfo& createInfo,
    6579  VmaAllocation allocation);
    6580  void RecordAllocateMemoryPages(uint32_t frameIndex,
    6581  const VkMemoryRequirements& vkMemReq,
    6582  const VmaAllocationCreateInfo& createInfo,
    6583  uint64_t allocationCount,
    6584  const VmaAllocation* pAllocations);
    6585  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    6586  const VkMemoryRequirements& vkMemReq,
    6587  bool requiresDedicatedAllocation,
    6588  bool prefersDedicatedAllocation,
    6589  const VmaAllocationCreateInfo& createInfo,
    6590  VmaAllocation allocation);
    6591  void RecordAllocateMemoryForImage(uint32_t frameIndex,
    6592  const VkMemoryRequirements& vkMemReq,
    6593  bool requiresDedicatedAllocation,
    6594  bool prefersDedicatedAllocation,
    6595  const VmaAllocationCreateInfo& createInfo,
    6596  VmaAllocation allocation);
    6597  void RecordFreeMemory(uint32_t frameIndex,
    6598  VmaAllocation allocation);
    6599  void RecordFreeMemoryPages(uint32_t frameIndex,
    6600  uint64_t allocationCount,
    6601  const VmaAllocation* pAllocations);
    6602  void RecordSetAllocationUserData(uint32_t frameIndex,
    6603  VmaAllocation allocation,
    6604  const void* pUserData);
    6605  void RecordCreateLostAllocation(uint32_t frameIndex,
    6606  VmaAllocation allocation);
    6607  void RecordMapMemory(uint32_t frameIndex,
    6608  VmaAllocation allocation);
    6609  void RecordUnmapMemory(uint32_t frameIndex,
    6610  VmaAllocation allocation);
    6611  void RecordFlushAllocation(uint32_t frameIndex,
    6612  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    6613  void RecordInvalidateAllocation(uint32_t frameIndex,
    6614  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    6615  void RecordCreateBuffer(uint32_t frameIndex,
    6616  const VkBufferCreateInfo& bufCreateInfo,
    6617  const VmaAllocationCreateInfo& allocCreateInfo,
    6618  VmaAllocation allocation);
    6619  void RecordCreateImage(uint32_t frameIndex,
    6620  const VkImageCreateInfo& imageCreateInfo,
    6621  const VmaAllocationCreateInfo& allocCreateInfo,
    6622  VmaAllocation allocation);
    6623  void RecordDestroyBuffer(uint32_t frameIndex,
    6624  VmaAllocation allocation);
    6625  void RecordDestroyImage(uint32_t frameIndex,
    6626  VmaAllocation allocation);
    6627  void RecordTouchAllocation(uint32_t frameIndex,
    6628  VmaAllocation allocation);
    6629  void RecordGetAllocationInfo(uint32_t frameIndex,
    6630  VmaAllocation allocation);
    6631  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
    6632  VmaPool pool);
    6633  void RecordDefragmentationBegin(uint32_t frameIndex,
    6634  const VmaDefragmentationInfo2& info,
    6635  VmaDefragmentationContext ctx);
    6636  void RecordDefragmentationEnd(uint32_t frameIndex,
    6637  VmaDefragmentationContext ctx);
    6638 
    6639 private:
    6640  struct CallParams
    6641  {
    6642  uint32_t threadId;
    6643  double time;
    6644  };
    6645 
    6646  class UserDataString
    6647  {
    6648  public:
    6649  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
    6650  const char* GetString() const { return m_Str; }
    6651 
    6652  private:
    6653  char m_PtrStr[17];
    6654  const char* m_Str;
    6655  };
    6656 
    6657  bool m_UseMutex;
    6658  VmaRecordFlags m_Flags;
    6659  FILE* m_File;
    6660  VMA_MUTEX m_FileMutex;
    6661  int64_t m_Freq;
    6662  int64_t m_StartCounter;
    6663 
    6664  void GetBasicParams(CallParams& outParams);
    6665 
    6666  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
    6667  template<typename T>
    6668  void PrintPointerList(uint64_t count, const T* pItems)
    6669  {
    6670  if(count)
    6671  {
    6672  fprintf(m_File, "%p", pItems[0]);
    6673  for(uint64_t i = 1; i < count; ++i)
    6674  {
    6675  fprintf(m_File, " %p", pItems[i]);
    6676  }
    6677  }
    6678  }
    6679 
    6680  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
    6681  void Flush();
    6682 };
    6683 
    6684 #endif // #if VMA_RECORDING_ENABLED
    6685 
    6686 /*
    6687 Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
    6688 */
    6689 class VmaAllocationObjectAllocator
    6690 {
    6691  VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
    6692 public:
    6693  VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);
    6694 
    6695  VmaAllocation Allocate();
    6696  void Free(VmaAllocation hAlloc);
    6697 
    6698 private:
    6699  VMA_MUTEX m_Mutex;
    6700  VmaPoolAllocator<VmaAllocation_T> m_Allocator;
    6701 };
    6702 
    6703 // Main allocator object.
    6704 struct VmaAllocator_T
    6705 {
    6706  VMA_CLASS_NO_COPY(VmaAllocator_T)
    6707 public:
    6708  bool m_UseMutex;
    6709  bool m_UseKhrDedicatedAllocation;
    6710  bool m_UseKhrBindMemory2;
    6711  VkDevice m_hDevice;
    6712  bool m_AllocationCallbacksSpecified;
    6713  VkAllocationCallbacks m_AllocationCallbacks;
    6714  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
    6715  VmaAllocationObjectAllocator m_AllocationObjectAllocator;
    6716 
    6717  // Number of bytes still free out of the limit, or VK_WHOLE_SIZE if there is no limit for that heap.
    6718  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    6719  VMA_MUTEX m_HeapSizeLimitMutex;
    6720 
    6721  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    6722  VkPhysicalDeviceMemoryProperties m_MemProps;
    6723 
    6724  // Default pools.
    6725  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
    6726 
    6727  // Each vector is sorted by memory (handle value).
    6728  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    6729  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    6730  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
    6731 
    6732  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    6733  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    6734  ~VmaAllocator_T();
    6735 
    6736  const VkAllocationCallbacks* GetAllocationCallbacks() const
    6737  {
    6738  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    6739  }
    6740  const VmaVulkanFunctions& GetVulkanFunctions() const
    6741  {
    6742  return m_VulkanFunctions;
    6743  }
    6744 
    6745  VkDeviceSize GetBufferImageGranularity() const
    6746  {
    6747  return VMA_MAX(
    6748  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
    6749  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    6750  }
    6751 
    6752  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    6753  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
    6754 
    6755  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    6756  {
    6757  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
    6758  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    6759  }
    6760  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    6761  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    6762  {
    6763  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
    6764  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    6765  }
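  // Example: a type with propertyFlags = HOST_VISIBLE | HOST_CACHED (but no
  // HOST_COHERENT): masking with (HOST_VISIBLE | HOST_COHERENT) leaves only
  // HOST_VISIBLE, which equals the right-hand side, so this returns true.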
    6766  // Minimum alignment for all allocations in specific memory type.
    6767  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    6768  {
    6769  return IsMemoryTypeNonCoherent(memTypeIndex) ?
    6770  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
    6771  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    6772  }
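  // Example: with VMA_DEBUG_ALIGNMENT == 1 (the default) and
  // nonCoherentAtomSize == 64, allocations in a HOST_VISIBLE but
  // non-HOST_COHERENT memory type get minimum alignment 64, so flush and
  // invalidate ranges can stay on nonCoherentAtomSize boundaries without
  // touching neighboring allocations.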
    6773 
    6774  bool IsIntegratedGpu() const
    6775  {
    6776  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    6777  }
    6778 
    6779 #if VMA_RECORDING_ENABLED
    6780  VmaRecorder* GetRecorder() const { return m_pRecorder; }
    6781 #endif
    6782 
    6783  void GetBufferMemoryRequirements(
    6784  VkBuffer hBuffer,
    6785  VkMemoryRequirements& memReq,
    6786  bool& requiresDedicatedAllocation,
    6787  bool& prefersDedicatedAllocation) const;
    6788  void GetImageMemoryRequirements(
    6789  VkImage hImage,
    6790  VkMemoryRequirements& memReq,
    6791  bool& requiresDedicatedAllocation,
    6792  bool& prefersDedicatedAllocation) const;
    6793 
    6794  // Main allocation function.
    6795  VkResult AllocateMemory(
    6796  const VkMemoryRequirements& vkMemReq,
    6797  bool requiresDedicatedAllocation,
    6798  bool prefersDedicatedAllocation,
    6799  VkBuffer dedicatedBuffer,
    6800  VkImage dedicatedImage,
    6801  const VmaAllocationCreateInfo& createInfo,
    6802  VmaSuballocationType suballocType,
    6803  size_t allocationCount,
    6804  VmaAllocation* pAllocations);
    6805 
    6806  // Main deallocation function.
    6807  void FreeMemory(
    6808  size_t allocationCount,
    6809  const VmaAllocation* pAllocations);
    6810 
    6811  VkResult ResizeAllocation(
    6812  const VmaAllocation alloc,
    6813  VkDeviceSize newSize);
    6814 
    6815  void CalculateStats(VmaStats* pStats);
    6816 
    6817 #if VMA_STATS_STRING_ENABLED
    6818  void PrintDetailedMap(class VmaJsonWriter& json);
    6819 #endif
    6820 
    6821  VkResult DefragmentationBegin(
    6822  const VmaDefragmentationInfo2& info,
    6823  VmaDefragmentationStats* pStats,
    6824  VmaDefragmentationContext* pContext);
    6825  VkResult DefragmentationEnd(
    6826  VmaDefragmentationContext context);
    6827 
    6828  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    6829  bool TouchAllocation(VmaAllocation hAllocation);
    6830 
    6831  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    6832  void DestroyPool(VmaPool pool);
    6833  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
    6834 
    6835  void SetCurrentFrameIndex(uint32_t frameIndex);
    6836  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
    6837 
    6838  void MakePoolAllocationsLost(
    6839  VmaPool hPool,
    6840  size_t* pLostAllocationCount);
    6841  VkResult CheckPoolCorruption(VmaPool hPool);
    6842  VkResult CheckCorruption(uint32_t memoryTypeBits);
    6843 
    6844  void CreateLostAllocation(VmaAllocation* pAllocation);
    6845 
    6846  // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping.
    6847  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    6848  // Call to Vulkan function vkFreeMemory with accompanying bookkeeping.
    6849  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
    6850  // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR.
    6851  VkResult BindVulkanBuffer(
    6852  VkDeviceMemory memory,
    6853  VkDeviceSize memoryOffset,
    6854  VkBuffer buffer,
    6855  const void* pNext);
    6856  // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR.
    6857  VkResult BindVulkanImage(
    6858  VkDeviceMemory memory,
    6859  VkDeviceSize memoryOffset,
    6860  VkImage image,
    6861  const void* pNext);
    6862 
    6863  VkResult Map(VmaAllocation hAllocation, void** ppData);
    6864  void Unmap(VmaAllocation hAllocation);
    6865 
    6866  VkResult BindBufferMemory(
    6867  VmaAllocation hAllocation,
    6868  VkDeviceSize allocationLocalOffset,
    6869  VkBuffer hBuffer,
    6870  const void* pNext);
    6871  VkResult BindImageMemory(
    6872  VmaAllocation hAllocation,
    6873  VkDeviceSize allocationLocalOffset,
    6874  VkImage hImage,
    6875  const void* pNext);
    6876 
    6877  void FlushOrInvalidateAllocation(
    6878  VmaAllocation hAllocation,
    6879  VkDeviceSize offset, VkDeviceSize size,
    6880  VMA_CACHE_OPERATION op);
    6881 
    6882  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
    6883 
    6884  /*
    6885  Returns a bit mask of memory types that can support defragmentation on the GPU,
    6886  i.e. those that support creation of the buffer required for copy operations.
    6887  */
    6888  uint32_t GetGpuDefragmentationMemoryTypeBits();
    6889 
    6890 private:
    6891  VkDeviceSize m_PreferredLargeHeapBlockSize;
    6892 
    6893  VkPhysicalDevice m_PhysicalDevice;
    6894  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
    6895  VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.
    6896 
    6897  VMA_RW_MUTEX m_PoolsMutex;
    6898  // Protected by m_PoolsMutex. Sorted by pointer value.
    6899  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    6900  uint32_t m_NextPoolId;
    6901 
    6902  VmaVulkanFunctions m_VulkanFunctions;
    6903 
    6904 #if VMA_RECORDING_ENABLED
    6905  VmaRecorder* m_pRecorder;
    6906 #endif
    6907 
    6908  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
    6909 
    6910  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
    6911 
    6912  VkResult AllocateMemoryOfType(
    6913  VkDeviceSize size,
    6914  VkDeviceSize alignment,
    6915  bool dedicatedAllocation,
    6916  VkBuffer dedicatedBuffer,
    6917  VkImage dedicatedImage,
    6918  const VmaAllocationCreateInfo& createInfo,
    6919  uint32_t memTypeIndex,
    6920  VmaSuballocationType suballocType,
    6921  size_t allocationCount,
    6922  VmaAllocation* pAllocations);
    6923 
    6924  // Helper function only to be used inside AllocateDedicatedMemory.
    6925  VkResult AllocateDedicatedMemoryPage(
    6926  VkDeviceSize size,
    6927  VmaSuballocationType suballocType,
    6928  uint32_t memTypeIndex,
    6929  const VkMemoryAllocateInfo& allocInfo,
    6930  bool map,
    6931  bool isUserDataString,
    6932  void* pUserData,
    6933  VmaAllocation* pAllocation);
    6934 
    6935  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
    6936  VkResult AllocateDedicatedMemory(
    6937  VkDeviceSize size,
    6938  VmaSuballocationType suballocType,
    6939  uint32_t memTypeIndex,
    6940  bool map,
    6941  bool isUserDataString,
    6942  void* pUserData,
    6943  VkBuffer dedicatedBuffer,
    6944  VkImage dedicatedImage,
    6945  size_t allocationCount,
    6946  VmaAllocation* pAllocations);
    6947 
    6948  void FreeDedicatedMemory(VmaAllocation allocation);
    6949 
    6950  /*
    6951  Calculates and returns a bit mask of memory types that can support defragmentation
    6952  on the GPU, i.e. those that support creation of the buffer required for copy operations.
    6953  */
    6954  uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
    6955 };
    6956 
    6957 ////////////////////////////////////////////////////////////////////////////////
    6958 // Memory allocation #2 after VmaAllocator_T definition
    6959 
    6960 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
    6961 {
    6962  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
    6963 }
    6964 
    6965 static void VmaFree(VmaAllocator hAllocator, void* ptr)
    6966 {
    6967  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
    6968 }
    6969 
    6970 template<typename T>
    6971 static T* VmaAllocate(VmaAllocator hAllocator)
    6972 {
    6973  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
    6974 }
    6975 
    6976 template<typename T>
    6977 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
    6978 {
    6979  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
    6980 }
    6981 
    6982 template<typename T>
    6983 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    6984 {
    6985  if(ptr != VMA_NULL)
    6986  {
    6987  ptr->~T();
    6988  VmaFree(hAllocator, ptr);
    6989  }
    6990 }
    6991 
    6992 template<typename T>
    6993 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    6994 {
    6995  if(ptr != VMA_NULL)
    6996  {
    6997  for(size_t i = count; i--; )
    6998  ptr[i].~T();
    6999  VmaFree(hAllocator, ptr);
    7000  }
    7001 }
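/*
Illustrative sketch (not part of the library): VmaAllocate<T> only obtains raw,
suitably aligned storage; construction is explicit, and vma_delete pairs the
destructor call with VmaFree. With a hypothetical default-constructible type T:

    T* p = VmaAllocate<T>(hAllocator); // raw storage for one T
    new(p) T();                        // construct in place (requires <new>)
    // ... use *p ...
    vma_delete(hAllocator, p);         // runs ~T(), then frees the storage
*/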
    7002 
    7003 ////////////////////////////////////////////////////////////////////////////////
    7004 // VmaStringBuilder
    7005 
    7006 #if VMA_STATS_STRING_ENABLED
    7007 
    7008 class VmaStringBuilder
    7009 {
    7010 public:
    7011  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    7012  size_t GetLength() const { return m_Data.size(); }
    7013  const char* GetData() const { return m_Data.data(); }
    7014 
    7015  void Add(char ch) { m_Data.push_back(ch); }
    7016  void Add(const char* pStr);
    7017  void AddNewLine() { Add('\n'); }
    7018  void AddNumber(uint32_t num);
    7019  void AddNumber(uint64_t num);
    7020  void AddPointer(const void* ptr);
    7021 
    7022 private:
    7023  VmaVector< char, VmaStlAllocator<char> > m_Data;
    7024 };
    7025 
    7026 void VmaStringBuilder::Add(const char* pStr)
    7027 {
    7028  const size_t strLen = strlen(pStr);
    7029  if(strLen > 0)
    7030  {
    7031  const size_t oldCount = m_Data.size();
    7032  m_Data.resize(oldCount + strLen);
    7033  memcpy(m_Data.data() + oldCount, pStr, strLen);
    7034  }
    7035 }
    7036 
    7037 void VmaStringBuilder::AddNumber(uint32_t num)
    7038 {
    7039  char buf[11];
    7040  VmaUint32ToStr(buf, sizeof(buf), num);
    7041  Add(buf);
    7042 }
    7043 
    7044 void VmaStringBuilder::AddNumber(uint64_t num)
    7045 {
    7046  char buf[21];
    7047  VmaUint64ToStr(buf, sizeof(buf), num);
    7048  Add(buf);
    7049 }
    7050 
    7051 void VmaStringBuilder::AddPointer(const void* ptr)
    7052 {
    7053  char buf[21];
    7054  VmaPtrToStr(buf, sizeof(buf), ptr);
    7055  Add(buf);
    7056 }
    7057 
    7058 #endif // #if VMA_STATS_STRING_ENABLED
    7059 
    7060 ////////////////////////////////////////////////////////////////////////////////
    7061 // VmaJsonWriter
    7062 
    7063 #if VMA_STATS_STRING_ENABLED
    7064 
    7065 class VmaJsonWriter
    7066 {
    7067  VMA_CLASS_NO_COPY(VmaJsonWriter)
    7068 public:
    7069  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    7070  ~VmaJsonWriter();
    7071 
    7072  void BeginObject(bool singleLine = false);
    7073  void EndObject();
    7074 
    7075  void BeginArray(bool singleLine = false);
    7076  void EndArray();
    7077 
    7078  void WriteString(const char* pStr);
    7079  void BeginString(const char* pStr = VMA_NULL);
    7080  void ContinueString(const char* pStr);
    7081  void ContinueString(uint32_t n);
    7082  void ContinueString(uint64_t n);
    7083  void ContinueString_Pointer(const void* ptr);
    7084  void EndString(const char* pStr = VMA_NULL);
    7085 
    7086  void WriteNumber(uint32_t n);
    7087  void WriteNumber(uint64_t n);
    7088  void WriteBool(bool b);
    7089  void WriteNull();
    7090 
    7091 private:
    7092  static const char* const INDENT;
    7093 
    7094  enum COLLECTION_TYPE
    7095  {
    7096  COLLECTION_TYPE_OBJECT,
    7097  COLLECTION_TYPE_ARRAY,
    7098  };
    7099  struct StackItem
    7100  {
    7101  COLLECTION_TYPE type;
    7102  uint32_t valueCount;
    7103  bool singleLineMode;
    7104  };
    7105 
    7106  VmaStringBuilder& m_SB;
    7107  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    7108  bool m_InsideString;
    7109 
    7110  void BeginValue(bool isString);
    7111  void WriteIndent(bool oneLess = false);
    7112 };
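
// Editor's note: an illustrative driving sequence for this writer (hypothetical
// values; the real call sites are the statistics functions further below). Inside
// an object, keys and values must alternate - BeginValue() below enforces this and
// emits ": " after a key and ", " plus indentation between pairs:
//
//   VmaStringBuilder sb(hAllocator);
//   {
//       VmaJsonWriter json(pAllocationCallbacks, sb);
//       json.BeginObject();
//       json.WriteString("Blocks");   // key (must be a string)
//       json.WriteNumber(3u);         // value
//       json.EndObject();
//   } // destructor asserts that the object/array stack is empty
//   // sb now contains: {\n  "Blocks": 3\n} (indented with INDENT)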
    7113 
    7114 const char* const VmaJsonWriter::INDENT = " ";
    7115 
    7116 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    7117  m_SB(sb),
    7118  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    7119  m_InsideString(false)
    7120 {
    7121 }
    7122 
    7123 VmaJsonWriter::~VmaJsonWriter()
    7124 {
    7125  VMA_ASSERT(!m_InsideString);
    7126  VMA_ASSERT(m_Stack.empty());
    7127 }
    7128 
    7129 void VmaJsonWriter::BeginObject(bool singleLine)
    7130 {
    7131  VMA_ASSERT(!m_InsideString);
    7132 
    7133  BeginValue(false);
    7134  m_SB.Add('{');
    7135 
    7136  StackItem item;
    7137  item.type = COLLECTION_TYPE_OBJECT;
    7138  item.valueCount = 0;
    7139  item.singleLineMode = singleLine;
    7140  m_Stack.push_back(item);
    7141 }
    7142 
    7143 void VmaJsonWriter::EndObject()
    7144 {
    7145  VMA_ASSERT(!m_InsideString);
    7146 
    7147  WriteIndent(true);
    7148  m_SB.Add('}');
    7149 
    7150  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    7151  m_Stack.pop_back();
    7152 }
    7153 
    7154 void VmaJsonWriter::BeginArray(bool singleLine)
    7155 {
    7156  VMA_ASSERT(!m_InsideString);
    7157 
    7158  BeginValue(false);
    7159  m_SB.Add('[');
    7160 
    7161  StackItem item;
    7162  item.type = COLLECTION_TYPE_ARRAY;
    7163  item.valueCount = 0;
    7164  item.singleLineMode = singleLine;
    7165  m_Stack.push_back(item);
    7166 }
    7167 
    7168 void VmaJsonWriter::EndArray()
    7169 {
    7170  VMA_ASSERT(!m_InsideString);
    7171 
    7172  WriteIndent(true);
    7173  m_SB.Add(']');
    7174 
    7175  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    7176  m_Stack.pop_back();
    7177 }
    7178 
    7179 void VmaJsonWriter::WriteString(const char* pStr)
    7180 {
    7181  BeginString(pStr);
    7182  EndString();
    7183 }
    7184 
    7185 void VmaJsonWriter::BeginString(const char* pStr)
    7186 {
    7187  VMA_ASSERT(!m_InsideString);
    7188 
    7189  BeginValue(true);
    7190  m_SB.Add('"');
    7191  m_InsideString = true;
    7192  if(pStr != VMA_NULL && pStr[0] != '\0')
    7193  {
    7194  ContinueString(pStr);
    7195  }
    7196 }
    7197 
    7198 void VmaJsonWriter::ContinueString(const char* pStr)
    7199 {
    7200  VMA_ASSERT(m_InsideString);
    7201 
    7202  const size_t strLen = strlen(pStr);
    7203  for(size_t i = 0; i < strLen; ++i)
    7204  {
    7205  char ch = pStr[i];
    7206  if(ch == '\\')
    7207  {
    7208  m_SB.Add("\\\\");
    7209  }
    7210  else if(ch == '"')
    7211  {
    7212  m_SB.Add("\\\"");
    7213  }
    7214  else if(ch >= 32)
    7215  {
    7216  m_SB.Add(ch);
    7217  }
    7218  else switch(ch)
    7219  {
    7220  case '\b':
    7221  m_SB.Add("\\b");
    7222  break;
    7223  case '\f':
    7224  m_SB.Add("\\f");
    7225  break;
    7226  case '\n':
    7227  m_SB.Add("\\n");
    7228  break;
    7229  case '\r':
    7230  m_SB.Add("\\r");
    7231  break;
    7232  case '\t':
    7233  m_SB.Add("\\t");
    7234  break;
    7235  default:
    7236  VMA_ASSERT(0 && "Character not currently supported.");
    7237  break;
    7238  }
    7239  }
    7240 }
    7241 
    7242 void VmaJsonWriter::ContinueString(uint32_t n)
    7243 {
    7244  VMA_ASSERT(m_InsideString);
    7245  m_SB.AddNumber(n);
    7246 }
    7247 
    7248 void VmaJsonWriter::ContinueString(uint64_t n)
    7249 {
    7250  VMA_ASSERT(m_InsideString);
    7251  m_SB.AddNumber(n);
    7252 }
    7253 
    7254 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
    7255 {
    7256  VMA_ASSERT(m_InsideString);
    7257  m_SB.AddPointer(ptr);
    7258 }
    7259 
    7260 void VmaJsonWriter::EndString(const char* pStr)
    7261 {
    7262  VMA_ASSERT(m_InsideString);
    7263  if(pStr != VMA_NULL && pStr[0] != '\0')
    7264  {
    7265  ContinueString(pStr);
    7266  }
    7267  m_SB.Add('"');
    7268  m_InsideString = false;
    7269 }
    7270 
    7271 void VmaJsonWriter::WriteNumber(uint32_t n)
    7272 {
    7273  VMA_ASSERT(!m_InsideString);
    7274  BeginValue(false);
    7275  m_SB.AddNumber(n);
    7276 }
    7277 
    7278 void VmaJsonWriter::WriteNumber(uint64_t n)
    7279 {
    7280  VMA_ASSERT(!m_InsideString);
    7281  BeginValue(false);
    7282  m_SB.AddNumber(n);
    7283 }
    7284 
    7285 void VmaJsonWriter::WriteBool(bool b)
    7286 {
    7287  VMA_ASSERT(!m_InsideString);
    7288  BeginValue(false);
    7289  m_SB.Add(b ? "true" : "false");
    7290 }
    7291 
    7292 void VmaJsonWriter::WriteNull()
    7293 {
    7294  VMA_ASSERT(!m_InsideString);
    7295  BeginValue(false);
    7296  m_SB.Add("null");
    7297 }
    7298 
    7299 void VmaJsonWriter::BeginValue(bool isString)
    7300 {
    7301  if(!m_Stack.empty())
    7302  {
    7303  StackItem& currItem = m_Stack.back();
    7304  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    7305  currItem.valueCount % 2 == 0)
    7306  {
    7307  VMA_ASSERT(isString);
    7308  }
    7309 
    7310  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    7311  currItem.valueCount % 2 != 0)
    7312  {
    7313  m_SB.Add(": ");
    7314  }
    7315  else if(currItem.valueCount > 0)
    7316  {
    7317  m_SB.Add(", ");
    7318  WriteIndent();
    7319  }
    7320  else
    7321  {
    7322  WriteIndent();
    7323  }
    7324  ++currItem.valueCount;
    7325  }
    7326 }
    7327 
    7328 void VmaJsonWriter::WriteIndent(bool oneLess)
    7329 {
    7330  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    7331  {
    7332  m_SB.AddNewLine();
    7333 
    7334  size_t count = m_Stack.size();
    7335  if(count > 0 && oneLess)
    7336  {
    7337  --count;
    7338  }
    7339  for(size_t i = 0; i < count; ++i)
    7340  {
    7341  m_SB.Add(INDENT);
    7342  }
    7343  }
    7344 }
    7345 
    7346 #endif // #if VMA_STATS_STRING_ENABLED
    7347 
    7348 ////////////////////////////////////////////////////////////////////////////////
    7349 
    7350 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
    7351 {
    7352  if(IsUserDataString())
    7353  {
    7354  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
    7355 
    7356  FreeUserDataString(hAllocator);
    7357 
    7358  if(pUserData != VMA_NULL)
    7359  {
    7360  const char* const newStrSrc = (char*)pUserData;
    7361  const size_t newStrLen = strlen(newStrSrc);
    7362  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
    7363  memcpy(newStrDst, newStrSrc, newStrLen + 1);
    7364  m_pUserData = newStrDst;
    7365  }
    7366  }
    7367  else
    7368  {
    7369  m_pUserData = pUserData;
    7370  }
    7371 }
    7372 
    7373 void VmaAllocation_T::ChangeBlockAllocation(
    7374  VmaAllocator hAllocator,
    7375  VmaDeviceMemoryBlock* block,
    7376  VkDeviceSize offset)
    7377 {
    7378  VMA_ASSERT(block != VMA_NULL);
    7379  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    7380 
    7381  // Move mapping reference counter from old block to new block.
    7382  if(block != m_BlockAllocation.m_Block)
    7383  {
    7384  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
    7385  if(IsPersistentMap())
    7386  ++mapRefCount;
    7387  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
    7388  block->Map(hAllocator, mapRefCount, VMA_NULL);
    7389  }
    7390 
    7391  m_BlockAllocation.m_Block = block;
    7392  m_BlockAllocation.m_Offset = offset;
    7393 }
    7394 
    7395 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
    7396 {
    7397  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    7398  m_BlockAllocation.m_Offset = newOffset;
    7399 }
    7400 
    7401 VkDeviceSize VmaAllocation_T::GetOffset() const
    7402 {
    7403  switch(m_Type)
    7404  {
    7405  case ALLOCATION_TYPE_BLOCK:
    7406  return m_BlockAllocation.m_Offset;
    7407  case ALLOCATION_TYPE_DEDICATED:
    7408  return 0;
    7409  default:
    7410  VMA_ASSERT(0);
    7411  return 0;
    7412  }
    7413 }
    7414 
    7415 VkDeviceMemory VmaAllocation_T::GetMemory() const
    7416 {
    7417  switch(m_Type)
    7418  {
    7419  case ALLOCATION_TYPE_BLOCK:
    7420  return m_BlockAllocation.m_Block->GetDeviceMemory();
    7421  case ALLOCATION_TYPE_DEDICATED:
    7422  return m_DedicatedAllocation.m_hMemory;
    7423  default:
    7424  VMA_ASSERT(0);
    7425  return VK_NULL_HANDLE;
    7426  }
    7427 }
    7428 
    7429 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    7430 {
    7431  switch(m_Type)
    7432  {
    7433  case ALLOCATION_TYPE_BLOCK:
    7434  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
    7435  case ALLOCATION_TYPE_DEDICATED:
    7436  return m_DedicatedAllocation.m_MemoryTypeIndex;
    7437  default:
    7438  VMA_ASSERT(0);
    7439  return UINT32_MAX;
    7440  }
    7441 }
    7442 
    7443 void* VmaAllocation_T::GetMappedData() const
    7444 {
    7445  switch(m_Type)
    7446  {
    7447  case ALLOCATION_TYPE_BLOCK:
    7448  if(m_MapCount != 0)
    7449  {
    7450  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
    7451  VMA_ASSERT(pBlockData != VMA_NULL);
    7452  return (char*)pBlockData + m_BlockAllocation.m_Offset;
    7453  }
    7454  else
    7455  {
    7456  return VMA_NULL;
    7457  }
    7458  break;
    7459  case ALLOCATION_TYPE_DEDICATED:
    7460  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
    7461  return m_DedicatedAllocation.m_pMappedData;
    7462  default:
    7463  VMA_ASSERT(0);
    7464  return VMA_NULL;
    7465  }
    7466 }
    7467 
    7468 bool VmaAllocation_T::CanBecomeLost() const
    7469 {
    7470  switch(m_Type)
    7471  {
    7472  case ALLOCATION_TYPE_BLOCK:
    7473  return m_BlockAllocation.m_CanBecomeLost;
    7474  case ALLOCATION_TYPE_DEDICATED:
    7475  return false;
    7476  default:
    7477  VMA_ASSERT(0);
    7478  return false;
    7479  }
    7480 }
    7481 
    7482 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    7483 {
    7484  VMA_ASSERT(CanBecomeLost());
    7485 
    7486  /*
    7487  Warning: This is a carefully designed algorithm.
    7488  Do not modify unless you really know what you're doing :)
    7489  */
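 // Editor's note: the loop below is a lock-free compare-and-swap retry. Assuming
 // CompareExchangeLastUseFrameIndex() behaves like std::atomic compare_exchange and
 // refreshes localLastUseFrameIndex with the current value on failure, each
 // iteration re-evaluates the three cases against a fresh snapshot until the index
 // is swapped to VMA_FRAME_INDEX_LOST or the allocation proves to be still in use.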
    7490  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    7491  for(;;)
    7492  {
    7493  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    7494  {
    7495  VMA_ASSERT(0);
    7496  return false;
    7497  }
    7498  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
    7499  {
    7500  return false;
    7501  }
    7502  else // Last use time earlier than current time.
    7503  {
    7504  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
    7505  {
    7506  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
    7507  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
    7508  return true;
    7509  }
    7510  }
    7511  }
    7512 }
    7513 
    7514 #if VMA_STATS_STRING_ENABLED
    7515 
    7516 // Correspond to values of enum VmaSuballocationType.
    7517 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    7518  "FREE",
    7519  "UNKNOWN",
    7520  "BUFFER",
    7521  "IMAGE_UNKNOWN",
    7522  "IMAGE_LINEAR",
    7523  "IMAGE_OPTIMAL",
    7524 };
    7525 
    7526 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
    7527 {
    7528  json.WriteString("Type");
    7529  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
    7530 
    7531  json.WriteString("Size");
    7532  json.WriteNumber(m_Size);
    7533 
    7534  if(m_pUserData != VMA_NULL)
    7535  {
    7536  json.WriteString("UserData");
    7537  if(IsUserDataString())
    7538  {
    7539  json.WriteString((const char*)m_pUserData);
    7540  }
    7541  else
    7542  {
    7543  json.BeginString();
    7544  json.ContinueString_Pointer(m_pUserData);
    7545  json.EndString();
    7546  }
    7547  }
    7548 
    7549  json.WriteString("CreationFrameIndex");
    7550  json.WriteNumber(m_CreationFrameIndex);
    7551 
    7552  json.WriteString("LastUseFrameIndex");
    7553  json.WriteNumber(GetLastUseFrameIndex());
    7554 
    7555  if(m_BufferImageUsage != 0)
    7556  {
    7557  json.WriteString("Usage");
    7558  json.WriteNumber(m_BufferImageUsage);
    7559  }
    7560 }
    7561 
    7562 #endif
    7563 
    7564 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
    7565 {
    7566  VMA_ASSERT(IsUserDataString());
    7567  if(m_pUserData != VMA_NULL)
    7568  {
    7569  char* const oldStr = (char*)m_pUserData;
    7570  const size_t oldStrLen = strlen(oldStr);
    7571  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
    7572  m_pUserData = VMA_NULL;
    7573  }
    7574 }
    7575 
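// Editor's note: in the map/unmap functions below, m_MapCount packs two things into
// one counter: the MAP_COUNT_FLAG_PERSISTENT_MAP bit marks an allocation created
// persistently mapped, while the low bits (masked with ~MAP_COUNT_FLAG_PERSISTENT_MAP)
// count explicit map references - hence the 0x7F overflow checks.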
    7576 void VmaAllocation_T::BlockAllocMap()
    7577 {
    7578  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    7579 
    7580  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    7581  {
    7582  ++m_MapCount;
    7583  }
    7584  else
    7585  {
    7586  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    7587  }
    7588 }
    7589 
    7590 void VmaAllocation_T::BlockAllocUnmap()
    7591 {
    7592  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    7593 
    7594  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    7595  {
    7596  --m_MapCount;
    7597  }
    7598  else
    7599  {
    7600  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    7601  }
    7602 }
    7603 
    7604 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
    7605 {
    7606  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    7607 
    7608  if(m_MapCount != 0)
    7609  {
    7610  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    7611  {
    7612  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
    7613  *ppData = m_DedicatedAllocation.m_pMappedData;
    7614  ++m_MapCount;
    7615  return VK_SUCCESS;
    7616  }
    7617  else
    7618  {
    7619  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
    7620  return VK_ERROR_MEMORY_MAP_FAILED;
    7621  }
    7622  }
    7623  else
    7624  {
    7625  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    7626  hAllocator->m_hDevice,
    7627  m_DedicatedAllocation.m_hMemory,
    7628  0, // offset
    7629  VK_WHOLE_SIZE,
    7630  0, // flags
    7631  ppData);
    7632  if(result == VK_SUCCESS)
    7633  {
    7634  m_DedicatedAllocation.m_pMappedData = *ppData;
    7635  m_MapCount = 1;
    7636  }
    7637  return result;
    7638  }
    7639 }
    7640 
    7641 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
    7642 {
    7643  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    7644 
    7645  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    7646  {
    7647  --m_MapCount;
    7648  if(m_MapCount == 0)
    7649  {
    7650  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
    7651  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
    7652  hAllocator->m_hDevice,
    7653  m_DedicatedAllocation.m_hMemory);
    7654  }
    7655  }
    7656  else
    7657  {
    7658  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    7659  }
    7660 }
    7661 
    7662 #if VMA_STATS_STRING_ENABLED
    7663 
    7664 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
    7665 {
    7666  json.BeginObject();
    7667 
    7668  json.WriteString("Blocks");
    7669  json.WriteNumber(stat.blockCount);
    7670 
    7671  json.WriteString("Allocations");
    7672  json.WriteNumber(stat.allocationCount);
    7673 
    7674  json.WriteString("UnusedRanges");
    7675  json.WriteNumber(stat.unusedRangeCount);
    7676 
    7677  json.WriteString("UsedBytes");
    7678  json.WriteNumber(stat.usedBytes);
    7679 
    7680  json.WriteString("UnusedBytes");
    7681  json.WriteNumber(stat.unusedBytes);
    7682 
    7683  if(stat.allocationCount > 1)
    7684  {
    7685  json.WriteString("AllocationSize");
    7686  json.BeginObject(true);
    7687  json.WriteString("Min");
    7688  json.WriteNumber(stat.allocationSizeMin);
    7689  json.WriteString("Avg");
    7690  json.WriteNumber(stat.allocationSizeAvg);
    7691  json.WriteString("Max");
    7692  json.WriteNumber(stat.allocationSizeMax);
    7693  json.EndObject();
    7694  }
    7695 
    7696  if(stat.unusedRangeCount > 1)
    7697  {
    7698  json.WriteString("UnusedRangeSize");
    7699  json.BeginObject(true);
    7700  json.WriteString("Min");
    7701  json.WriteNumber(stat.unusedRangeSizeMin);
    7702  json.WriteString("Avg");
    7703  json.WriteNumber(stat.unusedRangeSizeAvg);
    7704  json.WriteString("Max");
    7705  json.WriteNumber(stat.unusedRangeSizeMax);
    7706  json.EndObject();
    7707  }
    7708 
    7709  json.EndObject();
    7710 }
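
// Editor's note: for illustration, one VmaStatInfo entry emitted by the function
// above has roughly this shape (all values hypothetical; the single-line nested
// objects come from BeginObject(true)):
//
//   { "Blocks": 1, "Allocations": 2, "UnusedRanges": 2,
//     "UsedBytes": 1024, "UnusedBytes": 512,
//     "AllocationSize": { "Min": 256, "Avg": 512, "Max": 768 },
//     "UnusedRangeSize": { "Min": 128, "Avg": 256, "Max": 384 } }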
    7711 
    7712 #endif // #if VMA_STATS_STRING_ENABLED
    7713 
    7714 struct VmaSuballocationItemSizeLess
    7715 {
    7716  bool operator()(
    7717  const VmaSuballocationList::iterator lhs,
    7718  const VmaSuballocationList::iterator rhs) const
    7719  {
    7720  return lhs->size < rhs->size;
    7721  }
    7722  bool operator()(
    7723  const VmaSuballocationList::iterator lhs,
    7724  VkDeviceSize rhsSize) const
    7725  {
    7726  return lhs->size < rhsSize;
    7727  }
    7728 };
    7729 
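// Editor's note: a sketch of how this comparator is used. Its second overload
// compares a list iterator against a raw VkDeviceSize, which lets
// VmaBinaryFindFirstNotLess() in CreateAllocationRequest() below perform a
// lower_bound-style search over the size-sorted m_FreeSuballocationsBySize vector:
//
//   // first registered free range whose size >= requestedSize
//   VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
//       vec.data(), vec.data() + vec.size(),
//       requestedSize, VmaSuballocationItemSizeLess());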
    7730 
    7731 ////////////////////////////////////////////////////////////////////////////////
    7732 // class VmaBlockMetadata
    7733 
    7734 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    7735  m_Size(0),
    7736  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
    7737 {
    7738 }
    7739 
    7740 #if VMA_STATS_STRING_ENABLED
    7741 
    7742 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    7743  VkDeviceSize unusedBytes,
    7744  size_t allocationCount,
    7745  size_t unusedRangeCount) const
    7746 {
    7747  json.BeginObject();
    7748 
    7749  json.WriteString("TotalBytes");
    7750  json.WriteNumber(GetSize());
    7751 
    7752  json.WriteString("UnusedBytes");
    7753  json.WriteNumber(unusedBytes);
    7754 
    7755  json.WriteString("Allocations");
    7756  json.WriteNumber((uint64_t)allocationCount);
    7757 
    7758  json.WriteString("UnusedRanges");
    7759  json.WriteNumber((uint64_t)unusedRangeCount);
    7760 
    7761  json.WriteString("Suballocations");
    7762  json.BeginArray();
    7763 }
    7764 
    7765 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    7766  VkDeviceSize offset,
    7767  VmaAllocation hAllocation) const
    7768 {
    7769  json.BeginObject(true);
    7770 
    7771  json.WriteString("Offset");
    7772  json.WriteNumber(offset);
    7773 
    7774  hAllocation->PrintParameters(json);
    7775 
    7776  json.EndObject();
    7777 }
    7778 
    7779 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    7780  VkDeviceSize offset,
    7781  VkDeviceSize size) const
    7782 {
    7783  json.BeginObject(true);
    7784 
    7785  json.WriteString("Offset");
    7786  json.WriteNumber(offset);
    7787 
    7788  json.WriteString("Type");
    7789  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
    7790 
    7791  json.WriteString("Size");
    7792  json.WriteNumber(size);
    7793 
    7794  json.EndObject();
    7795 }
    7796 
    7797 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
    7798 {
    7799  json.EndArray();
    7800  json.EndObject();
    7801 }
    7802 
    7803 #endif // #if VMA_STATS_STRING_ENABLED
    7804 
    7805 ////////////////////////////////////////////////////////////////////////////////
    7806 // class VmaBlockMetadata_Generic
    7807 
    7808 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    7809  VmaBlockMetadata(hAllocator),
    7810  m_FreeCount(0),
    7811  m_SumFreeSize(0),
    7812  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    7813  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
    7814 {
    7815 }
    7816 
    7817 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
    7818 {
    7819 }
    7820 
    7821 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
    7822 {
    7823  VmaBlockMetadata::Init(size);
    7824 
    7825  m_FreeCount = 1;
    7826  m_SumFreeSize = size;
    7827 
    7828  VmaSuballocation suballoc = {};
    7829  suballoc.offset = 0;
    7830  suballoc.size = size;
    7831  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    7832  suballoc.hAllocation = VK_NULL_HANDLE;
    7833 
    7834  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    7835  m_Suballocations.push_back(suballoc);
    7836  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    7837  --suballocItem;
    7838  m_FreeSuballocationsBySize.push_back(suballocItem);
    7839 }
    7840 
    7841 bool VmaBlockMetadata_Generic::Validate() const
    7842 {
    7843  VMA_VALIDATE(!m_Suballocations.empty());
    7844 
    7845  // Expected offset of new suballocation as calculated from previous ones.
    7846  VkDeviceSize calculatedOffset = 0;
    7847  // Expected number of free suballocations as calculated from traversing their list.
    7848  uint32_t calculatedFreeCount = 0;
    7849  // Expected sum size of free suballocations as calculated from traversing their list.
    7850  VkDeviceSize calculatedSumFreeSize = 0;
    7851  // Expected number of free suballocations that should be registered in
    7852  // m_FreeSuballocationsBySize calculated from traversing their list.
    7853  size_t freeSuballocationsToRegister = 0;
    7854  // True if the previously visited suballocation was free.
    7855  bool prevFree = false;
    7856 
    7857  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    7858  suballocItem != m_Suballocations.cend();
    7859  ++suballocItem)
    7860  {
    7861  const VmaSuballocation& subAlloc = *suballocItem;
    7862 
    7863  // Actual offset of this suballocation doesn't match expected one.
    7864  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
    7865 
    7866  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
    7867  // Two adjacent free suballocations are invalid. They should be merged.
    7868  VMA_VALIDATE(!prevFree || !currFree);
    7869 
    7870  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
    7871 
    7872  if(currFree)
    7873  {
    7874  calculatedSumFreeSize += subAlloc.size;
    7875  ++calculatedFreeCount;
    7876  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    7877  {
    7878  ++freeSuballocationsToRegister;
    7879  }
    7880 
    7881  // Margin required between allocations - every free space must be at least that large.
    7882  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
    7883  }
    7884  else
    7885  {
    7886  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
    7887  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
    7888 
    7889  // Margin required between allocations - previous allocation must be free.
    7890  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
    7891  }
    7892 
    7893  calculatedOffset += subAlloc.size;
    7894  prevFree = currFree;
    7895  }
    7896 
    7897  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    7898  // match expected one.
    7899  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
    7900 
    7901  VkDeviceSize lastSize = 0;
    7902  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    7903  {
    7904  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
    7905 
    7906  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
    7907  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    7908  // They must be sorted by size ascending.
    7909  VMA_VALIDATE(suballocItem->size >= lastSize);
    7910 
    7911  lastSize = suballocItem->size;
    7912  }
    7913 
    7914  // Check if totals match calculated values.
    7915  VMA_VALIDATE(ValidateFreeSuballocationList());
    7916  VMA_VALIDATE(calculatedOffset == GetSize());
    7917  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    7918  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
    7919 
    7920  return true;
    7921 }
    7922 
    7923 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
    7924 {
    7925  if(!m_FreeSuballocationsBySize.empty())
    7926  {
    7927  return m_FreeSuballocationsBySize.back()->size;
    7928  }
    7929  else
    7930  {
    7931  return 0;
    7932  }
    7933 }
    7934 
    7935 bool VmaBlockMetadata_Generic::IsEmpty() const
    7936 {
    7937  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
    7938 }
    7939 
    7940 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    7941 {
    7942  outInfo.blockCount = 1;
    7943 
    7944  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    7945  outInfo.allocationCount = rangeCount - m_FreeCount;
    7946  outInfo.unusedRangeCount = m_FreeCount;
    7947 
    7948  outInfo.unusedBytes = m_SumFreeSize;
    7949  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
    7950 
    7951  outInfo.allocationSizeMin = UINT64_MAX;
    7952  outInfo.allocationSizeMax = 0;
    7953  outInfo.unusedRangeSizeMin = UINT64_MAX;
    7954  outInfo.unusedRangeSizeMax = 0;
    7955 
    7956  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    7957  suballocItem != m_Suballocations.cend();
    7958  ++suballocItem)
    7959  {
    7960  const VmaSuballocation& suballoc = *suballocItem;
    7961  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    7962  {
    7963  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    7964  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    7965  }
    7966  else
    7967  {
    7968  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
    7969  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
    7970  }
    7971  }
    7972 }
    7973 
    7974 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
    7975 {
    7976  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    7977 
    7978  inoutStats.size += GetSize();
    7979  inoutStats.unusedSize += m_SumFreeSize;
    7980  inoutStats.allocationCount += rangeCount - m_FreeCount;
    7981  inoutStats.unusedRangeCount += m_FreeCount;
    7982  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    7983 }
    7984 
    7985 #if VMA_STATS_STRING_ENABLED
    7986 
    7987 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
    7988 {
    7989  PrintDetailedMap_Begin(json,
    7990  m_SumFreeSize, // unusedBytes
    7991  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
    7992  m_FreeCount); // unusedRangeCount
    7993 
    7994  size_t i = 0;
    7995  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    7996  suballocItem != m_Suballocations.cend();
    7997  ++suballocItem, ++i)
    7998  {
    7999  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    8000  {
    8001  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
    8002  }
    8003  else
    8004  {
    8005  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
    8006  }
    8007  }
    8008 
    8009  PrintDetailedMap_End(json);
    8010 }
    8011 
    8012 #endif // #if VMA_STATS_STRING_ENABLED
    8013 
    8014 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
    8015  uint32_t currentFrameIndex,
    8016  uint32_t frameInUseCount,
    8017  VkDeviceSize bufferImageGranularity,
    8018  VkDeviceSize allocSize,
    8019  VkDeviceSize allocAlignment,
    8020  bool upperAddress,
    8021  VmaSuballocationType allocType,
    8022  bool canMakeOtherLost,
    8023  uint32_t strategy,
    8024  VmaAllocationRequest* pAllocationRequest)
    8025 {
    8026  VMA_ASSERT(allocSize > 0);
    8027  VMA_ASSERT(!upperAddress);
    8028  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    8029  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    8030  VMA_HEAVY_ASSERT(Validate());
    8031 
    8032  pAllocationRequest->type = VmaAllocationRequestType::Normal;
    8033 
    8034  // There is not enough total free space in this block to fulfill the request: Early return.
    8035  if(canMakeOtherLost == false &&
    8036  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
    8037  {
    8038  return false;
    8039  }
    8040 
    8041  // New algorithm, efficiently searching freeSuballocationsBySize.
    8042  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    8043  if(freeSuballocCount > 0)
    8044  {
    8045  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
    8046  {
    8047  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
    8048  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    8049  m_FreeSuballocationsBySize.data(),
    8050  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    8051  allocSize + 2 * VMA_DEBUG_MARGIN,
    8052  VmaSuballocationItemSizeLess());
    8053  size_t index = it - m_FreeSuballocationsBySize.data();
    8054  for(; index < freeSuballocCount; ++index)
    8055  {
    8056  if(CheckAllocation(
    8057  currentFrameIndex,
    8058  frameInUseCount,
    8059  bufferImageGranularity,
    8060  allocSize,
    8061  allocAlignment,
    8062  allocType,
    8063  m_FreeSuballocationsBySize[index],
    8064  false, // canMakeOtherLost
    8065  &pAllocationRequest->offset,
    8066  &pAllocationRequest->itemsToMakeLostCount,
    8067  &pAllocationRequest->sumFreeSize,
    8068  &pAllocationRequest->sumItemSize))
    8069  {
    8070  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    8071  return true;
    8072  }
    8073  }
    8074  }
    8075  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
    8076  {
    8077  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    8078  it != m_Suballocations.end();
    8079  ++it)
    8080  {
    8081  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
    8082  currentFrameIndex,
    8083  frameInUseCount,
    8084  bufferImageGranularity,
    8085  allocSize,
    8086  allocAlignment,
    8087  allocType,
    8088  it,
    8089  false, // canMakeOtherLost
    8090  &pAllocationRequest->offset,
    8091  &pAllocationRequest->itemsToMakeLostCount,
    8092  &pAllocationRequest->sumFreeSize,
    8093  &pAllocationRequest->sumItemSize))
    8094  {
    8095  pAllocationRequest->item = it;
    8096  return true;
    8097  }
    8098  }
    8099  }
    8100  else // WORST_FIT, FIRST_FIT
    8101  {
    8102  // Search starting from the biggest suballocations.
    8103  for(size_t index = freeSuballocCount; index--; )
    8104  {
    8105  if(CheckAllocation(
    8106  currentFrameIndex,
    8107  frameInUseCount,
    8108  bufferImageGranularity,
    8109  allocSize,
    8110  allocAlignment,
    8111  allocType,
    8112  m_FreeSuballocationsBySize[index],
    8113  false, // canMakeOtherLost
    8114  &pAllocationRequest->offset,
    8115  &pAllocationRequest->itemsToMakeLostCount,
    8116  &pAllocationRequest->sumFreeSize,
    8117  &pAllocationRequest->sumItemSize))
    8118  {
    8119  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    8120  return true;
    8121  }
    8122  }
    8123  }
    8124  }
    8125 
    8126  if(canMakeOtherLost)
    8127  {
    8128  // Brute-force algorithm. TODO: Come up with something better.
    8129 
    8130  bool found = false;
    8131  VmaAllocationRequest tmpAllocRequest = {};
    8132  tmpAllocRequest.type = VmaAllocationRequestType::Normal;
    8133  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    8134  suballocIt != m_Suballocations.end();
    8135  ++suballocIt)
    8136  {
    8137  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    8138  suballocIt->hAllocation->CanBecomeLost())
    8139  {
    8140  if(CheckAllocation(
    8141  currentFrameIndex,
    8142  frameInUseCount,
    8143  bufferImageGranularity,
    8144  allocSize,
    8145  allocAlignment,
    8146  allocType,
    8147  suballocIt,
    8148  canMakeOtherLost,
    8149  &tmpAllocRequest.offset,
    8150  &tmpAllocRequest.itemsToMakeLostCount,
    8151  &tmpAllocRequest.sumFreeSize,
    8152  &tmpAllocRequest.sumItemSize))
    8153  {
    8154  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
    8155  {
    8156  *pAllocationRequest = tmpAllocRequest;
    8157  pAllocationRequest->item = suballocIt;
    8158  break;
    8159  }
    8160  if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
    8161  {
    8162  *pAllocationRequest = tmpAllocRequest;
    8163  pAllocationRequest->item = suballocIt;
    8164  found = true;
    8165  }
    8166  }
    8167  }
    8168  }
    8169 
    8170  return found;
    8171  }
    8172 
    8173  return false;
    8174 }
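
// Editor's note: summarizing the dispatch above - BEST_FIT binary-searches
// m_FreeSuballocationsBySize for the smallest free range that fits, MIN_OFFSET walks
// the suballocation list in address order, and WORST_FIT/FIRST_FIT iterate the
// size-sorted vector from the largest range downward; only the canMakeOtherLost
// fallback considers evicting (losing) existing allocations.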
    8175 
    8176 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    8177  uint32_t currentFrameIndex,
    8178  uint32_t frameInUseCount,
    8179  VmaAllocationRequest* pAllocationRequest)
    8180 {
    8181  VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);
    8182 
    8183  while(pAllocationRequest->itemsToMakeLostCount > 0)
    8184  {
    8185  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
    8186  {
    8187  ++pAllocationRequest->item;
    8188  }
    8189  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    8190  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
    8191  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
    8192  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8193  {
    8194  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
    8195  --pAllocationRequest->itemsToMakeLostCount;
    8196  }
    8197  else
    8198  {
    8199  return false;
    8200  }
    8201  }
    8202 
    8203  VMA_HEAVY_ASSERT(Validate());
    8204  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    8205  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8206 
    8207  return true;
    8208 }
    8209 
    8210 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    8211 {
    8212  uint32_t lostAllocationCount = 0;
    8213  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    8214  it != m_Suballocations.end();
    8215  ++it)
    8216  {
    8217  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
    8218  it->hAllocation->CanBecomeLost() &&
    8219  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8220  {
    8221  it = FreeSuballocation(it);
    8222  ++lostAllocationCount;
    8223  }
    8224  }
    8225  return lostAllocationCount;
    8226 }
    8227 
    8228 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
    8229 {
    8230  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    8231  it != m_Suballocations.end();
    8232  ++it)
    8233  {
    8234  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    8235  {
    8236  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
    8237  {
    8238  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    8239  return VK_ERROR_VALIDATION_FAILED_EXT;
    8240  }
    8241  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
    8242  {
    8243  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    8244  return VK_ERROR_VALIDATION_FAILED_EXT;
    8245  }
    8246  }
    8247  }
    8248 
    8249  return VK_SUCCESS;
    8250 }
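
// Editor's note: this check relies on the VMA_DEBUG_MARGIN/VMA_DEBUG_DETECT_CORRUPTION
// scheme: the margin before and after every allocation is filled with a known magic
// number when the allocation is created, so an overwritten magic value on either side
// indicates an out-of-bounds write by the application.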
    8251 
    8252 void VmaBlockMetadata_Generic::Alloc(
    8253  const VmaAllocationRequest& request,
    8254  VmaSuballocationType type,
    8255  VkDeviceSize allocSize,
    8256  VmaAllocation hAllocation)
    8257 {
    8258  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
    8259  VMA_ASSERT(request.item != m_Suballocations.end());
    8260  VmaSuballocation& suballoc = *request.item;
    8261  // Given suballocation is a free block.
    8262  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    8263  // Given offset is inside this suballocation.
    8264  VMA_ASSERT(request.offset >= suballoc.offset);
    8265  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    8266  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    8267  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
    8268 
    8269  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    8270  // it to become used.
    8271  UnregisterFreeSuballocation(request.item);
    8272 
    8273  suballoc.offset = request.offset;
    8274  suballoc.size = allocSize;
    8275  suballoc.type = type;
    8276  suballoc.hAllocation = hAllocation;
    8277 
    8278  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    8279  if(paddingEnd)
    8280  {
    8281  VmaSuballocation paddingSuballoc = {};
    8282  paddingSuballoc.offset = request.offset + allocSize;
    8283  paddingSuballoc.size = paddingEnd;
    8284  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8285  VmaSuballocationList::iterator next = request.item;
    8286  ++next;
    8287  const VmaSuballocationList::iterator paddingEndItem =
    8288  m_Suballocations.insert(next, paddingSuballoc);
    8289  RegisterFreeSuballocation(paddingEndItem);
    8290  }
    8291 
    8292  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    8293  if(paddingBegin)
    8294  {
    8295  VmaSuballocation paddingSuballoc = {};
    8296  paddingSuballoc.offset = request.offset - paddingBegin;
    8297  paddingSuballoc.size = paddingBegin;
    8298  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8299  const VmaSuballocationList::iterator paddingBeginItem =
    8300  m_Suballocations.insert(request.item, paddingSuballoc);
    8301  RegisterFreeSuballocation(paddingBeginItem);
    8302  }
    8303 
    8304  // Update totals.
    8305  m_FreeCount = m_FreeCount - 1;
    8306  if(paddingBegin > 0)
    8307  {
    8308  ++m_FreeCount;
    8309  }
    8310  if(paddingEnd > 0)
    8311  {
    8312  ++m_FreeCount;
    8313  }
    8314  m_SumFreeSize -= allocSize;
    8315 }
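
// Editor's note: a small worked example of the split performed above (numbers
// hypothetical). Placing a 96-byte allocation at offset 32 inside a free range
// [0..256) yields paddingBegin = 32 and paddingEnd = 128, so the range becomes
// [FREE 0..32)[USED 32..128)[FREE 128..256) and m_FreeCount goes from 1 to 2,
// with both new free ranges re-registered by size.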
    8316 
    8317 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
    8318 {
    8319  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    8320  suballocItem != m_Suballocations.end();
    8321  ++suballocItem)
    8322  {
    8323  VmaSuballocation& suballoc = *suballocItem;
    8324  if(suballoc.hAllocation == allocation)
    8325  {
    8326  FreeSuballocation(suballocItem);
    8327  VMA_HEAVY_ASSERT(Validate());
    8328  return;
    8329  }
    8330  }
    8331  VMA_ASSERT(0 && "Not found!");
    8332 }
    8333 
    8334 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
    8335 {
    8336  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    8337  suballocItem != m_Suballocations.end();
    8338  ++suballocItem)
    8339  {
    8340  VmaSuballocation& suballoc = *suballocItem;
    8341  if(suballoc.offset == offset)
    8342  {
    8343  FreeSuballocation(suballocItem);
    8344  return;
    8345  }
    8346  }
    8347  VMA_ASSERT(0 && "Not found!");
    8348 }
    8349 
    8350 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
    8351 {
    8352  VkDeviceSize lastSize = 0;
    8353  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    8354  {
    8355  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    8356 
    8357  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
    8358  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    8359  VMA_VALIDATE(it->size >= lastSize);
    8360  lastSize = it->size;
    8361  }
    8362  return true;
    8363 }
    8364 
    8365 bool VmaBlockMetadata_Generic::CheckAllocation(
    8366  uint32_t currentFrameIndex,
    8367  uint32_t frameInUseCount,
    8368  VkDeviceSize bufferImageGranularity,
    8369  VkDeviceSize allocSize,
    8370  VkDeviceSize allocAlignment,
    8371  VmaSuballocationType allocType,
    8372  VmaSuballocationList::const_iterator suballocItem,
    8373  bool canMakeOtherLost,
    8374  VkDeviceSize* pOffset,
    8375  size_t* itemsToMakeLostCount,
    8376  VkDeviceSize* pSumFreeSize,
    8377  VkDeviceSize* pSumItemSize) const
    8378 {
    8379  VMA_ASSERT(allocSize > 0);
    8380  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    8381  VMA_ASSERT(suballocItem != m_Suballocations.cend());
    8382  VMA_ASSERT(pOffset != VMA_NULL);
    8383 
    8384  *itemsToMakeLostCount = 0;
    8385  *pSumFreeSize = 0;
    8386  *pSumItemSize = 0;
    8387 
    8388  if(canMakeOtherLost)
    8389  {
    8390  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    8391  {
    8392  *pSumFreeSize = suballocItem->size;
    8393  }
    8394  else
    8395  {
    8396  if(suballocItem->hAllocation->CanBecomeLost() &&
    8397  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    8398  {
    8399  ++*itemsToMakeLostCount;
    8400  *pSumItemSize = suballocItem->size;
    8401  }
    8402  else
    8403  {
    8404  return false;
    8405  }
    8406  }
    8407 
    8408  // Remaining size is too small for this request: Early return.
    8409  if(GetSize() - suballocItem->offset < allocSize)
    8410  {
    8411  return false;
    8412  }
    8413 
    8414  // Start from offset equal to beginning of this suballocation.
    8415  *pOffset = suballocItem->offset;
    8416 
    8417  // Apply VMA_DEBUG_MARGIN at the beginning.
    8418  if(VMA_DEBUG_MARGIN > 0)
    8419  {
    8420  *pOffset += VMA_DEBUG_MARGIN;
    8421  }
    8422 
    8423  // Apply alignment.
    8424  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
    8425 
    8426  // Check previous suballocations for BufferImageGranularity conflicts.
    8427  // Make bigger alignment if necessary.
    8428  if(bufferImageGranularity > 1)
    8429  {
    8430  bool bufferImageGranularityConflict = false;
    8431  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
    8432  while(prevSuballocItem != m_Suballocations.cbegin())
    8433  {
    8434  --prevSuballocItem;
    8435  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
    8436  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
    8437  {
    8438  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    8439  {
    8440  bufferImageGranularityConflict = true;
    8441  break;
    8442  }
    8443  }
    8444  else
    8445  // Already on previous page.
    8446  break;
    8447  }
    8448  if(bufferImageGranularityConflict)
    8449  {
    8450  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
    8451  }
    8452  }
    8453 
    8454  // Now that we have final *pOffset, check if we are past suballocItem.
    8455  // If yes, return false - this function should be called for another suballocItem as starting point.
    8456  if(*pOffset >= suballocItem->offset + suballocItem->size)
    8457  {
    8458  return false;
    8459  }
    8460 
    8461  // Calculate padding at the beginning based on current offset.
    8462  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
    8463 
    8464  // Calculate required margin at the end.
    8465  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
    8466 
    8467  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
    8468  // Another early return check.
    8469  if(suballocItem->offset + totalSize > GetSize())
    8470  {
    8471  return false;
    8472  }
    8473 
    8474  // Advance lastSuballocItem until desired size is reached.
    8475  // Update itemsToMakeLostCount.
    8476  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
    8477  if(totalSize > suballocItem->size)
    8478  {
    8479  VkDeviceSize remainingSize = totalSize - suballocItem->size;
    8480  while(remainingSize > 0)
    8481  {
    8482  ++lastSuballocItem;
    8483  if(lastSuballocItem == m_Suballocations.cend())
    8484  {
    8485  return false;
    8486  }
    8487  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    8488  {
    8489  *pSumFreeSize += lastSuballocItem->size;
    8490  }
    8491  else
    8492  {
    8493  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
    8494  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
    8495  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    8496  {
    8497  ++*itemsToMakeLostCount;
    8498  *pSumItemSize += lastSuballocItem->size;
    8499  }
    8500  else
    8501  {
    8502  return false;
    8503  }
    8504  }
    8505  remainingSize = (lastSuballocItem->size < remainingSize) ?
    8506  remainingSize - lastSuballocItem->size : 0;
    8507  }
    8508  }
    8509 
    8510  // Check next suballocations for BufferImageGranularity conflicts.
    8511  // If conflict exists, we must mark more allocations lost or fail.
    8512  if(bufferImageGranularity > 1)
    8513  {
    8514  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
    8515  ++nextSuballocItem;
    8516  while(nextSuballocItem != m_Suballocations.cend())
    8517  {
    8518  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
    8519  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    8520  {
    8521  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    8522  {
    8523  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
    8524  if(nextSuballoc.hAllocation->CanBecomeLost() &&
    8525  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    8526  {
    8527  ++*itemsToMakeLostCount;
    8528  }
    8529  else
    8530  {
    8531  return false;
    8532  }
    8533  }
    8534  }
    8535  else
    8536  {
    8537  // Already on next page.
    8538  break;
    8539  }
    8540  ++nextSuballocItem;
    8541  }
    8542  }
    8543  }
    8544  else
    8545  {
    8546  const VmaSuballocation& suballoc = *suballocItem;
    8547  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    8548 
    8549  *pSumFreeSize = suballoc.size;
    8550 
    8551  // Size of this suballocation is too small for this request: Early return.
    8552  if(suballoc.size < allocSize)
    8553  {
    8554  return false;
    8555  }
    8556 
    8557  // Start from offset equal to beginning of this suballocation.
    8558  *pOffset = suballoc.offset;
    8559 
    8560  // Apply VMA_DEBUG_MARGIN at the beginning.
    8561  if(VMA_DEBUG_MARGIN > 0)
    8562  {
    8563  *pOffset += VMA_DEBUG_MARGIN;
    8564  }
    8565 
    8566  // Apply alignment.
    8567  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
    8568 
    8569  // Check previous suballocations for BufferImageGranularity conflicts.
    8570  // Make bigger alignment if necessary.
    8571  if(bufferImageGranularity > 1)
    8572  {
    8573  bool bufferImageGranularityConflict = false;
    8574  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
    8575  while(prevSuballocItem != m_Suballocations.cbegin())
    8576  {
    8577  --prevSuballocItem;
    8578  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
    8579  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
    8580  {
    8581  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    8582  {
    8583  bufferImageGranularityConflict = true;
    8584  break;
    8585  }
    8586  }
    8587  else
    8588  // Already on previous page.
    8589  break;
    8590  }
    8591  if(bufferImageGranularityConflict)
    8592  {
    8593  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
    8594  }
    8595  }
    8596 
    8597  // Calculate padding at the beginning based on current offset.
    8598  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
    8599 
    8600  // Calculate required margin at the end.
    8601  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
    8602 
    8603  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
    8604  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
    8605  {
    8606  return false;
    8607  }
    8608 
    8609  // Check next suballocations for BufferImageGranularity conflicts.
    8610  // If conflict exists, allocation cannot be made here.
    8611  if(bufferImageGranularity > 1)
    8612  {
    8613  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
    8614  ++nextSuballocItem;
    8615  while(nextSuballocItem != m_Suballocations.cend())
    8616  {
    8617  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
    8618  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    8619  {
    8620  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    8621  {
    8622  return false;
    8623  }
    8624  }
    8625  else
    8626  {
    8627  // Already on next page.
    8628  break;
    8629  }
    8630  ++nextSuballocItem;
    8631  }
    8632  }
    8633  }
    8634 
    8635  // All tests passed: Success. pOffset is already filled.
    8636  return true;
    8637 }
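
// Editor's note: bufferImageGranularity here is the Vulkan device limit of the same
// name - linear and non-linear (optimally tiled) resources placed within the same
// "page" of that size can alias on some implementations, so the checks above bump
// the alignment or fail when a neighbouring suballocation's type conflicts.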
    8638 
    8639 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
    8640 {
    8641  VMA_ASSERT(item != m_Suballocations.end());
    8642  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8643 
    8644  VmaSuballocationList::iterator nextItem = item;
    8645  ++nextItem;
    8646  VMA_ASSERT(nextItem != m_Suballocations.end());
    8647  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    8648 
    8649  item->size += nextItem->size;
    8650  --m_FreeCount;
    8651  m_Suballocations.erase(nextItem);
    8652 }
    8653 
    8654 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
    8655 {
    8656  // Change this suballocation to be marked as free.
    8657  VmaSuballocation& suballoc = *suballocItem;
    8658  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8659  suballoc.hAllocation = VK_NULL_HANDLE;
    8660 
    8661  // Update totals.
    8662  ++m_FreeCount;
    8663  m_SumFreeSize += suballoc.size;
    8664 
    8665  // Merge with previous and/or next suballocation if it's also free.
    8666  bool mergeWithNext = false;
    8667  bool mergeWithPrev = false;
    8668 
    8669  VmaSuballocationList::iterator nextItem = suballocItem;
    8670  ++nextItem;
    8671  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    8672  {
    8673  mergeWithNext = true;
    8674  }
    8675 
    8676  VmaSuballocationList::iterator prevItem = suballocItem;
    8677  if(suballocItem != m_Suballocations.begin())
    8678  {
    8679  --prevItem;
    8680  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    8681  {
    8682  mergeWithPrev = true;
    8683  }
    8684  }
    8685 
    8686  if(mergeWithNext)
    8687  {
    8688  UnregisterFreeSuballocation(nextItem);
    8689  MergeFreeWithNext(suballocItem);
    8690  }
    8691 
    8692  if(mergeWithPrev)
    8693  {
    8694  UnregisterFreeSuballocation(prevItem);
    8695  MergeFreeWithNext(prevItem);
    8696  RegisterFreeSuballocation(prevItem);
    8697  return prevItem;
    8698  }
    8699  else
    8700  {
    8701  RegisterFreeSuballocation(suballocItem);
    8702  return suballocItem;
    8703  }
    8704 }
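
// Editor's note: a worked example of the coalescing above (offsets hypothetical).
// Freeing the middle allocation of [FREE 0..64)[USED 64..128)[FREE 128..256) first
// absorbs the next free range into the freed one, then absorbs that result into the
// previous one via MergeFreeWithNext(), leaving a single registered free range
// [FREE 0..256) with m_FreeCount == 1.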
    8705 
    8706 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    8707 {
    8708  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8709  VMA_ASSERT(item->size > 0);
    8710 
    8711  // You may want to enable this validation at the beginning or at the end of
    8712  // this function, depending on what you want to check.
    8713  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8714 
    8715  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    8716  {
    8717  if(m_FreeSuballocationsBySize.empty())
    8718  {
    8719  m_FreeSuballocationsBySize.push_back(item);
    8720  }
    8721  else
    8722  {
    8723  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    8724  }
    8725  }
    8726 
    8727  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8728 }
    8729 
    8730 
    8731 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
    8732 {
    8733  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8734  VMA_ASSERT(item->size > 0);
    8735 
    8736  // You may want to enable this validation at the beginning or at the end of
    8737  // this function, depending on what you want to check.
    8738  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8739 
    8740  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    8741  {
    8742  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    8743  m_FreeSuballocationsBySize.data(),
    8744  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
    8745  item,
    8746  VmaSuballocationItemSizeLess());
    8747  for(size_t index = it - m_FreeSuballocationsBySize.data();
    8748  index < m_FreeSuballocationsBySize.size();
    8749  ++index)
    8750  {
    8751  if(m_FreeSuballocationsBySize[index] == item)
    8752  {
    8753  VmaVectorRemove(m_FreeSuballocationsBySize, index);
    8754  return;
    8755  }
    8756  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
    8757  }
    8758  VMA_ASSERT(0 && "Not found.");
    8759  }
    8760 
    8761  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8762 }
    8763 
    8764 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
    8765  VkDeviceSize bufferImageGranularity,
    8766  VmaSuballocationType& inOutPrevSuballocType) const
    8767 {
    8768  if(bufferImageGranularity == 1 || IsEmpty())
    8769  {
    8770  return false;
    8771  }
    8772 
    8773  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
    8774  bool typeConflictFound = false;
    8775  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
    8776  it != m_Suballocations.cend();
    8777  ++it)
    8778  {
    8779  const VmaSuballocationType suballocType = it->type;
    8780  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
    8781  {
    8782  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
    8783  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
    8784  {
    8785  typeConflictFound = true;
    8786  }
    8787  inOutPrevSuballocType = suballocType;
    8788  }
    8789  }
    8790 
    8791  return typeConflictFound || minAlignment >= bufferImageGranularity;
    8792 }
    8793 
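/*
Illustrative sketch (not part of the library): the "same page" predicate that
the bufferImageGranularity checks in this file (VmaBlocksOnSamePage()) are
built on. Two resources can conflict only when one ends on the very
granularity-aligned page where the other begins, and their types differ
(linear vs. optimal tiling).

\code
#include <cstdint>

// True if [aOffset, aOffset + aSize) ends on the page where bOffset begins.
// pageSize must be a power of two, and the A range must precede bOffset.
bool OnSamePage(uint64_t aOffset, uint64_t aSize, uint64_t bOffset, uint64_t pageSize)
{
    const uint64_t aEndPage   = (aOffset + aSize - 1) & ~(pageSize - 1);
    const uint64_t bStartPage = bOffset & ~(pageSize - 1);
    return aEndPage == bStartPage;
}
\endcode
*/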
    8794 ////////////////////////////////////////////////////////////////////////////////
    8795 // class VmaBlockMetadata_Linear
    8796 
    8797 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    8798  VmaBlockMetadata(hAllocator),
    8799  m_SumFreeSize(0),
    8800  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    8801  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    8802  m_1stVectorIndex(0),
    8803  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    8804  m_1stNullItemsBeginCount(0),
    8805  m_1stNullItemsMiddleCount(0),
    8806  m_2ndNullItemsCount(0)
    8807 {
    8808 }
    8809 
    8810 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
    8811 {
    8812 }
    8813 
    8814 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
    8815 {
    8816  VmaBlockMetadata::Init(size);
    8817  m_SumFreeSize = size;
    8818 }
    8819 
    8820 bool VmaBlockMetadata_Linear::Validate() const
    8821 {
    8822  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8823  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8824 
    8825  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    8826  VMA_VALIDATE(!suballocations1st.empty() ||
    8827  suballocations2nd.empty() ||
    8828  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
    8829 
    8830  if(!suballocations1st.empty())
    8831  {
    8832  // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
    8833  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
    8834  // Null item at the end should be just pop_back().
    8835  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    8836  }
    8837  if(!suballocations2nd.empty())
    8838  {
    8839  // Null item at the end should be just pop_back().
    8840  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    8841  }
    8842 
    8843  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    8844  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
    8845 
    8846  VkDeviceSize sumUsedSize = 0;
    8847  const size_t suballoc1stCount = suballocations1st.size();
    8848  VkDeviceSize offset = VMA_DEBUG_MARGIN;
    8849 
    8850  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8851  {
    8852  const size_t suballoc2ndCount = suballocations2nd.size();
    8853  size_t nullItem2ndCount = 0;
    8854  for(size_t i = 0; i < suballoc2ndCount; ++i)
    8855  {
    8856  const VmaSuballocation& suballoc = suballocations2nd[i];
    8857  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    8858 
    8859  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
    8860  VMA_VALIDATE(suballoc.offset >= offset);
    8861 
    8862  if(!currFree)
    8863  {
    8864  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
    8865  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
    8866  sumUsedSize += suballoc.size;
    8867  }
    8868  else
    8869  {
    8870  ++nullItem2ndCount;
    8871  }
    8872 
    8873  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    8874  }
    8875 
    8876  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    8877  }
    8878 
    8879  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    8880  {
    8881  const VmaSuballocation& suballoc = suballocations1st[i];
    8882  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
    8883  suballoc.hAllocation == VK_NULL_HANDLE);
    8884  }
    8885 
    8886  size_t nullItem1stCount = m_1stNullItemsBeginCount;
    8887 
    8888  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    8889  {
    8890  const VmaSuballocation& suballoc = suballocations1st[i];
    8891  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    8892 
    8893  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
    8894  VMA_VALIDATE(suballoc.offset >= offset);
    8895  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
    8896 
    8897  if(!currFree)
    8898  {
    8899  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
    8900  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
    8901  sumUsedSize += suballoc.size;
    8902  }
    8903  else
    8904  {
    8905  ++nullItem1stCount;
    8906  }
    8907 
    8908  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    8909  }
    8910  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
    8911 
    8912  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8913  {
    8914  const size_t suballoc2ndCount = suballocations2nd.size();
    8915  size_t nullItem2ndCount = 0;
    8916  for(size_t i = suballoc2ndCount; i--; )
    8917  {
    8918  const VmaSuballocation& suballoc = suballocations2nd[i];
    8919  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    8920 
    8921  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
    8922  VMA_VALIDATE(suballoc.offset >= offset);
    8923 
    8924  if(!currFree)
    8925  {
    8926  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
    8927  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
    8928  sumUsedSize += suballoc.size;
    8929  }
    8930  else
    8931  {
    8932  ++nullItem2ndCount;
    8933  }
    8934 
    8935  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    8936  }
    8937 
    8938  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    8939  }
    8940 
    8941  VMA_VALIDATE(offset <= GetSize());
    8942  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
    8943 
    8944  return true;
    8945 }
    8946 
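/*
Layout recap for the linear metadata that Validate() checks above (an informal
sketch, not normative):

    SECOND_VECTOR_EMPTY:        |111111..........................|
    SECOND_VECTOR_RING_BUFFER:  |2222......111111................|  2nd wraps in front of 1st
    SECOND_VECTOR_DOUBLE_STACK: |111111..................22222222|  2nd grows down from the end

'1'/'2' mark suballocations stored in the 1st/2nd vector, '.' is free space.
Freed-but-not-compacted entries stay in the vectors as null items; they are
counted in m_1stNullItemsBeginCount, m_1stNullItemsMiddleCount and
m_2ndNullItemsCount, which Validate() cross-checks against the actual contents.
*/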
    8947 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
    8948 {
    8949  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
    8950  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
    8951 }
    8952 
    8953 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
    8954 {
    8955  const VkDeviceSize size = GetSize();
    8956 
    8957  /*
    8958  We don't consider gaps inside allocation vectors with freed allocations because
    8959  they are not suitable for reuse in linear allocator. We consider only space that
    8960  is available for new allocations.
    8961  */
    8962  if(IsEmpty())
    8963  {
    8964  return size;
    8965  }
    8966 
    8967  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8968 
    8969  switch(m_2ndVectorMode)
    8970  {
    8971  case SECOND_VECTOR_EMPTY:
    8972  /*
    8973  Available space is after end of 1st, as well as before beginning of 1st (which
    8974  would make it a ring buffer).
    8975  */
    8976  {
    8977  const size_t suballocations1stCount = suballocations1st.size();
    8978  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
    8979  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
    8980  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
    8981  return VMA_MAX(
    8982  firstSuballoc.offset,
    8983  size - (lastSuballoc.offset + lastSuballoc.size));
    8984  }
    8985  break;
    8986 
    8987  case SECOND_VECTOR_RING_BUFFER:
    8988  /*
    8989  Available space is only between end of 2nd and beginning of 1st.
    8990  */
    8991  {
    8992  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8993  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
    8994  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
    8995  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
    8996  }
    8997  break;
    8998 
    8999  case SECOND_VECTOR_DOUBLE_STACK:
    9000  /*
    9001  Available space is only between end of 1st and top of 2nd.
    9002  */
    9003  {
    9004  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9005  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
    9006  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
    9007  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
    9008  }
    9009  break;
    9010 
    9011  default:
    9012  VMA_ASSERT(0);
    9013  return 0;
    9014  }
    9015 }
    9016 
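/*
Worked example for the three cases of GetUnusedRangeSizeMax() above, with a
block of size 100 (numbers are illustrative only):

    SECOND_VECTOR_EMPTY:        1st spans [10, 70)                -> max(10, 100 - 70) = 30
    SECOND_VECTOR_RING_BUFFER:  2nd ends at 25, 1st begins at 40  -> 40 - 25 = 15
    SECOND_VECTOR_DOUBLE_STACK: 1st ends at 60, 2nd top is at 80  -> 80 - 60 = 20
*/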
    9017 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    9018 {
    9019  const VkDeviceSize size = GetSize();
    9020  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9021  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9022  const size_t suballoc1stCount = suballocations1st.size();
    9023  const size_t suballoc2ndCount = suballocations2nd.size();
    9024 
    9025  outInfo.blockCount = 1;
    9026  outInfo.allocationCount = (uint32_t)GetAllocationCount();
    9027  outInfo.unusedRangeCount = 0;
    9028  outInfo.usedBytes = 0;
          outInfo.unusedBytes = 0; // initialize before the += accumulation below
    9029  outInfo.allocationSizeMin = UINT64_MAX;
    9030  outInfo.allocationSizeMax = 0;
    9031  outInfo.unusedRangeSizeMin = UINT64_MAX;
    9032  outInfo.unusedRangeSizeMax = 0;
    9033 
    9034  VkDeviceSize lastOffset = 0;
    9035 
    9036  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9037  {
    9038  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    9039  size_t nextAlloc2ndIndex = 0;
    9040  while(lastOffset < freeSpace2ndTo1stEnd)
    9041  {
    9042  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9043  while(nextAlloc2ndIndex < suballoc2ndCount &&
    9044  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9045  {
    9046  ++nextAlloc2ndIndex;
    9047  }
    9048 
    9049  // Found non-null allocation.
    9050  if(nextAlloc2ndIndex < suballoc2ndCount)
    9051  {
    9052  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9053 
    9054  // 1. Process free space before this allocation.
    9055  if(lastOffset < suballoc.offset)
    9056  {
    9057  // There is free space from lastOffset to suballoc.offset.
    9058  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9059  ++outInfo.unusedRangeCount;
    9060  outInfo.unusedBytes += unusedRangeSize;
    9061  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9062  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9063  }
    9064 
    9065  // 2. Process this allocation.
    9066  // There is allocation with suballoc.offset, suballoc.size.
    9067  outInfo.usedBytes += suballoc.size;
    9068  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    9069  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    9070 
    9071  // 3. Prepare for next iteration.
    9072  lastOffset = suballoc.offset + suballoc.size;
    9073  ++nextAlloc2ndIndex;
    9074  }
    9075  // We are at the end.
    9076  else
    9077  {
    9078  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    9079  if(lastOffset < freeSpace2ndTo1stEnd)
    9080  {
    9081  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    9082  ++outInfo.unusedRangeCount;
    9083  outInfo.unusedBytes += unusedRangeSize;
    9084  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9085  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9086  }
    9087 
    9088  // End of loop.
    9089  lastOffset = freeSpace2ndTo1stEnd;
    9090  }
    9091  }
    9092  }
    9093 
    9094  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9095  const VkDeviceSize freeSpace1stTo2ndEnd =
    9096  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    9097  while(lastOffset < freeSpace1stTo2ndEnd)
    9098  {
    9099  // Find next non-null allocation or move nextAlloc1stIndex to the end.
    9100  while(nextAlloc1stIndex < suballoc1stCount &&
    9101  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9102  {
    9103  ++nextAlloc1stIndex;
    9104  }
    9105 
    9106  // Found non-null allocation.
    9107  if(nextAlloc1stIndex < suballoc1stCount)
    9108  {
    9109  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9110 
    9111  // 1. Process free space before this allocation.
    9112  if(lastOffset < suballoc.offset)
    9113  {
    9114  // There is free space from lastOffset to suballoc.offset.
    9115  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9116  ++outInfo.unusedRangeCount;
    9117  outInfo.unusedBytes += unusedRangeSize;
    9118  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9119  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9120  }
    9121 
    9122  // 2. Process this allocation.
    9123  // There is allocation with suballoc.offset, suballoc.size.
    9124  outInfo.usedBytes += suballoc.size;
    9125  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    9126  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    9127 
    9128  // 3. Prepare for next iteration.
    9129  lastOffset = suballoc.offset + suballoc.size;
    9130  ++nextAlloc1stIndex;
    9131  }
    9132  // We are at the end.
    9133  else
    9134  {
    9135  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9136  if(lastOffset < freeSpace1stTo2ndEnd)
    9137  {
    9138  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    9139  ++outInfo.unusedRangeCount;
    9140  outInfo.unusedBytes += unusedRangeSize;
    9141  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9142  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9143  }
    9144 
    9145  // End of loop.
    9146  lastOffset = freeSpace1stTo2ndEnd;
    9147  }
    9148  }
    9149 
    9150  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9151  {
    9152  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9153  while(lastOffset < size)
    9154  {
    9155  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9156  while(nextAlloc2ndIndex != SIZE_MAX &&
    9157  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9158  {
    9159  --nextAlloc2ndIndex;
    9160  }
    9161 
    9162  // Found non-null allocation.
    9163  if(nextAlloc2ndIndex != SIZE_MAX)
    9164  {
    9165  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9166 
    9167  // 1. Process free space before this allocation.
    9168  if(lastOffset < suballoc.offset)
    9169  {
    9170  // There is free space from lastOffset to suballoc.offset.
    9171  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9172  ++outInfo.unusedRangeCount;
    9173  outInfo.unusedBytes += unusedRangeSize;
    9174  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9175  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9176  }
    9177 
    9178  // 2. Process this allocation.
    9179  // There is allocation with suballoc.offset, suballoc.size.
    9180  outInfo.usedBytes += suballoc.size;
    9181  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    9182  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    9183 
    9184  // 3. Prepare for next iteration.
    9185  lastOffset = suballoc.offset + suballoc.size;
    9186  --nextAlloc2ndIndex;
    9187  }
    9188  // We are at the end.
    9189  else
    9190  {
    9191  // There is free space from lastOffset to size.
    9192  if(lastOffset < size)
    9193  {
    9194  const VkDeviceSize unusedRangeSize = size - lastOffset;
    9195  ++outInfo.unusedRangeCount;
    9196  outInfo.unusedBytes += unusedRangeSize;
    9197  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9198  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9199  }
    9200 
    9201  // End of loop.
    9202  lastOffset = size;
    9203  }
    9204  }
    9205  }
    9206 
    9207  outInfo.unusedBytes = size - outInfo.usedBytes;
    9208 }
    9209 
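/*
Illustrative sketch (not part of the library): the traversal idiom shared by
CalcAllocationStatInfo(), AddPoolStats() and PrintDetailedMap(): skip null
(freed) items, report the gap before each live allocation, then the allocation
itself, and finally the trailing gap. Item, Walk and the callbacks are
simplified stand-ins for the real types.

\code
#include <cstdint>
#include <vector>

struct Item { uint64_t offset, size; bool live; };

template<typename OnGap, typename OnAlloc>
void Walk(const std::vector<Item>& items, uint64_t end, OnGap onGap, OnAlloc onAlloc)
{
    uint64_t lastOffset = 0;
    for(size_t i = 0; i < items.size(); ++i)
    {
        const Item& it = items[i];
        if(!it.live)
            continue; // freed slot still occupies the vector, like hAllocation == VK_NULL_HANDLE
        if(lastOffset < it.offset)
            onGap(lastOffset, it.offset - lastOffset); // free range before this allocation
        onAlloc(it.offset, it.size);
        lastOffset = it.offset + it.size; // prepare for next iteration
    }
    if(lastOffset < end)
        onGap(lastOffset, end - lastOffset); // trailing free range
}
\endcode
*/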
    9210 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
    9211 {
    9212  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9213  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9214  const VkDeviceSize size = GetSize();
    9215  const size_t suballoc1stCount = suballocations1st.size();
    9216  const size_t suballoc2ndCount = suballocations2nd.size();
    9217 
    9218  inoutStats.size += size;
    9219 
    9220  VkDeviceSize lastOffset = 0;
    9221 
    9222  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9223  {
    9224  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    9225  size_t nextAlloc2ndIndex = 0; // 2nd vector is scanned from its own start, consistent with CalcAllocationStatInfo().
    9226  while(lastOffset < freeSpace2ndTo1stEnd)
    9227  {
    9228  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9229  while(nextAlloc2ndIndex < suballoc2ndCount &&
    9230  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9231  {
    9232  ++nextAlloc2ndIndex;
    9233  }
    9234 
    9235  // Found non-null allocation.
    9236  if(nextAlloc2ndIndex < suballoc2ndCount)
    9237  {
    9238  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9239 
    9240  // 1. Process free space before this allocation.
    9241  if(lastOffset < suballoc.offset)
    9242  {
    9243  // There is free space from lastOffset to suballoc.offset.
    9244  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9245  inoutStats.unusedSize += unusedRangeSize;
    9246  ++inoutStats.unusedRangeCount;
    9247  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9248  }
    9249 
    9250  // 2. Process this allocation.
    9251  // There is allocation with suballoc.offset, suballoc.size.
    9252  ++inoutStats.allocationCount;
    9253 
    9254  // 3. Prepare for next iteration.
    9255  lastOffset = suballoc.offset + suballoc.size;
    9256  ++nextAlloc2ndIndex;
    9257  }
    9258  // We are at the end.
    9259  else
    9260  {
    9261  if(lastOffset < freeSpace2ndTo1stEnd)
    9262  {
    9263  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    9264  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    9265  inoutStats.unusedSize += unusedRangeSize;
    9266  ++inoutStats.unusedRangeCount;
    9267  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9268  }
    9269 
    9270  // End of loop.
    9271  lastOffset = freeSpace2ndTo1stEnd;
    9272  }
    9273  }
    9274  }
    9275 
    9276  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9277  const VkDeviceSize freeSpace1stTo2ndEnd =
    9278  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    9279  while(lastOffset < freeSpace1stTo2ndEnd)
    9280  {
    9281  // Find next non-null allocation or move nextAlloc1stIndex to the end.
    9282  while(nextAlloc1stIndex < suballoc1stCount &&
    9283  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9284  {
    9285  ++nextAlloc1stIndex;
    9286  }
    9287 
    9288  // Found non-null allocation.
    9289  if(nextAlloc1stIndex < suballoc1stCount)
    9290  {
    9291  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9292 
    9293  // 1. Process free space before this allocation.
    9294  if(lastOffset < suballoc.offset)
    9295  {
    9296  // There is free space from lastOffset to suballoc.offset.
    9297  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9298  inoutStats.unusedSize += unusedRangeSize;
    9299  ++inoutStats.unusedRangeCount;
    9300  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9301  }
    9302 
    9303  // 2. Process this allocation.
    9304  // There is allocation with suballoc.offset, suballoc.size.
    9305  ++inoutStats.allocationCount;
    9306 
    9307  // 3. Prepare for next iteration.
    9308  lastOffset = suballoc.offset + suballoc.size;
    9309  ++nextAlloc1stIndex;
    9310  }
    9311  // We are at the end.
    9312  else
    9313  {
    9314  if(lastOffset < freeSpace1stTo2ndEnd)
    9315  {
    9316  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9317  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    9318  inoutStats.unusedSize += unusedRangeSize;
    9319  ++inoutStats.unusedRangeCount;
    9320  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9321  }
    9322 
    9323  // End of loop.
    9324  lastOffset = freeSpace1stTo2ndEnd;
    9325  }
    9326  }
    9327 
    9328  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9329  {
    9330  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9331  while(lastOffset < size)
    9332  {
    9333  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9334  while(nextAlloc2ndIndex != SIZE_MAX &&
    9335  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9336  {
    9337  --nextAlloc2ndIndex;
    9338  }
    9339 
    9340  // Found non-null allocation.
    9341  if(nextAlloc2ndIndex != SIZE_MAX)
    9342  {
    9343  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9344 
    9345  // 1. Process free space before this allocation.
    9346  if(lastOffset < suballoc.offset)
    9347  {
    9348  // There is free space from lastOffset to suballoc.offset.
    9349  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9350  inoutStats.unusedSize += unusedRangeSize;
    9351  ++inoutStats.unusedRangeCount;
    9352  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9353  }
    9354 
    9355  // 2. Process this allocation.
    9356  // There is allocation with suballoc.offset, suballoc.size.
    9357  ++inoutStats.allocationCount;
    9358 
    9359  // 3. Prepare for next iteration.
    9360  lastOffset = suballoc.offset + suballoc.size;
    9361  --nextAlloc2ndIndex;
    9362  }
    9363  // We are at the end.
    9364  else
    9365  {
    9366  if(lastOffset < size)
    9367  {
    9368  // There is free space from lastOffset to size.
    9369  const VkDeviceSize unusedRangeSize = size - lastOffset;
    9370  inoutStats.unusedSize += unusedRangeSize;
    9371  ++inoutStats.unusedRangeCount;
    9372  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9373  }
    9374 
    9375  // End of loop.
    9376  lastOffset = size;
    9377  }
    9378  }
    9379  }
    9380 }
    9381 
    9382 #if VMA_STATS_STRING_ENABLED
    9383 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
    9384 {
    9385  const VkDeviceSize size = GetSize();
    9386  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9387  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9388  const size_t suballoc1stCount = suballocations1st.size();
    9389  const size_t suballoc2ndCount = suballocations2nd.size();
    9390 
    9391  // FIRST PASS
    9392 
    9393  size_t unusedRangeCount = 0;
    9394  VkDeviceSize usedBytes = 0;
    9395 
    9396  VkDeviceSize lastOffset = 0;
    9397 
    9398  size_t alloc2ndCount = 0;
    9399  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9400  {
    9401  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    9402  size_t nextAlloc2ndIndex = 0;
    9403  while(lastOffset < freeSpace2ndTo1stEnd)
    9404  {
    9405  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9406  while(nextAlloc2ndIndex < suballoc2ndCount &&
    9407  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9408  {
    9409  ++nextAlloc2ndIndex;
    9410  }
    9411 
    9412  // Found non-null allocation.
    9413  if(nextAlloc2ndIndex < suballoc2ndCount)
    9414  {
    9415  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9416 
    9417  // 1. Process free space before this allocation.
    9418  if(lastOffset < suballoc.offset)
    9419  {
    9420  // There is free space from lastOffset to suballoc.offset.
    9421  ++unusedRangeCount;
    9422  }
    9423 
    9424  // 2. Process this allocation.
    9425  // There is allocation with suballoc.offset, suballoc.size.
    9426  ++alloc2ndCount;
    9427  usedBytes += suballoc.size;
    9428 
    9429  // 3. Prepare for next iteration.
    9430  lastOffset = suballoc.offset + suballoc.size;
    9431  ++nextAlloc2ndIndex;
    9432  }
    9433  // We are at the end.
    9434  else
    9435  {
    9436  if(lastOffset < freeSpace2ndTo1stEnd)
    9437  {
    9438  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    9439  ++unusedRangeCount;
    9440  }
    9441 
    9442  // End of loop.
    9443  lastOffset = freeSpace2ndTo1stEnd;
    9444  }
    9445  }
    9446  }
    9447 
    9448  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9449  size_t alloc1stCount = 0;
    9450  const VkDeviceSize freeSpace1stTo2ndEnd =
    9451  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    9452  while(lastOffset < freeSpace1stTo2ndEnd)
    9453  {
    9454  // Find next non-null allocation or move nextAlloc1stIndex to the end.
    9455  while(nextAlloc1stIndex < suballoc1stCount &&
    9456  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9457  {
    9458  ++nextAlloc1stIndex;
    9459  }
    9460 
    9461  // Found non-null allocation.
    9462  if(nextAlloc1stIndex < suballoc1stCount)
    9463  {
    9464  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9465 
    9466  // 1. Process free space before this allocation.
    9467  if(lastOffset < suballoc.offset)
    9468  {
    9469  // There is free space from lastOffset to suballoc.offset.
    9470  ++unusedRangeCount;
    9471  }
    9472 
    9473  // 2. Process this allocation.
    9474  // There is allocation with suballoc.offset, suballoc.size.
    9475  ++alloc1stCount;
    9476  usedBytes += suballoc.size;
    9477 
    9478  // 3. Prepare for next iteration.
    9479  lastOffset = suballoc.offset + suballoc.size;
    9480  ++nextAlloc1stIndex;
    9481  }
    9482  // We are at the end.
    9483  else
    9484  {
    9485  if(lastOffset < freeSpace1stTo2ndEnd)
    9486  {
    9487  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9488  ++unusedRangeCount;
    9489  }
    9490 
    9491  // End of loop.
    9492  lastOffset = freeSpace1stTo2ndEnd;
    9493  }
    9494  }
    9495 
    9496  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9497  {
    9498  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9499  while(lastOffset < size)
    9500  {
    9501  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9502  while(nextAlloc2ndIndex != SIZE_MAX &&
    9503  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9504  {
    9505  --nextAlloc2ndIndex;
    9506  }
    9507 
    9508  // Found non-null allocation.
    9509  if(nextAlloc2ndIndex != SIZE_MAX)
    9510  {
    9511  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9512 
    9513  // 1. Process free space before this allocation.
    9514  if(lastOffset < suballoc.offset)
    9515  {
    9516  // There is free space from lastOffset to suballoc.offset.
    9517  ++unusedRangeCount;
    9518  }
    9519 
    9520  // 2. Process this allocation.
    9521  // There is allocation with suballoc.offset, suballoc.size.
    9522  ++alloc2ndCount;
    9523  usedBytes += suballoc.size;
    9524 
    9525  // 3. Prepare for next iteration.
    9526  lastOffset = suballoc.offset + suballoc.size;
    9527  --nextAlloc2ndIndex;
    9528  }
    9529  // We are at the end.
    9530  else
    9531  {
    9532  if(lastOffset < size)
    9533  {
    9534  // There is free space from lastOffset to size.
    9535  ++unusedRangeCount;
    9536  }
    9537 
    9538  // End of loop.
    9539  lastOffset = size;
    9540  }
    9541  }
    9542  }
    9543 
    9544  const VkDeviceSize unusedBytes = size - usedBytes;
    9545  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
    9546 
    9547  // SECOND PASS
    9548  lastOffset = 0;
    9549 
    9550  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9551  {
    9552  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    9553  size_t nextAlloc2ndIndex = 0;
    9554  while(lastOffset < freeSpace2ndTo1stEnd)
    9555  {
    9556  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9557  while(nextAlloc2ndIndex < suballoc2ndCount &&
    9558  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9559  {
    9560  ++nextAlloc2ndIndex;
    9561  }
    9562 
    9563  // Found non-null allocation.
    9564  if(nextAlloc2ndIndex < suballoc2ndCount)
    9565  {
    9566  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9567 
    9568  // 1. Process free space before this allocation.
    9569  if(lastOffset < suballoc.offset)
    9570  {
    9571  // There is free space from lastOffset to suballoc.offset.
    9572  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9573  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9574  }
    9575 
    9576  // 2. Process this allocation.
    9577  // There is allocation with suballoc.offset, suballoc.size.
    9578  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    9579 
    9580  // 3. Prepare for next iteration.
    9581  lastOffset = suballoc.offset + suballoc.size;
    9582  ++nextAlloc2ndIndex;
    9583  }
    9584  // We are at the end.
    9585  else
    9586  {
    9587  if(lastOffset < freeSpace2ndTo1stEnd)
    9588  {
    9589  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    9590  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    9591  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9592  }
    9593 
    9594  // End of loop.
    9595  lastOffset = freeSpace2ndTo1stEnd;
    9596  }
    9597  }
    9598  }
    9599 
    9600  nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9601  while(lastOffset < freeSpace1stTo2ndEnd)
    9602  {
    9603  // Find next non-null allocation or move nextAlloc1stIndex to the end.
    9604  while(nextAlloc1stIndex < suballoc1stCount &&
    9605  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9606  {
    9607  ++nextAlloc1stIndex;
    9608  }
    9609 
    9610  // Found non-null allocation.
    9611  if(nextAlloc1stIndex < suballoc1stCount)
    9612  {
    9613  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9614 
    9615  // 1. Process free space before this allocation.
    9616  if(lastOffset < suballoc.offset)
    9617  {
    9618  // There is free space from lastOffset to suballoc.offset.
    9619  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9620  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9621  }
    9622 
    9623  // 2. Process this allocation.
    9624  // There is allocation with suballoc.offset, suballoc.size.
    9625  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    9626 
    9627  // 3. Prepare for next iteration.
    9628  lastOffset = suballoc.offset + suballoc.size;
    9629  ++nextAlloc1stIndex;
    9630  }
    9631  // We are at the end.
    9632  else
    9633  {
    9634  if(lastOffset < freeSpace1stTo2ndEnd)
    9635  {
    9636  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9637  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    9638  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9639  }
    9640 
    9641  // End of loop.
    9642  lastOffset = freeSpace1stTo2ndEnd;
    9643  }
    9644  }
    9645 
    9646  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9647  {
    9648  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9649  while(lastOffset < size)
    9650  {
    9651  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9652  while(nextAlloc2ndIndex != SIZE_MAX &&
    9653  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9654  {
    9655  --nextAlloc2ndIndex;
    9656  }
    9657 
    9658  // Found non-null allocation.
    9659  if(nextAlloc2ndIndex != SIZE_MAX)
    9660  {
    9661  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9662 
    9663  // 1. Process free space before this allocation.
    9664  if(lastOffset < suballoc.offset)
    9665  {
    9666  // There is free space from lastOffset to suballoc.offset.
    9667  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9668  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9669  }
    9670 
    9671  // 2. Process this allocation.
    9672  // There is allocation with suballoc.offset, suballoc.size.
    9673  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    9674 
    9675  // 3. Prepare for next iteration.
    9676  lastOffset = suballoc.offset + suballoc.size;
    9677  --nextAlloc2ndIndex;
    9678  }
    9679  // We are at the end.
    9680  else
    9681  {
    9682  if(lastOffset < size)
    9683  {
    9684  // There is free space from lastOffset to size.
    9685  const VkDeviceSize unusedRangeSize = size - lastOffset;
    9686  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9687  }
    9688 
    9689  // End of loop.
    9690  lastOffset = size;
    9691  }
    9692  }
    9693  }
    9694 
    9695  PrintDetailedMap_End(json);
    9696 }
    9697 #endif // #if VMA_STATS_STRING_ENABLED
    9698 
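/*
Illustrative sketch (not part of the library): the two-pass shape of
PrintDetailedMap() above. The JSON header needs aggregate totals before any
entry is written, so the first pass only counts and the second replays the
identical traversal to emit entries. PrintTwoPass and the callback shape are
hypothetical simplifications.

\code
#include <cstdio>
#include <functional>

// walk(onRange) must visit each (offset, size, isAllocation) range exactly once.
typedef std::function<void(unsigned long long, unsigned long long, bool)> RangeFn;

void PrintTwoPass(const std::function<void(const RangeFn&)>& walk)
{
    unsigned allocs = 0, gaps = 0;
    walk([&](unsigned long long, unsigned long long, bool isAlloc)
        { if(isAlloc) ++allocs; else ++gaps; }); // pass 1: totals for the header
    std::printf("{\"Allocations\":%u,\"UnusedRanges\":%u,\"Entries\":[", allocs, gaps);
    walk([&](unsigned long long off, unsigned long long sz, bool isAlloc)
        { std::printf("[%llu,%llu,%d]", off, sz, (int)isAlloc); }); // pass 2: emit entries
    std::printf("]}\n");
}
\endcode
*/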
    9699 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
    9700  uint32_t currentFrameIndex,
    9701  uint32_t frameInUseCount,
    9702  VkDeviceSize bufferImageGranularity,
    9703  VkDeviceSize allocSize,
    9704  VkDeviceSize allocAlignment,
    9705  bool upperAddress,
    9706  VmaSuballocationType allocType,
    9707  bool canMakeOtherLost,
    9708  uint32_t strategy,
    9709  VmaAllocationRequest* pAllocationRequest)
    9710 {
    9711  VMA_ASSERT(allocSize > 0);
    9712  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    9713  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    9714  VMA_HEAVY_ASSERT(Validate());
    9715  return upperAddress ?
    9716  CreateAllocationRequest_UpperAddress(
    9717  currentFrameIndex, frameInUseCount, bufferImageGranularity,
    9718  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) :
    9719  CreateAllocationRequest_LowerAddress(
    9720  currentFrameIndex, frameInUseCount, bufferImageGranularity,
    9721  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest);
    9722 }
    9723 
    9724 bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
    9725  uint32_t currentFrameIndex,
    9726  uint32_t frameInUseCount,
    9727  VkDeviceSize bufferImageGranularity,
    9728  VkDeviceSize allocSize,
    9729  VkDeviceSize allocAlignment,
    9730  VmaSuballocationType allocType,
    9731  bool canMakeOtherLost,
    9732  uint32_t strategy,
    9733  VmaAllocationRequest* pAllocationRequest)
    9734 {
    9735  const VkDeviceSize size = GetSize();
    9736  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9737  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9738 
    9739  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9740  {
    9741  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
    9742  return false;
    9743  }
    9744 
    9745  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
    9746  if(allocSize > size)
    9747  {
    9748  return false;
    9749  }
    9750  VkDeviceSize resultBaseOffset = size - allocSize;
    9751  if(!suballocations2nd.empty())
    9752  {
    9753  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
    9754  resultBaseOffset = lastSuballoc.offset - allocSize;
    9755  if(allocSize > lastSuballoc.offset)
    9756  {
    9757  return false;
    9758  }
    9759  }
    9760 
    9761  // Start from offset equal to end of free space.
    9762  VkDeviceSize resultOffset = resultBaseOffset;
    9763 
    9764  // Apply VMA_DEBUG_MARGIN at the end.
    9765  if(VMA_DEBUG_MARGIN > 0)
    9766  {
    9767  if(resultOffset < VMA_DEBUG_MARGIN)
    9768  {
    9769  return false;
    9770  }
    9771  resultOffset -= VMA_DEBUG_MARGIN;
    9772  }
    9773 
    9774  // Apply alignment.
    9775  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
    9776 
    9777  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
    9778  // Make bigger alignment if necessary.
    9779  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    9780  {
    9781  bool bufferImageGranularityConflict = false;
    9782  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
    9783  {
    9784  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
    9785  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    9786  {
    9787  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
    9788  {
    9789  bufferImageGranularityConflict = true;
    9790  break;
    9791  }
    9792  }
    9793  else
    9794  // Already on previous page.
    9795  break;
    9796  }
    9797  if(bufferImageGranularityConflict)
    9798  {
    9799  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
    9800  }
    9801  }
    9802 
    9803  // There is enough free space.
    9804  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
    9805  suballocations1st.back().offset + suballocations1st.back().size :
    9806  0;
    9807  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
    9808  {
    9809  // Check previous suballocations for BufferImageGranularity conflicts.
    9810  // If conflict exists, allocation cannot be made here.
    9811  if(bufferImageGranularity > 1)
    9812  {
    9813  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
    9814  {
    9815  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
    9816  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    9817  {
    9818  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
    9819  {
    9820  return false;
    9821  }
    9822  }
    9823  else
    9824  {
    9825  // Already on next page.
    9826  break;
    9827  }
    9828  }
    9829  }
    9830 
    9831  // All tests passed: Success.
    9832  pAllocationRequest->offset = resultOffset;
    9833  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
    9834  pAllocationRequest->sumItemSize = 0;
    9835  // pAllocationRequest->item unused.
    9836  pAllocationRequest->itemsToMakeLostCount = 0;
    9837  pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
    9838  return true;
    9839  }
    9840 
    9841  return false;
    9842 }
    9843 
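/*
Worked example for the top-down placement in
CreateAllocationRequest_UpperAddress() above (illustrative numbers): block size
1024, allocSize 100, allocAlignment 64, VMA_DEBUG_MARGIN 0, empty 2nd vector.
resultBaseOffset = 1024 - 100 = 924; VmaAlignDown(924, 64) = 896, so the
allocation occupies [896, 996) and the next upper-address allocation must be
placed below offset 896.
*/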
    9844 bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
    9845  uint32_t currentFrameIndex,
    9846  uint32_t frameInUseCount,
    9847  VkDeviceSize bufferImageGranularity,
    9848  VkDeviceSize allocSize,
    9849  VkDeviceSize allocAlignment,
    9850  VmaSuballocationType allocType,
    9851  bool canMakeOtherLost,
    9852  uint32_t strategy,
    9853  VmaAllocationRequest* pAllocationRequest)
    9854 {
    9855  const VkDeviceSize size = GetSize();
    9856  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9857  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9858 
    9859  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9860  {
    9861  // Try to allocate at the end of 1st vector.
    9862 
    9863  VkDeviceSize resultBaseOffset = 0;
    9864  if(!suballocations1st.empty())
    9865  {
    9866  const VmaSuballocation& lastSuballoc = suballocations1st.back();
    9867  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
    9868  }
    9869 
    9870  // Start from offset equal to beginning of free space.
    9871  VkDeviceSize resultOffset = resultBaseOffset;
    9872 
    9873  // Apply VMA_DEBUG_MARGIN at the beginning.
    9874  if(VMA_DEBUG_MARGIN > 0)
    9875  {
    9876  resultOffset += VMA_DEBUG_MARGIN;
    9877  }
    9878 
    9879  // Apply alignment.
    9880  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
    9881 
    9882  // Check previous suballocations for BufferImageGranularity conflicts.
    9883  // Make bigger alignment if necessary.
    9884  if(bufferImageGranularity > 1 && !suballocations1st.empty())
    9885  {
    9886  bool bufferImageGranularityConflict = false;
    9887  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
    9888  {
    9889  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
    9890  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    9891  {
    9892  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    9893  {
    9894  bufferImageGranularityConflict = true;
    9895  break;
    9896  }
    9897  }
    9898  else
    9899  // Already on previous page.
    9900  break;
    9901  }
    9902  if(bufferImageGranularityConflict)
    9903  {
    9904  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
    9905  }
    9906  }
    9907 
    9908  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
    9909  suballocations2nd.back().offset : size;
    9910 
    9911  // There is enough free space at the end after alignment.
    9912  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
    9913  {
    9914  // Check next suballocations for BufferImageGranularity conflicts.
    9915  // If conflict exists, allocation cannot be made here.
    9916  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9917  {
    9918  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
    9919  {
    9920  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
    9921  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    9922  {
    9923  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    9924  {
    9925  return false;
    9926  }
    9927  }
    9928  else
    9929  {
    9930  // Already on previous page.
    9931  break;
    9932  }
    9933  }
    9934  }
    9935 
    9936  // All tests passed: Success.
    9937  pAllocationRequest->offset = resultOffset;
    9938  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
    9939  pAllocationRequest->sumItemSize = 0;
    9940  // pAllocationRequest->item, customData unused.
    9941  pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
    9942  pAllocationRequest->itemsToMakeLostCount = 0;
    9943  return true;
    9944  }
    9945  }
    9946 
    9947  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
    9948  // beginning of 1st vector as the end of free space.
    9949  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9950  {
    9951  VMA_ASSERT(!suballocations1st.empty());
    9952 
    9953  VkDeviceSize resultBaseOffset = 0;
    9954  if(!suballocations2nd.empty())
    9955  {
    9956  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
    9957  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
    9958  }
    9959 
    9960  // Start from offset equal to beginning of free space.
    9961  VkDeviceSize resultOffset = resultBaseOffset;
    9962 
    9963  // Apply VMA_DEBUG_MARGIN at the beginning.
    9964  if(VMA_DEBUG_MARGIN > 0)
    9965  {
    9966  resultOffset += VMA_DEBUG_MARGIN;
    9967  }
    9968 
    9969  // Apply alignment.
    9970  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
    9971 
    9972  // Check previous suballocations for BufferImageGranularity conflicts.
    9973  // Make bigger alignment if necessary.
    9974  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    9975  {
    9976  bool bufferImageGranularityConflict = false;
    9977  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
    9978  {
    9979  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
    9980  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    9981  {
    9982  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    9983  {
    9984  bufferImageGranularityConflict = true;
    9985  break;
    9986  }
    9987  }
    9988  else
    9989  // Already on previous page.
    9990  break;
    9991  }
    9992  if(bufferImageGranularityConflict)
    9993  {
    9994  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
    9995  }
    9996  }
    9997 
    9998  pAllocationRequest->itemsToMakeLostCount = 0;
    9999  pAllocationRequest->sumItemSize = 0;
    10000  size_t index1st = m_1stNullItemsBeginCount;
    10001 
    10002  if(canMakeOtherLost)
    10003  {
    10004  while(index1st < suballocations1st.size() &&
    10005  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
    10006  {
    10007  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
    10008  const VmaSuballocation& suballoc = suballocations1st[index1st];
    10009  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
    10010  {
    10011  // No problem.
    10012  }
    10013  else
    10014  {
    10015  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
    10016  if(suballoc.hAllocation->CanBecomeLost() &&
    10017  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    10018  {
    10019  ++pAllocationRequest->itemsToMakeLostCount;
    10020  pAllocationRequest->sumItemSize += suballoc.size;
    10021  }
    10022  else
    10023  {
    10024  return false;
    10025  }
    10026  }
    10027  ++index1st;
    10028  }
    10029 
    10030  // Check next suballocations for BufferImageGranularity conflicts.
    10031  // If conflict exists, we must mark more allocations lost or fail.
    10032  if(bufferImageGranularity > 1)
    10033  {
    10034  while(index1st < suballocations1st.size())
    10035  {
    10036  const VmaSuballocation& suballoc = suballocations1st[index1st];
    10037  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
    10038  {
    10039  if(suballoc.hAllocation != VK_NULL_HANDLE)
    10040  {
    10041  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
    10042  if(suballoc.hAllocation->CanBecomeLost() &&
    10043  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    10044  {
    10045  ++pAllocationRequest->itemsToMakeLostCount;
    10046  pAllocationRequest->sumItemSize += suballoc.size;
    10047  }
    10048  else
    10049  {
    10050  return false;
    10051  }
    10052  }
    10053  }
    10054  else
    10055  {
    10056  // Already on next page.
    10057  break;
    10058  }
    10059  ++index1st;
    10060  }
    10061  }
    10062 
    10063  // Special case: There is not enough room at the end for this allocation, even after making all from the 1st lost.
    10064  if(index1st == suballocations1st.size() &&
    10065  resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
    10066  {
    10067  // TODO: Known limitation: this special case is not yet implemented, so the allocation fails.
    10068  VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
    10069  }
    10070  }
    10071 
    10072  // There is enough free space at the end after alignment.
    10073  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
    10074  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
    10075  {
    10076  // Check next suballocations for BufferImageGranularity conflicts.
    10077  // If conflict exists, allocation cannot be made here.
    10078  if(bufferImageGranularity > 1)
    10079  {
    10080  for(size_t nextSuballocIndex = index1st;
    10081  nextSuballocIndex < suballocations1st.size();
    10082  nextSuballocIndex++)
    10083  {
    10084  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
    10085  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    10086  {
    10087  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    10088  {
    10089  return false;
    10090  }
    10091  }
    10092  else
    10093  {
    10094  // Already on next page.
    10095  break;
    10096  }
    10097  }
    10098  }
    10099 
    10100  // All tests passed: Success.
    10101  pAllocationRequest->offset = resultOffset;
    10102  pAllocationRequest->sumFreeSize =
    10103  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
    10104  - resultBaseOffset
    10105  - pAllocationRequest->sumItemSize;
    10106  pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
    10107  // pAllocationRequest->item, customData unused.
    10108  return true;
    10109  }
    10110  }
    10111 
    10112  return false;
    10113 }
    10114 
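/*
Worked example for the bottom-up placement in
CreateAllocationRequest_LowerAddress() above (illustrative numbers): the last
1st-vector suballocation ends at offset 900, VMA_DEBUG_MARGIN = 4 and
allocAlignment = 64. Then resultBaseOffset = 900, adding the margin gives 904,
and VmaAlignUp(904, 64) = 960. An allocation of size 50 succeeds only if
960 + 50 + 4 <= freeSpaceEnd, where freeSpaceEnd is the top of the 2nd stack
in double-stack mode or the block size otherwise.
*/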
    10115 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    10116  uint32_t currentFrameIndex,
    10117  uint32_t frameInUseCount,
    10118  VmaAllocationRequest* pAllocationRequest)
    10119 {
    10120  if(pAllocationRequest->itemsToMakeLostCount == 0)
    10121  {
    10122  return true;
    10123  }
    10124 
    10125  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
    10126 
    10127  // We always start from 1st.
    10128  SuballocationVectorType* suballocations = &AccessSuballocations1st();
    10129  size_t index = m_1stNullItemsBeginCount;
    10130  size_t madeLostCount = 0;
    10131  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    10132  {
    10133  if(index == suballocations->size())
    10134  {
    10135  index = 0;
    10136  // If we get to the end of 1st, we wrap around to the beginning of 2nd or of 1st.
    10137  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    10138  {
    10139  suballocations = &AccessSuballocations2nd();
    10140  }
    10141  // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
    10142  // suballocations continues pointing at AccessSuballocations1st().
    10143  VMA_ASSERT(!suballocations->empty());
    10144  }
    10145  VmaSuballocation& suballoc = (*suballocations)[index];
    10146  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    10147  {
    10148  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
    10149  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
    10150  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    10151  {
    10152  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    10153  suballoc.hAllocation = VK_NULL_HANDLE;
    10154  m_SumFreeSize += suballoc.size;
    10155  if(suballocations == &AccessSuballocations1st())
    10156  {
    10157  ++m_1stNullItemsMiddleCount;
    10158  }
    10159  else
    10160  {
    10161  ++m_2ndNullItemsCount;
    10162  }
    10163  ++madeLostCount;
    10164  }
    10165  else
    10166  {
    10167  return false;
    10168  }
    10169  }
    10170  ++index;
    10171  }
    10172 
    10173  CleanupAfterFree();
    10174  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
    10175 
    10176  return true;
    10177 }
    10178 
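/*
Worked example of the "can become lost" test applied above (illustrative
numbers): with frameInUseCount = 2 and an allocation last used in frame 7,
MakeLost() can succeed only once
lastUseFrameIndex + frameInUseCount < currentFrameIndex, i.e. 7 + 2 < 10, so
from frame 10 onward. Before that the function above returns false and the
request is abandoned.
*/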
    10179 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    10180 {
    10181  uint32_t lostAllocationCount = 0;
    10182 
    10183  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10184  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    10185  {
    10186  VmaSuballocation& suballoc = suballocations1st[i];
    10187  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    10188  suballoc.hAllocation->CanBecomeLost() &&
    10189  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    10190  {
    10191  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    10192  suballoc.hAllocation = VK_NULL_HANDLE;
    10193  ++m_1stNullItemsMiddleCount;
    10194  m_SumFreeSize += suballoc.size;
    10195  ++lostAllocationCount;
    10196  }
    10197  }
    10198 
    10199  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10200  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    10201  {
    10202  VmaSuballocation& suballoc = suballocations2nd[i];
    10203  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    10204  suballoc.hAllocation->CanBecomeLost() &&
    10205  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    10206  {
    10207  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    10208  suballoc.hAllocation = VK_NULL_HANDLE;
    10209  ++m_2ndNullItemsCount;
    10210  m_SumFreeSize += suballoc.size;
    10211  ++lostAllocationCount;
    10212  }
    10213  }
    10214 
    10215  if(lostAllocationCount)
    10216  {
    10217  CleanupAfterFree();
    10218  }
    10219 
    10220  return lostAllocationCount;
    10221 }
    10222 
    10223 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
    10224 {
    10225  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10226  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    10227  {
    10228  const VmaSuballocation& suballoc = suballocations1st[i];
    10229  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    10230  {
    10231  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    10232  {
    10233  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    10234  return VK_ERROR_VALIDATION_FAILED_EXT;
    10235  }
    10236  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    10237  {
    10238  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    10239  return VK_ERROR_VALIDATION_FAILED_EXT;
    10240  }
    10241  }
    10242  }
    10243 
    10244  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10245  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    10246  {
    10247  const VmaSuballocation& suballoc = suballocations2nd[i];
    10248  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    10249  {
    10250  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    10251  {
    10252  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    10253  return VK_ERROR_VALIDATION_FAILED_EXT;
    10254  }
    10255  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    10256  {
    10257  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    10258  return VK_ERROR_VALIDATION_FAILED_EXT;
    10259  }
    10260  }
    10261  }
    10262 
    10263  return VK_SUCCESS;
    10264 }
    10265 
    10266 void VmaBlockMetadata_Linear::Alloc(
    10267  const VmaAllocationRequest& request,
    10268  VmaSuballocationType type,
    10269  VkDeviceSize allocSize,
    10270  VmaAllocation hAllocation)
    10271 {
    10272  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
    10273 
    10274  switch(request.type)
    10275  {
    10276  case VmaAllocationRequestType::UpperAddress:
    10277  {
    10278  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
    10279  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
    10280  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10281  suballocations2nd.push_back(newSuballoc);
    10282  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    10283  }
    10284  break;
    10285  case VmaAllocationRequestType::EndOf1st:
    10286  {
    10287  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10288 
    10289  VMA_ASSERT(suballocations1st.empty() ||
    10290  request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
    10291  // Check if it fits before the end of the block.
    10292  VMA_ASSERT(request.offset + allocSize <= GetSize());
    10293 
    10294  suballocations1st.push_back(newSuballoc);
    10295  }
    10296  break;
    10297  case VmaAllocationRequestType::EndOf2nd:
    10298  {
    10299  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10300  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
    10301  VMA_ASSERT(!suballocations1st.empty() &&
    10302  request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
    10303  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10304 
    10305  switch(m_2ndVectorMode)
    10306  {
    10307  case SECOND_VECTOR_EMPTY:
    10308  // First allocation from second part ring buffer.
    10309  VMA_ASSERT(suballocations2nd.empty());
    10310  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
    10311  break;
    10312  case SECOND_VECTOR_RING_BUFFER:
    10313  // 2-part ring buffer is already started.
    10314  VMA_ASSERT(!suballocations2nd.empty());
    10315  break;
    10316  case SECOND_VECTOR_DOUBLE_STACK:
    10317  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
    10318  break;
    10319  default:
    10320  VMA_ASSERT(0);
    10321  }
    10322 
    10323  suballocations2nd.push_back(newSuballoc);
    10324  }
    10325  break;
    10326  default:
    10327  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
    10328  }
    10329 
    10330  m_SumFreeSize -= newSuballoc.size;
    10331 }
    10332 
    10333 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
    10334 {
    10335  FreeAtOffset(allocation->GetOffset());
    10336 }
    10337 
    10338 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
    10339 {
    10340  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10341  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10342 
    10343  if(!suballocations1st.empty())
    10344  {
10345  // If the freed suballocation is the first one: mark it as the next empty item at the beginning.
    10346  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
    10347  if(firstSuballoc.offset == offset)
    10348  {
    10349  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    10350  firstSuballoc.hAllocation = VK_NULL_HANDLE;
    10351  m_SumFreeSize += firstSuballoc.size;
    10352  ++m_1stNullItemsBeginCount;
    10353  CleanupAfterFree();
    10354  return;
    10355  }
    10356  }
    10357 
    10358  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    10359  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
    10360  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    10361  {
    10362  VmaSuballocation& lastSuballoc = suballocations2nd.back();
    10363  if(lastSuballoc.offset == offset)
    10364  {
    10365  m_SumFreeSize += lastSuballoc.size;
    10366  suballocations2nd.pop_back();
    10367  CleanupAfterFree();
    10368  return;
    10369  }
    10370  }
    10371  // Last allocation in 1st vector.
    10372  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    10373  {
    10374  VmaSuballocation& lastSuballoc = suballocations1st.back();
    10375  if(lastSuballoc.offset == offset)
    10376  {
    10377  m_SumFreeSize += lastSuballoc.size;
    10378  suballocations1st.pop_back();
    10379  CleanupAfterFree();
    10380  return;
    10381  }
    10382  }
    10383 
    10384  // Item from the middle of 1st vector.
    10385  {
    10386  VmaSuballocation refSuballoc;
    10387  refSuballoc.offset = offset;
    10388  // Rest of members stays uninitialized intentionally for better performance.
    10389  SuballocationVectorType::iterator it = VmaBinaryFindSorted(
    10390  suballocations1st.begin() + m_1stNullItemsBeginCount,
    10391  suballocations1st.end(),
    10392  refSuballoc,
    10393  VmaSuballocationOffsetLess());
    10394  if(it != suballocations1st.end())
    10395  {
    10396  it->type = VMA_SUBALLOCATION_TYPE_FREE;
    10397  it->hAllocation = VK_NULL_HANDLE;
    10398  ++m_1stNullItemsMiddleCount;
    10399  m_SumFreeSize += it->size;
    10400  CleanupAfterFree();
    10401  return;
    10402  }
    10403  }
    10404 
    10405  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    10406  {
    10407  // Item from the middle of 2nd vector.
    10408  VmaSuballocation refSuballoc;
    10409  refSuballoc.offset = offset;
    10410  // Rest of members stays uninitialized intentionally for better performance.
    10411  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
    10412  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
    10413  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
    10414  if(it != suballocations2nd.end())
    10415  {
    10416  it->type = VMA_SUBALLOCATION_TYPE_FREE;
    10417  it->hAllocation = VK_NULL_HANDLE;
    10418  ++m_2ndNullItemsCount;
    10419  m_SumFreeSize += it->size;
    10420  CleanupAfterFree();
    10421  return;
    10422  }
    10423  }
    10424 
    10425  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
    10426 }
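
// Note on the lookup order above: the fast paths (first item of the 1st
// vector, back of the 2nd vector, back of the 1st vector) match the typical
// FIFO/LIFO patterns of a ring buffer or double stack, so only frees from
// the middle of a vector pay for a binary search.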
    10427 
    10428 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
    10429 {
    10430  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    10431  const size_t suballocCount = AccessSuballocations1st().size();
    10432  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
    10433 }
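
// A quick worked example of the heuristic above (illustrative numbers only):
// compaction starts once null items outnumber live items 3:2. With
// suballocCount = 100 and nullItemCount = 60:
//     60 * 2 = 120  >=  (100 - 60) * 3 = 120  ->  compact.
// With nullItemCount = 50: 100 >= 150 is false, so the vector is left as is.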
    10434 
    10435 void VmaBlockMetadata_Linear::CleanupAfterFree()
    10436 {
    10437  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10438  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10439 
    10440  if(IsEmpty())
    10441  {
    10442  suballocations1st.clear();
    10443  suballocations2nd.clear();
    10444  m_1stNullItemsBeginCount = 0;
    10445  m_1stNullItemsMiddleCount = 0;
    10446  m_2ndNullItemsCount = 0;
    10447  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    10448  }
    10449  else
    10450  {
    10451  const size_t suballoc1stCount = suballocations1st.size();
    10452  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    10453  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
    10454 
    10455  // Find more null items at the beginning of 1st vector.
    10456  while(m_1stNullItemsBeginCount < suballoc1stCount &&
    10457  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
    10458  {
    10459  ++m_1stNullItemsBeginCount;
    10460  --m_1stNullItemsMiddleCount;
    10461  }
    10462 
    10463  // Find more null items at the end of 1st vector.
    10464  while(m_1stNullItemsMiddleCount > 0 &&
    10465  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
    10466  {
    10467  --m_1stNullItemsMiddleCount;
    10468  suballocations1st.pop_back();
    10469  }
    10470 
    10471  // Find more null items at the end of 2nd vector.
    10472  while(m_2ndNullItemsCount > 0 &&
    10473  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
    10474  {
    10475  --m_2ndNullItemsCount;
    10476  suballocations2nd.pop_back();
    10477  }
    10478 
    10479  // Find more null items at the beginning of 2nd vector.
    10480  while(m_2ndNullItemsCount > 0 &&
    10481  suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
    10482  {
    10483  --m_2ndNullItemsCount;
    10484  VmaVectorRemove(suballocations2nd, 0);
    10485  }
    10486 
    10487  if(ShouldCompact1st())
    10488  {
    10489  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
    10490  size_t srcIndex = m_1stNullItemsBeginCount;
    10491  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
    10492  {
    10493  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
    10494  {
    10495  ++srcIndex;
    10496  }
    10497  if(dstIndex != srcIndex)
    10498  {
    10499  suballocations1st[dstIndex] = suballocations1st[srcIndex];
    10500  }
    10501  ++srcIndex;
    10502  }
    10503  suballocations1st.resize(nonNullItemCount);
    10504  m_1stNullItemsBeginCount = 0;
    10505  m_1stNullItemsMiddleCount = 0;
    10506  }
    10507 
    10508  // 2nd vector became empty.
    10509  if(suballocations2nd.empty())
    10510  {
    10511  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    10512  }
    10513 
    10514  // 1st vector became empty.
    10515  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
    10516  {
    10517  suballocations1st.clear();
    10518  m_1stNullItemsBeginCount = 0;
    10519 
    10520  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    10521  {
    10522  // Swap 1st with 2nd. Now 2nd is empty.
    10523  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    10524  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
    10525  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
    10526  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
    10527  {
    10528  ++m_1stNullItemsBeginCount;
    10529  --m_1stNullItemsMiddleCount;
    10530  }
    10531  m_2ndNullItemsCount = 0;
    10532  m_1stVectorIndex ^= 1;
    10533  }
    10534  }
    10535  }
    10536 
    10537  VMA_HEAVY_ASSERT(Validate());
    10538 }
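
// Illustration of the swap above, assuming the 2nd vector is in ring-buffer
// mode: once every item of the 1st vector has been freed, flipping
// m_1stVectorIndex makes the former 2nd vector the new 1st one and transfers
// its null-item count, so the allocator keeps operating on a single
// non-empty vector without copying any suballocations.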
    10539 
    10540 
10541  ////////////////////////////////////////////////////////////////////////////////
10542  // class VmaBlockMetadata_Buddy
    10543 
    10544 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    10545  VmaBlockMetadata(hAllocator),
    10546  m_Root(VMA_NULL),
    10547  m_AllocationCount(0),
    10548  m_FreeCount(1),
    10549  m_SumFreeSize(0)
    10550 {
    10551  memset(m_FreeList, 0, sizeof(m_FreeList));
    10552 }
    10553 
    10554 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
    10555 {
    10556  DeleteNode(m_Root);
    10557 }
    10558 
    10559 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
    10560 {
    10561  VmaBlockMetadata::Init(size);
    10562 
    10563  m_UsableSize = VmaPrevPow2(size);
    10564  m_SumFreeSize = m_UsableSize;
    10565 
    10566  // Calculate m_LevelCount.
    10567  m_LevelCount = 1;
    10568  while(m_LevelCount < MAX_LEVELS &&
    10569  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
    10570  {
    10571  ++m_LevelCount;
    10572  }
    10573 
    10574  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
    10575  rootNode->offset = 0;
    10576  rootNode->type = Node::TYPE_FREE;
    10577  rootNode->parent = VMA_NULL;
    10578  rootNode->buddy = VMA_NULL;
    10579 
    10580  m_Root = rootNode;
    10581  AddToFreeListFront(0, rootNode);
    10582 }
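
// Example: Init(size = 100 MB) sets m_UsableSize = VmaPrevPow2(100 MB) = 64 MB;
// the remaining 36 MB are reported by GetUnusableSize() and never allocated
// from. m_LevelCount then grows until nodes would fall below MIN_NODE_SIZE
// or MAX_LEVELS is reached.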
    10583 
    10584 bool VmaBlockMetadata_Buddy::Validate() const
    10585 {
    10586  // Validate tree.
    10587  ValidationContext ctx;
    10588  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    10589  {
    10590  VMA_VALIDATE(false && "ValidateNode failed.");
    10591  }
    10592  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    10593  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
    10594 
    10595  // Validate free node lists.
    10596  for(uint32_t level = 0; level < m_LevelCount; ++level)
    10597  {
    10598  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
    10599  m_FreeList[level].front->free.prev == VMA_NULL);
    10600 
    10601  for(Node* node = m_FreeList[level].front;
    10602  node != VMA_NULL;
    10603  node = node->free.next)
    10604  {
    10605  VMA_VALIDATE(node->type == Node::TYPE_FREE);
    10606 
    10607  if(node->free.next == VMA_NULL)
    10608  {
    10609  VMA_VALIDATE(m_FreeList[level].back == node);
    10610  }
    10611  else
    10612  {
    10613  VMA_VALIDATE(node->free.next->free.prev == node);
    10614  }
    10615  }
    10616  }
    10617 
10618  // Validate that free lists at higher levels are empty.
    10619  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    10620  {
    10621  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    10622  }
    10623 
    10624  return true;
    10625 }
    10626 
    10627 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
    10628 {
    10629  for(uint32_t level = 0; level < m_LevelCount; ++level)
    10630  {
    10631  if(m_FreeList[level].front != VMA_NULL)
    10632  {
    10633  return LevelToNodeSize(level);
    10634  }
    10635  }
    10636  return 0;
    10637 }
    10638 
    10639 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    10640 {
    10641  const VkDeviceSize unusableSize = GetUnusableSize();
    10642 
    10643  outInfo.blockCount = 1;
    10644 
    10645  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
    10646  outInfo.usedBytes = outInfo.unusedBytes = 0;
    10647 
    10648  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
    10649  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
    10650  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
    10651 
    10652  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
    10653 
    10654  if(unusableSize > 0)
    10655  {
    10656  ++outInfo.unusedRangeCount;
    10657  outInfo.unusedBytes += unusableSize;
    10658  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
    10659  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
    10660  }
    10661 }
    10662 
    10663 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
    10664 {
    10665  const VkDeviceSize unusableSize = GetUnusableSize();
    10666 
    10667  inoutStats.size += GetSize();
    10668  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
    10669  inoutStats.allocationCount += m_AllocationCount;
    10670  inoutStats.unusedRangeCount += m_FreeCount;
    10671  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    10672 
    10673  if(unusableSize > 0)
    10674  {
    10675  ++inoutStats.unusedRangeCount;
    10676  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
    10677  }
    10678 }
    10679 
    10680 #if VMA_STATS_STRING_ENABLED
    10681 
    10682 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
    10683 {
    10684  // TODO optimize
    10685  VmaStatInfo stat;
    10686  CalcAllocationStatInfo(stat);
    10687 
    10688  PrintDetailedMap_Begin(
    10689  json,
    10690  stat.unusedBytes,
    10691  stat.allocationCount,
    10692  stat.unusedRangeCount);
    10693 
    10694  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
    10695 
    10696  const VkDeviceSize unusableSize = GetUnusableSize();
    10697  if(unusableSize > 0)
    10698  {
    10699  PrintDetailedMap_UnusedRange(json,
    10700  m_UsableSize, // offset
    10701  unusableSize); // size
    10702  }
    10703 
    10704  PrintDetailedMap_End(json);
    10705 }
    10706 
    10707 #endif // #if VMA_STATS_STRING_ENABLED
    10708 
    10709 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
    10710  uint32_t currentFrameIndex,
    10711  uint32_t frameInUseCount,
    10712  VkDeviceSize bufferImageGranularity,
    10713  VkDeviceSize allocSize,
    10714  VkDeviceSize allocAlignment,
    10715  bool upperAddress,
    10716  VmaSuballocationType allocType,
    10717  bool canMakeOtherLost,
    10718  uint32_t strategy,
    10719  VmaAllocationRequest* pAllocationRequest)
    10720 {
    10721  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
    10722 
    10723  // Simple way to respect bufferImageGranularity. May be optimized some day.
    10724  // Whenever it might be an OPTIMAL image...
    10725  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
    10726  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    10727  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
    10728  {
    10729  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
    10730  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
    10731  }
    10732 
    10733  if(allocSize > m_UsableSize)
    10734  {
    10735  return false;
    10736  }
    10737 
    10738  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    10739  for(uint32_t level = targetLevel + 1; level--; )
    10740  {
    10741  for(Node* freeNode = m_FreeList[level].front;
    10742  freeNode != VMA_NULL;
    10743  freeNode = freeNode->free.next)
    10744  {
    10745  if(freeNode->offset % allocAlignment == 0)
    10746  {
    10747  pAllocationRequest->type = VmaAllocationRequestType::Normal;
    10748  pAllocationRequest->offset = freeNode->offset;
    10749  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
    10750  pAllocationRequest->sumItemSize = 0;
    10751  pAllocationRequest->itemsToMakeLostCount = 0;
    10752  pAllocationRequest->customData = (void*)(uintptr_t)level;
    10753  return true;
    10754  }
    10755  }
    10756  }
    10757 
    10758  return false;
    10759 }
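
// Note on the loop above: it starts at targetLevel (the smallest node size
// that still fits allocSize) and walks up toward level 0, so the
// tightest-fitting free node wins. Example: a 40 MB request in a 256 MB
// block maps to a targetLevel with 64 MB nodes; larger 128 MB or 256 MB
// nodes are considered only if no suitably aligned 64 MB node is free.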
    10760 
    10761 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
    10762  uint32_t currentFrameIndex,
    10763  uint32_t frameInUseCount,
    10764  VmaAllocationRequest* pAllocationRequest)
    10765 {
    10766  /*
    10767  Lost allocations are not supported in buddy allocator at the moment.
    10768  Support might be added in the future.
    10769  */
    10770  return pAllocationRequest->itemsToMakeLostCount == 0;
    10771 }
    10772 
    10773 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    10774 {
    10775  /*
    10776  Lost allocations are not supported in buddy allocator at the moment.
    10777  Support might be added in the future.
    10778  */
    10779  return 0;
    10780 }
    10781 
    10782 void VmaBlockMetadata_Buddy::Alloc(
    10783  const VmaAllocationRequest& request,
    10784  VmaSuballocationType type,
    10785  VkDeviceSize allocSize,
    10786  VmaAllocation hAllocation)
    10787 {
    10788  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
    10789 
    10790  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    10791  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
    10792 
    10793  Node* currNode = m_FreeList[currLevel].front;
    10794  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    10795  while(currNode->offset != request.offset)
    10796  {
    10797  currNode = currNode->free.next;
    10798  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    10799  }
    10800 
    10801  // Go down, splitting free nodes.
    10802  while(currLevel < targetLevel)
    10803  {
10804  // currNode is already the first free node at currLevel.
10805  // Remove it from the list of free nodes at this level.
    10806  RemoveFromFreeList(currLevel, currNode);
    10807 
    10808  const uint32_t childrenLevel = currLevel + 1;
    10809 
    10810  // Create two free sub-nodes.
    10811  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
    10812  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
    10813 
    10814  leftChild->offset = currNode->offset;
    10815  leftChild->type = Node::TYPE_FREE;
    10816  leftChild->parent = currNode;
    10817  leftChild->buddy = rightChild;
    10818 
    10819  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
    10820  rightChild->type = Node::TYPE_FREE;
    10821  rightChild->parent = currNode;
    10822  rightChild->buddy = leftChild;
    10823 
    10824  // Convert current currNode to split type.
    10825  currNode->type = Node::TYPE_SPLIT;
    10826  currNode->split.leftChild = leftChild;
    10827 
    10828  // Add child nodes to free list. Order is important!
    10829  AddToFreeListFront(childrenLevel, rightChild);
    10830  AddToFreeListFront(childrenLevel, leftChild);
    10831 
    10832  ++m_FreeCount;
    10833  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
    10834  ++currLevel;
    10835  currNode = m_FreeList[currLevel].front;
    10836 
    10837  /*
    10838  We can be sure that currNode, as left child of node previously split,
10839  also fulfills the alignment requirement.
    10840  */
    10841  }
    10842 
    10843  // Remove from free list.
    10844  VMA_ASSERT(currLevel == targetLevel &&
    10845  currNode != VMA_NULL &&
    10846  currNode->type == Node::TYPE_FREE);
    10847  RemoveFromFreeList(currLevel, currNode);
    10848 
    10849  // Convert to allocation node.
    10850  currNode->type = Node::TYPE_ALLOCATION;
    10851  currNode->allocation.alloc = hAllocation;
    10852 
    10853  ++m_AllocationCount;
    10854  --m_FreeCount;
    10855  m_SumFreeSize -= allocSize;
    10856 }
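
// Example of the splitting loop above: a 64 MB request in a fully free
// 256 MB block starts at currLevel = 0 with targetLevel = 2. The root is
// split into two 128 MB children, the left child into two 64 MB children;
// the left 64 MB node becomes the allocation while its buddy and the right
// 128 MB node remain on their respective free lists.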
    10857 
    10858 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
    10859 {
    10860  if(node->type == Node::TYPE_SPLIT)
    10861  {
    10862  DeleteNode(node->split.leftChild->buddy);
    10863  DeleteNode(node->split.leftChild);
    10864  }
    10865 
    10866  vma_delete(GetAllocationCallbacks(), node);
    10867 }
    10868 
    10869 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
    10870 {
    10871  VMA_VALIDATE(level < m_LevelCount);
    10872  VMA_VALIDATE(curr->parent == parent);
    10873  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
    10874  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
    10875  switch(curr->type)
    10876  {
    10877  case Node::TYPE_FREE:
    10878  // curr->free.prev, next are validated separately.
    10879  ctx.calculatedSumFreeSize += levelNodeSize;
    10880  ++ctx.calculatedFreeCount;
    10881  break;
    10882  case Node::TYPE_ALLOCATION:
    10883  ++ctx.calculatedAllocationCount;
10884  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
10885  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
    10886  break;
    10887  case Node::TYPE_SPLIT:
    10888  {
    10889  const uint32_t childrenLevel = level + 1;
    10890  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
    10891  const Node* const leftChild = curr->split.leftChild;
    10892  VMA_VALIDATE(leftChild != VMA_NULL);
    10893  VMA_VALIDATE(leftChild->offset == curr->offset);
    10894  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
    10895  {
    10896  VMA_VALIDATE(false && "ValidateNode for left child failed.");
    10897  }
    10898  const Node* const rightChild = leftChild->buddy;
    10899  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
    10900  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
    10901  {
    10902  VMA_VALIDATE(false && "ValidateNode for right child failed.");
    10903  }
    10904  }
    10905  break;
    10906  default:
    10907  return false;
    10908  }
    10909 
    10910  return true;
    10911 }
    10912 
    10913 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
    10914 {
    10915  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
    10916  uint32_t level = 0;
    10917  VkDeviceSize currLevelNodeSize = m_UsableSize;
    10918  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
    10919  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
    10920  {
    10921  ++level;
    10922  currLevelNodeSize = nextLevelNodeSize;
    10923  nextLevelNodeSize = currLevelNodeSize >> 1;
    10924  }
    10925  return level;
    10926 }
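
// Worked example: with m_UsableSize = 256 MB, AllocSizeToLevel(40 MB)
// checks 128 MB (40 <= 128 -> level 1), then 64 MB (40 <= 64 -> level 2),
// then 32 MB (40 <= 32 fails) and returns level 2, i.e. a 64 MB node.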
    10927 
    10928 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
    10929 {
    10930  // Find node and level.
    10931  Node* node = m_Root;
    10932  VkDeviceSize nodeOffset = 0;
    10933  uint32_t level = 0;
    10934  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
    10935  while(node->type == Node::TYPE_SPLIT)
    10936  {
    10937  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
    10938  if(offset < nodeOffset + nextLevelSize)
    10939  {
    10940  node = node->split.leftChild;
    10941  }
    10942  else
    10943  {
    10944  node = node->split.leftChild->buddy;
    10945  nodeOffset += nextLevelSize;
    10946  }
    10947  ++level;
    10948  levelNodeSize = nextLevelSize;
    10949  }
    10950 
    10951  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
    10952  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
    10953 
    10954  ++m_FreeCount;
    10955  --m_AllocationCount;
    10956  m_SumFreeSize += alloc->GetSize();
    10957 
    10958  node->type = Node::TYPE_FREE;
    10959 
    10960  // Join free nodes if possible.
    10961  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
    10962  {
    10963  RemoveFromFreeList(level, node->buddy);
    10964  Node* const parent = node->parent;
    10965 
    10966  vma_delete(GetAllocationCallbacks(), node->buddy);
    10967  vma_delete(GetAllocationCallbacks(), node);
    10968  parent->type = Node::TYPE_FREE;
    10969 
    10970  node = parent;
    10971  --level;
    10972  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
    10973  --m_FreeCount;
    10974  }
    10975 
    10976  AddToFreeListFront(level, node);
    10977 }
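
// Example of the join loop above: freeing a 64 MB node whose buddy is also
// free removes that buddy from its free list, deletes both children and
// marks their 128 MB parent free; if the parent's buddy is free as well,
// the merge repeats one level up, possibly until the whole block is a
// single free root node again.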
    10978 
    10979 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
    10980 {
    10981  switch(node->type)
    10982  {
    10983  case Node::TYPE_FREE:
    10984  ++outInfo.unusedRangeCount;
    10985  outInfo.unusedBytes += levelNodeSize;
    10986  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
10987  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
    10988  break;
    10989  case Node::TYPE_ALLOCATION:
    10990  {
    10991  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    10992  ++outInfo.allocationCount;
    10993  outInfo.usedBytes += allocSize;
    10994  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
10995  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
    10996 
    10997  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
    10998  if(unusedRangeSize > 0)
    10999  {
    11000  ++outInfo.unusedRangeCount;
    11001  outInfo.unusedBytes += unusedRangeSize;
    11002  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
11003  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    11004  }
    11005  }
    11006  break;
    11007  case Node::TYPE_SPLIT:
    11008  {
    11009  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    11010  const Node* const leftChild = node->split.leftChild;
    11011  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
    11012  const Node* const rightChild = leftChild->buddy;
    11013  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
    11014  }
    11015  break;
    11016  default:
    11017  VMA_ASSERT(0);
    11018  }
    11019 }
    11020 
    11021 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
    11022 {
    11023  VMA_ASSERT(node->type == Node::TYPE_FREE);
    11024 
    11025  // List is empty.
    11026  Node* const frontNode = m_FreeList[level].front;
    11027  if(frontNode == VMA_NULL)
    11028  {
    11029  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
    11030  node->free.prev = node->free.next = VMA_NULL;
    11031  m_FreeList[level].front = m_FreeList[level].back = node;
    11032  }
    11033  else
    11034  {
    11035  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
    11036  node->free.prev = VMA_NULL;
    11037  node->free.next = frontNode;
    11038  frontNode->free.prev = node;
    11039  m_FreeList[level].front = node;
    11040  }
    11041 }
    11042 
    11043 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
    11044 {
    11045  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
    11046 
    11047  // It is at the front.
    11048  if(node->free.prev == VMA_NULL)
    11049  {
    11050  VMA_ASSERT(m_FreeList[level].front == node);
    11051  m_FreeList[level].front = node->free.next;
    11052  }
    11053  else
    11054  {
    11055  Node* const prevFreeNode = node->free.prev;
    11056  VMA_ASSERT(prevFreeNode->free.next == node);
    11057  prevFreeNode->free.next = node->free.next;
    11058  }
    11059 
    11060  // It is at the back.
    11061  if(node->free.next == VMA_NULL)
    11062  {
    11063  VMA_ASSERT(m_FreeList[level].back == node);
    11064  m_FreeList[level].back = node->free.prev;
    11065  }
    11066  else
    11067  {
    11068  Node* const nextFreeNode = node->free.next;
    11069  VMA_ASSERT(nextFreeNode->free.prev == node);
    11070  nextFreeNode->free.prev = node->free.prev;
    11071  }
    11072 }
    11073 
    11074 #if VMA_STATS_STRING_ENABLED
    11075 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
    11076 {
    11077  switch(node->type)
    11078  {
    11079  case Node::TYPE_FREE:
    11080  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
    11081  break;
    11082  case Node::TYPE_ALLOCATION:
    11083  {
    11084  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
    11085  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    11086  if(allocSize < levelNodeSize)
    11087  {
    11088  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
    11089  }
    11090  }
    11091  break;
    11092  case Node::TYPE_SPLIT:
    11093  {
    11094  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    11095  const Node* const leftChild = node->split.leftChild;
    11096  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
    11097  const Node* const rightChild = leftChild->buddy;
    11098  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
    11099  }
    11100  break;
    11101  default:
    11102  VMA_ASSERT(0);
    11103  }
    11104 }
    11105 #endif // #if VMA_STATS_STRING_ENABLED
    11106 
    11107 
11108  ////////////////////////////////////////////////////////////////////////////////
11109  // class VmaDeviceMemoryBlock
    11110 
    11111 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    11112  m_pMetadata(VMA_NULL),
    11113  m_MemoryTypeIndex(UINT32_MAX),
    11114  m_Id(0),
    11115  m_hMemory(VK_NULL_HANDLE),
    11116  m_MapCount(0),
    11117  m_pMappedData(VMA_NULL)
    11118 {
    11119 }
    11120 
    11121 void VmaDeviceMemoryBlock::Init(
    11122  VmaAllocator hAllocator,
    11123  VmaPool hParentPool,
    11124  uint32_t newMemoryTypeIndex,
    11125  VkDeviceMemory newMemory,
    11126  VkDeviceSize newSize,
    11127  uint32_t id,
    11128  uint32_t algorithm)
    11129 {
    11130  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    11131 
    11132  m_hParentPool = hParentPool;
    11133  m_MemoryTypeIndex = newMemoryTypeIndex;
    11134  m_Id = id;
    11135  m_hMemory = newMemory;
    11136 
    11137  switch(algorithm)
    11138  {
11139  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
11140  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
11141  break;
11142  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
11143  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
    11144  break;
    11145  default:
    11146  VMA_ASSERT(0);
    11147  // Fall-through.
    11148  case 0:
    11149  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
    11150  }
    11151  m_pMetadata->Init(newSize);
    11152 }
    11153 
    11154 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
    11155 {
    11156  // This is the most important assert in the entire library.
    11157  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    11158  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
    11159 
    11160  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    11161  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    11162  m_hMemory = VK_NULL_HANDLE;
    11163 
    11164  vma_delete(allocator, m_pMetadata);
    11165  m_pMetadata = VMA_NULL;
    11166 }
    11167 
    11168 bool VmaDeviceMemoryBlock::Validate() const
    11169 {
    11170  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
    11171  (m_pMetadata->GetSize() != 0));
    11172 
    11173  return m_pMetadata->Validate();
    11174 }
    11175 
    11176 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
    11177 {
    11178  void* pData = nullptr;
    11179  VkResult res = Map(hAllocator, 1, &pData);
    11180  if(res != VK_SUCCESS)
    11181  {
    11182  return res;
    11183  }
    11184 
    11185  res = m_pMetadata->CheckCorruption(pData);
    11186 
    11187  Unmap(hAllocator, 1);
    11188 
    11189  return res;
    11190 }
    11191 
    11192 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
    11193 {
    11194  if(count == 0)
    11195  {
    11196  return VK_SUCCESS;
    11197  }
    11198 
    11199  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    11200  if(m_MapCount != 0)
    11201  {
    11202  m_MapCount += count;
    11203  VMA_ASSERT(m_pMappedData != VMA_NULL);
    11204  if(ppData != VMA_NULL)
    11205  {
    11206  *ppData = m_pMappedData;
    11207  }
    11208  return VK_SUCCESS;
    11209  }
    11210  else
    11211  {
    11212  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    11213  hAllocator->m_hDevice,
    11214  m_hMemory,
    11215  0, // offset
    11216  VK_WHOLE_SIZE,
    11217  0, // flags
    11218  &m_pMappedData);
    11219  if(result == VK_SUCCESS)
    11220  {
    11221  if(ppData != VMA_NULL)
    11222  {
    11223  *ppData = m_pMappedData;
    11224  }
    11225  m_MapCount = count;
    11226  }
    11227  return result;
    11228  }
    11229 }
    11230 
    11231 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
    11232 {
    11233  if(count == 0)
    11234  {
    11235  return;
    11236  }
    11237 
    11238  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    11239  if(m_MapCount >= count)
    11240  {
    11241  m_MapCount -= count;
    11242  if(m_MapCount == 0)
    11243  {
    11244  m_pMappedData = VMA_NULL;
    11245  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
    11246  }
    11247  }
    11248  else
    11249  {
    11250  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    11251  }
    11252 }
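
// Usage sketch for the reference-counted mapping above (hypothetical
// sequence, not taken from the library's tests):
//     block->Map(hAllocator, 1, &ptr);  // calls vkMapMemory, m_MapCount = 1
//     block->Map(hAllocator, 2, &ptr);  // reuses mapping,    m_MapCount = 3
//     block->Unmap(hAllocator, 3);      // m_MapCount = 0 -> vkUnmapMemory
// Only the 0 -> nonzero transition maps and only nonzero -> 0 unmaps, so
// nested users can safely share one persistent mapping of the same block.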
    11253 
    11254 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    11255 {
    11256  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    11257  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    11258 
    11259  void* pData;
    11260  VkResult res = Map(hAllocator, 1, &pData);
    11261  if(res != VK_SUCCESS)
    11262  {
    11263  return res;
    11264  }
    11265 
    11266  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
    11267  VmaWriteMagicValue(pData, allocOffset + allocSize);
    11268 
    11269  Unmap(hAllocator, 1);
    11270 
    11271  return VK_SUCCESS;
    11272 }
    11273 
    11274 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    11275 {
    11276  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    11277  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    11278 
    11279  void* pData;
    11280  VkResult res = Map(hAllocator, 1, &pData);
    11281  if(res != VK_SUCCESS)
    11282  {
    11283  return res;
    11284  }
    11285 
    11286  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
    11287  {
    11288  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
    11289  }
    11290  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
    11291  {
    11292  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
    11293  }
    11294 
    11295  Unmap(hAllocator, 1);
    11296 
    11297  return VK_SUCCESS;
    11298 }
    11299 
    11300 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    11301  const VmaAllocator hAllocator,
    11302  const VmaAllocation hAllocation,
    11303  VkDeviceSize allocationLocalOffset,
    11304  VkBuffer hBuffer,
    11305  const void* pNext)
    11306 {
    11307  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    11308  hAllocation->GetBlock() == this);
    11309  VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
    11310  "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
    11311  const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
    11312  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    11313  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    11314  return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext);
    11315 }
    11316 
    11317 VkResult VmaDeviceMemoryBlock::BindImageMemory(
    11318  const VmaAllocator hAllocator,
    11319  const VmaAllocation hAllocation,
    11320  VkDeviceSize allocationLocalOffset,
    11321  VkImage hImage,
    11322  const void* pNext)
    11323 {
    11324  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    11325  hAllocation->GetBlock() == this);
    11326  VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
    11327  "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
    11328  const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
    11329  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    11330  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    11331  return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext);
    11332 }
    11333 
    11334 static void InitStatInfo(VmaStatInfo& outInfo)
    11335 {
    11336  memset(&outInfo, 0, sizeof(outInfo));
    11337  outInfo.allocationSizeMin = UINT64_MAX;
    11338  outInfo.unusedRangeSizeMin = UINT64_MAX;
    11339 }
    11340 
    11341 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
    11342 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
    11343 {
    11344  inoutInfo.blockCount += srcInfo.blockCount;
    11345  inoutInfo.allocationCount += srcInfo.allocationCount;
    11346  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    11347  inoutInfo.usedBytes += srcInfo.usedBytes;
    11348  inoutInfo.unusedBytes += srcInfo.unusedBytes;
    11349  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    11350  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    11351  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    11352  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
    11353 }
    11354 
    11355 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    11356 {
    11357  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    11358  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    11359  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    11360  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    11361 }
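
// Example of the rounding above: usedBytes = 10 with allocationCount = 4
// gives VmaRoundDiv(10, 4) = (10 + 2) / 4 = 3, i.e. the average is rounded
// to nearest rather than truncated to 2.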
    11362 
    11363 VmaPool_T::VmaPool_T(
    11364  VmaAllocator hAllocator,
    11365  const VmaPoolCreateInfo& createInfo,
    11366  VkDeviceSize preferredBlockSize) :
    11367  m_BlockVector(
    11368  hAllocator,
    11369  this, // hParentPool
    11370  createInfo.memoryTypeIndex,
    11371  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
    11372  createInfo.minBlockCount,
    11373  createInfo.maxBlockCount,
    11374  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
    11375  createInfo.frameInUseCount,
    11376  true, // isCustomPool
    11377  createInfo.blockSize != 0, // explicitBlockSize
    11378  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    11379  m_Id(0)
    11380 {
    11381 }
    11382 
    11383 VmaPool_T::~VmaPool_T()
    11384 {
    11385 }
    11386 
    11387 #if VMA_STATS_STRING_ENABLED
    11388 
    11389 #endif // #if VMA_STATS_STRING_ENABLED
    11390 
    11391 VmaBlockVector::VmaBlockVector(
    11392  VmaAllocator hAllocator,
    11393  VmaPool hParentPool,
    11394  uint32_t memoryTypeIndex,
    11395  VkDeviceSize preferredBlockSize,
    11396  size_t minBlockCount,
    11397  size_t maxBlockCount,
    11398  VkDeviceSize bufferImageGranularity,
    11399  uint32_t frameInUseCount,
    11400  bool isCustomPool,
    11401  bool explicitBlockSize,
    11402  uint32_t algorithm) :
    11403  m_hAllocator(hAllocator),
    11404  m_hParentPool(hParentPool),
    11405  m_MemoryTypeIndex(memoryTypeIndex),
    11406  m_PreferredBlockSize(preferredBlockSize),
    11407  m_MinBlockCount(minBlockCount),
    11408  m_MaxBlockCount(maxBlockCount),
    11409  m_BufferImageGranularity(bufferImageGranularity),
    11410  m_FrameInUseCount(frameInUseCount),
    11411  m_IsCustomPool(isCustomPool),
    11412  m_ExplicitBlockSize(explicitBlockSize),
    11413  m_Algorithm(algorithm),
    11414  m_HasEmptyBlock(false),
    11415  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    11416  m_NextBlockId(0)
    11417 {
    11418 }
    11419 
    11420 VmaBlockVector::~VmaBlockVector()
    11421 {
    11422  for(size_t i = m_Blocks.size(); i--; )
    11423  {
    11424  m_Blocks[i]->Destroy(m_hAllocator);
    11425  vma_delete(m_hAllocator, m_Blocks[i]);
    11426  }
    11427 }
    11428 
    11429 VkResult VmaBlockVector::CreateMinBlocks()
    11430 {
    11431  for(size_t i = 0; i < m_MinBlockCount; ++i)
    11432  {
    11433  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    11434  if(res != VK_SUCCESS)
    11435  {
    11436  return res;
    11437  }
    11438  }
    11439  return VK_SUCCESS;
    11440 }
    11441 
    11442 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    11443 {
    11444  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    11445 
    11446  const size_t blockCount = m_Blocks.size();
    11447 
    11448  pStats->size = 0;
    11449  pStats->unusedSize = 0;
    11450  pStats->allocationCount = 0;
    11451  pStats->unusedRangeCount = 0;
    11452  pStats->unusedRangeSizeMax = 0;
    11453  pStats->blockCount = blockCount;
    11454 
    11455  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    11456  {
    11457  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11458  VMA_ASSERT(pBlock);
    11459  VMA_HEAVY_ASSERT(pBlock->Validate());
    11460  pBlock->m_pMetadata->AddPoolStats(*pStats);
    11461  }
    11462 }
    11463 
    11464 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
    11465 {
    11466  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    11467  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
    11468  (VMA_DEBUG_MARGIN > 0) &&
    11469  (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
    11470  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
    11471 }
    11472 
    11473 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    11474 
    11475 VkResult VmaBlockVector::Allocate(
    11476  uint32_t currentFrameIndex,
    11477  VkDeviceSize size,
    11478  VkDeviceSize alignment,
    11479  const VmaAllocationCreateInfo& createInfo,
    11480  VmaSuballocationType suballocType,
    11481  size_t allocationCount,
    11482  VmaAllocation* pAllocations)
    11483 {
    11484  size_t allocIndex;
    11485  VkResult res = VK_SUCCESS;
    11486 
    11487  if(IsCorruptionDetectionEnabled())
    11488  {
    11489  size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
    11490  alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
    11491  }
    11492 
    11493  {
    11494  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
    11495  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    11496  {
    11497  res = AllocatePage(
    11498  currentFrameIndex,
    11499  size,
    11500  alignment,
    11501  createInfo,
    11502  suballocType,
    11503  pAllocations + allocIndex);
    11504  if(res != VK_SUCCESS)
    11505  {
    11506  break;
    11507  }
    11508  }
    11509  }
    11510 
    11511  if(res != VK_SUCCESS)
    11512  {
    11513  // Free all already created allocations.
    11514  while(allocIndex--)
    11515  {
    11516  Free(pAllocations[allocIndex]);
    11517  }
    11518  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    11519  }
    11520 
    11521  return res;
    11522 }
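
// Note: this function is all-or-nothing. If any page fails, every
// allocation created so far in this call is freed and the whole output
// array is zeroed, so the caller never observes a partially filled
// pAllocations array.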
    11523 
    11524 VkResult VmaBlockVector::AllocatePage(
    11525  uint32_t currentFrameIndex,
    11526  VkDeviceSize size,
    11527  VkDeviceSize alignment,
    11528  const VmaAllocationCreateInfo& createInfo,
    11529  VmaSuballocationType suballocType,
    11530  VmaAllocation* pAllocation)
    11531 {
    11532  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    11533  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    11534  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    11535  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    11536  const bool canCreateNewBlock =
    11537  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
    11538  (m_Blocks.size() < m_MaxBlockCount);
    11539  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
    11540 
    11541  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
    11542  // Which in turn is available only when maxBlockCount = 1.
    11543  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
    11544  {
    11545  canMakeOtherLost = false;
    11546  }
    11547 
    11548  // Upper address can only be used with linear allocator and within single memory block.
    11549  if(isUpperAddress &&
    11550  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
    11551  {
    11552  return VK_ERROR_FEATURE_NOT_PRESENT;
    11553  }
    11554 
    11555  // Validate strategy.
    11556  switch(strategy)
    11557  {
11558  case 0:
11559  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
11560  break;
11561  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
11562  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
11563  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
11564  break;
    11565  default:
    11566  return VK_ERROR_FEATURE_NOT_PRESENT;
    11567  }
    11568 
11569  // Early reject: requested allocation size is larger than the maximum block size for this block vector.
    11570  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
    11571  {
    11572  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    11573  }
    11574 
    11575  /*
11576  Under certain conditions, this whole section can be skipped for optimization, so
    11577  we move on directly to trying to allocate with canMakeOtherLost. That's the case
    11578  e.g. for custom pools with linear algorithm.
    11579  */
    11580  if(!canMakeOtherLost || canCreateNewBlock)
    11581  {
    11582  // 1. Search existing allocations. Try to allocate without making other allocations lost.
11583  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
11584  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
    11585 
    11586  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    11587  {
    11588  // Use only last block.
    11589  if(!m_Blocks.empty())
    11590  {
    11591  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
    11592  VMA_ASSERT(pCurrBlock);
    11593  VkResult res = AllocateFromBlock(
    11594  pCurrBlock,
    11595  currentFrameIndex,
    11596  size,
    11597  alignment,
    11598  allocFlagsCopy,
    11599  createInfo.pUserData,
    11600  suballocType,
    11601  strategy,
    11602  pAllocation);
    11603  if(res == VK_SUCCESS)
    11604  {
    11605  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
    11606  return VK_SUCCESS;
    11607  }
    11608  }
    11609  }
    11610  else
    11611  {
11612  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
11613  {
    11614  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    11615  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    11616  {
    11617  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    11618  VMA_ASSERT(pCurrBlock);
    11619  VkResult res = AllocateFromBlock(
    11620  pCurrBlock,
    11621  currentFrameIndex,
    11622  size,
    11623  alignment,
    11624  allocFlagsCopy,
    11625  createInfo.pUserData,
    11626  suballocType,
    11627  strategy,
    11628  pAllocation);
    11629  if(res == VK_SUCCESS)
    11630  {
    11631  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    11632  return VK_SUCCESS;
    11633  }
    11634  }
    11635  }
    11636  else // WORST_FIT, FIRST_FIT
    11637  {
    11638  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    11639  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    11640  {
    11641  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    11642  VMA_ASSERT(pCurrBlock);
    11643  VkResult res = AllocateFromBlock(
    11644  pCurrBlock,
    11645  currentFrameIndex,
    11646  size,
    11647  alignment,
    11648  allocFlagsCopy,
    11649  createInfo.pUserData,
    11650  suballocType,
    11651  strategy,
    11652  pAllocation);
    11653  if(res == VK_SUCCESS)
    11654  {
    11655  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    11656  return VK_SUCCESS;
    11657  }
    11658  }
    11659  }
    11660  }
    11661 
    11662  // 2. Try to create new block.
    11663  if(canCreateNewBlock)
    11664  {
    11665  // Calculate optimal size for new block.
    11666  VkDeviceSize newBlockSize = m_PreferredBlockSize;
    11667  uint32_t newBlockSizeShift = 0;
    11668  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
    11669 
    11670  if(!m_ExplicitBlockSize)
    11671  {
    11672  // Allocate 1/8, 1/4, 1/2 as first blocks.
    11673  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
    11674  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
    11675  {
    11676  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    11677  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
    11678  {
    11679  newBlockSize = smallerNewBlockSize;
    11680  ++newBlockSizeShift;
    11681  }
    11682  else
    11683  {
    11684  break;
    11685  }
    11686  }
    11687  }
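
// Example of the heuristic above: with m_PreferredBlockSize = 256 MB, no
// existing blocks (maxExistingBlockSize = 0) and size = 10 MB, the loop
// halves 256 -> 128 -> 64 -> 32 MB (each candidate still >= size * 2), so
// the first block is created with just 32 MB. As blocks accumulate,
// maxExistingBlockSize grows and new blocks approach the preferred size.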
    11688 
    11689  size_t newBlockIndex = 0;
    11690  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
    11691  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
    11692  if(!m_ExplicitBlockSize)
    11693  {
    11694  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
    11695  {
    11696  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    11697  if(smallerNewBlockSize >= size)
    11698  {
    11699  newBlockSize = smallerNewBlockSize;
    11700  ++newBlockSizeShift;
    11701  res = CreateBlock(newBlockSize, &newBlockIndex);
    11702  }
    11703  else
    11704  {
    11705  break;
    11706  }
    11707  }
    11708  }
    11709 
    11710  if(res == VK_SUCCESS)
    11711  {
    11712  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
    11713  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
    11714 
    11715  res = AllocateFromBlock(
    11716  pBlock,
    11717  currentFrameIndex,
    11718  size,
    11719  alignment,
    11720  allocFlagsCopy,
    11721  createInfo.pUserData,
    11722  suballocType,
    11723  strategy,
    11724  pAllocation);
    11725  if(res == VK_SUCCESS)
    11726  {
    11727  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
    11728  return VK_SUCCESS;
    11729  }
    11730  else
    11731  {
    11732  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
    11733  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    11734  }
    11735  }
    11736  }
    11737  }
    11738 
    11739  // 3. Try to allocate from existing blocks with making other allocations lost.
    11740  if(canMakeOtherLost)
    11741  {
    11742  uint32_t tryIndex = 0;
    11743  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
    11744  {
    11745  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
    11746  VmaAllocationRequest bestRequest = {};
    11747  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
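           // Each try scans the blocks for the cheapest allocation request, where the
           // cost (CalcCost()) reflects how much existing data would have to be made
           // lost; a cost of 0 means the request fits into pure free space.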
    11748 
    11749  // 1. Search existing allocations.
    11750  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
    11751  {
    11752  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    11753  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    11754  {
    11755  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    11756  VMA_ASSERT(pCurrBlock);
    11757  VmaAllocationRequest currRequest = {};
    11758  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    11759  currentFrameIndex,
    11760  m_FrameInUseCount,
    11761  m_BufferImageGranularity,
    11762  size,
    11763  alignment,
    11764  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    11765  suballocType,
    11766  canMakeOtherLost,
    11767  strategy,
    11768  &currRequest))
    11769  {
    11770  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    11771  if(pBestRequestBlock == VMA_NULL ||
    11772  currRequestCost < bestRequestCost)
    11773  {
    11774  pBestRequestBlock = pCurrBlock;
    11775  bestRequest = currRequest;
    11776  bestRequestCost = currRequestCost;
    11777 
    11778  if(bestRequestCost == 0)
    11779  {
    11780  break;
    11781  }
    11782  }
    11783  }
    11784  }
    11785  }
    11786  else // WORST_FIT, FIRST_FIT
    11787  {
    11788  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    11789  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    11790  {
    11791  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    11792  VMA_ASSERT(pCurrBlock);
    11793  VmaAllocationRequest currRequest = {};
    11794  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    11795  currentFrameIndex,
    11796  m_FrameInUseCount,
    11797  m_BufferImageGranularity,
    11798  size,
    11799  alignment,
    11800  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    11801  suballocType,
    11802  canMakeOtherLost,
    11803  strategy,
    11804  &currRequest))
    11805  {
    11806  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    11807  if(pBestRequestBlock == VMA_NULL ||
    11808  currRequestCost < bestRequestCost ||
    11809  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
    11810  {
    11811  pBestRequestBlock = pCurrBlock;
    11812  bestRequest = currRequest;
    11813  bestRequestCost = currRequestCost;
    11814 
    11815  if(bestRequestCost == 0 ||
    11816  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
    11817  {
    11818  break;
    11819  }
    11820  }
    11821  }
    11822  }
    11823  }
    11824 
    11825  if(pBestRequestBlock != VMA_NULL)
    11826  {
    11827  if(mapped)
    11828  {
    11829  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
    11830  if(res != VK_SUCCESS)
    11831  {
    11832  return res;
    11833  }
    11834  }
    11835 
    11836  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
    11837  currentFrameIndex,
    11838  m_FrameInUseCount,
    11839  &bestRequest))
    11840  {
    11841  // We no longer have an empty block.
    11842  if(pBestRequestBlock->m_pMetadata->IsEmpty())
    11843  {
    11844  m_HasEmptyBlock = false;
    11845  }
    11846  // Allocate from this pBlock.
    11847  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
    11848  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
    11849  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
    11850  (*pAllocation)->InitBlockAllocation(
    11851  pBestRequestBlock,
    11852  bestRequest.offset,
    11853  alignment,
    11854  size,
    11855  suballocType,
    11856  mapped,
    11857  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    11858  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
    11859  VMA_DEBUG_LOG(" Returned from existing block");
    11860  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
    11861  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    11862  {
    11863  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    11864  }
    11865  if(IsCorruptionDetectionEnabled())
    11866  {
    11867  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
    11868  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    11869  }
    11870  return VK_SUCCESS;
    11871  }
    11872  // else: Some allocations must have been touched while we are here. Next try.
    11873  }
    11874  else
    11875  {
    11876  // Could not find place in any of the blocks - break outer loop.
    11877  break;
    11878  }
    11879  }
    11880  /* Maximum number of tries exceeded - a very unlikely event when many other
    11881  threads are simultaneously touching allocations, making it impossible to make
    11882  them lost at the same time as we try to allocate. */
    11883  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
    11884  {
    11885  return VK_ERROR_TOO_MANY_OBJECTS;
    11886  }
    11887  }
    11888 
    11889  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    11890 }
    11891 
    11892 void VmaBlockVector::Free(
    11893  VmaAllocation hAllocation)
    11894 {
    11895  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
    11896 
    11897  // Scope for lock.
    11898  {
    11899  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
    11900 
    11901  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    11902 
    11903  if(IsCorruptionDetectionEnabled())
    11904  {
    11905  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
    11906  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
    11907  }
    11908 
    11909  if(hAllocation->IsPersistentMap())
    11910  {
    11911  pBlock->Unmap(m_hAllocator, 1);
    11912  }
    11913 
    11914  pBlock->m_pMetadata->Free(hAllocation);
    11915  VMA_HEAVY_ASSERT(pBlock->Validate());
    11916 
    11917  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
    11918 
    11919  // pBlock became empty after this deallocation.
    11920  if(pBlock->m_pMetadata->IsEmpty())
    11921  {
    11922  // We already have an empty block. We don't want two, so delete this one.
    11923  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
    11924  {
    11925  pBlockToDelete = pBlock;
    11926  Remove(pBlock);
    11927  }
    11928  // We now have first empty block.
    11929  else
    11930  {
    11931  m_HasEmptyBlock = true;
    11932  }
    11933  }
    11934  // pBlock didn't become empty, but we have another empty block - find and free that one.
    11935  // (This is optional, a heuristic.)
    11936  else if(m_HasEmptyBlock)
    11937  {
    11938  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
    11939  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
    11940  {
    11941  pBlockToDelete = pLastBlock;
    11942  m_Blocks.pop_back();
    11943  m_HasEmptyBlock = false;
    11944  }
    11945  }
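           // The net effect of the two branches above: at most one fully empty block
           // is kept alive (subject to m_MinBlockCount), so a subsequent allocation
           // can reuse it without a fresh vkAllocateMemory() call.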
    11946 
    11947  IncrementallySortBlocks();
    11948  }
    11949 
    11950  // Destruction of a free block. Deferred until this point, outside of mutex
    11951  // lock, for performance reasons.
    11952  if(pBlockToDelete != VMA_NULL)
    11953  {
    11954  VMA_DEBUG_LOG(" Deleted empty allocation");
    11955  pBlockToDelete->Destroy(m_hAllocator);
    11956  vma_delete(m_hAllocator, pBlockToDelete);
    11957  }
    11958 }
    11959 
    11960 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
    11961 {
    11962  VkDeviceSize result = 0;
    11963  for(size_t i = m_Blocks.size(); i--; )
    11964  {
    11965  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
    11966  if(result >= m_PreferredBlockSize)
    11967  {
    11968  break;
    11969  }
    11970  }
    11971  return result;
    11972 }
    11973 
    11974 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    11975 {
    11976  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11977  {
    11978  if(m_Blocks[blockIndex] == pBlock)
    11979  {
    11980  VmaVectorRemove(m_Blocks, blockIndex);
    11981  return;
    11982  }
    11983  }
    11984  VMA_ASSERT(0);
    11985 }
    11986 
    11987 void VmaBlockVector::IncrementallySortBlocks()
    11988 {
    11989  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    11990  {
    11991  // Bubble sort only until first swap.
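           // Note: this performs at most one adjacent swap per call, amortizing the
           // cost of keeping m_Blocks approximately sorted by ascending free size
           // across many allocate/free operations.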
    11992  for(size_t i = 1; i < m_Blocks.size(); ++i)
    11993  {
    11994  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
    11995  {
    11996  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    11997  return;
    11998  }
    11999  }
    12000  }
    12001 }
    12002 
    12003 VkResult VmaBlockVector::AllocateFromBlock(
    12004  VmaDeviceMemoryBlock* pBlock,
    12005  uint32_t currentFrameIndex,
    12006  VkDeviceSize size,
    12007  VkDeviceSize alignment,
    12008  VmaAllocationCreateFlags allocFlags,
    12009  void* pUserData,
    12010  VmaSuballocationType suballocType,
    12011  uint32_t strategy,
    12012  VmaAllocation* pAllocation)
    12013 {
    12014  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    12015  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    12016  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    12017  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    12018 
    12019  VmaAllocationRequest currRequest = {};
    12020  if(pBlock->m_pMetadata->CreateAllocationRequest(
    12021  currentFrameIndex,
    12022  m_FrameInUseCount,
    12023  m_BufferImageGranularity,
    12024  size,
    12025  alignment,
    12026  isUpperAddress,
    12027  suballocType,
    12028  false, // canMakeOtherLost
    12029  strategy,
    12030  &currRequest))
    12031  {
    12032  // Allocate from pCurrBlock.
    12033  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
    12034 
    12035  if(mapped)
    12036  {
    12037  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
    12038  if(res != VK_SUCCESS)
    12039  {
    12040  return res;
    12041  }
    12042  }
    12043 
    12044  // We no longer have an empty Allocation.
    12045  if(pBlock->m_pMetadata->IsEmpty())
    12046  {
    12047  m_HasEmptyBlock = false;
    12048  }
    12049 
    12050  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
    12051  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
    12052  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
    12053  (*pAllocation)->InitBlockAllocation(
    12054  pBlock,
    12055  currRequest.offset,
    12056  alignment,
    12057  size,
    12058  suballocType,
    12059  mapped,
    12060  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    12061  VMA_HEAVY_ASSERT(pBlock->Validate());
    12062  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
    12063  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    12064  {
    12065  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    12066  }
    12067  if(IsCorruptionDetectionEnabled())
    12068  {
    12069  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
    12070  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    12071  }
    12072  return VK_SUCCESS;
    12073  }
    12074  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12075 }
    12076 
    12077 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    12078 {
    12079  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    12080  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    12081  allocInfo.allocationSize = blockSize;
    12082  VkDeviceMemory mem = VK_NULL_HANDLE;
    12083  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    12084  if(res < 0)
    12085  {
    12086  return res;
    12087  }
    12088 
    12089  // New VkDeviceMemory successfully created.
    12090 
    12091  // Create new Allocation for it.
    12092  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    12093  pBlock->Init(
    12094  m_hAllocator,
    12095  m_hParentPool,
    12096  m_MemoryTypeIndex,
    12097  mem,
    12098  allocInfo.allocationSize,
    12099  m_NextBlockId++,
    12100  m_Algorithm);
    12101 
    12102  m_Blocks.push_back(pBlock);
    12103  if(pNewBlockIndex != VMA_NULL)
    12104  {
    12105  *pNewBlockIndex = m_Blocks.size() - 1;
    12106  }
    12107 
    12108  return VK_SUCCESS;
    12109 }
    12110 
    12111 void VmaBlockVector::ApplyDefragmentationMovesCpu(
    12112  class VmaBlockVectorDefragmentationContext* pDefragCtx,
    12113  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
    12114 {
    12115  const size_t blockCount = m_Blocks.size();
    12116  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
    12117 
    12118  enum BLOCK_FLAG
    12119  {
    12120  BLOCK_FLAG_USED = 0x00000001,
    12121  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
    12122  };
    12123 
    12124  struct BlockInfo
    12125  {
    12126  uint32_t flags;
    12127  void* pMappedData;
    12128  };
    12129  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
    12130  blockInfo(blockCount, VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
    12131  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
    12132 
    12133  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
    12134  const size_t moveCount = moves.size();
    12135  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    12136  {
    12137  const VmaDefragmentationMove& move = moves[moveIndex];
    12138  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
    12139  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
    12140  }
    12141 
    12142  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
    12143 
    12144  // Go over all blocks. Get mapped pointer or map if necessary.
    12145  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
    12146  {
    12147  BlockInfo& currBlockInfo = blockInfo[blockIndex];
    12148  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    12149  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
    12150  {
    12151  currBlockInfo.pMappedData = pBlock->GetMappedData();
    12152  // It is not originally mapped - map it.
    12153  if(currBlockInfo.pMappedData == VMA_NULL)
    12154  {
    12155  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
    12156  if(pDefragCtx->res == VK_SUCCESS)
    12157  {
    12158  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
    12159  }
    12160  }
    12161  }
    12162  }
    12163 
    12164  // Go over all moves. Do actual data transfer.
    12165  if(pDefragCtx->res == VK_SUCCESS)
    12166  {
    12167  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
    12168  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
    12169 
    12170  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    12171  {
    12172  const VmaDefragmentationMove& move = moves[moveIndex];
    12173 
    12174  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
    12175  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
    12176 
    12177  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
    12178 
    12179  // Invalidate source.
    12180  if(isNonCoherent)
    12181  {
    12182  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
    12183  memRange.memory = pSrcBlock->GetDeviceMemory();
    12184  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
    12185  memRange.size = VMA_MIN(
    12186  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
    12187  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
    12188  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
    12189  }
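           // Example of the range math above (hypothetical numbers): with
           // nonCoherentAtomSize = 256, srcOffset = 1000 and size = 512, the
           // invalidated range starts at offset 768 and spans 768 bytes (clamped to
           // the end of the block), fully covering the bytes to be moved.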
    12190 
    12191  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
    12192  memmove(
    12193  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
    12194  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
    12195  static_cast<size_t>(move.size));
    12196 
    12197  if(IsCorruptionDetectionEnabled())
    12198  {
    12199  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
    12200  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
    12201  }
    12202 
    12203  // Flush destination.
    12204  if(isNonCoherent)
    12205  {
    12206  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
    12207  memRange.memory = pDstBlock->GetDeviceMemory();
    12208  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
    12209  memRange.size = VMA_MIN(
    12210  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
    12211  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
    12212  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
    12213  }
    12214  }
    12215  }
    12216 
    12217  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
    12218  // Regardless of pCtx->res == VK_SUCCESS.
    12219  for(size_t blockIndex = blockCount; blockIndex--; )
    12220  {
    12221  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
    12222  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
    12223  {
    12224  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    12225  pBlock->Unmap(m_hAllocator, 1);
    12226  }
    12227  }
    12228 }
    12229 
    12230 void VmaBlockVector::ApplyDefragmentationMovesGpu(
    12231  class VmaBlockVectorDefragmentationContext* pDefragCtx,
    12232  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    12233  VkCommandBuffer commandBuffer)
    12234 {
    12235  const size_t blockCount = m_Blocks.size();
    12236 
    12237  pDefragCtx->blockContexts.resize(blockCount);
    12238  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
    12239 
    12240  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
    12241  const size_t moveCount = moves.size();
    12242  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    12243  {
    12244  const VmaDefragmentationMove& move = moves[moveIndex];
    12245  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
    12246  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
    12247  }
    12248 
    12249  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
    12250 
    12251  // Go over all blocks. Create and bind buffer for whole block if necessary.
    12252  {
    12253  VkBufferCreateInfo bufCreateInfo;
    12254  VmaFillGpuDefragmentationBufferCreateInfo(bufCreateInfo);
    12255 
    12256  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
    12257  {
    12258  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
    12259  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    12260  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
    12261  {
    12262  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
    12263  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
    12264  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
    12265  if(pDefragCtx->res == VK_SUCCESS)
    12266  {
    12267  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
    12268  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
    12269  }
    12270  }
    12271  }
    12272  }
    12273 
    12274  // Go over all moves. Post data transfer commands to command buffer.
    12275  if(pDefragCtx->res == VK_SUCCESS)
    12276  {
    12277  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    12278  {
    12279  const VmaDefragmentationMove& move = moves[moveIndex];
    12280 
    12281  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
    12282  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
    12283 
    12284  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
    12285 
    12286  VkBufferCopy region = {
    12287  move.srcOffset,
    12288  move.dstOffset,
    12289  move.size };
    12290  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
    12291  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
    12292  }
    12293  }
    12294 
    12295  // Save buffers to defrag context for later destruction.
    12296  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
    12297  {
    12298  pDefragCtx->res = VK_NOT_READY;
    12299  }
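           // VK_NOT_READY is not an error here: it signals the caller (ultimately the
           // user of vmaDefragmentationBegin()) that the recorded commandBuffer must
           // be submitted and finished before defragmentation can be ended and the
           // temporary buffers destroyed.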
    12300 }
    12301 
    12302 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
    12303 {
    12304  m_HasEmptyBlock = false;
    12305  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    12306  {
    12307  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    12308  if(pBlock->m_pMetadata->IsEmpty())
    12309  {
    12310  if(m_Blocks.size() > m_MinBlockCount)
    12311  {
    12312  if(pDefragmentationStats != VMA_NULL)
    12313  {
    12314  ++pDefragmentationStats->deviceMemoryBlocksFreed;
    12315  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
    12316  }
    12317 
    12318  VmaVectorRemove(m_Blocks, blockIndex);
    12319  pBlock->Destroy(m_hAllocator);
    12320  vma_delete(m_hAllocator, pBlock);
    12321  }
    12322  else
    12323  {
    12324  m_HasEmptyBlock = true;
    12325  }
    12326  }
    12327  }
    12328 }
    12329 
    12330 #if VMA_STATS_STRING_ENABLED
    12331 
    12332 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
    12333 {
    12334  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    12335 
    12336  json.BeginObject();
    12337 
    12338  if(m_IsCustomPool)
    12339  {
    12340  json.WriteString("MemoryTypeIndex");
    12341  json.WriteNumber(m_MemoryTypeIndex);
    12342 
    12343  json.WriteString("BlockSize");
    12344  json.WriteNumber(m_PreferredBlockSize);
    12345 
    12346  json.WriteString("BlockCount");
    12347  json.BeginObject(true);
    12348  if(m_MinBlockCount > 0)
    12349  {
    12350  json.WriteString("Min");
    12351  json.WriteNumber((uint64_t)m_MinBlockCount);
    12352  }
    12353  if(m_MaxBlockCount < SIZE_MAX)
    12354  {
    12355  json.WriteString("Max");
    12356  json.WriteNumber((uint64_t)m_MaxBlockCount);
    12357  }
    12358  json.WriteString("Cur");
    12359  json.WriteNumber((uint64_t)m_Blocks.size());
    12360  json.EndObject();
    12361 
    12362  if(m_FrameInUseCount > 0)
    12363  {
    12364  json.WriteString("FrameInUseCount");
    12365  json.WriteNumber(m_FrameInUseCount);
    12366  }
    12367 
    12368  if(m_Algorithm != 0)
    12369  {
    12370  json.WriteString("Algorithm");
    12371  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
    12372  }
    12373  }
    12374  else
    12375  {
    12376  json.WriteString("PreferredBlockSize");
    12377  json.WriteNumber(m_PreferredBlockSize);
    12378  }
    12379 
    12380  json.WriteString("Blocks");
    12381  json.BeginObject();
    12382  for(size_t i = 0; i < m_Blocks.size(); ++i)
    12383  {
    12384  json.BeginString();
    12385  json.ContinueString(m_Blocks[i]->GetId());
    12386  json.EndString();
    12387 
    12388  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    12389  }
    12390  json.EndObject();
    12391 
    12392  json.EndObject();
    12393 }
    12394 
    12395 #endif // #if VMA_STATS_STRING_ENABLED
    12396 
    12397 void VmaBlockVector::Defragment(
    12398  class VmaBlockVectorDefragmentationContext* pCtx,
    12399  VmaDefragmentationStats* pStats,
    12400  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
    12401  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
    12402  VkCommandBuffer commandBuffer)
    12403 {
    12404  pCtx->res = VK_SUCCESS;
    12405 
    12406  const VkMemoryPropertyFlags memPropFlags =
    12407  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
    12408  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
    12409 
    12410  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
    12411  isHostVisible;
    12412  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
    12413  !IsCorruptionDetectionEnabled() &&
    12414  ((1u << m_MemoryTypeIndex) & m_hAllocator->GetGpuDefragmentationMemoryTypeBits()) != 0;
    12415 
    12416  // There are options to defragment this memory type.
    12417  if(canDefragmentOnCpu || canDefragmentOnGpu)
    12418  {
    12419  bool defragmentOnGpu;
    12420  // There is only one option to defragment this memory type.
    12421  if(canDefragmentOnGpu != canDefragmentOnCpu)
    12422  {
    12423  defragmentOnGpu = canDefragmentOnGpu;
    12424  }
    12425  // Both options are available: Heuristics to choose the best one.
    12426  else
    12427  {
    12428  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
    12429  m_hAllocator->IsIntegratedGpu();
    12430  }
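           // Rationale (heuristic): reading device-local memory through the CPU is
           // typically very slow on discrete GPUs, and on integrated GPUs a GPU-side
           // copy is presumed at least as good, so both cases prefer the GPU path.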
    12431 
    12432  bool overlappingMoveSupported = !defragmentOnGpu;
    12433 
    12434  if(m_hAllocator->m_UseMutex)
    12435  {
    12436  m_Mutex.LockWrite();
    12437  pCtx->mutexLocked = true;
    12438  }
    12439 
    12440  pCtx->Begin(overlappingMoveSupported);
    12441 
    12442  // Defragment.
    12443 
    12444  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
    12445  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
    12446  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
    12447  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
    12448  pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
    12449 
    12450  // Accumulate statistics.
    12451  if(pStats != VMA_NULL)
    12452  {
    12453  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
    12454  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
    12455  pStats->bytesMoved += bytesMoved;
    12456  pStats->allocationsMoved += allocationsMoved;
    12457  VMA_ASSERT(bytesMoved <= maxBytesToMove);
    12458  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
    12459  if(defragmentOnGpu)
    12460  {
    12461  maxGpuBytesToMove -= bytesMoved;
    12462  maxGpuAllocationsToMove -= allocationsMoved;
    12463  }
    12464  else
    12465  {
    12466  maxCpuBytesToMove -= bytesMoved;
    12467  maxCpuAllocationsToMove -= allocationsMoved;
    12468  }
    12469  }
    12470 
    12471  if(pCtx->res >= VK_SUCCESS)
    12472  {
    12473  if(defragmentOnGpu)
    12474  {
    12475  ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
    12476  }
    12477  else
    12478  {
    12479  ApplyDefragmentationMovesCpu(pCtx, moves);
    12480  }
    12481  }
    12482  }
    12483 }
    12484 
    12485 void VmaBlockVector::DefragmentationEnd(
    12486  class VmaBlockVectorDefragmentationContext* pCtx,
    12487  VmaDefragmentationStats* pStats)
    12488 {
    12489  // Destroy buffers.
    12490  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
    12491  {
    12492  VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
    12493  if(blockCtx.hBuffer)
    12494  {
    12495  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
    12496  m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
    12497  }
    12498  }
    12499 
    12500  if(pCtx->res >= VK_SUCCESS)
    12501  {
    12502  FreeEmptyBlocks(pStats);
    12503  }
    12504 
    12505  if(pCtx->mutexLocked)
    12506  {
    12507  VMA_ASSERT(m_hAllocator->m_UseMutex);
    12508  m_Mutex.UnlockWrite();
    12509  }
    12510 }
    12511 
    12512 size_t VmaBlockVector::CalcAllocationCount() const
    12513 {
    12514  size_t result = 0;
    12515  for(size_t i = 0; i < m_Blocks.size(); ++i)
    12516  {
    12517  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
    12518  }
    12519  return result;
    12520 }
    12521 
    12522 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
    12523 {
    12524  if(m_BufferImageGranularity == 1)
    12525  {
    12526  return false;
    12527  }
    12528  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
    12529  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
    12530  {
    12531  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
    12532  VMA_ASSERT(m_Algorithm == 0);
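           // The downcast below is valid only for the default (generic) metadata
           // implementation, which the assert above guarantees.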
    12533  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
    12534  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
    12535  {
    12536  return true;
    12537  }
    12538  }
    12539  return false;
    12540 }
    12541 
    12542 void VmaBlockVector::MakePoolAllocationsLost(
    12543  uint32_t currentFrameIndex,
    12544  size_t* pLostAllocationCount)
    12545 {
    12546  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
    12547  size_t lostAllocationCount = 0;
    12548  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    12549  {
    12550  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    12551  VMA_ASSERT(pBlock);
    12552  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    12553  }
    12554  if(pLostAllocationCount != VMA_NULL)
    12555  {
    12556  *pLostAllocationCount = lostAllocationCount;
    12557  }
    12558 }
    12559 
    12560 VkResult VmaBlockVector::CheckCorruption()
    12561 {
    12562  if(!IsCorruptionDetectionEnabled())
    12563  {
    12564  return VK_ERROR_FEATURE_NOT_PRESENT;
    12565  }
    12566 
    12567  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    12568  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    12569  {
    12570  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    12571  VMA_ASSERT(pBlock);
    12572  VkResult res = pBlock->CheckCorruption(m_hAllocator);
    12573  if(res != VK_SUCCESS)
    12574  {
    12575  return res;
    12576  }
    12577  }
    12578  return VK_SUCCESS;
    12579 }
    12580 
    12581 void VmaBlockVector::AddStats(VmaStats* pStats)
    12582 {
    12583  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    12584  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    12585 
    12586  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    12587 
    12588  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    12589  {
    12590  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    12591  VMA_ASSERT(pBlock);
    12592  VMA_HEAVY_ASSERT(pBlock->Validate());
    12593  VmaStatInfo allocationStatInfo;
    12594  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
    12595  VmaAddStatInfo(pStats->total, allocationStatInfo);
    12596  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    12597  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    12598  }
    12599 }
    12600 
    12601 ////////////////////////////////////////////////////////////////////////////////
    12602 // VmaDefragmentationAlgorithm_Generic members definition
    12603 
    12604 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
    12605  VmaAllocator hAllocator,
    12606  VmaBlockVector* pBlockVector,
    12607  uint32_t currentFrameIndex,
    12608  bool overlappingMoveSupported) :
    12609  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
    12610  m_AllocationCount(0),
    12611  m_AllAllocations(false),
    12612  m_BytesMoved(0),
    12613  m_AllocationsMoved(0),
    12614  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
    12615 {
    12616  // Create block info for each block.
    12617  const size_t blockCount = m_pBlockVector->m_Blocks.size();
    12618  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    12619  {
    12620  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
    12621  pBlockInfo->m_OriginalBlockIndex = blockIndex;
    12622  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
    12623  m_Blocks.push_back(pBlockInfo);
    12624  }
    12625 
    12626  // Sort them by m_pBlock pointer value.
    12627  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
    12628 }
    12629 
    12630 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
    12631 {
    12632  for(size_t i = m_Blocks.size(); i--; )
    12633  {
    12634  vma_delete(m_hAllocator, m_Blocks[i]);
    12635  }
    12636 }
    12637 
    12638 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    12639 {
    12640  // Now that we are inside VmaBlockVector::m_Mutex, we can make the final check that this allocation was not lost.
    12641  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
    12642  {
    12643  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
    12644  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
    12645  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
    12646  {
    12647  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
    12648  (*it)->m_Allocations.push_back(allocInfo);
    12649  }
    12650  else
    12651  {
    12652  VMA_ASSERT(0);
    12653  }
    12654 
    12655  ++m_AllocationCount;
    12656  }
    12657 }
    12658 
    12659 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
    12660  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    12661  VkDeviceSize maxBytesToMove,
    12662  uint32_t maxAllocationsToMove)
    12663 {
    12664  if(m_Blocks.empty())
    12665  {
    12666  return VK_SUCCESS;
    12667  }
    12668 
    12669  // This is a choice based on research.
    12670  // Option 1:
    12671  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
    12672  // Option 2:
    12673  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
    12674  // Option 3:
    12675  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
    12676 
    12677  size_t srcBlockMinIndex = 0;
    12678  // When FAST_ALGORITHM, move allocations only from the last of the blocks that contain non-movable allocations.
    12679  /*
    12680  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
    12681  {
    12682  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
    12683  if(blocksWithNonMovableCount > 0)
    12684  {
    12685  srcBlockMinIndex = blocksWithNonMovableCount - 1;
    12686  }
    12687  }
    12688  */
    12689 
    12690  size_t srcBlockIndex = m_Blocks.size() - 1;
    12691  size_t srcAllocIndex = SIZE_MAX;
    12692  for(;;)
    12693  {
    12694  // 1. Find next allocation to move.
    12695  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
    12696  // 1.2. Then start from last to first m_Allocations.
    12697  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
    12698  {
    12699  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
    12700  {
    12701  // Finished: no more allocations to process.
    12702  if(srcBlockIndex == srcBlockMinIndex)
    12703  {
    12704  return VK_SUCCESS;
    12705  }
    12706  else
    12707  {
    12708  --srcBlockIndex;
    12709  srcAllocIndex = SIZE_MAX;
    12710  }
    12711  }
    12712  else
    12713  {
    12714  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
    12715  }
    12716  }
    12717 
    12718  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
    12719  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
    12720 
    12721  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
    12722  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
    12723  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
    12724  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
    12725 
    12726  // 2. Try to find new place for this allocation in preceding or current block.
    12727  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
    12728  {
    12729  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
    12730  VmaAllocationRequest dstAllocRequest;
    12731  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
    12732  m_CurrentFrameIndex,
    12733  m_pBlockVector->GetFrameInUseCount(),
    12734  m_pBlockVector->GetBufferImageGranularity(),
    12735  size,
    12736  alignment,
    12737  false, // upperAddress
    12738  suballocType,
    12739  false, // canMakeOtherLost
    12740  strategy,
    12741  &dstAllocRequest) &&
    12742  MoveMakesSense(
    12743  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
    12744  {
    12745  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
    12746 
    12747  // Reached limit on number of allocations or bytes to move.
    12748  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
    12749  (m_BytesMoved + size > maxBytesToMove))
    12750  {
    12751  return VK_SUCCESS;
    12752  }
    12753 
    12754  VmaDefragmentationMove move;
    12755  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
    12756  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
    12757  move.srcOffset = srcOffset;
    12758  move.dstOffset = dstAllocRequest.offset;
    12759  move.size = size;
    12760  moves.push_back(move);
    12761 
    12762  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
    12763  dstAllocRequest,
    12764  suballocType,
    12765  size,
    12766  allocInfo.m_hAllocation);
    12767  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
    12768 
    12769  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
    12770 
    12771  if(allocInfo.m_pChanged != VMA_NULL)
    12772  {
    12773  *allocInfo.m_pChanged = VK_TRUE;
    12774  }
    12775 
    12776  ++m_AllocationsMoved;
    12777  m_BytesMoved += size;
    12778 
    12779  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
    12780 
    12781  break;
    12782  }
    12783  }
    12784 
    12785  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
    12786 
    12787  if(srcAllocIndex > 0)
    12788  {
    12789  --srcAllocIndex;
    12790  }
    12791  else
    12792  {
    12793  if(srcBlockIndex > 0)
    12794  {
    12795  --srcBlockIndex;
    12796  srcAllocIndex = SIZE_MAX;
    12797  }
    12798  else
    12799  {
    12800  return VK_SUCCESS;
    12801  }
    12802  }
    12803  }
    12804 }
    12805 
    12806 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
    12807 {
    12808  size_t result = 0;
    12809  for(size_t i = 0; i < m_Blocks.size(); ++i)
    12810  {
    12811  if(m_Blocks[i]->m_HasNonMovableAllocations)
    12812  {
    12813  ++result;
    12814  }
    12815  }
    12816  return result;
    12817 }
    12818 
    12819 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
    12820  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    12821  VkDeviceSize maxBytesToMove,
    12822  uint32_t maxAllocationsToMove)
    12823 {
    12824  if(!m_AllAllocations && m_AllocationCount == 0)
    12825  {
    12826  return VK_SUCCESS;
    12827  }
    12828 
    12829  const size_t blockCount = m_Blocks.size();
    12830  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    12831  {
    12832  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
    12833 
    12834  if(m_AllAllocations)
    12835  {
    12836  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
    12837  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
    12838  it != pMetadata->m_Suballocations.end();
    12839  ++it)
    12840  {
    12841  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    12842  {
    12843  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
    12844  pBlockInfo->m_Allocations.push_back(allocInfo);
    12845  }
    12846  }
    12847  }
    12848 
    12849  pBlockInfo->CalcHasNonMovableAllocations();
    12850 
    12851  // This is a choice based on research.
    12852  // Option 1:
    12853  pBlockInfo->SortAllocationsByOffsetDescending();
    12854  // Option 2:
    12855  //pBlockInfo->SortAllocationsBySizeDescending();
    12856  }
    12857 
    12858  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
    12859  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
    12860 
    12861  // This is a choice based on research.
    12862  const uint32_t roundCount = 2;
    12863 
    12864  // Execute defragmentation rounds (the main part).
    12865  VkResult result = VK_SUCCESS;
    12866  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
    12867  {
    12868  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
    12869  }
    12870 
    12871  return result;
    12872 }
    12873 
    12874 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
    12875  size_t dstBlockIndex, VkDeviceSize dstOffset,
    12876  size_t srcBlockIndex, VkDeviceSize srcOffset)
    12877 {
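           // A move "makes sense" only if it strictly decreases the allocation's
           // position in the lexicographic order (blockIndex, offset), i.e. data only
           // ever flows toward the front of the block vector.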
    12878  if(dstBlockIndex < srcBlockIndex)
    12879  {
    12880  return true;
    12881  }
    12882  if(dstBlockIndex > srcBlockIndex)
    12883  {
    12884  return false;
    12885  }
    12886  if(dstOffset < srcOffset)
    12887  {
    12888  return true;
    12889  }
    12890  return false;
    12891 }
    12892 
    12893 ////////////////////////////////////////////////////////////////////////////////
    12894 // VmaDefragmentationAlgorithm_Fast
    12895 
    12896 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
    12897  VmaAllocator hAllocator,
    12898  VmaBlockVector* pBlockVector,
    12899  uint32_t currentFrameIndex,
    12900  bool overlappingMoveSupported) :
    12901  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
    12902  m_OverlappingMoveSupported(overlappingMoveSupported),
    12903  m_AllocationCount(0),
    12904  m_AllAllocations(false),
    12905  m_BytesMoved(0),
    12906  m_AllocationsMoved(0),
    12907  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
    12908 {
    12909  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
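           // The fast algorithm packs allocations back-to-back, which would not
           // preserve debug margins - hence it is only usable with
           // VMA_DEBUG_MARGIN == 0 (see also the criteria listed in
           // VmaBlockVectorDefragmentationContext::Begin()).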
    12910 
    12911 }
    12912 
    12913 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
    12914 {
    12915 }
    12916 
    12917 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
    12918  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    12919  VkDeviceSize maxBytesToMove,
    12920  uint32_t maxAllocationsToMove)
    12921 {
    12922  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
    12923 
    12924  const size_t blockCount = m_pBlockVector->GetBlockCount();
    12925  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
    12926  {
    12927  return VK_SUCCESS;
    12928  }
    12929 
    12930  PreprocessMetadata();
    12931 
    12932  // Sort blocks in order from most "destination" (least free space) to most "source" (most free space).
    12933 
    12934  m_BlockInfos.resize(blockCount);
    12935  for(size_t i = 0; i < blockCount; ++i)
    12936  {
    12937  m_BlockInfos[i].origBlockIndex = i;
    12938  }
    12939 
    12940  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
    12941  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
    12942  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
    12943  });
    12944 
    12945  // THE MAIN ALGORITHM
    12946 
    12947  FreeSpaceDatabase freeSpaceDb;
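           // freeSpaceDb remembers gaps that the destination cursor leaves behind
           // (when skipping over barely-movable allocations or jumping to the next
           // block), so that later, smaller allocations can be placed into them.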
    12948 
    12949  size_t dstBlockInfoIndex = 0;
    12950  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
    12951  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
    12952  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
    12953  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
    12954  VkDeviceSize dstOffset = 0;
    12955 
    12956  bool end = false;
    12957  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
    12958  {
    12959  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
    12960  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
    12961  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
    12962  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
    12963  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
    12964  {
    12965  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
    12966  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
    12967  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
    12968  if(m_AllocationsMoved == maxAllocationsToMove ||
    12969  m_BytesMoved + srcAllocSize > maxBytesToMove)
    12970  {
    12971  end = true;
    12972  break;
    12973  }
    12974  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
    12975 
    12976  // Try to place it in one of free spaces from the database.
    12977  size_t freeSpaceInfoIndex;
    12978  VkDeviceSize dstAllocOffset;
    12979  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
    12980  freeSpaceInfoIndex, dstAllocOffset))
    12981  {
    12982  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
    12983  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
    12984  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
    12985 
    12986  // Same block
    12987  if(freeSpaceInfoIndex == srcBlockInfoIndex)
    12988  {
    12989  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
    12990 
    12991  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
    12992 
    12993  VmaSuballocation suballoc = *srcSuballocIt;
    12994  suballoc.offset = dstAllocOffset;
    12995  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
    12996  m_BytesMoved += srcAllocSize;
    12997  ++m_AllocationsMoved;
    12998 
    12999  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
    13000  ++nextSuballocIt;
    13001  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
    13002  srcSuballocIt = nextSuballocIt;
    13003 
    13004  InsertSuballoc(pFreeSpaceMetadata, suballoc);
    13005 
    13006  VmaDefragmentationMove move = {
    13007  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
    13008  srcAllocOffset, dstAllocOffset,
    13009  srcAllocSize };
    13010  moves.push_back(move);
    13011  }
    13012  // Different block
    13013  else
    13014  {
    13015  // MOVE OPTION 2: Move the allocation to a different block.
    13016 
    13017  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
    13018 
    13019  VmaSuballocation suballoc = *srcSuballocIt;
    13020  suballoc.offset = dstAllocOffset;
    13021  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
    13022  m_BytesMoved += srcAllocSize;
    13023  ++m_AllocationsMoved;
    13024 
    13025  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
    13026  ++nextSuballocIt;
    13027  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
    13028  srcSuballocIt = nextSuballocIt;
    13029 
    13030  InsertSuballoc(pFreeSpaceMetadata, suballoc);
    13031 
    13032  VmaDefragmentationMove move = {
    13033  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
    13034  srcAllocOffset, dstAllocOffset,
    13035  srcAllocSize };
    13036  moves.push_back(move);
    13037  }
    13038  }
    13039  else
    13040  {
    13041  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
    13042 
    13043  // If the allocation doesn't fit before the end of dstBlock, advance to the next block.
    13044  while(dstBlockInfoIndex < srcBlockInfoIndex &&
    13045  dstAllocOffset + srcAllocSize > dstBlockSize)
    13046  {
    13047  // But before that, register remaining free space at the end of dst block.
    13048  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
    13049 
    13050  ++dstBlockInfoIndex;
    13051  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
    13052  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
    13053  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
    13054  dstBlockSize = pDstMetadata->GetSize();
    13055  dstOffset = 0;
    13056  dstAllocOffset = 0;
    13057  }
    13058 
    13059  // Same block
    13060  if(dstBlockInfoIndex == srcBlockInfoIndex)
    13061  {
    13062  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
    13063 
    13064  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
    13065 
    13066  bool skipOver = overlap;
    13067  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
    13068  {
    13069  // If destination and source place overlap, skip if it would move it
    13070  // by only < 1/64 of its size.
    13071  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
    13072  }
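           // Example (hypothetical numbers): when overlapping moves are supported, a
           // 64 KiB allocation is skipped if the move would shift it by less than
           // 1 KiB - not worth the copy.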
    13073 
    13074  if(skipOver)
    13075  {
    13076  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
    13077 
    13078  dstOffset = srcAllocOffset + srcAllocSize;
    13079  ++srcSuballocIt;
    13080  }
    13081  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
    13082  else
    13083  {
    13084  srcSuballocIt->offset = dstAllocOffset;
    13085  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
    13086  dstOffset = dstAllocOffset + srcAllocSize;
    13087  m_BytesMoved += srcAllocSize;
    13088  ++m_AllocationsMoved;
    13089  ++srcSuballocIt;
    13090  VmaDefragmentationMove move = {
    13091  srcOrigBlockIndex, dstOrigBlockIndex,
    13092  srcAllocOffset, dstAllocOffset,
    13093  srcAllocSize };
    13094  moves.push_back(move);
    13095  }
    13096  }
    13097  // Different block
    13098  else
    13099  {
    13100  // MOVE OPTION 2: Move the allocation to a different block.
    13101 
    13102  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
    13103  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
    13104 
    13105  VmaSuballocation suballoc = *srcSuballocIt;
    13106  suballoc.offset = dstAllocOffset;
    13107  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
    13108  dstOffset = dstAllocOffset + srcAllocSize;
    13109  m_BytesMoved += srcAllocSize;
    13110  ++m_AllocationsMoved;
    13111 
    13112  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
    13113  ++nextSuballocIt;
    13114  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
    13115  srcSuballocIt = nextSuballocIt;
    13116 
    13117  pDstMetadata->m_Suballocations.push_back(suballoc);
    13118 
    13119  VmaDefragmentationMove move = {
    13120  srcOrigBlockIndex, dstOrigBlockIndex,
    13121  srcAllocOffset, dstAllocOffset,
    13122  srcAllocSize };
    13123  moves.push_back(move);
    13124  }
    13125  }
    13126  }
    13127  }
    13128 
    13129  m_BlockInfos.clear();
    13130 
    13131  PostprocessMetadata();
    13132 
    13133  return VK_SUCCESS;
    13134 }
    13135 
    13136 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
    13137 {
    13138  const size_t blockCount = m_pBlockVector->GetBlockCount();
    13139  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    13140  {
    13141  VmaBlockMetadata_Generic* const pMetadata =
    13142  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
    13143  pMetadata->m_FreeCount = 0;
    13144  pMetadata->m_SumFreeSize = pMetadata->GetSize();
    13145  pMetadata->m_FreeSuballocationsBySize.clear();
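           // Temporarily strip all FREE suballocations below; the fast algorithm
           // rebuilds the free-space bookkeeping from scratch in
           // PostprocessMetadata() once the moves have been decided.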
    13146  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
    13147  it != pMetadata->m_Suballocations.end(); )
    13148  {
    13149  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
    13150  {
    13151  VmaSuballocationList::iterator nextIt = it;
    13152  ++nextIt;
    13153  pMetadata->m_Suballocations.erase(it);
    13154  it = nextIt;
    13155  }
    13156  else
    13157  {
    13158  ++it;
    13159  }
    13160  }
    13161  }
    13162 }
    13163 
    13164 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
    13165 {
    13166  const size_t blockCount = m_pBlockVector->GetBlockCount();
    13167  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    13168  {
    13169  VmaBlockMetadata_Generic* const pMetadata =
    13170  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
    13171  const VkDeviceSize blockSize = pMetadata->GetSize();
    13172 
    13173  // No allocations in this block - entire area is free.
    13174  if(pMetadata->m_Suballocations.empty())
    13175  {
    13176  pMetadata->m_FreeCount = 1;
    13177  //pMetadata->m_SumFreeSize is already set to blockSize.
    13178  VmaSuballocation suballoc = {
    13179  0, // offset
    13180  blockSize, // size
    13181  VMA_NULL, // hAllocation
    13182  VMA_SUBALLOCATION_TYPE_FREE };
    13183  pMetadata->m_Suballocations.push_back(suballoc);
    13184  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
    13185  }
    13186  // There are some allocations in this block.
    13187  else
    13188  {
    13189  VkDeviceSize offset = 0;
    13190  VmaSuballocationList::iterator it;
    13191  for(it = pMetadata->m_Suballocations.begin();
    13192  it != pMetadata->m_Suballocations.end();
    13193  ++it)
    13194  {
    13195  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
    13196  VMA_ASSERT(it->offset >= offset);
    13197 
    13198  // Need to insert preceding free space.
    13199  if(it->offset > offset)
    13200  {
    13201  ++pMetadata->m_FreeCount;
    13202  const VkDeviceSize freeSize = it->offset - offset;
    13203  VmaSuballocation suballoc = {
    13204  offset, // offset
    13205  freeSize, // size
    13206  VMA_NULL, // hAllocation
    13207  VMA_SUBALLOCATION_TYPE_FREE };
    13208  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
    13209  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    13210  {
    13211  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
    13212  }
    13213  }
    13214 
    13215  pMetadata->m_SumFreeSize -= it->size;
    13216  offset = it->offset + it->size;
    13217  }
    13218 
    13219  // Need to insert trailing free space.
    13220  if(offset < blockSize)
    13221  {
    13222  ++pMetadata->m_FreeCount;
    13223  const VkDeviceSize freeSize = blockSize - offset;
    13224  VmaSuballocation suballoc = {
    13225  offset, // offset
    13226  freeSize, // size
    13227  VMA_NULL, // hAllocation
    13228  VMA_SUBALLOCATION_TYPE_FREE };
    13229  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
    13230  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
    13231  if(freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    13232  {
    13233  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
    13234  }
    13235  }
    13236 
    13237  VMA_SORT(
    13238  pMetadata->m_FreeSuballocationsBySize.begin(),
    13239  pMetadata->m_FreeSuballocationsBySize.end(),
    13240  VmaSuballocationItemSizeLess());
    13241  }
    13242 
    13243  VMA_HEAVY_ASSERT(pMetadata->Validate());
    13244  }
    13245 }
    13246 
    13247 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
    13248 {
    13249  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
    13250  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
    13251  while(it != pMetadata->m_Suballocations.end() &&
    13252  it->offset < suballoc.offset)
    13253  {
    13254  ++it;
    13255  }
    13258  pMetadata->m_Suballocations.insert(it, suballoc);
    13259 }
    13260 
    13261 ////////////////////////////////////////////////////////////////////////////////
    13262 // VmaBlockVectorDefragmentationContext
    13263 
    13264 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
    13265  VmaAllocator hAllocator,
    13266  VmaPool hCustomPool,
    13267  VmaBlockVector* pBlockVector,
    13268  uint32_t currFrameIndex) :
    13269  res(VK_SUCCESS),
    13270  mutexLocked(false),
    13271  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
    13272  m_hAllocator(hAllocator),
    13273  m_hCustomPool(hCustomPool),
    13274  m_pBlockVector(pBlockVector),
    13275  m_CurrFrameIndex(currFrameIndex),
    13276  m_pAlgorithm(VMA_NULL),
    13277  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
    13278  m_AllAllocations(false)
    13279 {
    13280 }
    13281 
    13282 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
    13283 {
    13284  vma_delete(m_hAllocator, m_pAlgorithm);
    13285 }
    13286 
    13287 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    13288 {
    13289  AllocInfo info = { hAlloc, pChanged };
    13290  m_Allocations.push_back(info);
    13291 }
    13292 
    13293 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
    13294 {
    13295  const bool allAllocations = m_AllAllocations ||
    13296  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
    13297 
    13298  /********************************
    13299  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
    13300  ********************************/
    13301 
    13302  /*
    13303  Fast algorithm is supported only when certain criteria are met:
    13304  - VMA_DEBUG_MARGIN is 0.
    13305  - All allocations in this block vector are moveable.
    13306  - There is no possibility of image/buffer granularity conflict.
    13307  */
    13308  if(VMA_DEBUG_MARGIN == 0 &&
    13309  allAllocations &&
    13310  !m_pBlockVector->IsBufferImageGranularityConflictPossible())
    13311  {
    13312  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
    13313  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
    13314  }
    13315  else
    13316  {
    13317  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
    13318  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
    13319  }
    13320 
    13321  if(allAllocations)
    13322  {
    13323  m_pAlgorithm->AddAll();
    13324  }
    13325  else
    13326  {
    13327  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
    13328  {
    13329  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
    13330  }
    13331  }
    13332 }
    13333 
    13334 ////////////////////////////////////////////////////////////////////////////////
    13335 // VmaDefragmentationContext
    13336 
    13337 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
    13338  VmaAllocator hAllocator,
    13339  uint32_t currFrameIndex,
    13340  uint32_t flags,
    13341  VmaDefragmentationStats* pStats) :
    13342  m_hAllocator(hAllocator),
    13343  m_CurrFrameIndex(currFrameIndex),
    13344  m_Flags(flags),
    13345  m_pStats(pStats),
    13346  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
    13347 {
    13348  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
    13349 }
    13350 
    13351 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
    13352 {
    13353  for(size_t i = m_CustomPoolContexts.size(); i--; )
    13354  {
    13355  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
    13356  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
    13357  vma_delete(m_hAllocator, pBlockVectorCtx);
    13358  }
    13359  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
    13360  {
    13361  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
    13362  if(pBlockVectorCtx)
    13363  {
    13364  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
    13365  vma_delete(m_hAllocator, pBlockVectorCtx);
    13366  }
    13367  }
    13368 }
    13369 
    13370 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
    13371 {
    13372  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
    13373  {
    13374  VmaPool pool = pPools[poolIndex];
    13375  VMA_ASSERT(pool);
    13376  // Pools with algorithm other than default are not defragmented.
    13377  if(pool->m_BlockVector.GetAlgorithm() == 0)
    13378  {
    13379  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
    13380 
    13381  for(size_t i = m_CustomPoolContexts.size(); i--; )
    13382  {
    13383  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
    13384  {
    13385  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
    13386  break;
    13387  }
    13388  }
    13389 
    13390  if(!pBlockVectorDefragCtx)
    13391  {
    13392  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
    13393  m_hAllocator,
    13394  pool,
    13395  &pool->m_BlockVector,
    13396  m_CurrFrameIndex);
    13397  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
    13398  }
    13399 
    13400  pBlockVectorDefragCtx->AddAll();
    13401  }
    13402  }
    13403 }
    13404 
    13405 void VmaDefragmentationContext_T::AddAllocations(
    13406  uint32_t allocationCount,
    13407  VmaAllocation* pAllocations,
    13408  VkBool32* pAllocationsChanged)
    13409 {
    13410  // Dispatch pAllocations among per-block-vector defragmentation contexts. Create them when necessary.
    13411  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    13412  {
    13413  const VmaAllocation hAlloc = pAllocations[allocIndex];
    13414  VMA_ASSERT(hAlloc);
    13415  // DedicatedAlloc cannot be defragmented.
    13416  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
    13417  // Lost allocation cannot be defragmented.
    13418  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
    13419  {
    13420  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
    13421 
    13422  const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
    13423  // This allocation belongs to custom pool.
    13424  if(hAllocPool != VK_NULL_HANDLE)
    13425  {
    13426  // Pools with algorithm other than default are not defragmented.
    13427  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
    13428  {
    13429  for(size_t i = m_CustomPoolContexts.size(); i--; )
    13430  {
    13431  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
    13432  {
    13433  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
    13434  break;
    13435  }
    13436  }
    13437  if(!pBlockVectorDefragCtx)
    13438  {
    13439  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
    13440  m_hAllocator,
    13441  hAllocPool,
    13442  &hAllocPool->m_BlockVector,
    13443  m_CurrFrameIndex);
    13444  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
    13445  }
    13446  }
    13447  }
    13448  // This allocation belongs to default pool.
    13449  else
    13450  {
    13451  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
    13452  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
    13453  if(!pBlockVectorDefragCtx)
    13454  {
    13455  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
    13456  m_hAllocator,
    13457  VMA_NULL, // hCustomPool
    13458  m_hAllocator->m_pBlockVectors[memTypeIndex],
    13459  m_CurrFrameIndex);
    13460  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
    13461  }
    13462  }
    13463 
    13464  if(pBlockVectorDefragCtx)
    13465  {
    13466  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
    13467  &pAllocationsChanged[allocIndex] : VMA_NULL;
    13468  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
    13469  }
    13470  }
    13471  }
    13472 }
    13473 
    13474 VkResult VmaDefragmentationContext_T::Defragment(
    13475  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
    13476  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
    13477  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
    13478 {
    13479  if(pStats)
    13480  {
    13481  memset(pStats, 0, sizeof(VmaDefragmentationStats));
    13482  }
    13483 
    13484  if(commandBuffer == VK_NULL_HANDLE)
    13485  {
    13486  maxGpuBytesToMove = 0;
    13487  maxGpuAllocationsToMove = 0;
    13488  }
    13489 
    13490  VkResult res = VK_SUCCESS;
    13491 
    13492  // Process default pools.
    13493  for(uint32_t memTypeIndex = 0;
    13494  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
    13495  ++memTypeIndex)
    13496  {
    13497  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
    13498  if(pBlockVectorCtx)
    13499  {
    13500  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
    13501  pBlockVectorCtx->GetBlockVector()->Defragment(
    13502  pBlockVectorCtx,
    13503  pStats,
    13504  maxCpuBytesToMove, maxCpuAllocationsToMove,
    13505  maxGpuBytesToMove, maxGpuAllocationsToMove,
    13506  commandBuffer);
    13507  if(pBlockVectorCtx->res != VK_SUCCESS)
    13508  {
    13509  res = pBlockVectorCtx->res;
    13510  }
    13511  }
    13512  }
    13513 
    13514  // Process custom pools.
    13515  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
    13516  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
    13517  ++customCtxIndex)
    13518  {
    13519  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
    13520  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
    13521  pBlockVectorCtx->GetBlockVector()->Defragment(
    13522  pBlockVectorCtx,
    13523  pStats,
    13524  maxCpuBytesToMove, maxCpuAllocationsToMove,
    13525  maxGpuBytesToMove, maxGpuAllocationsToMove,
    13526  commandBuffer);
    13527  if(pBlockVectorCtx->res != VK_SUCCESS)
    13528  {
    13529  res = pBlockVectorCtx->res;
    13530  }
    13531  }
    13532 
    13533  return res;
    13534 }
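/*
Example - how the internal Defragment() above is typically driven through the
public API. A minimal CPU-only sketch; `allocator`, `allocs`, and `allocCount`
are assumed to exist. Passing VK_NULL_HANDLE as commandBuffer disables the
GPU-side pass (maxGpuBytesToMove/maxGpuAllocationsToMove are then forced to 0,
as seen at the top of Defragment()).

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocCount;
    defragInfo.pAllocations = allocs;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
    defragInfo.commandBuffer = VK_NULL_HANDLE; // CPU moves only.

    VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
    VmaDefragmentationStats stats = {};
    VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, &stats, &defragCtx);
    // ... destroy and recreate buffers/images bound to moved allocations ...
    vmaDefragmentationEnd(allocator, defragCtx);
*/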
    13535 
    13536 ////////////////////////////////////////////////////////////////////////////////
    13537 // VmaRecorder
    13538 
    13539 #if VMA_RECORDING_ENABLED
    13540 
    13541 VmaRecorder::VmaRecorder() :
    13542  m_UseMutex(true),
    13543  m_Flags(0),
    13544  m_File(VMA_NULL),
    13545  m_Freq(INT64_MAX),
    13546  m_StartCounter(INT64_MAX)
    13547 {
    13548 }
    13549 
    13550 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
    13551 {
    13552  m_UseMutex = useMutex;
    13553  m_Flags = settings.flags;
    13554 
    13555  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    13556  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
    13557 
    13558  // Open file for writing.
    13559  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    13560  if(err != 0)
    13561  {
    13562  return VK_ERROR_INITIALIZATION_FAILED;
    13563  }
    13564 
    13565  // Write header.
    13566  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    13567  fprintf(m_File, "%s\n", "1,6");
    13568 
    13569  return VK_SUCCESS;
    13570 }
    13571 
    13572 VmaRecorder::~VmaRecorder()
    13573 {
    13574  if(m_File != VMA_NULL)
    13575  {
    13576  fclose(m_File);
    13577  }
    13578 }
    13579 
    13580 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
    13581 {
    13582  CallParams callParams;
    13583  GetBasicParams(callParams);
    13584 
    13585  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13586  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
    13587  Flush();
    13588 }
    13589 
    13590 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
    13591 {
    13592  CallParams callParams;
    13593  GetBasicParams(callParams);
    13594 
    13595  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13596  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
    13597  Flush();
    13598 }
    13599 
    13600 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
    13601 {
    13602  CallParams callParams;
    13603  GetBasicParams(callParams);
    13604 
    13605  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13606  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
    13607  createInfo.memoryTypeIndex,
    13608  createInfo.flags,
    13609  createInfo.blockSize,
    13610  (uint64_t)createInfo.minBlockCount,
    13611  (uint64_t)createInfo.maxBlockCount,
    13612  createInfo.frameInUseCount,
    13613  pool);
    13614  Flush();
    13615 }
    13616 
    13617 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
    13618 {
    13619  CallParams callParams;
    13620  GetBasicParams(callParams);
    13621 
    13622  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13623  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
    13624  pool);
    13625  Flush();
    13626 }
    13627 
    13628 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    13629  const VkMemoryRequirements& vkMemReq,
    13630  const VmaAllocationCreateInfo& createInfo,
    13631  VmaAllocation allocation)
    13632 {
    13633  CallParams callParams;
    13634  GetBasicParams(callParams);
    13635 
    13636  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13637  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    13638  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13639  vkMemReq.size,
    13640  vkMemReq.alignment,
    13641  vkMemReq.memoryTypeBits,
    13642  createInfo.flags,
    13643  createInfo.usage,
    13644  createInfo.requiredFlags,
    13645  createInfo.preferredFlags,
    13646  createInfo.memoryTypeBits,
    13647  createInfo.pool,
    13648  allocation,
    13649  userDataStr.GetString());
    13650  Flush();
    13651 }
    13652 
    13653 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
    13654  const VkMemoryRequirements& vkMemReq,
    13655  const VmaAllocationCreateInfo& createInfo,
    13656  uint64_t allocationCount,
    13657  const VmaAllocation* pAllocations)
    13658 {
    13659  CallParams callParams;
    13660  GetBasicParams(callParams);
    13661 
    13662  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13663  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    13664  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
    13665  vkMemReq.size,
    13666  vkMemReq.alignment,
    13667  vkMemReq.memoryTypeBits,
    13668  createInfo.flags,
    13669  createInfo.usage,
    13670  createInfo.requiredFlags,
    13671  createInfo.preferredFlags,
    13672  createInfo.memoryTypeBits,
    13673  createInfo.pool);
    13674  PrintPointerList(allocationCount, pAllocations);
    13675  fprintf(m_File, ",%s\n", userDataStr.GetString());
    13676  Flush();
    13677 }
    13678 
    13679 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    13680  const VkMemoryRequirements& vkMemReq,
    13681  bool requiresDedicatedAllocation,
    13682  bool prefersDedicatedAllocation,
    13683  const VmaAllocationCreateInfo& createInfo,
    13684  VmaAllocation allocation)
    13685 {
    13686  CallParams callParams;
    13687  GetBasicParams(callParams);
    13688 
    13689  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13690  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    13691  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13692  vkMemReq.size,
    13693  vkMemReq.alignment,
    13694  vkMemReq.memoryTypeBits,
    13695  requiresDedicatedAllocation ? 1 : 0,
    13696  prefersDedicatedAllocation ? 1 : 0,
    13697  createInfo.flags,
    13698  createInfo.usage,
    13699  createInfo.requiredFlags,
    13700  createInfo.preferredFlags,
    13701  createInfo.memoryTypeBits,
    13702  createInfo.pool,
    13703  allocation,
    13704  userDataStr.GetString());
    13705  Flush();
    13706 }
    13707 
    13708 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    13709  const VkMemoryRequirements& vkMemReq,
    13710  bool requiresDedicatedAllocation,
    13711  bool prefersDedicatedAllocation,
    13712  const VmaAllocationCreateInfo& createInfo,
    13713  VmaAllocation allocation)
    13714 {
    13715  CallParams callParams;
    13716  GetBasicParams(callParams);
    13717 
    13718  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13719  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    13720  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13721  vkMemReq.size,
    13722  vkMemReq.alignment,
    13723  vkMemReq.memoryTypeBits,
    13724  requiresDedicatedAllocation ? 1 : 0,
    13725  prefersDedicatedAllocation ? 1 : 0,
    13726  createInfo.flags,
    13727  createInfo.usage,
    13728  createInfo.requiredFlags,
    13729  createInfo.preferredFlags,
    13730  createInfo.memoryTypeBits,
    13731  createInfo.pool,
    13732  allocation,
    13733  userDataStr.GetString());
    13734  Flush();
    13735 }
    13736 
    13737 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
    13738  VmaAllocation allocation)
    13739 {
    13740  CallParams callParams;
    13741  GetBasicParams(callParams);
    13742 
    13743  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13744  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    13745  allocation);
    13746  Flush();
    13747 }
    13748 
    13749 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
    13750  uint64_t allocationCount,
    13751  const VmaAllocation* pAllocations)
    13752 {
    13753  CallParams callParams;
    13754  GetBasicParams(callParams);
    13755 
    13756  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13757  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
    13758  PrintPointerList(allocationCount, pAllocations);
    13759  fprintf(m_File, "\n");
    13760  Flush();
    13761 }
    13762 
    13763 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    13764  VmaAllocation allocation,
    13765  const void* pUserData)
    13766 {
    13767  CallParams callParams;
    13768  GetBasicParams(callParams);
    13769 
    13770  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13771  UserDataString userDataStr(
    13772  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
    13773  pUserData);
    13774  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13775  allocation,
    13776  userDataStr.GetString());
    13777  Flush();
    13778 }
    13779 
    13780 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
    13781  VmaAllocation allocation)
    13782 {
    13783  CallParams callParams;
    13784  GetBasicParams(callParams);
    13785 
    13786  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13787  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    13788  allocation);
    13789  Flush();
    13790 }
    13791 
    13792 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
    13793  VmaAllocation allocation)
    13794 {
    13795  CallParams callParams;
    13796  GetBasicParams(callParams);
    13797 
    13798  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13799  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    13800  allocation);
    13801  Flush();
    13802 }
    13803 
    13804 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
    13805  VmaAllocation allocation)
    13806 {
    13807  CallParams callParams;
    13808  GetBasicParams(callParams);
    13809 
    13810  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13811  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    13812  allocation);
    13813  Flush();
    13814 }
    13815 
    13816 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
    13817  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    13818 {
    13819  CallParams callParams;
    13820  GetBasicParams(callParams);
    13821 
    13822  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13823  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    13824  allocation,
    13825  offset,
    13826  size);
    13827  Flush();
    13828 }
    13829 
    13830 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
    13831  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    13832 {
    13833  CallParams callParams;
    13834  GetBasicParams(callParams);
    13835 
    13836  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13837  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    13838  allocation,
    13839  offset,
    13840  size);
    13841  Flush();
    13842 }
    13843 
    13844 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
    13845  const VkBufferCreateInfo& bufCreateInfo,
    13846  const VmaAllocationCreateInfo& allocCreateInfo,
    13847  VmaAllocation allocation)
    13848 {
    13849  CallParams callParams;
    13850  GetBasicParams(callParams);
    13851 
    13852  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13853  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    13854  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13855  bufCreateInfo.flags,
    13856  bufCreateInfo.size,
    13857  bufCreateInfo.usage,
    13858  bufCreateInfo.sharingMode,
    13859  allocCreateInfo.flags,
    13860  allocCreateInfo.usage,
    13861  allocCreateInfo.requiredFlags,
    13862  allocCreateInfo.preferredFlags,
    13863  allocCreateInfo.memoryTypeBits,
    13864  allocCreateInfo.pool,
    13865  allocation,
    13866  userDataStr.GetString());
    13867  Flush();
    13868 }
    13869 
    13870 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    13871  const VkImageCreateInfo& imageCreateInfo,
    13872  const VmaAllocationCreateInfo& allocCreateInfo,
    13873  VmaAllocation allocation)
    13874 {
    13875  CallParams callParams;
    13876  GetBasicParams(callParams);
    13877 
    13878  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13879  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    13880  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13881  imageCreateInfo.flags,
    13882  imageCreateInfo.imageType,
    13883  imageCreateInfo.format,
    13884  imageCreateInfo.extent.width,
    13885  imageCreateInfo.extent.height,
    13886  imageCreateInfo.extent.depth,
    13887  imageCreateInfo.mipLevels,
    13888  imageCreateInfo.arrayLayers,
    13889  imageCreateInfo.samples,
    13890  imageCreateInfo.tiling,
    13891  imageCreateInfo.usage,
    13892  imageCreateInfo.sharingMode,
    13893  imageCreateInfo.initialLayout,
    13894  allocCreateInfo.flags,
    13895  allocCreateInfo.usage,
    13896  allocCreateInfo.requiredFlags,
    13897  allocCreateInfo.preferredFlags,
    13898  allocCreateInfo.memoryTypeBits,
    13899  allocCreateInfo.pool,
    13900  allocation,
    13901  userDataStr.GetString());
    13902  Flush();
    13903 }
    13904 
    13905 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
    13906  VmaAllocation allocation)
    13907 {
    13908  CallParams callParams;
    13909  GetBasicParams(callParams);
    13910 
    13911  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13912  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
    13913  allocation);
    13914  Flush();
    13915 }
    13916 
    13917 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
    13918  VmaAllocation allocation)
    13919 {
    13920  CallParams callParams;
    13921  GetBasicParams(callParams);
    13922 
    13923  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13924  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
    13925  allocation);
    13926  Flush();
    13927 }
    13928 
    13929 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
    13930  VmaAllocation allocation)
    13931 {
    13932  CallParams callParams;
    13933  GetBasicParams(callParams);
    13934 
    13935  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13936  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    13937  allocation);
    13938  Flush();
    13939 }
    13940 
    13941 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
    13942  VmaAllocation allocation)
    13943 {
    13944  CallParams callParams;
    13945  GetBasicParams(callParams);
    13946 
    13947  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13948  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
    13949  allocation);
    13950  Flush();
    13951 }
    13952 
    13953 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
    13954  VmaPool pool)
    13955 {
    13956  CallParams callParams;
    13957  GetBasicParams(callParams);
    13958 
    13959  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13960  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
    13961  pool);
    13962  Flush();
    13963 }
    13964 
    13965 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
    13966  const VmaDefragmentationInfo2& info,
    13967  VmaDefragmentationContext ctx)
    13968 {
    13969  CallParams callParams;
    13970  GetBasicParams(callParams);
    13971 
    13972  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13973  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
    13974  info.flags);
    13975  PrintPointerList(info.allocationCount, info.pAllocations);
    13976  fprintf(m_File, ",");
    13977  PrintPointerList(info.poolCount, info.pPools);
    13978  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
    13979  info.maxCpuBytesToMove,
    13980  info.maxCpuAllocationsToMove,
    13981  info.maxGpuBytesToMove,
    13982  info.maxGpuAllocationsToMove,
    13983  info.commandBuffer,
    13984  ctx);
    13985  Flush();
    13986 }
    13987 
    13988 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
    13989  VmaDefragmentationContext ctx)
    13990 {
    13991  CallParams callParams;
    13992  GetBasicParams(callParams);
    13993 
    13994  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13995  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
    13996  ctx);
    13997  Flush();
    13998 }
    13999 
    14000 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
    14001 {
    14002  if(pUserData != VMA_NULL)
    14003  {
    14004  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
    14005  {
    14006  m_Str = (const char*)pUserData;
    14007  }
    14008  else
    14009  {
    14010  sprintf_s(m_PtrStr, "%p", pUserData);
    14011  m_Str = m_PtrStr;
    14012  }
    14013  }
    14014  else
    14015  {
    14016  m_Str = "";
    14017  }
    14018 }
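/*
Example - the string branch of UserDataString above corresponds to
VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT. A minimal sketch (the label
text is arbitrary):

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT;
    allocCreateInfo.pUserData = (void*)"Texture: hero_albedo.png";
    // Without the flag, pUserData is treated as an opaque pointer and is
    // recorded as "%p" instead of as a string.
*/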
    14019 
    14020 void VmaRecorder::WriteConfiguration(
    14021  const VkPhysicalDeviceProperties& devProps,
    14022  const VkPhysicalDeviceMemoryProperties& memProps,
    14023  bool dedicatedAllocationExtensionEnabled,
    14024  bool bindMemory2ExtensionEnabled)
    14025 {
    14026  fprintf(m_File, "Config,Begin\n");
    14027 
    14028  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    14029  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    14030  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    14031  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    14032  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    14033  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
    14034 
    14035  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    14036  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    14037  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
    14038 
    14039  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    14040  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    14041  {
    14042  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
    14043  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    14044  }
    14045  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    14046  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    14047  {
    14048  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
    14049  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    14050  }
    14051 
    14052  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
    14053  fprintf(m_File, "Extension,VK_KHR_bind_memory2,%u\n", bindMemory2ExtensionEnabled ? 1 : 0);
    14054 
    14055  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    14056  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    14057  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    14058  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    14059  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    14060  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    14061  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    14062  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    14063  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    14064 
    14065  fprintf(m_File, "Config,End\n");
    14066 }
    14067 
    14068 void VmaRecorder::GetBasicParams(CallParams& outParams)
    14069 {
    14070  outParams.threadId = GetCurrentThreadId();
    14071 
    14072  LARGE_INTEGER counter;
    14073  QueryPerformanceCounter(&counter);
    14074  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
    14075 }
    14076 
    14077 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
    14078 {
    14079  if(count)
    14080  {
    14081  fprintf(m_File, "%p", pItems[0]);
    14082  for(uint64_t i = 1; i < count; ++i)
    14083  {
    14084  fprintf(m_File, " %p", pItems[i]);
    14085  }
    14086  }
    14087 }
    14088 
    14089 void VmaRecorder::Flush()
    14090 {
    14091  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
    14092  {
    14093  fflush(m_File);
    14094  }
    14095 }
    14096 
    14097 #endif // #if VMA_RECORDING_ENABLED
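/*
Example - enabling the recorder implemented above at allocator creation time.
A minimal sketch (the file path is arbitrary); recording requires Windows and
VMA_RECORDING_ENABLED defined to 1:

    VmaRecordSettings recordSettings = {};
    recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT; // Survives a crash, but slower.
    recordSettings.pFilePath = "recording.csv";

    VmaAllocatorCreateInfo allocatorInfo = {};
    // ... physicalDevice, device etc. ...
    allocatorInfo.pRecordSettings = &recordSettings;
*/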
    14098 
    14099 ////////////////////////////////////////////////////////////////////////////////
    14100 // VmaAllocationObjectAllocator
    14101 
    14102 VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
    14103  m_Allocator(pAllocationCallbacks, 1024)
    14104 {
    14105 }
    14106 
    14107 VmaAllocation VmaAllocationObjectAllocator::Allocate()
    14108 {
    14109  VmaMutexLock mutexLock(m_Mutex);
    14110  return m_Allocator.Alloc();
    14111 }
    14112 
    14113 void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
    14114 {
    14115  VmaMutexLock mutexLock(m_Mutex);
    14116  m_Allocator.Free(hAlloc);
    14117 }
    14118 
    14119 ////////////////////////////////////////////////////////////////////////////////
    14120 // VmaAllocator_T
    14121 
    14122 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    14123  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    14124  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    14125  m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0),
    14126  m_hDevice(pCreateInfo->device),
    14127  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    14128  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
    14129  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    14130  m_AllocationObjectAllocator(&m_AllocationCallbacks),
    14131  m_PreferredLargeHeapBlockSize(0),
    14132  m_PhysicalDevice(pCreateInfo->physicalDevice),
    14133  m_CurrentFrameIndex(0),
    14134  m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
    14135  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
    14136  m_NextPoolId(0)
    14137 #if VMA_RECORDING_ENABLED
    14138  ,m_pRecorder(VMA_NULL)
    14139 #endif
    14140 {
    14141  if(VMA_DEBUG_DETECT_CORRUPTION)
    14142  {
    14143  // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
    14144  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
    14145  }
    14146 
    14147  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
    14148 
    14149 #if !(VMA_DEDICATED_ALLOCATION)
    14150  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
    14151  {
    14152  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
    14153  }
    14154 #endif
    14155 #if !(VMA_BIND_MEMORY2)
    14156  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0)
    14157  {
    14158  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros.");
    14159  }
    14160 #endif
    14161 
    14162  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
    14163  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    14164  memset(&m_MemProps, 0, sizeof(m_MemProps));
    14165 
    14166  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    14167  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
    14168 
    14169  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    14170  {
    14171  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    14172  }
    14173 
    14174  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    14175  {
    14176  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
    14177  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    14178  }
    14179 
    14180  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
    14181 
    14182  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    14183  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
    14184 
    14185  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
    14186  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
    14187  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
    14188  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
    14189 
    14190  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
    14191  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    14192 
    14193  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    14194  {
    14195  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
    14196  {
    14197  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
    14198  if(limit != VK_WHOLE_SIZE)
    14199  {
    14200  m_HeapSizeLimit[heapIndex] = limit;
    14201  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
    14202  {
    14203  m_MemProps.memoryHeaps[heapIndex].size = limit;
    14204  }
    14205  }
    14206  }
    14207  }
    14208 
    14209  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    14210  {
    14211  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
    14212 
    14213  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
    14214  this,
    14215  VK_NULL_HANDLE, // hParentPool
    14216  memTypeIndex,
    14217  preferredBlockSize,
    14218  0,
    14219  SIZE_MAX,
    14220  GetBufferImageGranularity(),
    14221  pCreateInfo->frameInUseCount,
    14222  false, // isCustomPool
    14223  false, // explicitBlockSize
    14224  false); // linearAlgorithm
    14225  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
    14226  // because minBlockCount is 0.
    14227  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    14228 
    14229  }
    14230 }
    14231 
    14232 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
    14233 {
    14234  VkResult res = VK_SUCCESS;
    14235 
    14236  if(pCreateInfo->pRecordSettings != VMA_NULL &&
    14237  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    14238  {
    14239 #if VMA_RECORDING_ENABLED
    14240  m_pRecorder = vma_new(this, VmaRecorder)();
    14241  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
    14242  if(res != VK_SUCCESS)
    14243  {
    14244  return res;
    14245  }
    14246  m_pRecorder->WriteConfiguration(
    14247  m_PhysicalDeviceProperties,
    14248  m_MemProps,
    14249  m_UseKhrDedicatedAllocation,
    14250  m_UseKhrBindMemory2);
    14251  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
    14252 #else
    14253  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
    14254  return VK_ERROR_FEATURE_NOT_PRESENT;
    14255 #endif
    14256  }
    14257 
    14258  return res;
    14259 }
    14260 
    14261 VmaAllocator_T::~VmaAllocator_T()
    14262 {
    14263 #if VMA_RECORDING_ENABLED
    14264  if(m_pRecorder != VMA_NULL)
    14265  {
    14266  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
    14267  vma_delete(this, m_pRecorder);
    14268  }
    14269 #endif
    14270 
    14271  VMA_ASSERT(m_Pools.empty());
    14272 
    14273  for(size_t i = GetMemoryTypeCount(); i--; )
    14274  {
    14275  if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty())
    14276  {
    14277  VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
    14278  }
    14279 
    14280  vma_delete(this, m_pDedicatedAllocations[i]);
    14281  vma_delete(this, m_pBlockVectors[i]);
    14282  }
    14283 }
    14284 
    14285 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
    14286 {
    14287 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
    14288  m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
    14289  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
    14290  m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
    14291  m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
    14292  m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
    14293  m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
    14294  m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
    14295  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
    14296  m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
    14297  m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
    14298  m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
    14299  m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
    14300  m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
    14301  m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
    14302  m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
    14303  m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
    14304  m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
    14305 #if VMA_DEDICATED_ALLOCATION
    14306  if(m_UseKhrDedicatedAllocation)
    14307  {
    14308  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
    14309  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
    14310  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
    14311  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    14312  }
    14313 #endif // #if VMA_DEDICATED_ALLOCATION
    14314 #if VMA_BIND_MEMORY2
    14315  if(m_UseKhrBindMemory2)
    14316  {
    14317  m_VulkanFunctions.vkBindBufferMemory2KHR =
    14318  (PFN_vkBindBufferMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindBufferMemory2KHR");
    14319  m_VulkanFunctions.vkBindImageMemory2KHR =
    14320  (PFN_vkBindImageMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindImageMemory2KHR");
    14321  }
    14322 #endif // #if VMA_BIND_MEMORY2
    14323 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
    14324 
    14325 #define VMA_COPY_IF_NOT_NULL(funcName) \
    14326  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
    14327 
    14328  if(pVulkanFunctions != VMA_NULL)
    14329  {
    14330  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
    14331  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
    14332  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
    14333  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
    14334  VMA_COPY_IF_NOT_NULL(vkMapMemory);
    14335  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
    14336  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
    14337  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
    14338  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
    14339  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
    14340  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
    14341  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
    14342  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
    14343  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
    14344  VMA_COPY_IF_NOT_NULL(vkCreateImage);
    14345  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
    14346  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
    14347 #if VMA_DEDICATED_ALLOCATION
    14348  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
    14349  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
    14350 #endif
    14351 #if VMA_BIND_MEMORY2
    14352  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR);
    14353  VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR);
    14354 #endif
    14355  }
    14356 
    14357 #undef VMA_COPY_IF_NOT_NULL
    14358 
    14359  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    14360  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    14361  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    14362  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    14363  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    14364  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    14365  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    14366  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    14367  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    14368  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    14369  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    14370  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    14371  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    14372  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    14373  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    14374  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    14375  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    14376  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
    14377  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
    14378 #if VMA_DEDICATED_ALLOCATION
    14379  if(m_UseKhrDedicatedAllocation)
    14380  {
    14381  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
    14382  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    14383  }
    14384 #endif
    14385 #if VMA_BIND_MEMORY2
    14386  if(m_UseKhrBindMemory2)
    14387  {
    14388  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL);
    14389  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL);
    14390  }
    14391 #endif
    14392 }
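/*
Example - opting into the KHR extensions whose entry points are imported above.
A minimal sketch; it assumes VK_KHR_get_memory_requirements2,
VK_KHR_dedicated_allocation and VK_KHR_bind_memory2 were enabled when the
VkDevice was created:

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.flags =
        VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT |
        VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT;

    VmaAllocator allocator;
    vmaCreateAllocator(&allocatorInfo, &allocator);
*/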
    14393 
    14394 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    14395 {
    14396  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    14397  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    14398  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
    14399  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
    14400 }
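/*
Worked example for CalcPreferredBlockSize(), assuming the default macro values
(VMA_SMALL_HEAP_MAX_SIZE = 1 GiB, VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE = 256 MiB)
and no preferredLargeHeapBlockSize override:

    512 MiB heap -> "small" heap  -> block size = 512 MiB / 8 = 64 MiB
      8 GiB heap -> "large" heap  -> block size = 256 MiB
*/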
    14401 
    14402 VkResult VmaAllocator_T::AllocateMemoryOfType(
    14403  VkDeviceSize size,
    14404  VkDeviceSize alignment,
    14405  bool dedicatedAllocation,
    14406  VkBuffer dedicatedBuffer,
    14407  VkImage dedicatedImage,
    14408  const VmaAllocationCreateInfo& createInfo,
    14409  uint32_t memTypeIndex,
    14410  VmaSuballocationType suballocType,
    14411  size_t allocationCount,
    14412  VmaAllocation* pAllocations)
    14413 {
    14414  VMA_ASSERT(pAllocations != VMA_NULL);
    14415  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
    14416 
    14417  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    14418 
    14419  // If memory type is not HOST_VISIBLE, disable MAPPED.
    14420  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    14421  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    14422  {
    14423  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    14424  }
    14425 
    14426  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    14427  VMA_ASSERT(blockVector);
    14428 
    14429  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    14430  bool preferDedicatedMemory =
    14431  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    14432  dedicatedAllocation ||
    14433  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
    14434  size > preferredBlockSize / 2;
    14435 
    14436  if(preferDedicatedMemory &&
    14437  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    14438  finalCreateInfo.pool == VK_NULL_HANDLE)
    14439  {
    14440  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
    14441  }
    14442 
    14443  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    14444  {
    14445  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    14446  {
    14447  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14448  }
    14449  else
    14450  {
    14451  return AllocateDedicatedMemory(
    14452  size,
    14453  suballocType,
    14454  memTypeIndex,
    14455  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    14456  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    14457  finalCreateInfo.pUserData,
    14458  dedicatedBuffer,
    14459  dedicatedImage,
    14460  allocationCount,
    14461  pAllocations);
    14462  }
    14463  }
    14464  else
    14465  {
    14466  VkResult res = blockVector->Allocate(
    14467  m_CurrentFrameIndex.load(),
    14468  size,
    14469  alignment,
    14470  finalCreateInfo,
    14471  suballocType,
    14472  allocationCount,
    14473  pAllocations);
    14474  if(res == VK_SUCCESS)
    14475  {
    14476  return res;
    14477  }
    14478 
    14479  // Allocation from existing blocks failed - try dedicated memory.
    14480  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    14481  {
    14482  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14483  }
    14484  else
    14485  {
    14486  res = AllocateDedicatedMemory(
    14487  size,
    14488  suballocType,
    14489  memTypeIndex,
    14490  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    14491  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    14492  finalCreateInfo.pUserData,
    14493  dedicatedBuffer,
    14494  dedicatedImage,
    14495  allocationCount,
    14496  pAllocations);
    14497  if(res == VK_SUCCESS)
    14498  {
    14499  // Succeeded: AllocateDedicatedMemory function already filled pAllocations, nothing more to do here.
    14500  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    14501  return VK_SUCCESS;
    14502  }
    14503  else
    14504  {
    14505  // Everything failed: Return error code.
    14506  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    14507  return res;
    14508  }
    14509  }
    14510  }
    14511 }
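/*
Example - how a caller can steer the dedicated-vs-block decision made in
AllocateMemoryOfType() above. A minimal sketch of the relevant
VmaAllocationCreateInfo flags:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    // Force a separate VkDeviceMemory object (e.g. for a large render target):
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;

    // Or the opposite - never call vkAllocateMemory, fail if no existing
    // block has room:
    // allocCreateInfo.flags = VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT;
*/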
    14512 
    14513 VkResult VmaAllocator_T::AllocateDedicatedMemory(
    14514  VkDeviceSize size,
    14515  VmaSuballocationType suballocType,
    14516  uint32_t memTypeIndex,
    14517  bool map,
    14518  bool isUserDataString,
    14519  void* pUserData,
    14520  VkBuffer dedicatedBuffer,
    14521  VkImage dedicatedImage,
    14522  size_t allocationCount,
    14523  VmaAllocation* pAllocations)
    14524 {
    14525  VMA_ASSERT(allocationCount > 0 && pAllocations);
    14526 
    14527  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    14528  allocInfo.memoryTypeIndex = memTypeIndex;
    14529  allocInfo.allocationSize = size;
    14530 
    14531 #if VMA_DEDICATED_ALLOCATION
    14532  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    14533  if(m_UseKhrDedicatedAllocation)
    14534  {
    14535  if(dedicatedBuffer != VK_NULL_HANDLE)
    14536  {
    14537  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
    14538  dedicatedAllocInfo.buffer = dedicatedBuffer;
    14539  allocInfo.pNext = &dedicatedAllocInfo;
    14540  }
    14541  else if(dedicatedImage != VK_NULL_HANDLE)
    14542  {
    14543  dedicatedAllocInfo.image = dedicatedImage;
    14544  allocInfo.pNext = &dedicatedAllocInfo;
    14545  }
    14546  }
    14547 #endif // #if VMA_DEDICATED_ALLOCATION
    14548 
    14549  size_t allocIndex;
    14550  VkResult res = VK_SUCCESS;
    14551  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    14552  {
    14553  res = AllocateDedicatedMemoryPage(
    14554  size,
    14555  suballocType,
    14556  memTypeIndex,
    14557  allocInfo,
    14558  map,
    14559  isUserDataString,
    14560  pUserData,
    14561  pAllocations + allocIndex);
    14562  if(res != VK_SUCCESS)
    14563  {
    14564  break;
    14565  }
    14566  }
    14567 
    14568  if(res == VK_SUCCESS)
    14569  {
    14570  // Register them in m_pDedicatedAllocations.
    14571  {
    14572  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    14573  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    14574  VMA_ASSERT(pDedicatedAllocations);
    14575  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    14576  {
    14577  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
    14578  }
    14579  }
    14580 
    14581  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
    14582  }
    14583  else
    14584  {
    14585  // Free all already created allocations.
    14586  while(allocIndex--)
    14587  {
    14588  VmaAllocation currAlloc = pAllocations[allocIndex];
    14589  VkDeviceMemory hMemory = currAlloc->GetMemory();
    14590 
    14591  /*
    14592  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
    14593  before vkFreeMemory.
    14594 
    14595  if(currAlloc->GetMappedData() != VMA_NULL)
    14596  {
    14597  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    14598  }
    14599  */
    14600 
    14601  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
    14602 
    14603  currAlloc->SetUserData(this, VMA_NULL);
    14604  currAlloc->Dtor();
    14605  m_AllocationObjectAllocator.Free(currAlloc);
    14606  }
    14607 
    14608  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    14609  }
    14610 
    14611  return res;
    14612 }
    14613 
    14614 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
    14615  VkDeviceSize size,
    14616  VmaSuballocationType suballocType,
    14617  uint32_t memTypeIndex,
    14618  const VkMemoryAllocateInfo& allocInfo,
    14619  bool map,
    14620  bool isUserDataString,
    14621  void* pUserData,
    14622  VmaAllocation* pAllocation)
    14623 {
    14624  VkDeviceMemory hMemory = VK_NULL_HANDLE;
    14625  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    14626  if(res < 0)
    14627  {
    14628  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    14629  return res;
    14630  }
    14631 
    14632  void* pMappedData = VMA_NULL;
    14633  if(map)
    14634  {
    14635  res = (*m_VulkanFunctions.vkMapMemory)(
    14636  m_hDevice,
    14637  hMemory,
    14638  0,
    14639  VK_WHOLE_SIZE,
    14640  0,
    14641  &pMappedData);
    14642  if(res < 0)
    14643  {
    14644  VMA_DEBUG_LOG(" vkMapMemory FAILED");
    14645  FreeVulkanMemory(memTypeIndex, size, hMemory);
    14646  return res;
    14647  }
    14648  }
    14649 
    14650  *pAllocation = m_AllocationObjectAllocator.Allocate();
    14651  (*pAllocation)->Ctor(m_CurrentFrameIndex.load(), isUserDataString);
    14652  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    14653  (*pAllocation)->SetUserData(this, pUserData);
    14654  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    14655  {
    14656  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    14657  }
    14658 
    14659  return VK_SUCCESS;
    14660 }
    14661 
    14662 void VmaAllocator_T::GetBufferMemoryRequirements(
    14663  VkBuffer hBuffer,
    14664  VkMemoryRequirements& memReq,
    14665  bool& requiresDedicatedAllocation,
    14666  bool& prefersDedicatedAllocation) const
    14667 {
    14668 #if VMA_DEDICATED_ALLOCATION
    14669  if(m_UseKhrDedicatedAllocation)
    14670  {
    14671  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
    14672  memReqInfo.buffer = hBuffer;
    14673 
    14674  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    14675 
    14676  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    14677  memReq2.pNext = &memDedicatedReq;
    14678 
    14679  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    14680 
    14681  memReq = memReq2.memoryRequirements;
    14682  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    14683  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    14684  }
    14685  else
    14686 #endif // #if VMA_DEDICATED_ALLOCATION
    14687  {
    14688  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
    14689  requiresDedicatedAllocation = false;
    14690  prefersDedicatedAllocation = false;
    14691  }
    14692 }
    14693 
    14694 void VmaAllocator_T::GetImageMemoryRequirements(
    14695  VkImage hImage,
    14696  VkMemoryRequirements& memReq,
    14697  bool& requiresDedicatedAllocation,
    14698  bool& prefersDedicatedAllocation) const
    14699 {
    14700 #if VMA_DEDICATED_ALLOCATION
    14701  if(m_UseKhrDedicatedAllocation)
    14702  {
    14703  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
    14704  memReqInfo.image = hImage;
    14705 
    14706  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    14707 
    14708  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    14709  memReq2.pNext = &memDedicatedReq;
    14710 
    14711  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    14712 
    14713  memReq = memReq2.memoryRequirements;
    14714  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    14715  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    14716  }
    14717  else
    14718 #endif // #if VMA_DEDICATED_ALLOCATION
    14719  {
    14720  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
    14721  requiresDedicatedAllocation = false;
    14722  prefersDedicatedAllocation = false;
    14723  }
    14724 }
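/*
A note on the two getters above: with VK_KHR_dedicated_allocation enabled,
the query goes through vkGet*MemoryRequirements2KHR with
VkMemoryDedicatedRequirementsKHR chained via pNext, so the driver can report
that a resource requires or prefers its own VkDeviceMemory block.
AllocateMemory() below uses those two booleans to route the request to
AllocateDedicatedMemory(). A minimal sketch of the same query made directly
against the Vulkan API, assuming the extension is enabled and `device` and
`image` already exist:

    VkImageMemoryRequirementsInfo2KHR info = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
    info.image = image;
    VkMemoryDedicatedRequirementsKHR dedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    memReq2.pNext = &dedicatedReq;
    vkGetImageMemoryRequirements2KHR(device, &info, &memReq2);
    // dedicatedReq.requiresDedicatedAllocation / prefersDedicatedAllocation now hold the driver's hint.
*/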
    14725 
    14726 VkResult VmaAllocator_T::AllocateMemory(
    14727  const VkMemoryRequirements& vkMemReq,
    14728  bool requiresDedicatedAllocation,
    14729  bool prefersDedicatedAllocation,
    14730  VkBuffer dedicatedBuffer,
    14731  VkImage dedicatedImage,
    14732  const VmaAllocationCreateInfo& createInfo,
    14733  VmaSuballocationType suballocType,
    14734  size_t allocationCount,
    14735  VmaAllocation* pAllocations)
    14736 {
    14737  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    14738 
    14739  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
    14740 
    14741  if(vkMemReq.size == 0)
    14742  {
    14743  return VK_ERROR_VALIDATION_FAILED_EXT;
    14744  }
    14745  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
    14746  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    14747  {
    14748  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    14749  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14750  }
 14751  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
 14752  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
 14753  {
    14754  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
    14755  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14756  }
    14757  if(requiresDedicatedAllocation)
    14758  {
    14759  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    14760  {
    14761  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
    14762  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14763  }
    14764  if(createInfo.pool != VK_NULL_HANDLE)
    14765  {
    14766  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
    14767  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14768  }
    14769  }
    14770  if((createInfo.pool != VK_NULL_HANDLE) &&
    14771  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    14772  {
    14773  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
    14774  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14775  }
    14776 
    14777  if(createInfo.pool != VK_NULL_HANDLE)
    14778  {
    14779  const VkDeviceSize alignmentForPool = VMA_MAX(
    14780  vkMemReq.alignment,
    14781  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
    14782 
    14783  VmaAllocationCreateInfo createInfoForPool = createInfo;
    14784  // If memory type is not HOST_VISIBLE, disable MAPPED.
    14785  if((createInfoForPool.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    14786  (m_MemProps.memoryTypes[createInfo.pool->m_BlockVector.GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    14787  {
    14788  createInfoForPool.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    14789  }
    14790 
    14791  return createInfo.pool->m_BlockVector.Allocate(
    14792  m_CurrentFrameIndex.load(),
    14793  vkMemReq.size,
    14794  alignmentForPool,
    14795  createInfoForPool,
    14796  suballocType,
    14797  allocationCount,
    14798  pAllocations);
    14799  }
    14800  else
    14801  {
 14802  // Bit mask of Vulkan memory types acceptable for this allocation.
    14803  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    14804  uint32_t memTypeIndex = UINT32_MAX;
    14805  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    14806  if(res == VK_SUCCESS)
    14807  {
    14808  VkDeviceSize alignmentForMemType = VMA_MAX(
    14809  vkMemReq.alignment,
    14810  GetMemoryTypeMinAlignment(memTypeIndex));
    14811 
    14812  res = AllocateMemoryOfType(
    14813  vkMemReq.size,
    14814  alignmentForMemType,
    14815  requiresDedicatedAllocation || prefersDedicatedAllocation,
    14816  dedicatedBuffer,
    14817  dedicatedImage,
    14818  createInfo,
    14819  memTypeIndex,
    14820  suballocType,
    14821  allocationCount,
    14822  pAllocations);
    14823  // Succeeded on first try.
    14824  if(res == VK_SUCCESS)
    14825  {
    14826  return res;
    14827  }
    14828  // Allocation from this memory type failed. Try other compatible memory types.
    14829  else
    14830  {
    14831  for(;;)
    14832  {
    14833  // Remove old memTypeIndex from list of possibilities.
    14834  memoryTypeBits &= ~(1u << memTypeIndex);
    14835  // Find alternative memTypeIndex.
    14836  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    14837  if(res == VK_SUCCESS)
    14838  {
    14839  alignmentForMemType = VMA_MAX(
    14840  vkMemReq.alignment,
    14841  GetMemoryTypeMinAlignment(memTypeIndex));
    14842 
    14843  res = AllocateMemoryOfType(
    14844  vkMemReq.size,
    14845  alignmentForMemType,
    14846  requiresDedicatedAllocation || prefersDedicatedAllocation,
    14847  dedicatedBuffer,
    14848  dedicatedImage,
    14849  createInfo,
    14850  memTypeIndex,
    14851  suballocType,
    14852  allocationCount,
    14853  pAllocations);
    14854  // Allocation from this alternative memory type succeeded.
    14855  if(res == VK_SUCCESS)
    14856  {
    14857  return res;
    14858  }
    14859  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    14860  }
    14861  // No other matching memory type index could be found.
    14862  else
    14863  {
    14864  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    14865  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14866  }
    14867  }
    14868  }
    14869  }
 14870  // Can't find any single memory type matching the requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    14871  else
    14872  return res;
    14873  }
    14874 }
    14875 
    14876 void VmaAllocator_T::FreeMemory(
    14877  size_t allocationCount,
    14878  const VmaAllocation* pAllocations)
    14879 {
    14880  VMA_ASSERT(pAllocations);
    14881 
    14882  for(size_t allocIndex = allocationCount; allocIndex--; )
    14883  {
    14884  VmaAllocation allocation = pAllocations[allocIndex];
    14885 
    14886  if(allocation != VK_NULL_HANDLE)
    14887  {
    14888  if(TouchAllocation(allocation))
    14889  {
    14890  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    14891  {
    14892  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
    14893  }
    14894 
    14895  switch(allocation->GetType())
    14896  {
    14897  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    14898  {
    14899  VmaBlockVector* pBlockVector = VMA_NULL;
    14900  VmaPool hPool = allocation->GetBlock()->GetParentPool();
    14901  if(hPool != VK_NULL_HANDLE)
    14902  {
    14903  pBlockVector = &hPool->m_BlockVector;
    14904  }
    14905  else
    14906  {
    14907  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    14908  pBlockVector = m_pBlockVectors[memTypeIndex];
    14909  }
    14910  pBlockVector->Free(allocation);
    14911  }
    14912  break;
    14913  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    14914  FreeDedicatedMemory(allocation);
    14915  break;
    14916  default:
    14917  VMA_ASSERT(0);
    14918  }
    14919  }
    14920 
    14921  allocation->SetUserData(this, VMA_NULL);
    14922  allocation->Dtor();
    14923  m_AllocationObjectAllocator.Free(allocation);
    14924  }
    14925  }
    14926 }
    14927 
    14928 VkResult VmaAllocator_T::ResizeAllocation(
    14929  const VmaAllocation alloc,
    14930  VkDeviceSize newSize)
    14931 {
 14932  // This function is deprecated and intentionally does nothing. It is kept only for backward compatibility.
    14933  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
    14934  {
    14935  return VK_ERROR_VALIDATION_FAILED_EXT;
    14936  }
    14937  if(newSize == alloc->GetSize())
    14938  {
    14939  return VK_SUCCESS;
    14940  }
    14941  return VK_ERROR_OUT_OF_POOL_MEMORY;
    14942 }
    14943 
    14944 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
    14945 {
    14946  // Initialize.
    14947  InitStatInfo(pStats->total);
    14948  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
    14949  InitStatInfo(pStats->memoryType[i]);
    14950  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    14951  InitStatInfo(pStats->memoryHeap[i]);
    14952 
    14953  // Process default pools.
    14954  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    14955  {
    14956  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    14957  VMA_ASSERT(pBlockVector);
    14958  pBlockVector->AddStats(pStats);
    14959  }
    14960 
    14961  // Process custom pools.
    14962  {
    14963  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
    14964  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    14965  {
    14966  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
    14967  }
    14968  }
    14969 
    14970  // Process dedicated allocations.
    14971  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    14972  {
    14973  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    14974  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    14975  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    14976  VMA_ASSERT(pDedicatedAllocVector);
    14977  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
    14978  {
    14979  VmaStatInfo allocationStatInfo;
    14980  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
    14981  VmaAddStatInfo(pStats->total, allocationStatInfo);
    14982  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    14983  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    14984  }
    14985  }
    14986 
    14987  // Postprocess.
    14988  VmaPostprocessCalcStatInfo(pStats->total);
    14989  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    14990  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    14991  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
    14992  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
    14993 }
    14994 
    14995 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    14996 
    14997 VkResult VmaAllocator_T::DefragmentationBegin(
    14998  const VmaDefragmentationInfo2& info,
    14999  VmaDefragmentationStats* pStats,
    15000  VmaDefragmentationContext* pContext)
    15001 {
    15002  if(info.pAllocationsChanged != VMA_NULL)
    15003  {
    15004  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
    15005  }
    15006 
    15007  *pContext = vma_new(this, VmaDefragmentationContext_T)(
    15008  this, m_CurrentFrameIndex.load(), info.flags, pStats);
    15009 
    15010  (*pContext)->AddPools(info.poolCount, info.pPools);
 15011  (*pContext)->AddAllocations(
 15012  info.allocationCount, info.pAllocations, info.pAllocationsChanged);
 15013 
 15014  VkResult res = (*pContext)->Defragment(
 15015  info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
 15016  info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
 15017  info.commandBuffer, pStats);
    15018 
    15019  if(res != VK_NOT_READY)
    15020  {
    15021  vma_delete(this, *pContext);
    15022  *pContext = VMA_NULL;
    15023  }
    15024 
    15025  return res;
    15026 }
    15027 
    15028 VkResult VmaAllocator_T::DefragmentationEnd(
    15029  VmaDefragmentationContext context)
    15030 {
    15031  vma_delete(this, context);
    15032  return VK_SUCCESS;
    15033 }
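/*
A minimal CPU-only usage sketch for the DefragmentationBegin/End pair above,
assuming `allocator` and an array `allocs` of `allocCount` movable
VmaAllocation handles (both hypothetical names):

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocCount;
    defragInfo.pAllocations = allocs;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext defragCtx;
    VmaDefragmentationStats defragStats = {};
    vmaDefragmentationBegin(allocator, &defragInfo, &defragStats, &defragCtx);
    // VK_NOT_READY would mean GPU work was recorded into defragInfo.commandBuffer;
    // with the CPU-only limits above, the moves are finished synchronously.
    vmaDefragmentationEnd(allocator, defragCtx);
*/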
    15034 
    15035 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
    15036 {
    15037  if(hAllocation->CanBecomeLost())
    15038  {
    15039  /*
    15040  Warning: This is a carefully designed algorithm.
    15041  Do not modify unless you really know what you're doing :)
    15042  */
    15043  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    15044  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    15045  for(;;)
    15046  {
    15047  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    15048  {
    15049  pAllocationInfo->memoryType = UINT32_MAX;
    15050  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
    15051  pAllocationInfo->offset = 0;
    15052  pAllocationInfo->size = hAllocation->GetSize();
    15053  pAllocationInfo->pMappedData = VMA_NULL;
    15054  pAllocationInfo->pUserData = hAllocation->GetUserData();
    15055  return;
    15056  }
    15057  else if(localLastUseFrameIndex == localCurrFrameIndex)
    15058  {
    15059  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    15060  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    15061  pAllocationInfo->offset = hAllocation->GetOffset();
    15062  pAllocationInfo->size = hAllocation->GetSize();
    15063  pAllocationInfo->pMappedData = VMA_NULL;
    15064  pAllocationInfo->pUserData = hAllocation->GetUserData();
    15065  return;
    15066  }
    15067  else // Last use time earlier than current time.
    15068  {
    15069  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    15070  {
    15071  localLastUseFrameIndex = localCurrFrameIndex;
    15072  }
    15073  }
    15074  }
    15075  }
    15076  else
    15077  {
    15078 #if VMA_STATS_STRING_ENABLED
    15079  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    15080  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    15081  for(;;)
    15082  {
    15083  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
    15084  if(localLastUseFrameIndex == localCurrFrameIndex)
    15085  {
    15086  break;
    15087  }
    15088  else // Last use time earlier than current time.
    15089  {
    15090  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    15091  {
    15092  localLastUseFrameIndex = localCurrFrameIndex;
    15093  }
    15094  }
    15095  }
    15096 #endif
    15097 
    15098  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    15099  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    15100  pAllocationInfo->offset = hAllocation->GetOffset();
    15101  pAllocationInfo->size = hAllocation->GetSize();
    15102  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
    15103  pAllocationInfo->pUserData = hAllocation->GetUserData();
    15104  }
    15105 }
    15106 
    15107 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
    15108 {
    15109  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    15110  if(hAllocation->CanBecomeLost())
    15111  {
    15112  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    15113  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    15114  for(;;)
    15115  {
    15116  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    15117  {
    15118  return false;
    15119  }
    15120  else if(localLastUseFrameIndex == localCurrFrameIndex)
    15121  {
    15122  return true;
    15123  }
    15124  else // Last use time earlier than current time.
    15125  {
    15126  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    15127  {
    15128  localLastUseFrameIndex = localCurrFrameIndex;
    15129  }
    15130  }
    15131  }
    15132  }
    15133  else
    15134  {
    15135 #if VMA_STATS_STRING_ENABLED
    15136  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    15137  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    15138  for(;;)
    15139  {
    15140  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
    15141  if(localLastUseFrameIndex == localCurrFrameIndex)
    15142  {
    15143  break;
    15144  }
    15145  else // Last use time earlier than current time.
    15146  {
    15147  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    15148  {
    15149  localLastUseFrameIndex = localCurrFrameIndex;
    15150  }
    15151  }
    15152  }
    15153 #endif
    15154 
    15155  return true;
    15156  }
    15157 }
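/*
A note on the compare-exchange loops in GetAllocationInfo() and
TouchAllocation() above: for allocations created with
VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT, the last-use frame index doubles
as the liveness token. A touch tries to advance it to the current frame with
an atomic compare-exchange and simply retries if another thread got there
first; once the index reads VMA_FRAME_INDEX_LOST, the allocation is gone for
good and the touch reports false. E.g. with the current frame at 105 and the
last use at 103, one successful CAS replaces 103 with 105 and the function
returns true.
*/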
    15158 
    15159 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    15160 {
    15161  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
    15162 
    15163  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    15164 
    15165  if(newCreateInfo.maxBlockCount == 0)
    15166  {
    15167  newCreateInfo.maxBlockCount = SIZE_MAX;
    15168  }
    15169  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
    15170  {
    15171  return VK_ERROR_INITIALIZATION_FAILED;
    15172  }
    15173 
    15174  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    15175 
    15176  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
    15177 
    15178  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    15179  if(res != VK_SUCCESS)
    15180  {
    15181  vma_delete(this, *pPool);
    15182  *pPool = VMA_NULL;
    15183  return res;
    15184  }
    15185 
    15186  // Add to m_Pools.
    15187  {
    15188  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
    15189  (*pPool)->SetId(m_NextPoolId++);
    15190  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    15191  }
    15192 
    15193  return VK_SUCCESS;
    15194 }
    15195 
    15196 void VmaAllocator_T::DestroyPool(VmaPool pool)
    15197 {
    15198  // Remove from m_Pools.
    15199  {
    15200  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
    15201  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    15202  VMA_ASSERT(success && "Pool not found in Allocator.");
    15203  }
    15204 
    15205  vma_delete(this, pool);
    15206 }
    15207 
    15208 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
    15209 {
    15210  pool->m_BlockVector.GetPoolStats(pPoolStats);
    15211 }
    15212 
    15213 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
    15214 {
    15215  m_CurrentFrameIndex.store(frameIndex);
    15216 }
    15217 
    15218 void VmaAllocator_T::MakePoolAllocationsLost(
    15219  VmaPool hPool,
    15220  size_t* pLostAllocationCount)
    15221 {
    15222  hPool->m_BlockVector.MakePoolAllocationsLost(
    15223  m_CurrentFrameIndex.load(),
    15224  pLostAllocationCount);
    15225 }
    15226 
    15227 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
    15228 {
    15229  return hPool->m_BlockVector.CheckCorruption();
    15230 }
    15231 
    15232 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
    15233 {
    15234  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
    15235 
    15236  // Process default pools.
    15237  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    15238  {
    15239  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
    15240  {
    15241  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    15242  VMA_ASSERT(pBlockVector);
    15243  VkResult localRes = pBlockVector->CheckCorruption();
    15244  switch(localRes)
    15245  {
    15246  case VK_ERROR_FEATURE_NOT_PRESENT:
    15247  break;
    15248  case VK_SUCCESS:
    15249  finalRes = VK_SUCCESS;
    15250  break;
    15251  default:
    15252  return localRes;
    15253  }
    15254  }
    15255  }
    15256 
    15257  // Process custom pools.
    15258  {
    15259  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
    15260  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    15261  {
    15262  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
    15263  {
    15264  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
    15265  switch(localRes)
    15266  {
    15267  case VK_ERROR_FEATURE_NOT_PRESENT:
    15268  break;
    15269  case VK_SUCCESS:
    15270  finalRes = VK_SUCCESS;
    15271  break;
    15272  default:
    15273  return localRes;
    15274  }
    15275  }
    15276  }
    15277  }
    15278 
    15279  return finalRes;
    15280 }
    15281 
    15282 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
    15283 {
    15284  *pAllocation = m_AllocationObjectAllocator.Allocate();
    15285  (*pAllocation)->Ctor(VMA_FRAME_INDEX_LOST, false);
    15286  (*pAllocation)->InitLost();
    15287 }
    15288 
    15289 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
    15290 {
    15291  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
    15292 
    15293  VkResult res;
    15294  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    15295  {
    15296  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    15297  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
    15298  {
    15299  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    15300  if(res == VK_SUCCESS)
    15301  {
    15302  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
    15303  }
    15304  }
    15305  else
    15306  {
    15307  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
    15308  }
    15309  }
    15310  else
    15311  {
    15312  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    15313  }
    15314 
    15315  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    15316  {
    15317  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    15318  }
    15319 
    15320  return res;
    15321 }
    15322 
    15323 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
    15324 {
    15325  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    15326  {
    15327  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    15328  }
    15329 
    15330  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
    15331 
    15332  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    15333  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    15334  {
    15335  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    15336  m_HeapSizeLimit[heapIndex] += size;
    15337  }
    15338 }
    15339 
    15340 VkResult VmaAllocator_T::BindVulkanBuffer(
    15341  VkDeviceMemory memory,
    15342  VkDeviceSize memoryOffset,
    15343  VkBuffer buffer,
    15344  const void* pNext)
    15345 {
    15346  if(pNext != VMA_NULL)
    15347  {
    15348 #if VMA_BIND_MEMORY2
    15349  if(m_UseKhrBindMemory2 && m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL)
    15350  {
    15351  VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR };
    15352  bindBufferMemoryInfo.pNext = pNext;
    15353  bindBufferMemoryInfo.buffer = buffer;
    15354  bindBufferMemoryInfo.memory = memory;
    15355  bindBufferMemoryInfo.memoryOffset = memoryOffset;
    15356  return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
    15357  }
    15358  else
    15359 #endif // #if VMA_BIND_MEMORY2
    15360  {
    15361  return VK_ERROR_EXTENSION_NOT_PRESENT;
    15362  }
    15363  }
    15364  else
    15365  {
    15366  return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset);
    15367  }
    15368 }
    15369 
    15370 VkResult VmaAllocator_T::BindVulkanImage(
    15371  VkDeviceMemory memory,
    15372  VkDeviceSize memoryOffset,
    15373  VkImage image,
    15374  const void* pNext)
    15375 {
    15376  if(pNext != VMA_NULL)
    15377  {
    15378 #if VMA_BIND_MEMORY2
    15379  if(m_UseKhrBindMemory2 && m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL)
    15380  {
 15381  VkBindImageMemoryInfoKHR bindImageMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR };
 15382  bindImageMemoryInfo.pNext = pNext;
 15383  bindImageMemoryInfo.image = image;
 15384  bindImageMemoryInfo.memory = memory;
 15385  bindImageMemoryInfo.memoryOffset = memoryOffset;
 15386  return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindImageMemoryInfo);
    15387  }
    15388  else
    15389 #endif // #if VMA_BIND_MEMORY2
    15390  {
    15391  return VK_ERROR_EXTENSION_NOT_PRESENT;
    15392  }
    15393  }
    15394  else
    15395  {
    15396  return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset);
    15397  }
    15398 }
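/*
Both Bind* helpers above dispatch on pNext: a null chain takes the core
vkBindBufferMemory/vkBindImageMemory path, while a non-null chain needs the
vkBind*Memory2KHR entry points from VK_KHR_bind_memory2 (compiled in under
VMA_BIND_MEMORY2 and enabled at runtime via
VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT); otherwise
VK_ERROR_EXTENSION_NOT_PRESENT is returned. Application-side, such a chain
arrives through the extended entry points; a sketch, assuming a Vulkan 1.1
device-group setup (the chained struct here is illustrative only):

    VkBindImageMemoryDeviceGroupInfo deviceGroupInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO };
    // ...fill device indices for the multi-GPU bind...
    vmaBindImageMemory2(allocator, alloc, 0 /*allocationLocalOffset*/, image, &deviceGroupInfo);
*/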
    15399 
    15400 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
    15401 {
    15402  if(hAllocation->CanBecomeLost())
    15403  {
    15404  return VK_ERROR_MEMORY_MAP_FAILED;
    15405  }
    15406 
    15407  switch(hAllocation->GetType())
    15408  {
    15409  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15410  {
    15411  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    15412  char *pBytes = VMA_NULL;
    15413  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
    15414  if(res == VK_SUCCESS)
    15415  {
    15416  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
    15417  hAllocation->BlockAllocMap();
    15418  }
    15419  return res;
    15420  }
    15421  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15422  return hAllocation->DedicatedAllocMap(this, ppData);
    15423  default:
    15424  VMA_ASSERT(0);
    15425  return VK_ERROR_MEMORY_MAP_FAILED;
    15426  }
    15427 }
    15428 
    15429 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
    15430 {
    15431  switch(hAllocation->GetType())
    15432  {
    15433  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15434  {
    15435  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    15436  hAllocation->BlockAllocUnmap();
    15437  pBlock->Unmap(this, 1);
    15438  }
    15439  break;
    15440  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15441  hAllocation->DedicatedAllocUnmap(this);
    15442  break;
    15443  default:
    15444  VMA_ASSERT(0);
    15445  }
    15446 }
    15447 
    15448 VkResult VmaAllocator_T::BindBufferMemory(
    15449  VmaAllocation hAllocation,
    15450  VkDeviceSize allocationLocalOffset,
    15451  VkBuffer hBuffer,
    15452  const void* pNext)
    15453 {
    15454  VkResult res = VK_SUCCESS;
    15455  switch(hAllocation->GetType())
    15456  {
    15457  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15458  res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext);
    15459  break;
    15460  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15461  {
    15462  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    15463  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
    15464  res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext);
    15465  break;
    15466  }
    15467  default:
    15468  VMA_ASSERT(0);
    15469  }
    15470  return res;
    15471 }
    15472 
    15473 VkResult VmaAllocator_T::BindImageMemory(
    15474  VmaAllocation hAllocation,
    15475  VkDeviceSize allocationLocalOffset,
    15476  VkImage hImage,
    15477  const void* pNext)
    15478 {
    15479  VkResult res = VK_SUCCESS;
    15480  switch(hAllocation->GetType())
    15481  {
    15482  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15483  res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext);
    15484  break;
    15485  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15486  {
    15487  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    15488  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
    15489  res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext);
    15490  break;
    15491  }
    15492  default:
    15493  VMA_ASSERT(0);
    15494  }
    15495  return res;
    15496 }
    15497 
    15498 void VmaAllocator_T::FlushOrInvalidateAllocation(
    15499  VmaAllocation hAllocation,
    15500  VkDeviceSize offset, VkDeviceSize size,
    15501  VMA_CACHE_OPERATION op)
    15502 {
    15503  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    15504  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    15505  {
    15506  const VkDeviceSize allocationSize = hAllocation->GetSize();
    15507  VMA_ASSERT(offset <= allocationSize);
    15508 
    15509  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
    15510 
    15511  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
    15512  memRange.memory = hAllocation->GetMemory();
    15513 
    15514  switch(hAllocation->GetType())
    15515  {
    15516  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15517  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
    15518  if(size == VK_WHOLE_SIZE)
    15519  {
    15520  memRange.size = allocationSize - memRange.offset;
    15521  }
    15522  else
    15523  {
    15524  VMA_ASSERT(offset + size <= allocationSize);
    15525  memRange.size = VMA_MIN(
    15526  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
    15527  allocationSize - memRange.offset);
    15528  }
    15529  break;
    15530 
    15531  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15532  {
    15533  // 1. Still within this allocation.
    15534  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
    15535  if(size == VK_WHOLE_SIZE)
    15536  {
    15537  size = allocationSize - offset;
    15538  }
    15539  else
    15540  {
    15541  VMA_ASSERT(offset + size <= allocationSize);
    15542  }
    15543  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
    15544 
    15545  // 2. Adjust to whole block.
    15546  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
    15547  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
    15548  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
    15549  memRange.offset += allocationOffset;
    15550  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
    15551 
    15552  break;
    15553  }
    15554 
    15555  default:
    15556  VMA_ASSERT(0);
    15557  }
    15558 
    15559  switch(op)
    15560  {
    15561  case VMA_CACHE_FLUSH:
    15562  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
    15563  break;
    15564  case VMA_CACHE_INVALIDATE:
    15565  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
    15566  break;
    15567  default:
    15568  VMA_ASSERT(0);
    15569  }
    15570  }
    15571  // else: Just ignore this call.
    15572 }
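/*
A worked example of the range rounding above, assuming
nonCoherentAtomSize == 64: flushing offset = 100, size = 20 of a dedicated
allocation of size 256 gives memRange.offset = VmaAlignDown(100, 64) = 64 and
memRange.size = VmaAlignUp(20 + (100 - 64), 64) = 64, i.e. exactly the one
64-byte atom covering bytes [64, 128), as vkFlushMappedMemoryRanges requires
for non-coherent memory. For a block suballocation, the same rounded range is
then shifted by the allocation's offset within the VkDeviceMemory block and
clamped to the block size.
*/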
    15573 
    15574 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
    15575 {
    15576  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
    15577 
    15578  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    15579  {
    15580  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    15581  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    15582  VMA_ASSERT(pDedicatedAllocations);
    15583  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
    15584  VMA_ASSERT(success);
    15585  }
    15586 
    15587  VkDeviceMemory hMemory = allocation->GetMemory();
    15588 
    15589  /*
 15590  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
    15591  before vkFreeMemory.
    15592 
    15593  if(allocation->GetMappedData() != VMA_NULL)
    15594  {
    15595  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    15596  }
    15597  */
    15598 
    15599  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
    15600 
    15601  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
    15602 }
    15603 
    15604 uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
    15605 {
    15606  VkBufferCreateInfo dummyBufCreateInfo;
    15607  VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
    15608 
    15609  uint32_t memoryTypeBits = 0;
    15610 
    15611  // Create buffer.
    15612  VkBuffer buf = VK_NULL_HANDLE;
    15613  VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
    15614  m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
    15615  if(res == VK_SUCCESS)
    15616  {
    15617  // Query for supported memory types.
    15618  VkMemoryRequirements memReq;
    15619  (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
    15620  memoryTypeBits = memReq.memoryTypeBits;
    15621 
    15622  // Destroy buffer.
    15623  (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
    15624  }
    15625 
    15626  return memoryTypeBits;
    15627 }
    15628 
    15629 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
    15630 {
    15631  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
    15632  !hAllocation->CanBecomeLost() &&
    15633  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    15634  {
    15635  void* pData = VMA_NULL;
    15636  VkResult res = Map(hAllocation, &pData);
    15637  if(res == VK_SUCCESS)
    15638  {
    15639  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
    15640  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
    15641  Unmap(hAllocation);
    15642  }
    15643  else
    15644  {
    15645  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
    15646  }
    15647  }
    15648 }
    15649 
    15650 uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
    15651 {
    15652  uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
    15653  if(memoryTypeBits == UINT32_MAX)
    15654  {
    15655  memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
    15656  m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
    15657  }
    15658  return memoryTypeBits;
    15659 }
    15660 
    15661 #if VMA_STATS_STRING_ENABLED
    15662 
    15663 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
    15664 {
    15665  bool dedicatedAllocationsStarted = false;
    15666  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    15667  {
    15668  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    15669  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    15670  VMA_ASSERT(pDedicatedAllocVector);
    15671  if(pDedicatedAllocVector->empty() == false)
    15672  {
    15673  if(dedicatedAllocationsStarted == false)
    15674  {
    15675  dedicatedAllocationsStarted = true;
    15676  json.WriteString("DedicatedAllocations");
    15677  json.BeginObject();
    15678  }
    15679 
    15680  json.BeginString("Type ");
    15681  json.ContinueString(memTypeIndex);
    15682  json.EndString();
    15683 
    15684  json.BeginArray();
    15685 
    15686  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
    15687  {
    15688  json.BeginObject(true);
    15689  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
    15690  hAlloc->PrintParameters(json);
    15691  json.EndObject();
    15692  }
    15693 
    15694  json.EndArray();
    15695  }
    15696  }
    15697  if(dedicatedAllocationsStarted)
    15698  {
    15699  json.EndObject();
    15700  }
    15701 
    15702  {
    15703  bool allocationsStarted = false;
    15704  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    15705  {
    15706  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
    15707  {
    15708  if(allocationsStarted == false)
    15709  {
    15710  allocationsStarted = true;
    15711  json.WriteString("DefaultPools");
    15712  json.BeginObject();
    15713  }
    15714 
    15715  json.BeginString("Type ");
    15716  json.ContinueString(memTypeIndex);
    15717  json.EndString();
    15718 
    15719  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
    15720  }
    15721  }
    15722  if(allocationsStarted)
    15723  {
    15724  json.EndObject();
    15725  }
    15726  }
    15727 
    15728  // Custom pools
    15729  {
    15730  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
    15731  const size_t poolCount = m_Pools.size();
    15732  if(poolCount > 0)
    15733  {
    15734  json.WriteString("Pools");
    15735  json.BeginObject();
    15736  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
    15737  {
    15738  json.BeginString();
    15739  json.ContinueString(m_Pools[poolIndex]->GetId());
    15740  json.EndString();
    15741 
    15742  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
    15743  }
    15744  json.EndObject();
    15745  }
    15746  }
    15747 }
    15748 
    15749 #endif // #if VMA_STATS_STRING_ENABLED
    15750 
 15751 ////////////////////////////////////////////////////////////////////////////////
 15752 // Public interface
    15753 
    15754 VkResult vmaCreateAllocator(
    15755  const VmaAllocatorCreateInfo* pCreateInfo,
    15756  VmaAllocator* pAllocator)
    15757 {
    15758  VMA_ASSERT(pCreateInfo && pAllocator);
    15759  VMA_DEBUG_LOG("vmaCreateAllocator");
    15760  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    15761  return (*pAllocator)->Init(pCreateInfo);
    15762 }
    15763 
    15764 void vmaDestroyAllocator(
    15765  VmaAllocator allocator)
    15766 {
    15767  if(allocator != VK_NULL_HANDLE)
    15768  {
    15769  VMA_DEBUG_LOG("vmaDestroyAllocator");
    15770  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
    15771  vma_delete(&allocationCallbacks, allocator);
    15772  }
    15773 }
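/*
A minimal allocator lifecycle sketch for the two entry points above, assuming
`physicalDevice` and `device` were created by the application beforehand:

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ...create and use buffers/images through the allocator...
    vmaDestroyAllocator(allocator); // all allocations must have been freed by now
*/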
    15774 
 15775 void vmaGetPhysicalDeviceProperties(
 15776  VmaAllocator allocator,
    15777  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    15778 {
    15779  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    15780  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    15781 }
    15782 
 15783 void vmaGetMemoryProperties(
 15784  VmaAllocator allocator,
    15785  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    15786 {
    15787  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    15788  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    15789 }
    15790 
 15791 void vmaGetMemoryTypeProperties(
 15792  VmaAllocator allocator,
    15793  uint32_t memoryTypeIndex,
    15794  VkMemoryPropertyFlags* pFlags)
    15795 {
    15796  VMA_ASSERT(allocator && pFlags);
    15797  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    15798  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    15799 }
    15800 
 15801 void vmaSetCurrentFrameIndex(
 15802  VmaAllocator allocator,
    15803  uint32_t frameIndex)
    15804 {
    15805  VMA_ASSERT(allocator);
    15806  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    15807 
    15808  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15809 
    15810  allocator->SetCurrentFrameIndex(frameIndex);
    15811 }
    15812 
    15813 void vmaCalculateStats(
    15814  VmaAllocator allocator,
    15815  VmaStats* pStats)
    15816 {
    15817  VMA_ASSERT(allocator && pStats);
    15818  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15819  allocator->CalculateStats(pStats);
    15820 }
    15821 
    15822 #if VMA_STATS_STRING_ENABLED
    15823 
    15824 void vmaBuildStatsString(
    15825  VmaAllocator allocator,
    15826  char** ppStatsString,
    15827  VkBool32 detailedMap)
    15828 {
    15829  VMA_ASSERT(allocator && ppStatsString);
    15830  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15831 
    15832  VmaStringBuilder sb(allocator);
    15833  {
    15834  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
    15835  json.BeginObject();
    15836 
    15837  VmaStats stats;
    15838  allocator->CalculateStats(&stats);
    15839 
    15840  json.WriteString("Total");
    15841  VmaPrintStatInfo(json, stats.total);
    15842 
    15843  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
    15844  {
    15845  json.BeginString("Heap ");
    15846  json.ContinueString(heapIndex);
    15847  json.EndString();
    15848  json.BeginObject();
    15849 
    15850  json.WriteString("Size");
    15851  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
    15852 
    15853  json.WriteString("Flags");
    15854  json.BeginArray(true);
    15855  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
    15856  {
    15857  json.WriteString("DEVICE_LOCAL");
    15858  }
    15859  json.EndArray();
    15860 
    15861  if(stats.memoryHeap[heapIndex].blockCount > 0)
    15862  {
    15863  json.WriteString("Stats");
    15864  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
    15865  }
    15866 
    15867  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
    15868  {
    15869  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
    15870  {
    15871  json.BeginString("Type ");
    15872  json.ContinueString(typeIndex);
    15873  json.EndString();
    15874 
    15875  json.BeginObject();
    15876 
    15877  json.WriteString("Flags");
    15878  json.BeginArray(true);
    15879  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
    15880  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
    15881  {
    15882  json.WriteString("DEVICE_LOCAL");
    15883  }
    15884  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    15885  {
    15886  json.WriteString("HOST_VISIBLE");
    15887  }
    15888  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
    15889  {
    15890  json.WriteString("HOST_COHERENT");
    15891  }
    15892  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
    15893  {
    15894  json.WriteString("HOST_CACHED");
    15895  }
    15896  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
    15897  {
    15898  json.WriteString("LAZILY_ALLOCATED");
    15899  }
    15900  json.EndArray();
    15901 
    15902  if(stats.memoryType[typeIndex].blockCount > 0)
    15903  {
    15904  json.WriteString("Stats");
    15905  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
    15906  }
    15907 
    15908  json.EndObject();
    15909  }
    15910  }
    15911 
    15912  json.EndObject();
    15913  }
    15914  if(detailedMap == VK_TRUE)
    15915  {
    15916  allocator->PrintDetailedMap(json);
    15917  }
    15918 
    15919  json.EndObject();
    15920  }
    15921 
    15922  const size_t len = sb.GetLength();
    15923  char* const pChars = vma_new_array(allocator, char, len + 1);
    15924  if(len > 0)
    15925  {
    15926  memcpy(pChars, sb.GetData(), len);
    15927  }
    15928  pChars[len] = '\0';
    15929  *ppStatsString = pChars;
    15930 }
    15931 
    15932 void vmaFreeStatsString(
    15933  VmaAllocator allocator,
    15934  char* pStatsString)
    15935 {
    15936  if(pStatsString != VMA_NULL)
    15937  {
    15938  VMA_ASSERT(allocator);
    15939  size_t len = strlen(pStatsString);
    15940  vma_delete_array(allocator, pStatsString, len + 1);
    15941  }
    15942 }
    15943 
    15944 #endif // #if VMA_STATS_STRING_ENABLED
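/*
Usage sketch for the pair above: the returned string is JSON built from
CalculateStats() (plus PrintDetailedMap() when detailedMap is VK_TRUE), and it
must be released through vmaFreeStatsString() because it was carved from the
allocator's own CPU allocation callbacks:

    char* statsJson = VMA_NULL;
    vmaBuildStatsString(allocator, &statsJson, VK_TRUE);
    // ...log statsJson or dump it to a file for offline inspection...
    vmaFreeStatsString(allocator, statsJson);
*/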
    15945 
    15946 /*
    15947 This function is not protected by any mutex because it just reads immutable data.
    15948 */
    15949 VkResult vmaFindMemoryTypeIndex(
    15950  VmaAllocator allocator,
    15951  uint32_t memoryTypeBits,
    15952  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    15953  uint32_t* pMemoryTypeIndex)
    15954 {
    15955  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    15956  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    15957  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    15958 
    15959  if(pAllocationCreateInfo->memoryTypeBits != 0)
    15960  {
    15961  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
    15962  }
    15963 
    15964  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    15965  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    15966 
    15967  // Convert usage to requiredFlags and preferredFlags.
    15968  switch(pAllocationCreateInfo->usage)
    15969  {
 15970  case VMA_MEMORY_USAGE_UNKNOWN:
 15971  break;
 15972  case VMA_MEMORY_USAGE_GPU_ONLY:
 15973  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
 15974  {
 15975  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
 15976  }
 15977  break;
 15978  case VMA_MEMORY_USAGE_CPU_ONLY:
 15979  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
 15980  break;
 15981  case VMA_MEMORY_USAGE_CPU_TO_GPU:
 15982  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
 15983  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
 15984  {
 15985  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
 15986  }
 15987  break;
 15988  case VMA_MEMORY_USAGE_GPU_TO_CPU:
 15989  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
 15990  preferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
 15991  break;
    15992  default:
    15993  break;
    15994  }
    15995 
    15996  *pMemoryTypeIndex = UINT32_MAX;
    15997  uint32_t minCost = UINT32_MAX;
    15998  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    15999  memTypeIndex < allocator->GetMemoryTypeCount();
    16000  ++memTypeIndex, memTypeBit <<= 1)
    16001  {
    16002  // This memory type is acceptable according to memoryTypeBits bitmask.
    16003  if((memTypeBit & memoryTypeBits) != 0)
    16004  {
    16005  const VkMemoryPropertyFlags currFlags =
    16006  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    16007  // This memory type contains requiredFlags.
    16008  if((requiredFlags & ~currFlags) == 0)
    16009  {
    16010  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    16011  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
    16012  // Remember memory type with lowest cost.
    16013  if(currCost < minCost)
    16014  {
    16015  *pMemoryTypeIndex = memTypeIndex;
    16016  if(currCost == 0)
    16017  {
    16018  return VK_SUCCESS;
    16019  }
    16020  minCost = currCost;
    16021  }
    16022  }
    16023  }
    16024  }
    16025  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    16026 }
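/*
The cost model above counts how many preferredFlags bits a candidate type is
missing, so a type satisfying every required and preferred flag wins
immediately with cost 0. A usage sketch for picking a staging-buffer memory
type, assuming only that `allocator` exists:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY; // maps to required HOST_VISIBLE | HOST_COHERENT

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex);
    // UINT32_MAX places no restriction from a concrete buffer/image; a real
    // resource would pass its VkMemoryRequirements::memoryTypeBits instead.
*/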
    16027 
 16028 VkResult vmaFindMemoryTypeIndexForBufferInfo(
 16029  VmaAllocator allocator,
    16030  const VkBufferCreateInfo* pBufferCreateInfo,
    16031  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    16032  uint32_t* pMemoryTypeIndex)
    16033 {
    16034  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    16035  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    16036  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    16037  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    16038 
    16039  const VkDevice hDev = allocator->m_hDevice;
    16040  VkBuffer hBuffer = VK_NULL_HANDLE;
    16041  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
    16042  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
    16043  if(res == VK_SUCCESS)
    16044  {
    16045  VkMemoryRequirements memReq = {};
    16046  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
    16047  hDev, hBuffer, &memReq);
    16048 
    16049  res = vmaFindMemoryTypeIndex(
    16050  allocator,
    16051  memReq.memoryTypeBits,
    16052  pAllocationCreateInfo,
    16053  pMemoryTypeIndex);
    16054 
    16055  allocator->GetVulkanFunctions().vkDestroyBuffer(
    16056  hDev, hBuffer, allocator->GetAllocationCallbacks());
    16057  }
    16058  return res;
    16059 }
    16060 
 16061 VkResult vmaFindMemoryTypeIndexForImageInfo(
 16062  VmaAllocator allocator,
    16063  const VkImageCreateInfo* pImageCreateInfo,
    16064  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    16065  uint32_t* pMemoryTypeIndex)
    16066 {
    16067  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    16068  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    16069  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    16070  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    16071 
    16072  const VkDevice hDev = allocator->m_hDevice;
    16073  VkImage hImage = VK_NULL_HANDLE;
    16074  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
    16075  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
    16076  if(res == VK_SUCCESS)
    16077  {
    16078  VkMemoryRequirements memReq = {};
    16079  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
    16080  hDev, hImage, &memReq);
    16081 
    16082  res = vmaFindMemoryTypeIndex(
    16083  allocator,
    16084  memReq.memoryTypeBits,
    16085  pAllocationCreateInfo,
    16086  pMemoryTypeIndex);
    16087 
    16088  allocator->GetVulkanFunctions().vkDestroyImage(
    16089  hDev, hImage, allocator->GetAllocationCallbacks());
    16090  }
    16091  return res;
    16092 }
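/*
The two *Info variants above answer "which memory type would this resource
get" by creating a throwaway VkBuffer/VkImage, reading its memory
requirements, and destroying it again. A sketch for the buffer flavor:

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    uint32_t memTypeIndex;
    vmaFindMemoryTypeIndexForBufferInfo(allocator, &bufCreateInfo, &allocCreateInfo, &memTypeIndex);
    // memTypeIndex can now seed e.g. VmaPoolCreateInfo::memoryTypeIndex.
*/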
    16093 
    16094 VkResult vmaCreatePool(
    16095  VmaAllocator allocator,
    16096  const VmaPoolCreateInfo* pCreateInfo,
    16097  VmaPool* pPool)
    16098 {
    16099  VMA_ASSERT(allocator && pCreateInfo && pPool);
    16100 
    16101  VMA_DEBUG_LOG("vmaCreatePool");
    16102 
    16103  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16104 
    16105  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
    16106 
    16107 #if VMA_RECORDING_ENABLED
    16108  if(allocator->GetRecorder() != VMA_NULL)
    16109  {
    16110  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
    16111  }
    16112 #endif
    16113 
    16114  return res;
    16115 }
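/*
A custom pool sketch continuing from a memTypeIndex found as above (only the
fields shown are set; the rest keep their zero defaults):

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 64ull * 1024 * 1024; // fixed 64 MiB blocks
    poolCreateInfo.maxBlockCount = 2;               // cap the pool at 128 MiB

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // ...allocate with VmaAllocationCreateInfo::pool = pool...
    vmaDestroyPool(allocator, pool);
*/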
    16116 
    16117 void vmaDestroyPool(
    16118  VmaAllocator allocator,
    16119  VmaPool pool)
    16120 {
    16121  VMA_ASSERT(allocator);
    16122 
    16123  if(pool == VK_NULL_HANDLE)
    16124  {
    16125  return;
    16126  }
    16127 
    16128  VMA_DEBUG_LOG("vmaDestroyPool");
    16129 
    16130  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16131 
    16132 #if VMA_RECORDING_ENABLED
    16133  if(allocator->GetRecorder() != VMA_NULL)
    16134  {
    16135  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
    16136  }
    16137 #endif
    16138 
    16139  allocator->DestroyPool(pool);
    16140 }
    16141 
    16142 void vmaGetPoolStats(
    16143  VmaAllocator allocator,
    16144  VmaPool pool,
    16145  VmaPoolStats* pPoolStats)
    16146 {
    16147  VMA_ASSERT(allocator && pool && pPoolStats);
    16148 
    16149  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16150 
    16151  allocator->GetPoolStats(pool, pPoolStats);
    16152 }
    16153 
 16154 void vmaMakePoolAllocationsLost(
 16155  VmaAllocator allocator,
    16156  VmaPool pool,
    16157  size_t* pLostAllocationCount)
    16158 {
    16159  VMA_ASSERT(allocator && pool);
    16160 
    16161  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16162 
    16163 #if VMA_RECORDING_ENABLED
    16164  if(allocator->GetRecorder() != VMA_NULL)
    16165  {
    16166  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
    16167  }
    16168 #endif
    16169 
    16170  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    16171 }
    16172 
    16173 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
    16174 {
    16175  VMA_ASSERT(allocator && pool);
    16176 
    16177  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16178 
    16179  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
    16180 
    16181  return allocator->CheckPoolCorruption(pool);
    16182 }
    16183 
    16184 VkResult vmaAllocateMemory(
    16185  VmaAllocator allocator,
    16186  const VkMemoryRequirements* pVkMemoryRequirements,
    16187  const VmaAllocationCreateInfo* pCreateInfo,
    16188  VmaAllocation* pAllocation,
    16189  VmaAllocationInfo* pAllocationInfo)
    16190 {
    16191  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
    16192 
    16193  VMA_DEBUG_LOG("vmaAllocateMemory");
    16194 
    16195  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16196 
    16197  VkResult result = allocator->AllocateMemory(
    16198  *pVkMemoryRequirements,
    16199  false, // requiresDedicatedAllocation
    16200  false, // prefersDedicatedAllocation
    16201  VK_NULL_HANDLE, // dedicatedBuffer
    16202  VK_NULL_HANDLE, // dedicatedImage
    16203  *pCreateInfo,
    16204  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    16205  1, // allocationCount
    16206  pAllocation);
    16207 
    16208 #if VMA_RECORDING_ENABLED
    16209  if(allocator->GetRecorder() != VMA_NULL)
    16210  {
    16211  allocator->GetRecorder()->RecordAllocateMemory(
    16212  allocator->GetCurrentFrameIndex(),
    16213  *pVkMemoryRequirements,
    16214  *pCreateInfo,
    16215  *pAllocation);
    16216  }
    16217 #endif
    16218 
    16219  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    16220  {
    16221  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16222  }
    16223 
    16224  return result;
    16225 }
    16226 
    16227 VkResult vmaAllocateMemoryPages(
    16228  VmaAllocator allocator,
    16229  const VkMemoryRequirements* pVkMemoryRequirements,
    16230  const VmaAllocationCreateInfo* pCreateInfo,
    16231  size_t allocationCount,
    16232  VmaAllocation* pAllocations,
    16233  VmaAllocationInfo* pAllocationInfo)
    16234 {
    16235  if(allocationCount == 0)
    16236  {
    16237  return VK_SUCCESS;
    16238  }
    16239 
    16240  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
    16241 
    16242  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
    16243 
    16244  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16245 
    16246  VkResult result = allocator->AllocateMemory(
    16247  *pVkMemoryRequirements,
    16248  false, // requiresDedicatedAllocation
    16249  false, // prefersDedicatedAllocation
    16250  VK_NULL_HANDLE, // dedicatedBuffer
    16251  VK_NULL_HANDLE, // dedicatedImage
    16252  *pCreateInfo,
    16253  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    16254  allocationCount,
    16255  pAllocations);
    16256 
    16257 #if VMA_RECORDING_ENABLED
    16258  if(allocator->GetRecorder() != VMA_NULL)
    16259  {
    16260  allocator->GetRecorder()->RecordAllocateMemoryPages(
    16261  allocator->GetCurrentFrameIndex(),
    16262  *pVkMemoryRequirements,
    16263  *pCreateInfo,
    16264  (uint64_t)allocationCount,
    16265  pAllocations);
    16266  }
    16267 #endif
    16268 
    16269  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    16270  {
    16271  for(size_t i = 0; i < allocationCount; ++i)
    16272  {
    16273  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
    16274  }
    16275  }
    16276 
    16277  return result;
    16278 }
    16279 
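    // A minimal usage sketch of vmaAllocateMemoryPages() above (illustrative only;
    // assumes a valid VmaAllocator `alloc` and a VkMemoryRequirements `memReq`
    // describing one sparse page):
    //
    //     VmaAllocationCreateInfo createInfo = {};
    //     createInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    //     VmaAllocation pages[8] = {};
    //     VkResult res = vmaAllocateMemoryPages(alloc, &memReq, &createInfo, 8, pages, VMA_NULL);
    //     // ... bind the pages via vkQueueBindSparse(), use them ...
    //     vmaFreeMemoryPages(alloc, 8, pages);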
    16280 VkResult vmaAllocateMemoryForBuffer(
    16281  VmaAllocator allocator,
    16282  VkBuffer buffer,
    16283  const VmaAllocationCreateInfo* pCreateInfo,
    16284  VmaAllocation* pAllocation,
    16285  VmaAllocationInfo* pAllocationInfo)
    16286 {
    16287  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    16288 
    16289  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    16290 
    16291  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16292 
    16293  VkMemoryRequirements vkMemReq = {};
    16294  bool requiresDedicatedAllocation = false;
    16295  bool prefersDedicatedAllocation = false;
    16296  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
    16297  requiresDedicatedAllocation,
    16298  prefersDedicatedAllocation);
    16299 
    16300  VkResult result = allocator->AllocateMemory(
    16301  vkMemReq,
    16302  requiresDedicatedAllocation,
    16303  prefersDedicatedAllocation,
    16304  buffer, // dedicatedBuffer
    16305  VK_NULL_HANDLE, // dedicatedImage
    16306  *pCreateInfo,
    16307  VMA_SUBALLOCATION_TYPE_BUFFER,
    16308  1, // allocationCount
    16309  pAllocation);
    16310 
    16311 #if VMA_RECORDING_ENABLED
    16312  if(allocator->GetRecorder() != VMA_NULL)
    16313  {
    16314  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
    16315  allocator->GetCurrentFrameIndex(),
    16316  vkMemReq,
    16317  requiresDedicatedAllocation,
    16318  prefersDedicatedAllocation,
    16319  *pCreateInfo,
    16320  *pAllocation);
    16321  }
    16322 #endif
    16323 
    16324  if(pAllocationInfo && result == VK_SUCCESS)
    16325  {
    16326  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16327  }
    16328 
    16329  return result;
    16330 }
    16331 
    16332 VkResult vmaAllocateMemoryForImage(
    16333  VmaAllocator allocator,
    16334  VkImage image,
    16335  const VmaAllocationCreateInfo* pCreateInfo,
    16336  VmaAllocation* pAllocation,
    16337  VmaAllocationInfo* pAllocationInfo)
    16338 {
    16339  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    16340 
    16341  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
    16342 
    16343  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16344 
    16345  VkMemoryRequirements vkMemReq = {};
    16346  bool requiresDedicatedAllocation = false;
    16347  bool prefersDedicatedAllocation = false;
    16348  allocator->GetImageMemoryRequirements(image, vkMemReq,
    16349  requiresDedicatedAllocation, prefersDedicatedAllocation);
    16350 
    16351  VkResult result = allocator->AllocateMemory(
    16352  vkMemReq,
    16353  requiresDedicatedAllocation,
    16354  prefersDedicatedAllocation,
    16355  VK_NULL_HANDLE, // dedicatedBuffer
    16356  image, // dedicatedImage
    16357  *pCreateInfo,
    16358  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
    16359  1, // allocationCount
    16360  pAllocation);
    16361 
    16362 #if VMA_RECORDING_ENABLED
    16363  if(allocator->GetRecorder() != VMA_NULL)
    16364  {
    16365  allocator->GetRecorder()->RecordAllocateMemoryForImage(
    16366  allocator->GetCurrentFrameIndex(),
    16367  vkMemReq,
    16368  requiresDedicatedAllocation,
    16369  prefersDedicatedAllocation,
    16370  *pCreateInfo,
    16371  *pAllocation);
    16372  }
    16373 #endif
    16374 
    16375  if(pAllocationInfo && result == VK_SUCCESS)
    16376  {
    16377  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16378  }
    16379 
    16380  return result;
    16381 }
    16382 
    16383 void vmaFreeMemory(
    16384  VmaAllocator allocator,
    16385  VmaAllocation allocation)
    16386 {
    16387  VMA_ASSERT(allocator);
    16388 
    16389  if(allocation == VK_NULL_HANDLE)
    16390  {
    16391  return;
    16392  }
    16393 
    16394  VMA_DEBUG_LOG("vmaFreeMemory");
    16395 
    16396  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16397 
    16398 #if VMA_RECORDING_ENABLED
    16399  if(allocator->GetRecorder() != VMA_NULL)
    16400  {
    16401  allocator->GetRecorder()->RecordFreeMemory(
    16402  allocator->GetCurrentFrameIndex(),
    16403  allocation);
    16404  }
    16405 #endif
    16406 
    16407  allocator->FreeMemory(
    16408  1, // allocationCount
    16409  &allocation);
    16410 }
    16411 
    16412 void vmaFreeMemoryPages(
    16413  VmaAllocator allocator,
    16414  size_t allocationCount,
    16415  VmaAllocation* pAllocations)
    16416 {
    16417  if(allocationCount == 0)
    16418  {
    16419  return;
    16420  }
    16421 
    16422  VMA_ASSERT(allocator);
    16423 
    16424  VMA_DEBUG_LOG("vmaFreeMemoryPages");
    16425 
    16426  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16427 
    16428 #if VMA_RECORDING_ENABLED
    16429  if(allocator->GetRecorder() != VMA_NULL)
    16430  {
    16431  allocator->GetRecorder()->RecordFreeMemoryPages(
    16432  allocator->GetCurrentFrameIndex(),
    16433  (uint64_t)allocationCount,
    16434  pAllocations);
    16435  }
    16436 #endif
    16437 
    16438  allocator->FreeMemory(allocationCount, pAllocations);
    16439 }
    16440 
    16441 VkResult vmaResizeAllocation(
    16442  VmaAllocator allocator,
    16443  VmaAllocation allocation,
    16444  VkDeviceSize newSize)
    16445 {
    16446  VMA_ASSERT(allocator && allocation);
    16447 
    16448  VMA_DEBUG_LOG("vmaResizeAllocation");
    16449 
    16450  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16451 
    16452  return allocator->ResizeAllocation(allocation, newSize);
    16453 }
    16454 
    16455 void vmaGetAllocationInfo(
    16456  VmaAllocator allocator,
    16457  VmaAllocation allocation,
    16458  VmaAllocationInfo* pAllocationInfo)
    16459 {
    16460  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    16461 
    16462  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16463 
    16464 #if VMA_RECORDING_ENABLED
    16465  if(allocator->GetRecorder() != VMA_NULL)
    16466  {
    16467  allocator->GetRecorder()->RecordGetAllocationInfo(
    16468  allocator->GetCurrentFrameIndex(),
    16469  allocation);
    16470  }
    16471 #endif
    16472 
    16473  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    16474 }
    16475 
    16476 VkBool32 vmaTouchAllocation(
    16477  VmaAllocator allocator,
    16478  VmaAllocation allocation)
    16479 {
    16480  VMA_ASSERT(allocator && allocation);
    16481 
    16482  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16483 
    16484 #if VMA_RECORDING_ENABLED
    16485  if(allocator->GetRecorder() != VMA_NULL)
    16486  {
    16487  allocator->GetRecorder()->RecordTouchAllocation(
    16488  allocator->GetCurrentFrameIndex(),
    16489  allocation);
    16490  }
    16491 #endif
    16492 
    16493  return allocator->TouchAllocation(allocation);
    16494 }
    16495 
    16496 void vmaSetAllocationUserData(
    16497  VmaAllocator allocator,
    16498  VmaAllocation allocation,
    16499  void* pUserData)
    16500 {
    16501  VMA_ASSERT(allocator && allocation);
    16502 
    16503  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16504 
    16505  allocation->SetUserData(allocator, pUserData);
    16506 
    16507 #if VMA_RECORDING_ENABLED
    16508  if(allocator->GetRecorder() != VMA_NULL)
    16509  {
    16510  allocator->GetRecorder()->RecordSetAllocationUserData(
    16511  allocator->GetCurrentFrameIndex(),
    16512  allocation,
    16513  pUserData);
    16514  }
    16515 #endif
    16516 }
    16517 
    16518 void vmaCreateLostAllocation(
    16519  VmaAllocator allocator,
    16520  VmaAllocation* pAllocation)
    16521 {
    16522  VMA_ASSERT(allocator && pAllocation);
    16523 
    16524  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16525 
    16526  allocator->CreateLostAllocation(pAllocation);
    16527 
    16528 #if VMA_RECORDING_ENABLED
    16529  if(allocator->GetRecorder() != VMA_NULL)
    16530  {
    16531  allocator->GetRecorder()->RecordCreateLostAllocation(
    16532  allocator->GetCurrentFrameIndex(),
    16533  *pAllocation);
    16534  }
    16535 #endif
    16536 }
    16537 
    16538 VkResult vmaMapMemory(
    16539  VmaAllocator allocator,
    16540  VmaAllocation allocation,
    16541  void** ppData)
    16542 {
    16543  VMA_ASSERT(allocator && allocation && ppData);
    16544 
    16545  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16546 
    16547  VkResult res = allocator->Map(allocation, ppData);
    16548 
    16549 #if VMA_RECORDING_ENABLED
    16550  if(allocator->GetRecorder() != VMA_NULL)
    16551  {
    16552  allocator->GetRecorder()->RecordMapMemory(
    16553  allocator->GetCurrentFrameIndex(),
    16554  allocation);
    16555  }
    16556 #endif
    16557 
    16558  return res;
    16559 }
    16560 
    16561 void vmaUnmapMemory(
    16562  VmaAllocator allocator,
    16563  VmaAllocation allocation)
    16564 {
    16565  VMA_ASSERT(allocator && allocation);
    16566 
    16567  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16568 
    16569 #if VMA_RECORDING_ENABLED
    16570  if(allocator->GetRecorder() != VMA_NULL)
    16571  {
    16572  allocator->GetRecorder()->RecordUnmapMemory(
    16573  allocator->GetCurrentFrameIndex(),
    16574  allocation);
    16575  }
    16576 #endif
    16577 
    16578  allocator->Unmap(allocation);
    16579 }
    16580 
    16581 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    16582 {
    16583  VMA_ASSERT(allocator && allocation);
    16584 
    16585  VMA_DEBUG_LOG("vmaFlushAllocation");
    16586 
    16587  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16588 
    16589  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
    16590 
    16591 #if VMA_RECORDING_ENABLED
    16592  if(allocator->GetRecorder() != VMA_NULL)
    16593  {
    16594  allocator->GetRecorder()->RecordFlushAllocation(
    16595  allocator->GetCurrentFrameIndex(),
    16596  allocation, offset, size);
    16597  }
    16598 #endif
    16599 }
    16600 
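    // A minimal sketch of the map/write/flush sequence (illustrative only; assumes
    // `alloc`, an `allocation` in HOST_VISIBLE memory, and `srcData`/`srcSize`):
    //
    //     void* pData = VMA_NULL;
    //     if(vmaMapMemory(alloc, allocation, &pData) == VK_SUCCESS)
    //     {
    //         memcpy(pData, srcData, srcSize);
    //         vmaUnmapMemory(alloc, allocation);
    //         // The flush is skipped internally on HOST_COHERENT memory types:
    //         vmaFlushAllocation(alloc, allocation, 0, srcSize);
    //     }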
    16601 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    16602 {
    16603  VMA_ASSERT(allocator && allocation);
    16604 
    16605  VMA_DEBUG_LOG("vmaInvalidateAllocation");
    16606 
    16607  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16608 
    16609  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
    16610 
    16611 #if VMA_RECORDING_ENABLED
    16612  if(allocator->GetRecorder() != VMA_NULL)
    16613  {
    16614  allocator->GetRecorder()->RecordInvalidateAllocation(
    16615  allocator->GetCurrentFrameIndex(),
    16616  allocation, offset, size);
    16617  }
    16618 #endif
    16619 }
    16620 
    16621 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
    16622 {
    16623  VMA_ASSERT(allocator);
    16624 
    16625  VMA_DEBUG_LOG("vmaCheckCorruption");
    16626 
    16627  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16628 
    16629  return allocator->CheckCorruption(memoryTypeBits);
    16630 }
    16631 
    16632 VkResult vmaDefragment(
    16633  VmaAllocator allocator,
    16634  VmaAllocation* pAllocations,
    16635  size_t allocationCount,
    16636  VkBool32* pAllocationsChanged,
    16637  const VmaDefragmentationInfo *pDefragmentationInfo,
    16638  VmaDefragmentationStats* pDefragmentationStats)
    16639 {
    16640  // Deprecated interface, reimplemented using the new one.
    16641 
    16642  VmaDefragmentationInfo2 info2 = {};
    16643  info2.allocationCount = (uint32_t)allocationCount;
    16644  info2.pAllocations = pAllocations;
    16645  info2.pAllocationsChanged = pAllocationsChanged;
    16646  if(pDefragmentationInfo != VMA_NULL)
    16647  {
    16648  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    16649  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
    16650  }
    16651  else
    16652  {
    16653  info2.maxCpuAllocationsToMove = UINT32_MAX;
    16654  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
    16655  }
    16656  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
    16657 
    16658  VmaDefragmentationContext ctx = VK_NULL_HANDLE;
    16659  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
    16660  if(res == VK_NOT_READY)
    16661  {
    16662  res = vmaDefragmentationEnd(allocator, ctx);
    16663  }
    16664  return res;
    16665 }
    16666 
    16667 VkResult vmaDefragmentationBegin(
    16668  VmaAllocator allocator,
    16669  const VmaDefragmentationInfo2* pInfo,
    16670  VmaDefragmentationStats* pStats,
    16671  VmaDefragmentationContext *pContext)
    16672 {
    16673  VMA_ASSERT(allocator && pInfo && pContext);
    16674 
    16675  // Degenerate case: Nothing to defragment.
    16676  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
    16677  {
    16678  return VK_SUCCESS;
    16679  }
    16680 
    16681  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
    16682  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
    16683  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
    16684  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
    16685 
    16686  VMA_DEBUG_LOG("vmaDefragmentationBegin");
    16687 
    16688  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16689 
    16690  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
    16691 
    16692 #if VMA_RECORDING_ENABLED
    16693  if(allocator->GetRecorder() != VMA_NULL)
    16694  {
    16695  allocator->GetRecorder()->RecordDefragmentationBegin(
    16696  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
    16697  }
    16698 #endif
    16699 
    16700  return res;
    16701 }
    16702 
    16703 VkResult vmaDefragmentationEnd(
    16704  VmaAllocator allocator,
    16705  VmaDefragmentationContext context)
    16706 {
    16707  VMA_ASSERT(allocator);
    16708 
    16709  VMA_DEBUG_LOG("vmaDefragmentationEnd");
    16710 
    16711  if(context != VK_NULL_HANDLE)
    16712  {
    16713  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16714 
    16715 #if VMA_RECORDING_ENABLED
    16716  if(allocator->GetRecorder() != VMA_NULL)
    16717  {
    16718  allocator->GetRecorder()->RecordDefragmentationEnd(
    16719  allocator->GetCurrentFrameIndex(), context);
    16720  }
    16721 #endif
    16722 
    16723  return allocator->DefragmentationEnd(context);
    16724  }
    16725  else
    16726  {
    16727  return VK_SUCCESS;
    16728  }
    16729 }
    16730 
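    // A minimal sketch of the begin/end pair above, mirroring the CPU-only path of the
    // deprecated vmaDefragment() wrapper (illustrative only; assumes `alloc` plus
    // `allocs`/`count` describing existing allocations):
    //
    //     VmaDefragmentationInfo2 info = {};
    //     info.allocationCount = count;
    //     info.pAllocations = allocs;
    //     info.maxCpuAllocationsToMove = UINT32_MAX;
    //     info.maxCpuBytesToMove = VK_WHOLE_SIZE;
    //     VmaDefragmentationContext ctx = VK_NULL_HANDLE;
    //     VmaDefragmentationStats stats = {};
    //     VkResult res = vmaDefragmentationBegin(alloc, &info, &stats, &ctx);
    //     if(res == VK_NOT_READY)
    //         res = vmaDefragmentationEnd(alloc, ctx);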
    16731 VkResult vmaBindBufferMemory(
    16732  VmaAllocator allocator,
    16733  VmaAllocation allocation,
    16734  VkBuffer buffer)
    16735 {
    16736  VMA_ASSERT(allocator && allocation && buffer);
    16737 
    16738  VMA_DEBUG_LOG("vmaBindBufferMemory");
    16739 
    16740  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16741 
    16742  return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL);
    16743 }
    16744 
    16745 VkResult vmaBindBufferMemory2(
    16746  VmaAllocator allocator,
    16747  VmaAllocation allocation,
    16748  VkDeviceSize allocationLocalOffset,
    16749  VkBuffer buffer,
    16750  const void* pNext)
    16751 {
    16752  VMA_ASSERT(allocator && allocation && buffer);
    16753 
    16754  VMA_DEBUG_LOG("vmaBindBufferMemory2");
    16755 
    16756  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16757 
    16758  return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext);
    16759 }
    16760 
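    // A minimal sketch of vmaBindBufferMemory2() above (illustrative only; assumes
    // valid `alloc`, `allocation`, and `buffer`). With offset 0 and null pNext it is
    // equivalent to vmaBindBufferMemory(); a non-null pNext chain requires the
    // allocator to be created with VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT.
    //
    //     // Bind at byte offset 256 within the allocation (offset must fit and
    //     // respect the buffer's alignment requirement):
    //     VkResult res = vmaBindBufferMemory2(alloc, allocation, 256, buffer, VMA_NULL);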
    16761 VkResult vmaBindImageMemory(
    16762  VmaAllocator allocator,
    16763  VmaAllocation allocation,
    16764  VkImage image)
    16765 {
    16766  VMA_ASSERT(allocator && allocation && image);
    16767 
    16768  VMA_DEBUG_LOG("vmaBindImageMemory");
    16769 
    16770  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16771 
    16772  return allocator->BindImageMemory(allocation, 0, image, VMA_NULL);
    16773 }
    16774 
    16775 VkResult vmaBindImageMemory2(
    16776  VmaAllocator allocator,
    16777  VmaAllocation allocation,
    16778  VkDeviceSize allocationLocalOffset,
    16779  VkImage image,
    16780  const void* pNext)
    16781 {
    16782  VMA_ASSERT(allocator && allocation && image);
    16783 
    16784  VMA_DEBUG_LOG("vmaBindImageMemory2");
    16785 
    16786  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16787 
    16788  return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext);
    16789 }
    16790 
    16791 VkResult vmaCreateBuffer(
    16792  VmaAllocator allocator,
    16793  const VkBufferCreateInfo* pBufferCreateInfo,
    16794  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    16795  VkBuffer* pBuffer,
    16796  VmaAllocation* pAllocation,
    16797  VmaAllocationInfo* pAllocationInfo)
    16798 {
    16799  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
    16800 
    16801  if(pBufferCreateInfo->size == 0)
    16802  {
    16803  return VK_ERROR_VALIDATION_FAILED_EXT;
    16804  }
    16805 
    16806  VMA_DEBUG_LOG("vmaCreateBuffer");
    16807 
    16808  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16809 
    16810  *pBuffer = VK_NULL_HANDLE;
    16811  *pAllocation = VK_NULL_HANDLE;
    16812 
    16813  // 1. Create VkBuffer.
    16814  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
    16815  allocator->m_hDevice,
    16816  pBufferCreateInfo,
    16817  allocator->GetAllocationCallbacks(),
    16818  pBuffer);
    16819  if(res >= 0)
    16820  {
    16821  // 2. vkGetBufferMemoryRequirements.
    16822  VkMemoryRequirements vkMemReq = {};
    16823  bool requiresDedicatedAllocation = false;
    16824  bool prefersDedicatedAllocation = false;
    16825  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
    16826  requiresDedicatedAllocation, prefersDedicatedAllocation);
    16827 
    16828  // Make sure alignment requirements for specific buffer usages reported
    16829  // in Physical Device Properties are included in alignment reported by memory requirements.
    16830  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
    16831  {
    16832  VMA_ASSERT(vkMemReq.alignment %
    16833  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
    16834  }
    16835  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
    16836  {
    16837  VMA_ASSERT(vkMemReq.alignment %
    16838  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
    16839  }
    16840  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
    16841  {
    16842  VMA_ASSERT(vkMemReq.alignment %
    16843  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
    16844  }
    16845 
    16846  // 3. Allocate memory using allocator.
    16847  res = allocator->AllocateMemory(
    16848  vkMemReq,
    16849  requiresDedicatedAllocation,
    16850  prefersDedicatedAllocation,
    16851  *pBuffer, // dedicatedBuffer
    16852  VK_NULL_HANDLE, // dedicatedImage
    16853  *pAllocationCreateInfo,
    16854  VMA_SUBALLOCATION_TYPE_BUFFER,
    16855  1, // allocationCount
    16856  pAllocation);
    16857 
    16858 #if VMA_RECORDING_ENABLED
    16859  if(allocator->GetRecorder() != VMA_NULL)
    16860  {
    16861  allocator->GetRecorder()->RecordCreateBuffer(
    16862  allocator->GetCurrentFrameIndex(),
    16863  *pBufferCreateInfo,
    16864  *pAllocationCreateInfo,
    16865  *pAllocation);
    16866  }
    16867 #endif
    16868 
    16869  if(res >= 0)
    16870  {
    16871  // 4. Bind buffer with memory.
    16872  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
    16873  {
    16874  res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
    16875  }
    16876  if(res >= 0)
    16877  {
    16878  // All steps succeeded.
    16879  #if VMA_STATS_STRING_ENABLED
    16880  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
    16881  #endif
    16882  if(pAllocationInfo != VMA_NULL)
    16883  {
    16884  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16885  }
    16886 
    16887  return VK_SUCCESS;
    16888  }
    16889  allocator->FreeMemory(
    16890  1, // allocationCount
    16891  pAllocation);
    16892  *pAllocation = VK_NULL_HANDLE;
    16893  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    16894  *pBuffer = VK_NULL_HANDLE;
    16895  return res;
    16896  }
    16897  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    16898  *pBuffer = VK_NULL_HANDLE;
    16899  return res;
    16900  }
    16901  return res;
    16902 }
    16903 
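    // A minimal sketch of the recommended one-call path implemented above
    // (illustrative only; assumes a valid VmaAllocator `alloc`):
    //
    //     VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    //     bufInfo.size = 65536;
    //     bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    //     VmaAllocationCreateInfo allocInfo = {};
    //     allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    //     VkBuffer buf = VK_NULL_HANDLE;
    //     VmaAllocation bufAlloc = VK_NULL_HANDLE;
    //     VkResult res = vmaCreateBuffer(alloc, &bufInfo, &allocInfo, &buf, &bufAlloc, VMA_NULL);
    //     // ... use buf ...
    //     vmaDestroyBuffer(alloc, buf, bufAlloc);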
    16904 void vmaDestroyBuffer(
    16905  VmaAllocator allocator,
    16906  VkBuffer buffer,
    16907  VmaAllocation allocation)
    16908 {
    16909  VMA_ASSERT(allocator);
    16910 
    16911  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    16912  {
    16913  return;
    16914  }
    16915 
    16916  VMA_DEBUG_LOG("vmaDestroyBuffer");
    16917 
    16918  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16919 
    16920 #if VMA_RECORDING_ENABLED
    16921  if(allocator->GetRecorder() != VMA_NULL)
    16922  {
    16923  allocator->GetRecorder()->RecordDestroyBuffer(
    16924  allocator->GetCurrentFrameIndex(),
    16925  allocation);
    16926  }
    16927 #endif
    16928 
    16929  if(buffer != VK_NULL_HANDLE)
    16930  {
    16931  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    16932  }
    16933 
    16934  if(allocation != VK_NULL_HANDLE)
    16935  {
    16936  allocator->FreeMemory(
    16937  1, // allocationCount
    16938  &allocation);
    16939  }
    16940 }
    16941 
    16942 VkResult vmaCreateImage(
    16943  VmaAllocator allocator,
    16944  const VkImageCreateInfo* pImageCreateInfo,
    16945  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    16946  VkImage* pImage,
    16947  VmaAllocation* pAllocation,
    16948  VmaAllocationInfo* pAllocationInfo)
    16949 {
    16950  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
    16951 
    16952  if(pImageCreateInfo->extent.width == 0 ||
    16953  pImageCreateInfo->extent.height == 0 ||
    16954  pImageCreateInfo->extent.depth == 0 ||
    16955  pImageCreateInfo->mipLevels == 0 ||
    16956  pImageCreateInfo->arrayLayers == 0)
    16957  {
    16958  return VK_ERROR_VALIDATION_FAILED_EXT;
    16959  }
    16960 
    16961  VMA_DEBUG_LOG("vmaCreateImage");
    16962 
    16963  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16964 
    16965  *pImage = VK_NULL_HANDLE;
    16966  *pAllocation = VK_NULL_HANDLE;
    16967 
    16968  // 1. Create VkImage.
    16969  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
    16970  allocator->m_hDevice,
    16971  pImageCreateInfo,
    16972  allocator->GetAllocationCallbacks(),
    16973  pImage);
    16974  if(res >= 0)
    16975  {
    16976  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
    16977  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
    16978  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
    16979 
    16980  // 2. Allocate memory using allocator.
    16981  VkMemoryRequirements vkMemReq = {};
    16982  bool requiresDedicatedAllocation = false;
    16983  bool prefersDedicatedAllocation = false;
    16984  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
    16985  requiresDedicatedAllocation, prefersDedicatedAllocation);
    16986 
    16987  res = allocator->AllocateMemory(
    16988  vkMemReq,
    16989  requiresDedicatedAllocation,
    16990  prefersDedicatedAllocation,
    16991  VK_NULL_HANDLE, // dedicatedBuffer
    16992  *pImage, // dedicatedImage
    16993  *pAllocationCreateInfo,
    16994  suballocType,
    16995  1, // allocationCount
    16996  pAllocation);
    16997 
    16998 #if VMA_RECORDING_ENABLED
    16999  if(allocator->GetRecorder() != VMA_NULL)
    17000  {
    17001  allocator->GetRecorder()->RecordCreateImage(
    17002  allocator->GetCurrentFrameIndex(),
    17003  *pImageCreateInfo,
    17004  *pAllocationCreateInfo,
    17005  *pAllocation);
    17006  }
    17007 #endif
    17008 
    17009  if(res >= 0)
    17010  {
    17011  // 3. Bind image with memory.
    17012  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
    17013  {
    17014  res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL);
    17015  }
    17016  if(res >= 0)
    17017  {
    17018  // All steps succeeded.
    17019  #if VMA_STATS_STRING_ENABLED
    17020  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
    17021  #endif
    17022  if(pAllocationInfo != VMA_NULL)
    17023  {
    17024  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    17025  }
    17026 
    17027  return VK_SUCCESS;
    17028  }
    17029  allocator->FreeMemory(
    17030  1, // allocationCount
    17031  pAllocation);
    17032  *pAllocation = VK_NULL_HANDLE;
    17033  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    17034  *pImage = VK_NULL_HANDLE;
    17035  return res;
    17036  }
    17037  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    17038  *pImage = VK_NULL_HANDLE;
    17039  return res;
    17040  }
    17041  return res;
    17042 }
    17043 
    17044 void vmaDestroyImage(
    17045  VmaAllocator allocator,
    17046  VkImage image,
    17047  VmaAllocation allocation)
    17048 {
    17049  VMA_ASSERT(allocator);
    17050 
    17051  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    17052  {
    17053  return;
    17054  }
    17055 
    17056  VMA_DEBUG_LOG("vmaDestroyImage");
    17057 
    17058  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    17059 
    17060 #if VMA_RECORDING_ENABLED
    17061  if(allocator->GetRecorder() != VMA_NULL)
    17062  {
    17063  allocator->GetRecorder()->RecordDestroyImage(
    17064  allocator->GetCurrentFrameIndex(),
    17065  allocation);
    17066  }
    17067 #endif
    17068 
    17069  if(image != VK_NULL_HANDLE)
    17070  {
    17071  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    17072  }
    17073  if(allocation != VK_NULL_HANDLE)
    17074  {
    17075  allocator->FreeMemory(
    17076  1, // allocationCount
    17077  &allocation);
    17078  }
    17079 }
    17080 
    17081 #endif // #ifdef VMA_IMPLEMENTATION
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
    Definition: vk_mem_alloc.h:1807
    +
    Set this flag if the allocation should have its own memory block.
    Definition: vk_mem_alloc.h:2111
    void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
    Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
    -
    VkPhysicalDevice physicalDevice
    Vulkan physical device.
    Definition: vk_mem_alloc.h:1844
    -
    uint32_t maxCpuAllocationsToMove
    Maximum number of allocations that can be moved to a different place using transfers on CPU side,...
    Definition: vk_mem_alloc.h:2883
    +
    VkPhysicalDevice physicalDevice
    Vulkan physical device.
    Definition: vk_mem_alloc.h:1869
    +
    uint32_t maxCpuAllocationsToMove
    Maximum number of allocations that can be moved to a different place using transfers on CPU side,...
    Definition: vk_mem_alloc.h:2908
    VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
    Deprecated. Compacts memory by moving allocations.
    void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    Invalidates memory of given allocation.
    Represents single memory allocation.
    -
    Definition: vk_mem_alloc.h:1818
    -
    size_t blockCount
    Number of VkDeviceMemory blocks allocated for this pool.
    Definition: vk_mem_alloc.h:2417
    -
    PFN_vkCreateBuffer vkCreateBuffer
    Definition: vk_mem_alloc.h:1798
    +
    Definition: vk_mem_alloc.h:1843
    +
    size_t blockCount
    Number of VkDeviceMemory blocks allocated for this pool.
    Definition: vk_mem_alloc.h:2442
    +
    PFN_vkCreateBuffer vkCreateBuffer
    Definition: vk_mem_alloc.h:1819
    void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
    struct VmaStats VmaStats
    General statistics from current state of Allocator.
    -
    Definition: vk_mem_alloc.h:2048
    -
    Definition: vk_mem_alloc.h:2152
    -
    VmaDefragmentationFlags flags
    Reserved for future use. Should be 0.
    Definition: vk_mem_alloc.h:2836
    -
    PFN_vkMapMemory vkMapMemory
    Definition: vk_mem_alloc.h:1790
    -
    VkDeviceMemory deviceMemory
    Handle to Vulkan memory object.
    Definition: vk_mem_alloc.h:2517
    -
    VmaAllocatorCreateFlags flags
    Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1841
    -
    uint32_t maxAllocationsToMove
    Maximum number of allocations that can be moved to different place.
    Definition: vk_mem_alloc.h:2919
    -
    Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
    Definition: vk_mem_alloc.h:2306
    -
    #define VMA_RECORDING_ENABLED
    Definition: vk_mem_alloc.h:1685
    +
    Definition: vk_mem_alloc.h:2073
    +
    Definition: vk_mem_alloc.h:2177
    +
    VmaDefragmentationFlags flags
    Reserved for future use. Should be 0.
    Definition: vk_mem_alloc.h:2861
    +
    PFN_vkMapMemory vkMapMemory
    Definition: vk_mem_alloc.h:1811
    +
    VkDeviceMemory deviceMemory
    Handle to Vulkan memory object.
    Definition: vk_mem_alloc.h:2542
    +
    VmaAllocatorCreateFlags flags
    Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1866
    +
    uint32_t maxAllocationsToMove
    Maximum number of allocations that can be moved to different place.
    Definition: vk_mem_alloc.h:2944
    +
    VkResult vmaBindImageMemory2(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize allocationLocalOffset, VkImage image, const void *pNext)
    Binds image to allocation with additional parameters.
    +
    Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
    Definition: vk_mem_alloc.h:2331
    +
    #define VMA_RECORDING_ENABLED
    Definition: vk_mem_alloc.h:1686
    void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
    Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
    -
    VkDeviceSize size
    Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    Definition: vk_mem_alloc.h:2398
    -
    Definition: vk_mem_alloc.h:2123
    -
    uint32_t allocationCount
    Number of allocations in pAllocations array.
    Definition: vk_mem_alloc.h:2839
    -
    VkFlags VmaAllocatorCreateFlags
    Definition: vk_mem_alloc.h:1779
    -
    VkMemoryPropertyFlags preferredFlags
    Flags that preferably should be set in a memory type chosen for an allocation.
    Definition: vk_mem_alloc.h:2205
    -
    Definition: vk_mem_alloc.h:2075
    -
    const VkAllocationCallbacks * pAllocationCallbacks
    Custom CPU memory allocation callbacks. Optional.
    Definition: vk_mem_alloc.h:1853
    -
    Enables alternative, buddy allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2334
    +
    VkDeviceSize size
    Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    Definition: vk_mem_alloc.h:2423
    +
    Definition: vk_mem_alloc.h:2148
    +
    uint32_t allocationCount
    Number of allocations in pAllocations array.
    Definition: vk_mem_alloc.h:2864
    +
    VkFlags VmaAllocatorCreateFlags
    Definition: vk_mem_alloc.h:1800
    +
    VkMemoryPropertyFlags preferredFlags
    Flags that preferably should be set in a memory type chosen for an allocation.
    Definition: vk_mem_alloc.h:2230
    +
    Definition: vk_mem_alloc.h:2100
    +
    const VkAllocationCallbacks * pAllocationCallbacks
    Custom CPU memory allocation callbacks. Optional.
    Definition: vk_mem_alloc.h:1878
    +
    Enables alternative, buddy allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2359
    void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
    Retrieves statistics from current state of the Allocator.
    -
    const VmaVulkanFunctions * pVulkanFunctions
    Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1.
    Definition: vk_mem_alloc.h:1907
    -
    Description of a Allocator to be created.
    Definition: vk_mem_alloc.h:1838
    +
    const VmaVulkanFunctions * pVulkanFunctions
    Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1.
    Definition: vk_mem_alloc.h:1932
    +
    Description of a Allocator to be created.
    Definition: vk_mem_alloc.h:1863
    void vmaDestroyAllocator(VmaAllocator allocator)
    Destroys allocator object.
    -
    VmaAllocationCreateFlagBits
    Flags to be passed as VmaAllocationCreateInfo::flags.
    Definition: vk_mem_alloc.h:2079
    +
    VmaAllocationCreateFlagBits
    Flags to be passed as VmaAllocationCreateInfo::flags.
    Definition: vk_mem_alloc.h:2104
    void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
    Returns current information about specified allocation and atomically marks it as used in current fra...
    -
    VkDeviceSize allocationSizeMax
    Definition: vk_mem_alloc.h:1979
    -
    PFN_vkBindImageMemory vkBindImageMemory
    Definition: vk_mem_alloc.h:1795
    -
    VmaPool * pPools
    Either null or pointer to array of pools to be defragmented.
    Definition: vk_mem_alloc.h:2873
    -
    VkDeviceSize unusedBytes
    Total number of bytes occupied by unused ranges.
    Definition: vk_mem_alloc.h:1978
    -
    Statistics returned by function vmaDefragment().
    Definition: vk_mem_alloc.h:2923
    +
    VkDeviceSize allocationSizeMax
    Definition: vk_mem_alloc.h:2004
    +
    PFN_vkBindImageMemory vkBindImageMemory
    Definition: vk_mem_alloc.h:1816
    +
    VmaPool * pPools
    Either null or pointer to array of pools to be defragmented.
    Definition: vk_mem_alloc.h:2898
    +
    VkDeviceSize unusedBytes
    Total number of bytes occupied by unused ranges.
    Definition: vk_mem_alloc.h:2003
    +
    Statistics returned by function vmaDefragment().
    Definition: vk_mem_alloc.h:2948
    void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
    Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(),...
    -
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:1870
    -
    VmaStatInfo total
    Definition: vk_mem_alloc.h:1988
    -
    uint32_t deviceMemoryBlocksFreed
    Number of empty VkDeviceMemory objects that have been released to the system.
    Definition: vk_mem_alloc.h:2931
    -
    VmaAllocationCreateFlags flags
    Use VmaAllocationCreateFlagBits enum.
    Definition: vk_mem_alloc.h:2189
    -
    Definition: vk_mem_alloc.h:2147
    -
    VkDeviceSize maxBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places.
    Definition: vk_mem_alloc.h:2914
    -
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
    Definition: vk_mem_alloc.h:1796
    -
    void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called after successful vkAllocateMemory.
    Definition: vk_mem_alloc.h:1721
    +
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:1895
    +
    VmaStatInfo total
    Definition: vk_mem_alloc.h:2013
    +
    uint32_t deviceMemoryBlocksFreed
    Number of empty VkDeviceMemory objects that have been released to the system.
    Definition: vk_mem_alloc.h:2956
    +
    VmaAllocationCreateFlags flags
    Use VmaAllocationCreateFlagBits enum.
    Definition: vk_mem_alloc.h:2214
    +
    Definition: vk_mem_alloc.h:2172
    +
    VkDeviceSize maxBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places.
    Definition: vk_mem_alloc.h:2939
    +
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
    Definition: vk_mem_alloc.h:1817
    +
    void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called after successful vkAllocateMemory.
    Definition: vk_mem_alloc.h:1730
    Represents main object of this library initialized.
    -
    VkDevice device
    Vulkan device.
    Definition: vk_mem_alloc.h:1847
    +
    VkDevice device
    Vulkan device.
    Definition: vk_mem_alloc.h:1872
    void vmaFreeMemoryPages(VmaAllocator allocator, size_t allocationCount, VmaAllocation *pAllocations)
    Frees memory and destroys multiple allocations.
    VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
    Binds buffer to allocation.
    -
    Describes parameter of created VmaPool.
    Definition: vk_mem_alloc.h:2348
    -
    Definition: vk_mem_alloc.h:2342
    -
    PFN_vkCmdCopyBuffer vkCmdCopyBuffer
    Definition: vk_mem_alloc.h:1802
    -
    const VmaRecordSettings * pRecordSettings
    Parameters for recording of VMA calls. Can be null.
    Definition: vk_mem_alloc.h:1914
    -
    VkDeviceSize size
    Size of this allocation, in bytes.
    Definition: vk_mem_alloc.h:2527
    +
    Describes parameter of created VmaPool.
    Definition: vk_mem_alloc.h:2373
    +
    Definition: vk_mem_alloc.h:2367
    +
    PFN_vkCmdCopyBuffer vkCmdCopyBuffer
    Definition: vk_mem_alloc.h:1823
    +
    const VmaRecordSettings * pRecordSettings
    Parameters for recording of VMA calls. Can be null.
    Definition: vk_mem_alloc.h:1939
    +
    VkDeviceSize size
    Size of this allocation, in bytes.
    Definition: vk_mem_alloc.h:2552
    void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
    Given Memory Type Index, returns Property Flags of this memory type.
    -
    PFN_vkUnmapMemory vkUnmapMemory
    Definition: vk_mem_alloc.h:1791
    +
    PFN_vkUnmapMemory vkUnmapMemory
    Definition: vk_mem_alloc.h:1812
    VkResult vmaDefragmentationBegin(VmaAllocator allocator, const VmaDefragmentationInfo2 *pInfo, VmaDefragmentationStats *pStats, VmaDefragmentationContext *pContext)
    Begins defragmentation process.
    -
    Enables flush after recording every function call.
    Definition: vk_mem_alloc.h:1816
    -
    void * pUserData
    Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
    Definition: vk_mem_alloc.h:2226
    -
    size_t minBlockCount
    Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    Definition: vk_mem_alloc.h:2368
    -
    size_t allocationCount
    Number of VmaAllocation objects created from this pool that were not destroyed or lost.
    Definition: vk_mem_alloc.h:2404
    +
    Enables flush after recording every function call.
    Definition: vk_mem_alloc.h:1841
    +
    void * pUserData
    Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
    Definition: vk_mem_alloc.h:2251
    +
    size_t minBlockCount
    Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    Definition: vk_mem_alloc.h:2393
    +
    size_t allocationCount
    Number of VmaAllocation objects created from this pool that were not destroyed or lost.
    Definition: vk_mem_alloc.h:2429
    struct VmaVulkanFunctions VmaVulkanFunctions
    Pointers to some Vulkan functions - a subset used by the library.
    -
    Definition: vk_mem_alloc.h:1777
    -
    uint32_t memoryTypeIndex
    Vulkan memory type index to allocate this pool from.
    Definition: vk_mem_alloc.h:2351
    +
    Definition: vk_mem_alloc.h:1798
    +
    uint32_t memoryTypeIndex
    Vulkan memory type index to allocate this pool from.
    Definition: vk_mem_alloc.h:2376
    VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
    Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
    -
    VkDeviceSize maxGpuBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
    Definition: vk_mem_alloc.h:2888
    -
    VmaMemoryUsage
    Definition: vk_mem_alloc.h:2026
    +
    VkDeviceSize maxGpuBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
    Definition: vk_mem_alloc.h:2913
    +
    VmaMemoryUsage
    Definition: vk_mem_alloc.h:2051
    struct VmaAllocationInfo VmaAllocationInfo
    Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
    -
    VmaAllocation * pAllocations
    Pointer to array of allocations that can be defragmented.
    Definition: vk_mem_alloc.h:2848
    +
    VmaAllocation * pAllocations
    Pointer to array of allocations that can be defragmented.
    Definition: vk_mem_alloc.h:2873
    void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    Flushes memory of given allocation.
    -
    Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
    Definition: vk_mem_alloc.h:2909
    +
    Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
    Definition: vk_mem_alloc.h:2934
    struct VmaPoolCreateInfo VmaPoolCreateInfo
    Describes parameter of created VmaPool.
    void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
    Destroys VmaPool object and frees Vulkan device memory.
    -
    VkDeviceSize bytesFreed
    Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects.
    Definition: vk_mem_alloc.h:2927
    -
    Definition: vk_mem_alloc.h:2065
    -
    uint32_t memoryTypeBits
    Bitmask containing one bit set for every memory type acceptable for this allocation.
    Definition: vk_mem_alloc.h:2213
    -
    PFN_vkBindBufferMemory vkBindBufferMemory
    Definition: vk_mem_alloc.h:1794
    +
    VkDeviceSize bytesFreed
    Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects.
    Definition: vk_mem_alloc.h:2952
    +
    Definition: vk_mem_alloc.h:2090
    +
    uint32_t memoryTypeBits
    Bitmask containing one bit set for every memory type acceptable for this allocation.
    Definition: vk_mem_alloc.h:2238
    +
    PFN_vkBindBufferMemory vkBindBufferMemory
    Definition: vk_mem_alloc.h:1815
    Represents custom memory pool.
    void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
    Retrieves statistics of existing VmaPool object.
    struct VmaDefragmentationInfo VmaDefragmentationInfo
    Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
    VkResult vmaDefragmentationEnd(VmaAllocator allocator, VmaDefragmentationContext context)
    Ends defragmentation process.
    -
    General statistics from current state of Allocator.
    Definition: vk_mem_alloc.h:1984
    -
    void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called before vkFreeMemory.
    Definition: vk_mem_alloc.h:1727
    -
    VkFlags VmaDefragmentationFlags
    Definition: vk_mem_alloc.h:2827
    +
    General statistics from current state of Allocator.
    Definition: vk_mem_alloc.h:2009
    +
    void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called before vkFreeMemory.
    Definition: vk_mem_alloc.h:1736
    +
    VkFlags VmaDefragmentationFlags
    Definition: vk_mem_alloc.h:2852
    void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
    Sets pUserData in given allocation to new value.
    -
    Definition: vk_mem_alloc.h:2825
    - -
    VkBool32 * pAllocationsChanged
    Optional, output. Pointer to array that will be filled with information whether the allocation at cer...
    Definition: vk_mem_alloc.h:2854
    +
    Definition: vk_mem_alloc.h:2850
    + +
    VkBool32 * pAllocationsChanged
    Optional, output. Pointer to array that will be filled with information whether the allocation at cer...
    Definition: vk_mem_alloc.h:2879
    VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
    Allocates Vulkan device memory and creates VmaPool object.
    -
    VmaAllocatorCreateFlagBits
    Flags for created VmaAllocator.
    Definition: vk_mem_alloc.h:1748
    +
    VmaAllocatorCreateFlagBits
    Flags for created VmaAllocator.
    Definition: vk_mem_alloc.h:1757
    VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
    Binds image to allocation.
    struct VmaStatInfo VmaStatInfo
    Calculated statistics of memory usage in entire allocator.
    -
    VkFlags VmaRecordFlags
    Definition: vk_mem_alloc.h:1820
    -
    Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
    Definition: vk_mem_alloc.h:1753
    -
    uint32_t allocationsMoved
    Number of allocations that have been moved to different places.
    Definition: vk_mem_alloc.h:2929
    +
    VkFlags VmaRecordFlags
    Definition: vk_mem_alloc.h:1845
    +
    Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
    Definition: vk_mem_alloc.h:1762
    +
    uint32_t allocationsMoved
    Number of allocations that have been moved to different places.
    Definition: vk_mem_alloc.h:2954
    void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
    Creates new allocation that is in lost state from the beginning.
    -
    VkMemoryPropertyFlags requiredFlags
    Flags that must be set in a Memory Type chosen for an allocation.
    Definition: vk_mem_alloc.h:2200
    -
    VkDeviceSize unusedRangeSizeMax
    Size of the largest continuous free memory region available for new allocation.
    Definition: vk_mem_alloc.h:2414
    +
    VkMemoryPropertyFlags requiredFlags
    Flags that must be set in a Memory Type chosen for an allocation.
    Definition: vk_mem_alloc.h:2225
    +
    VkDeviceSize unusedRangeSizeMax
    Size of the largest continuous free memory region available for new allocation.
    Definition: vk_mem_alloc.h:2439
    void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
    Builds and returns statistics as string in JSON format.
    -
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
    Definition: vk_mem_alloc.h:1787
    -
    Calculated statistics of memory usage in entire allocator.
    Definition: vk_mem_alloc.h:1967
    -
    VkDeviceSize blockSize
    Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes....
    Definition: vk_mem_alloc.h:2363
    -
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
    Definition: vk_mem_alloc.h:1740
    -
    Definition: vk_mem_alloc.h:2338
    +
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
    Definition: vk_mem_alloc.h:1808
    +
    Calculated statistics of memory usage in entire allocator.
    Definition: vk_mem_alloc.h:1992
    +
    VkDeviceSize blockSize
    Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes....
    Definition: vk_mem_alloc.h:2388
    +
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
    Definition: vk_mem_alloc.h:1749
    +
    Definition: vk_mem_alloc.h:2363
    VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    -
    Definition: vk_mem_alloc.h:2130
    +
    Definition: vk_mem_alloc.h:2155
    Represents Opaque object that represents started defragmentation process.
    -
    VkDeviceSize unusedRangeSizeMin
    Definition: vk_mem_alloc.h:1980
    -
    PFN_vmaFreeDeviceMemoryFunction pfnFree
    Optional, can be null.
    Definition: vk_mem_alloc.h:1744
    -
    Definition: vk_mem_alloc.h:2163
    -
    VmaPoolCreateFlags flags
    Use combination of VmaPoolCreateFlagBits.
    Definition: vk_mem_alloc.h:2354
    -
    Definition: vk_mem_alloc.h:2074
    -
    PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
    Definition: vk_mem_alloc.h:1793
    +
    VkDeviceSize unusedRangeSizeMin
    Definition: vk_mem_alloc.h:2005
    +
    Definition: vk_mem_alloc.h:1796
    +
    PFN_vmaFreeDeviceMemoryFunction pfnFree
    Optional, can be null.
    Definition: vk_mem_alloc.h:1753
    +
    Definition: vk_mem_alloc.h:2188
    +
    VmaPoolCreateFlags flags
    Use combination of VmaPoolCreateFlagBits.
    Definition: vk_mem_alloc.h:2379
    +
    Definition: vk_mem_alloc.h:2099
    +
    PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
    Definition: vk_mem_alloc.h:1814
    struct VmaPoolStats VmaPoolStats
    Describes parameter of existing VmaPool.
    VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    Function similar to vmaCreateBuffer().
    -
    VmaMemoryUsage usage
    Intended usage of memory.
    Definition: vk_mem_alloc.h:2195
    -
    Definition: vk_mem_alloc.h:2186
    +
    VmaMemoryUsage usage
    Intended usage of memory.
    Definition: vk_mem_alloc.h:2220
    +
    Definition: vk_mem_alloc.h:2211
    VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
    Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
    -
    uint32_t blockCount
    Number of VkDeviceMemory Vulkan memory blocks allocated.
    Definition: vk_mem_alloc.h:1970
    -
    PFN_vkFreeMemory vkFreeMemory
    Definition: vk_mem_alloc.h:1789
    -
    size_t maxBlockCount
    Maximum number of blocks that can be allocated in this pool. Optional.
    Definition: vk_mem_alloc.h:2376
    -
    const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
    Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
    Definition: vk_mem_alloc.h:1856
    -
    size_t unusedRangeCount
    Number of continuous memory ranges in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2407
    -
    VkFlags VmaAllocationCreateFlags
    Definition: vk_mem_alloc.h:2184
    -
    VkDeviceSize maxCpuBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
    Definition: vk_mem_alloc.h:2878
    -
    VmaPool pool
    Pool that this allocation should be created in.
    Definition: vk_mem_alloc.h:2219
    +
    uint32_t blockCount
    Number of VkDeviceMemory Vulkan memory blocks allocated.
    Definition: vk_mem_alloc.h:1995
    +
    PFN_vkFreeMemory vkFreeMemory
    Definition: vk_mem_alloc.h:1810
    +
    size_t maxBlockCount
    Maximum number of blocks that can be allocated in this pool. Optional.
    Definition: vk_mem_alloc.h:2401
    +
    const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
    Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
    Definition: vk_mem_alloc.h:1881
    +
    size_t unusedRangeCount
    Number of continuous memory ranges in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2432
    +
    VkFlags VmaAllocationCreateFlags
    Definition: vk_mem_alloc.h:2209
    +
    VkDeviceSize maxCpuBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
    Definition: vk_mem_alloc.h:2903
    +
    VmaPool pool
    Pool that this allocation should be created in.
    Definition: vk_mem_alloc.h:2244
    void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
    -
    const VkDeviceSize * pHeapSizeLimit
    Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
    Definition: vk_mem_alloc.h:1895
    -
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
    Definition: vk_mem_alloc.h:1986
    -
    Set this flag to use a memory that will be persistently mapped and retrieve pointer to it.
    Definition: vk_mem_alloc.h:2110
    -
    VkDeviceSize allocationSizeMin
    Definition: vk_mem_alloc.h:1979
    +
    const VkDeviceSize * pHeapSizeLimit
    Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
    Definition: vk_mem_alloc.h:1920
    +
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
    Definition: vk_mem_alloc.h:2011
    +
    Set this flag to use a memory that will be persistently mapped and retrieve pointer to it.
    Definition: vk_mem_alloc.h:2135
    +
    VkDeviceSize allocationSizeMin
    Definition: vk_mem_alloc.h:2004
    VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
    Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
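A sketch of querying a memory type index without creating any resource, assuming an existing `allocator`; buffer size and usage flags are arbitrary examples:

```cpp
// Sketch: ask which memory type VMA would pick for a uniform buffer.
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size  = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

uint32_t memTypeIndex;
VkResult res = vmaFindMemoryTypeIndexForBufferInfo(
    allocator, &bufCreateInfo, &allocCreateInfo, &memTypeIndex);
// memTypeIndex can then seed e.g. VmaPoolCreateInfo::memoryTypeIndex.
```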
    -
    PFN_vkCreateImage vkCreateImage
    Definition: vk_mem_alloc.h:1800
    -
    VmaRecordFlags flags
    Flags for recording. Use VmaRecordFlagBits enum.
    Definition: vk_mem_alloc.h:1826
    -
    VmaDefragmentationFlagBits
    Flags to be used in vmaDefragmentationBegin(). None at the moment. Reserved for future use.
    Definition: vk_mem_alloc.h:2824
    -
    VkCommandBuffer commandBuffer
    Optional. Command buffer where GPU copy commands will be posted.
    Definition: vk_mem_alloc.h:2902
    -
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
    Optional, can be null.
    Definition: vk_mem_alloc.h:1742
    -
    PFN_vkDestroyBuffer vkDestroyBuffer
    Definition: vk_mem_alloc.h:1799
    +
    PFN_vkCreateImage vkCreateImage
    Definition: vk_mem_alloc.h:1821
    +
    VmaRecordFlags flags
    Flags for recording. Use VmaRecordFlagBits enum.
    Definition: vk_mem_alloc.h:1851
    +
    VmaDefragmentationFlagBits
    Flags to be used in vmaDefragmentationBegin(). None at the moment. Reserved for future use.
    Definition: vk_mem_alloc.h:2849
    +
    VkCommandBuffer commandBuffer
    Optional. Command buffer where GPU copy commands will be posted.
    Definition: vk_mem_alloc.h:2927
    +
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
    Optional, can be null.
    Definition: vk_mem_alloc.h:1751
    +
    PFN_vkDestroyBuffer vkDestroyBuffer
    Definition: vk_mem_alloc.h:1820
    VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
    Maps memory represented by given allocation and returns pointer to it.
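A minimal map/write/unmap sketch, assuming `allocator`, a host-visible `allocation`, and hypothetical `srcData`/`dataSize` variables:

```cpp
#include <cstring> // memcpy

// Sketch: upload CPU data through a temporary mapping.
void* mappedData = nullptr;
VkResult res = vmaMapMemory(allocator, allocation, &mappedData);
if(res == VK_SUCCESS)
{
    memcpy(mappedData, srcData, dataSize);
    vmaUnmapMemory(allocator, allocation); // each map needs a matching unmap
}
```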
    -
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:2390
    -
    PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
    Definition: vk_mem_alloc.h:1792
    -
    Definition: vk_mem_alloc.h:2141
    +
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:2415
    +
    PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
    Definition: vk_mem_alloc.h:1813
    +
    Definition: vk_mem_alloc.h:2166
    VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    Function similar to vmaAllocateMemoryForBuffer().
    struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of an Allocator to be created.
    -
    const char * pFilePath
    Path to the file that should be written by the recording.
    Definition: vk_mem_alloc.h:1834
    -
    void * pUserData
    Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
    Definition: vk_mem_alloc.h:2541
    -
    VkDeviceSize preferredLargeHeapBlockSize
    Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB....
    Definition: vk_mem_alloc.h:1850
    -
    VkDeviceSize allocationSizeAvg
    Definition: vk_mem_alloc.h:1979
    +
    const char * pFilePath
    Path to the file that should be written by the recording.
    Definition: vk_mem_alloc.h:1859
    +
    void * pUserData
    Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
    Definition: vk_mem_alloc.h:2566
    +
    VkDeviceSize preferredLargeHeapBlockSize
    Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB....
    Definition: vk_mem_alloc.h:1875
    +
    VkDeviceSize allocationSizeAvg
    Definition: vk_mem_alloc.h:2004
    VkResult vmaAllocateMemoryPages(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, size_t allocationCount, VmaAllocation *pAllocations, VmaAllocationInfo *pAllocationInfo)
    General purpose memory allocation for multiple allocation objects at once.
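A sketch of allocating several allocations with identical requirements in one call, assuming `allocator` and a filled-in `memReq` (VkMemoryRequirements) already exist; the page count is arbitrary:

```cpp
// Sketch: create 16 independent allocations at once.
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation pages[16] = {};
VkResult res = vmaAllocateMemoryPages(allocator, &memReq, &allocCreateInfo,
                                      16, pages, nullptr);
// Each page can later be freed with vmaFreeMemory(), or all together
// with vmaFreeMemoryPages().
```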
    -
    VkDeviceSize usedBytes
    Total number of bytes occupied by all allocations.
    Definition: vk_mem_alloc.h:1976
    +
    VkDeviceSize usedBytes
    Total number of bytes occupied by all allocations.
    Definition: vk_mem_alloc.h:2001
    struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
    VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
    Checks magic number in margins around all allocations in given memory types (in both default and cust...
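A sketch of a whole-heap corruption check, assuming an existing `allocator` and that the library was compiled with a nonzero VMA_DEBUG_MARGIN and VMA_DEBUG_DETECT_CORRUPTION:

```cpp
// Sketch: UINT32_MAX selects every memory type for checking.
VkResult res = vmaCheckCorruption(allocator, UINT32_MAX);
if(res == VK_ERROR_FEATURE_NOT_PRESENT)
{
    // Corruption detection was not enabled at compile time.
}
```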
    -
Describes parameters of an existing VmaPool.
    Definition: vk_mem_alloc.h:2395
    -
    Parameters for defragmentation.
    Definition: vk_mem_alloc.h:2833
    +
Describes parameters of an existing VmaPool.
    Definition: vk_mem_alloc.h:2420
    +
    Parameters for defragmentation.
    Definition: vk_mem_alloc.h:2858
    VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
    Checks magic number in margins around all allocations in given memory pool in search for corruptions.
    -
    Definition: vk_mem_alloc.h:2156
    -
    VkDeviceSize offset
    Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory,...
    Definition: vk_mem_alloc.h:2522
    -
    Definition: vk_mem_alloc.h:2170
    -
    Definition: vk_mem_alloc.h:2182
    -
    VkDeviceSize bytesMoved
    Total number of bytes that have been copied while moving allocations to different places.
    Definition: vk_mem_alloc.h:2925
    -
    Pointers to some Vulkan functions - a subset used by the library.
    Definition: vk_mem_alloc.h:1785
    +
    Definition: vk_mem_alloc.h:2181
    +
    VkDeviceSize offset
    Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory,...
    Definition: vk_mem_alloc.h:2547
    +
    Definition: vk_mem_alloc.h:2195
    +
    Definition: vk_mem_alloc.h:2207
    +
    VkDeviceSize bytesMoved
    Total number of bytes that have been copied while moving allocations to different places.
    Definition: vk_mem_alloc.h:2950
    +
    Pointers to some Vulkan functions - a subset used by the library.
    Definition: vk_mem_alloc.h:1806
    VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
    Creates Allocator object.
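A minimal allocator-creation sketch, assuming valid `physicalDevice` and `device` handles; the bind-memory2 flag is only needed if the extended bind functions will be used:

```cpp
VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;
// Opt in to VK_KHR_bind_memory2 so vmaBindBufferMemory2()/vmaBindImageMemory2()
// can pass pNext chains (the extension must also be enabled on the device):
allocatorInfo.flags = VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT;

VmaAllocator allocator;
VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
```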
    -
    uint32_t unusedRangeCount
    Number of free ranges of memory between allocations.
    Definition: vk_mem_alloc.h:1974
    -
    Definition: vk_mem_alloc.h:2031
    -
    VkFlags VmaPoolCreateFlags
    Definition: vk_mem_alloc.h:2344
    +
    VkResult vmaBindBufferMemory2(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize allocationLocalOffset, VkBuffer buffer, const void *pNext)
    Binds buffer to allocation with additional parameters.
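A sketch of the new extended bind entry point, assuming `allocator`, `allocation`, and `buffer` already exist and the allocator was created with VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT; the device-group struct is purely an example of what can travel through pNext:

```cpp
VkBindBufferMemoryDeviceGroupInfo deviceGroupInfo =
    { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO };
// ... fill deviceGroupInfo.deviceIndexCount / pDeviceIndices ...
VkResult res = vmaBindBufferMemory2(allocator, allocation,
                                    0, // allocationLocalOffset
                                    buffer, &deviceGroupInfo);
```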
    +
    uint32_t unusedRangeCount
    Number of free ranges of memory between allocations.
    Definition: vk_mem_alloc.h:1999
    +
    Definition: vk_mem_alloc.h:2056
    +
    VkFlags VmaPoolCreateFlags
    Definition: vk_mem_alloc.h:2369
    void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    -
    Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
    Definition: vk_mem_alloc.h:1823
    -
    uint32_t allocationCount
    Number of VmaAllocation allocation objects allocated.
    Definition: vk_mem_alloc.h:1972
    -
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
    Definition: vk_mem_alloc.h:1797
    -
    PFN_vkDestroyImage vkDestroyImage
    Definition: vk_mem_alloc.h:1801
    -
    Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
    Definition: vk_mem_alloc.h:2097
    -
    Definition: vk_mem_alloc.h:2177
    -
    Definition: vk_mem_alloc.h:2058
    -
    void * pMappedData
    Pointer to the beginning of this allocation as mapped data.
    Definition: vk_mem_alloc.h:2536
    +
    Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
    Definition: vk_mem_alloc.h:1848
    +
    uint32_t allocationCount
    Number of VmaAllocation allocation objects allocated.
    Definition: vk_mem_alloc.h:1997
    +
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
    Definition: vk_mem_alloc.h:1818
    +
    PFN_vkDestroyImage vkDestroyImage
    Definition: vk_mem_alloc.h:1822
    +
    Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
    Definition: vk_mem_alloc.h:2122
    +
    Definition: vk_mem_alloc.h:2202
    +
    Definition: vk_mem_alloc.h:2083
    +
    void * pMappedData
    Pointer to the beginning of this allocation as mapped data.
    Definition: vk_mem_alloc.h:2561
    void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
    Destroys Vulkan image and frees allocated memory.
    -
    Enables usage of VK_KHR_dedicated_allocation extension.
    Definition: vk_mem_alloc.h:1775
    +
    Enables usage of VK_KHR_dedicated_allocation extension.
    Definition: vk_mem_alloc.h:1784
    struct VmaDefragmentationStats VmaDefragmentationStats
    Statistics returned by function vmaDefragment().
    -
    PFN_vkAllocateMemory vkAllocateMemory
    Definition: vk_mem_alloc.h:1788
    -
    Enables alternative, linear allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2323
    +
    PFN_vkAllocateMemory vkAllocateMemory
    Definition: vk_mem_alloc.h:1809
    +
    Enables alternative, linear allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2348
    VkResult vmaResizeAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize newSize)
    Deprecated.
    -
Parameters of VmaAllocation objects that can be retrieved using function vmaGetAllocationInfo().
    Definition: vk_mem_alloc.h:2503
    +
Parameters of VmaAllocation objects that can be retrieved using function vmaGetAllocationInfo().
    Definition: vk_mem_alloc.h:2528
    VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    General purpose memory allocation.
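A sketch of the lowest-level allocation path, driven by raw VkMemoryRequirements; `allocator`, `device`, and `buffer` are assumed to exist:

```cpp
VkMemoryRequirements memReq;
vkGetBufferMemoryRequirements(device, buffer, &memReq);

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation allocation;
VkResult res = vmaAllocateMemory(allocator, &memReq, &allocCreateInfo,
                                 &allocation, nullptr);
```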
    void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
    Sets index of the current frame.
    struct VmaAllocationCreateInfo VmaAllocationCreateInfo
    VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
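A sketch of allocating memory tailored to an existing buffer and then binding it explicitly; `allocator` and `buffer` are assumed:

```cpp
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation allocation;
VkResult res = vmaAllocateMemoryForBuffer(allocator, buffer,
                                          &allocCreateInfo, &allocation, nullptr);
if(res == VK_SUCCESS)
    res = vmaBindBufferMemory(allocator, allocation, buffer);
```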
    -
    Definition: vk_mem_alloc.h:2167
    -
    VmaPoolCreateFlagBits
    Flags to be passed as VmaPoolCreateInfo::flags.
    Definition: vk_mem_alloc.h:2288
    -
    VkDeviceSize unusedRangeSizeAvg
    Definition: vk_mem_alloc.h:1980
    +
    Definition: vk_mem_alloc.h:2192
    +
    VmaPoolCreateFlagBits
    Flags to be passed as VmaPoolCreateInfo::flags.
    Definition: vk_mem_alloc.h:2313
    +
    VkDeviceSize unusedRangeSizeAvg
    Definition: vk_mem_alloc.h:2005
    VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
Returns VK_TRUE if allocation is not lost and atomically marks it as used in the current frame.
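A sketch of the per-frame pattern for allocations created with the can-become-lost flag, assuming `allocator`, `allocation`, and a `frameIndex` counter:

```cpp
vmaSetCurrentFrameIndex(allocator, frameIndex);
if(vmaTouchAllocation(allocator, allocation) == VK_FALSE)
{
    // Allocation was lost: destroy the old resource and recreate it.
}
```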
-
-
    VmaRecordFlagBits
    Flags to be used in VmaRecordSettings::flags.
    Definition: vk_mem_alloc.h:1810
    -
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
    Definition: vk_mem_alloc.h:1987
+
+
    VmaRecordFlagBits
    Flags to be used in VmaRecordSettings::flags.
    Definition: vk_mem_alloc.h:1835
    +
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
    Definition: vk_mem_alloc.h:2012
    void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
    Destroys Vulkan buffer and frees allocated memory.
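A one-line teardown sketch matching vmaCreateBuffer(); `allocator`, `buffer`, and `allocation` are assumed to come from that call:

```cpp
// Convenience call, equivalent to vkDestroyBuffer() followed by vmaFreeMemory().
vmaDestroyBuffer(allocator, buffer, allocation);
```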
    -
    VkDeviceSize unusedSize
    Total number of bytes in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2401
    -
    VkDeviceSize unusedRangeSizeMax
    Definition: vk_mem_alloc.h:1980
    +
    VkDeviceSize unusedSize
    Total number of bytes in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2426
    +
    VkDeviceSize unusedRangeSizeMax
    Definition: vk_mem_alloc.h:2005
    struct VmaDefragmentationInfo2 VmaDefragmentationInfo2
    Parameters for defragmentation.
    -
    uint32_t maxGpuAllocationsToMove
    Maximum number of allocations that can be moved to a different place using transfers on GPU side,...
    Definition: vk_mem_alloc.h:2893
    +
    uint32_t maxGpuAllocationsToMove
    Maximum number of allocations that can be moved to a different place using transfers on GPU side,...
    Definition: vk_mem_alloc.h:2918
    struct VmaRecordSettings VmaRecordSettings
    Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
    -
    uint32_t memoryType
    Memory type index that this allocation was allocated from.
    Definition: vk_mem_alloc.h:2508
    -
    uint32_t poolCount
Number of pools in pPools array.
    Definition: vk_mem_alloc.h:2857
    +
    uint32_t memoryType
    Memory type index that this allocation was allocated from.
    Definition: vk_mem_alloc.h:2533
    +
    uint32_t poolCount
Number of pools in pPools array.
    Definition: vk_mem_alloc.h:2882