diff --git a/README.md b/README.md index 550686e..1b5e51a 100644 --- a/README.md +++ b/README.md @@ -41,6 +41,7 @@ Additional features: - Customization: Predefine appropriate macros to provide your own implementation of all external facilities used by the library, from assert, mutex, and atomic, to vector and linked list. - Support for persistently mapped memory: Just allocate memory with the appropriate flag and you get access to a mapped pointer. - Custom memory pools: Create a pool with desired parameters (e.g. fixed or limited maximum size) and allocate memory out of it. +- Support for VK_KHR_dedicated_allocation extension. - Defragmentation: Call one function and let the library move data around to free some memory blocks and make your allocations better compacted. - Lost allocations: Allocate memory with appropriate flags and let the library remove allocations that are not used for many frames to make room for new ones. - Statistics: Obtain detailed statistics about the amount of memory used, unused, number of allocated blocks, number of allocations, etc. - globally, per memory heap, and per memory type. diff --git a/bin/VulkanSample_Release_2015.exe b/bin/VulkanSample_Release_2015.exe index d0de1b8..ac61f75 100644 Binary files a/bin/VulkanSample_Release_2015.exe and b/bin/VulkanSample_Release_2015.exe differ diff --git a/docs/html/functions.html b/docs/html/functions.html index 61453d6..d7cef82 100644 --- a/docs/html/functions.html +++ b/docs/html/functions.html @@ -279,9 +279,15 @@ $(function() {
  • vkGetBufferMemoryRequirements : VmaVulkanFunctions
  • +
  • vkGetBufferMemoryRequirements2KHR +: VmaVulkanFunctions +
  • vkGetImageMemoryRequirements : VmaVulkanFunctions
  • +
  • vkGetImageMemoryRequirements2KHR +: VmaVulkanFunctions +
  • vkGetPhysicalDeviceMemoryProperties : VmaVulkanFunctions
  • diff --git a/docs/html/functions_vars.html b/docs/html/functions_vars.html index d0f2ac6..4f95fe1 100644 --- a/docs/html/functions_vars.html +++ b/docs/html/functions_vars.html @@ -279,9 +279,15 @@ $(function() {
  • vkGetBufferMemoryRequirements : VmaVulkanFunctions
  • +
  • vkGetBufferMemoryRequirements2KHR +: VmaVulkanFunctions +
  • vkGetImageMemoryRequirements : VmaVulkanFunctions
  • +
  • vkGetImageMemoryRequirements2KHR +: VmaVulkanFunctions +
  • vkGetPhysicalDeviceMemoryProperties : VmaVulkanFunctions
  • diff --git a/docs/html/globals.html b/docs/html/globals.html index 6438de7..7300936 100644 --- a/docs/html/globals.html +++ b/docs/html/globals.html @@ -77,15 +77,15 @@ $(function() {
  • VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT : vk_mem_alloc.h
  • +
  • VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT +: vk_mem_alloc.h +
  • VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM : vk_mem_alloc.h
  • VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT : vk_mem_alloc.h
  • -
  • VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT -: vk_mem_alloc.h -
  • VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT : vk_mem_alloc.h
  • @@ -95,6 +95,9 @@ $(function() {
  • VMA_ALLOCATOR_FLAG_BITS_MAX_ENUM : vk_mem_alloc.h
  • +
  • VMA_ALLOCATOR_KHR_DEDICATED_ALLOCATION_BIT +: vk_mem_alloc.h +
  • VMA_MEMORY_USAGE_CPU_ONLY : vk_mem_alloc.h
  • diff --git a/docs/html/globals_eval.html b/docs/html/globals_eval.html index 8c6790b..f3992df 100644 --- a/docs/html/globals_eval.html +++ b/docs/html/globals_eval.html @@ -65,15 +65,15 @@ $(function() {
  • VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT : vk_mem_alloc.h
  • +
  • VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT +: vk_mem_alloc.h +
  • VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM : vk_mem_alloc.h
  • VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT : vk_mem_alloc.h
  • -
  • VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT -: vk_mem_alloc.h -
  • VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT : vk_mem_alloc.h
  • @@ -83,6 +83,9 @@ $(function() {
  • VMA_ALLOCATOR_FLAG_BITS_MAX_ENUM : vk_mem_alloc.h
  • +
  • VMA_ALLOCATOR_KHR_DEDICATED_ALLOCATION_BIT +: vk_mem_alloc.h +
  • VMA_MEMORY_USAGE_CPU_ONLY : vk_mem_alloc.h
  • diff --git a/docs/html/group__general.html b/docs/html/group__general.html index a1826e1..97b2fa5 100644 --- a/docs/html/group__general.html +++ b/docs/html/group__general.html @@ -124,6 +124,7 @@ Typedefs

    Enumerations

    enum  VmaAllocatorFlagBits { VMA_ALLOCATOR_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001, +VMA_ALLOCATOR_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002, VMA_ALLOCATOR_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } Flags for created VmaAllocator. More...
    @@ -336,6 +337,19 @@ Functions EnumeratorVMA_ALLOCATOR_EXTERNALLY_SYNCHRONIZED_BIT 

    Allocator and all objects created from it will not be synchronized internally, so you must guarantee they are used from only one thread at a time or synchronized externally by you.

    Using this flag may increase performance because internal mutexes are not used.

    +VMA_ALLOCATOR_KHR_DEDICATED_ALLOCATION_BIT 

Enables usage of the VK_KHR_dedicated_allocation extension.

    +

Using this extension will automatically allocate dedicated blocks of memory for some buffers and images, instead of suballocating space for them out of bigger memory blocks (as if you had explicitly used the VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT flag), whenever the driver recommends it. It may improve performance on some GPUs.

    +

You may set this flag only if you have checked that the following device extensions are supported, you enabled them while creating the Vulkan device passed as VmaAllocatorCreateInfo::device, and you want them to be used internally by this library:

+ VK_KHR_get_memory_requirements2
+ VK_KHR_dedicated_allocation

If this flag is enabled, you must also provide VmaAllocatorCreateInfo::pVulkanFunctions and fill at least the members VmaVulkanFunctions::vkGetBufferMemoryRequirements2KHR and VmaVulkanFunctions::vkGetImageMemoryRequirements2KHR, because they are never imported statically. A setup sketch follows the example warning below.

    +

When this flag is set, you may see the following warnings reported by the Vulkan validation layer. You can ignore them.

    +
    +

    vkBindBufferMemory(): Binding memory to buffer 0x2d but vkGetBufferMemoryRequirements() has not been called on that buffer.

    +
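For illustration, a minimal setup sketch, assuming physicalDevice and device are your existing Vulkan handles, both extensions were enabled when device was created, and VMA_STATIC_VULKAN_FUNCTIONS is left at its default of 1, so only the two *2KHR pointers (which are never imported statically) must be supplied:

    VmaVulkanFunctions vulkanFunctions = {};
    // Both entry points come from VK_KHR_get_memory_requirements2.
    vulkanFunctions.vkGetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2KHR)
        vkGetDeviceProcAddr(device, "vkGetBufferMemoryRequirements2KHR");
    vulkanFunctions.vkGetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2KHR)
        vkGetDeviceProcAddr(device, "vkGetImageMemoryRequirements2KHR");

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.flags = VMA_ALLOCATOR_KHR_DEDICATED_ALLOCATION_BIT;
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.pVulkanFunctions = &vulkanFunctions;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);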
    + VMA_ALLOCATOR_FLAG_BITS_MAX_ENUM  diff --git a/docs/html/group__layer1.html b/docs/html/group__layer1.html index 6559315..a77dca8 100644 --- a/docs/html/group__layer1.html +++ b/docs/html/group__layer1.html @@ -99,7 +99,7 @@ Enumerations }   enum  VmaAllocationCreateFlagBits {
    -  VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT = 0x00000001, +  VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001, VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002, VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT = 0x00000004, VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008, @@ -191,14 +191,14 @@ Functions

    Flags to be passed as VmaAllocationCreateInfo::flags.

    -

This function works by moving allocations to different places (different VkDeviceMemory objects and/or different offsets) in order to optimize memory usage. Only allocations that are in the pAllocations array can be moved. All other allocations are considered non-movable in this call. Basic rules:

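For illustration, a minimal vmaDefragment sketch; the allocations array, its count, and the resources bound to those allocations are assumed to come from your own bookkeeping (std::vector is used for brevity):

    std::vector<VkBool32> allocationsChanged(allocationCount);
    VmaDefragmentationStats stats = {};
    VkResult res = vmaDefragment(
        allocator,
        allocations,               // only these allocations may be moved
        allocationCount,
        allocationsChanged.data(), // filled with VK_TRUE for entries that moved
        nullptr,                   // null = default VmaDefragmentationInfo limits
        &stats);
    // For each moved allocation, query vmaGetAllocationInfo() again and destroy,
    // recreate, and rebind any buffer or image that was bound to the old memory.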
    diff --git a/docs/html/index.html b/docs/html/index.html index e73c089..5041602 100644 --- a/docs/html/index.html +++ b/docs/html/index.html @@ -259,7 +259,7 @@ void MyBuffer::EnsureBuffer()
• If failed, try to create a new block of VkDeviceMemory with the preferred block size.
• If failed, try to create such a block with size/2 and size/4.
• If failed and the VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT flag was specified, try to find space in existing blocks, possibly making some other allocations lost.
  • -
  • If failed, try to allocate separate VkDeviceMemory for this allocation, just like when you use VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT.
  • +
• If failed, try to allocate a separate VkDeviceMemory for this allocation, just like when you use VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
• If failed, choose another memory type that meets the requirements specified in VmaAllocationCreateInfo and go to point 1.
• If failed, return VK_ERROR_OUT_OF_DEVICE_MEMORY. (A minimal call that exercises this algorithm is sketched below.)
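For illustration, a minimal sketch assuming device, buffer, and allocator already exist:

    VkMemoryRequirements memReq;
    vkGetBufferMemoryRequirements(device, buffer, &memReq); // buffer: an existing VkBuffer

    VmaAllocationCreateInfo createInfo = {};
    createInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocation allocation;
    VkResult res = vmaAllocateMemory(allocator, &memReq, &createInfo, &allocation, nullptr);
    // res == VK_ERROR_OUT_OF_DEVICE_MEMORY only after every fallback step above has failed.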
  • diff --git a/docs/html/search/all_d.js b/docs/html/search/all_d.js index 16be3ae..2ad8b72 100644 --- a/docs/html/search/all_d.js +++ b/docs/html/search/all_d.js @@ -11,19 +11,22 @@ var searchData= ['vkdestroyimage',['vkDestroyImage',['../struct_vma_vulkan_functions.html#a90b898227039b1dcb3520f6e91f09ffa',1,'VmaVulkanFunctions']]], ['vkfreememory',['vkFreeMemory',['../struct_vma_vulkan_functions.html#a4c658701778564d62034255b5dda91b4',1,'VmaVulkanFunctions']]], ['vkgetbuffermemoryrequirements',['vkGetBufferMemoryRequirements',['../struct_vma_vulkan_functions.html#a5b92901df89a4194b0d12f6071d4d143',1,'VmaVulkanFunctions']]], + ['vkgetbuffermemoryrequirements2khr',['vkGetBufferMemoryRequirements2KHR',['../struct_vma_vulkan_functions.html#a9d8d1b05d2b1e7e1d9b27f6f585acf9c',1,'VmaVulkanFunctions']]], ['vkgetimagememoryrequirements',['vkGetImageMemoryRequirements',['../struct_vma_vulkan_functions.html#a475f6f49f8debe4d10800592606d53f4',1,'VmaVulkanFunctions']]], + ['vkgetimagememoryrequirements2khr',['vkGetImageMemoryRequirements2KHR',['../struct_vma_vulkan_functions.html#a9cdcdc1e2b2ea7c571f7d27e30ba6875',1,'VmaVulkanFunctions']]], ['vkgetphysicaldevicememoryproperties',['vkGetPhysicalDeviceMemoryProperties',['../struct_vma_vulkan_functions.html#a60d25c33bba06bb8592e6875cbaa9830',1,'VmaVulkanFunctions']]], ['vkgetphysicaldeviceproperties',['vkGetPhysicalDeviceProperties',['../struct_vma_vulkan_functions.html#a77b7a74082823e865dd6546623468f96',1,'VmaVulkanFunctions']]], ['vkmapmemory',['vkMapMemory',['../struct_vma_vulkan_functions.html#ab5c1f38dea3a2cf00dc9eb4f57218c49',1,'VmaVulkanFunctions']]], ['vkunmapmemory',['vkUnmapMemory',['../struct_vma_vulkan_functions.html#acc798589736f0becb317fc2196c1d8b9',1,'VmaVulkanFunctions']]], ['vma_5fallocation_5fcreate_5fcan_5fbecome_5flost_5fbit',['VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT',['../group__layer1.html#ggad9889c10c798b040d59c92f257cae597a5f436af6c8fe8540573a6d22627a6fd2',1,'vk_mem_alloc.h']]], ['vma_5fallocation_5fcreate_5fcan_5fmake_5fother_5flost_5fbit',['VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT',['../group__layer1.html#ggad9889c10c798b040d59c92f257cae597a68686d0ce9beb0d4d1b9f2b8b1389a7e',1,'vk_mem_alloc.h']]], + ['vma_5fallocation_5fcreate_5fdedicated_5fmemory_5fbit',['VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT',['../group__layer1.html#ggad9889c10c798b040d59c92f257cae597a3fc311d855c2ff53f1090ef5c722b38f',1,'vk_mem_alloc.h']]], ['vma_5fallocation_5fcreate_5fflag_5fbits_5fmax_5fenum',['VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM',['../group__layer1.html#ggad9889c10c798b040d59c92f257cae597ae5633ec569f4899cf8f29e7385b2f882',1,'vk_mem_alloc.h']]], ['vma_5fallocation_5fcreate_5fnever_5fallocate_5fbit',['VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT',['../group__layer1.html#ggad9889c10c798b040d59c92f257cae597a89759603401014eb325eb22a3839f2ff',1,'vk_mem_alloc.h']]], - ['vma_5fallocation_5fcreate_5fown_5fmemory_5fbit',['VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT',['../group__layer1.html#ggad9889c10c798b040d59c92f257cae597a965e035b69f8728b317803ef2d523aa4',1,'vk_mem_alloc.h']]], ['vma_5fallocation_5fcreate_5fpersistent_5fmap_5fbit',['VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT',['../group__layer1.html#ggad9889c10c798b040d59c92f257cae597ae443691ef3d077c0dc3de5576ac4c312',1,'vk_mem_alloc.h']]], ['vma_5fallocator_5fexternally_5fsynchronized_5fbit',['VMA_ALLOCATOR_EXTERNALLY_SYNCHRONIZED_BIT',['../group__general.html#gga34fff29c218d23a7ff7dff44b77b6b6fabe92b706180652ceb320da5bc383aef4',1,'vk_mem_alloc.h']]], 
['vma_5fallocator_5fflag_5fbits_5fmax_5fenum',['VMA_ALLOCATOR_FLAG_BITS_MAX_ENUM',['../group__general.html#gga34fff29c218d23a7ff7dff44b77b6b6fa914e905a08c2e578f76b9d6c418626cc',1,'vk_mem_alloc.h']]], + ['vma_5fallocator_5fkhr_5fdedicated_5fallocation_5fbit',['VMA_ALLOCATOR_KHR_DEDICATED_ALLOCATION_BIT',['../group__general.html#gga34fff29c218d23a7ff7dff44b77b6b6fa96990602a42bd78d27fb25e2265880b4',1,'vk_mem_alloc.h']]], ['vma_5fmemory_5fusage_5fcpu_5fonly',['VMA_MEMORY_USAGE_CPU_ONLY',['../group__layer1.html#ggaa5846affa1e9da3800e3e78fae2305cca40bdf4cddeffeb12f43d45ca1286e0a5',1,'vk_mem_alloc.h']]], ['vma_5fmemory_5fusage_5fcpu_5fto_5fgpu',['VMA_MEMORY_USAGE_CPU_TO_GPU',['../group__layer1.html#ggaa5846affa1e9da3800e3e78fae2305cca9066b52c5a7079bb74a69aaf8b92ff67',1,'vk_mem_alloc.h']]], ['vma_5fmemory_5fusage_5fgpu_5fonly',['VMA_MEMORY_USAGE_GPU_ONLY',['../group__layer1.html#ggaa5846affa1e9da3800e3e78fae2305ccac6b5dc1432d88647aa4cd456246eadf7',1,'vk_mem_alloc.h']]], diff --git a/docs/html/search/enumvalues_0.js b/docs/html/search/enumvalues_0.js index f624ca0..cfbc530 100644 --- a/docs/html/search/enumvalues_0.js +++ b/docs/html/search/enumvalues_0.js @@ -2,12 +2,13 @@ var searchData= [ ['vma_5fallocation_5fcreate_5fcan_5fbecome_5flost_5fbit',['VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT',['../group__layer1.html#ggad9889c10c798b040d59c92f257cae597a5f436af6c8fe8540573a6d22627a6fd2',1,'vk_mem_alloc.h']]], ['vma_5fallocation_5fcreate_5fcan_5fmake_5fother_5flost_5fbit',['VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT',['../group__layer1.html#ggad9889c10c798b040d59c92f257cae597a68686d0ce9beb0d4d1b9f2b8b1389a7e',1,'vk_mem_alloc.h']]], + ['vma_5fallocation_5fcreate_5fdedicated_5fmemory_5fbit',['VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT',['../group__layer1.html#ggad9889c10c798b040d59c92f257cae597a3fc311d855c2ff53f1090ef5c722b38f',1,'vk_mem_alloc.h']]], ['vma_5fallocation_5fcreate_5fflag_5fbits_5fmax_5fenum',['VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM',['../group__layer1.html#ggad9889c10c798b040d59c92f257cae597ae5633ec569f4899cf8f29e7385b2f882',1,'vk_mem_alloc.h']]], ['vma_5fallocation_5fcreate_5fnever_5fallocate_5fbit',['VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT',['../group__layer1.html#ggad9889c10c798b040d59c92f257cae597a89759603401014eb325eb22a3839f2ff',1,'vk_mem_alloc.h']]], - ['vma_5fallocation_5fcreate_5fown_5fmemory_5fbit',['VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT',['../group__layer1.html#ggad9889c10c798b040d59c92f257cae597a965e035b69f8728b317803ef2d523aa4',1,'vk_mem_alloc.h']]], ['vma_5fallocation_5fcreate_5fpersistent_5fmap_5fbit',['VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT',['../group__layer1.html#ggad9889c10c798b040d59c92f257cae597ae443691ef3d077c0dc3de5576ac4c312',1,'vk_mem_alloc.h']]], ['vma_5fallocator_5fexternally_5fsynchronized_5fbit',['VMA_ALLOCATOR_EXTERNALLY_SYNCHRONIZED_BIT',['../group__general.html#gga34fff29c218d23a7ff7dff44b77b6b6fabe92b706180652ceb320da5bc383aef4',1,'vk_mem_alloc.h']]], ['vma_5fallocator_5fflag_5fbits_5fmax_5fenum',['VMA_ALLOCATOR_FLAG_BITS_MAX_ENUM',['../group__general.html#gga34fff29c218d23a7ff7dff44b77b6b6fa914e905a08c2e578f76b9d6c418626cc',1,'vk_mem_alloc.h']]], + ['vma_5fallocator_5fkhr_5fdedicated_5fallocation_5fbit',['VMA_ALLOCATOR_KHR_DEDICATED_ALLOCATION_BIT',['../group__general.html#gga34fff29c218d23a7ff7dff44b77b6b6fa96990602a42bd78d27fb25e2265880b4',1,'vk_mem_alloc.h']]], ['vma_5fmemory_5fusage_5fcpu_5fonly',['VMA_MEMORY_USAGE_CPU_ONLY',['../group__layer1.html#ggaa5846affa1e9da3800e3e78fae2305cca40bdf4cddeffeb12f43d45ca1286e0a5',1,'vk_mem_alloc.h']]], 
['vma_5fmemory_5fusage_5fcpu_5fto_5fgpu',['VMA_MEMORY_USAGE_CPU_TO_GPU',['../group__layer1.html#ggaa5846affa1e9da3800e3e78fae2305cca9066b52c5a7079bb74a69aaf8b92ff67',1,'vk_mem_alloc.h']]], ['vma_5fmemory_5fusage_5fgpu_5fonly',['VMA_MEMORY_USAGE_GPU_ONLY',['../group__layer1.html#ggaa5846affa1e9da3800e3e78fae2305ccac6b5dc1432d88647aa4cd456246eadf7',1,'vk_mem_alloc.h']]], diff --git a/docs/html/search/variables_b.js b/docs/html/search/variables_b.js index e40881e..42b7df9 100644 --- a/docs/html/search/variables_b.js +++ b/docs/html/search/variables_b.js @@ -9,7 +9,9 @@ var searchData= ['vkdestroyimage',['vkDestroyImage',['../struct_vma_vulkan_functions.html#a90b898227039b1dcb3520f6e91f09ffa',1,'VmaVulkanFunctions']]], ['vkfreememory',['vkFreeMemory',['../struct_vma_vulkan_functions.html#a4c658701778564d62034255b5dda91b4',1,'VmaVulkanFunctions']]], ['vkgetbuffermemoryrequirements',['vkGetBufferMemoryRequirements',['../struct_vma_vulkan_functions.html#a5b92901df89a4194b0d12f6071d4d143',1,'VmaVulkanFunctions']]], + ['vkgetbuffermemoryrequirements2khr',['vkGetBufferMemoryRequirements2KHR',['../struct_vma_vulkan_functions.html#a9d8d1b05d2b1e7e1d9b27f6f585acf9c',1,'VmaVulkanFunctions']]], ['vkgetimagememoryrequirements',['vkGetImageMemoryRequirements',['../struct_vma_vulkan_functions.html#a475f6f49f8debe4d10800592606d53f4',1,'VmaVulkanFunctions']]], + ['vkgetimagememoryrequirements2khr',['vkGetImageMemoryRequirements2KHR',['../struct_vma_vulkan_functions.html#a9cdcdc1e2b2ea7c571f7d27e30ba6875',1,'VmaVulkanFunctions']]], ['vkgetphysicaldevicememoryproperties',['vkGetPhysicalDeviceMemoryProperties',['../struct_vma_vulkan_functions.html#a60d25c33bba06bb8592e6875cbaa9830',1,'VmaVulkanFunctions']]], ['vkgetphysicaldeviceproperties',['vkGetPhysicalDeviceProperties',['../struct_vma_vulkan_functions.html#a77b7a74082823e865dd6546623468f96',1,'VmaVulkanFunctions']]], ['vkmapmemory',['vkMapMemory',['../struct_vma_vulkan_functions.html#ab5c1f38dea3a2cf00dc9eb4f57218c49',1,'VmaVulkanFunctions']]], diff --git a/docs/html/struct_vma_vulkan_functions-members.html b/docs/html/struct_vma_vulkan_functions-members.html index 05d3e41..b000949 100644 --- a/docs/html/struct_vma_vulkan_functions-members.html +++ b/docs/html/struct_vma_vulkan_functions-members.html @@ -74,7 +74,9 @@ $(function() { - + + + diff --git a/docs/html/struct_vma_vulkan_functions.html b/docs/html/struct_vma_vulkan_functions.html index 58ae3d7..6d0e3ce 100644 --- a/docs/html/struct_vma_vulkan_functions.html +++ b/docs/html/struct_vma_vulkan_functions.html @@ -101,6 +101,10 @@ Public Attributes + + + +
    Enumerator
    VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT 

    Set this flag if the allocation should have its own memory block.

    +
    Enumerator
    VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT 

    Set this flag if the allocation should have its own memory block.

    Use it for special, big resources, like fullscreen images used as attachments.

This flag must also be used for host-visible resources that you want to map simultaneously, because otherwise they might end up as regions of the same VkDeviceMemory, and mapping the same VkDeviceMemory multiple times simultaneously is illegal.

    You should not use this flag if VmaAllocationCreateInfo::pool is not null.
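For illustration, a minimal sketch of requesting a dedicated block for a big resource; the size and usage flags are only examples:

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 64ull * 1024 * 1024;
    bufCreateInfo.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;

    VkBuffer buf;
    VmaAllocation alloc;
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
    // The buffer now occupies its own VkDeviceMemory block instead of a suballocation.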

    VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT 

Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create a new block.

If a new allocation cannot be placed in any of the existing blocks, the allocation fails with the VK_ERROR_OUT_OF_DEVICE_MEMORY error.

    -

    You should not use VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT and VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT at the same time. It makes no sense.

    +

    You should not use VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT and VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT at the same time. It makes no sense.

    If VmaAllocationCreateInfo::pool is not null, this flag is implied and ignored.

    VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT 

Set this flag to use memory that will be persistently mapped and retrieve a pointer to it.
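For illustration, a minimal sketch of a persistently mapped staging buffer; srcData and srcSize are placeholders for your own data:

    VkBufferCreateInfo stagingInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    stagingInfo.size = srcSize;
    stagingInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT;

    VkBuffer stagingBuf;
    VmaAllocation stagingAlloc;
    VmaAllocationInfo allocInfo;
    vmaCreateBuffer(allocator, &stagingInfo, &allocCreateInfo, &stagingBuf, &stagingAlloc, &allocInfo);
    memcpy(allocInfo.pMappedData, srcData, srcSize); // mapped for the allocation's whole lifetime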

    diff --git a/docs/html/group__layer2.html b/docs/html/group__layer2.html index 0a807df..1024a4f 100644 --- a/docs/html/group__layer2.html +++ b/docs/html/group__layer2.html @@ -609,7 +609,7 @@ Functions
vkDestroyImage : VmaVulkanFunctions
vkFreeMemory : VmaVulkanFunctions
vkGetBufferMemoryRequirements : VmaVulkanFunctions
vkGetImageMemoryRequirements : VmaVulkanFunctions
vkGetBufferMemoryRequirements2KHR : VmaVulkanFunctions
vkGetImageMemoryRequirements : VmaVulkanFunctions
vkGetImageMemoryRequirements2KHR : VmaVulkanFunctions
vkGetPhysicalDeviceMemoryProperties : VmaVulkanFunctions
vkGetPhysicalDeviceProperties : VmaVulkanFunctions
vkMapMemory : VmaVulkanFunctions
     
    PFN_vkDestroyImage vkDestroyImage
     
    PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR
     
    PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR
     

    Detailed Description

    Pointers to some Vulkan functions - a subset used by the library.

    @@ -230,6 +234,20 @@ Public Attributes
    +
    + + +

    ◆ vkGetBufferMemoryRequirements2KHR

    + +
    +
    + + + + +
    PFN_vkGetBufferMemoryRequirements2KHR VmaVulkanFunctions::vkGetBufferMemoryRequirements2KHR
    +
    +
    @@ -244,6 +262,20 @@ Public Attributes
    +
    + + +

    ◆ vkGetImageMemoryRequirements2KHR

    + +
    +
    + + + + +
    PFN_vkGetImageMemoryRequirements2KHR VmaVulkanFunctions::vkGetImageMemoryRequirements2KHR
    +
    +
    diff --git a/docs/html/vk__mem__alloc_8h.html b/docs/html/vk__mem__alloc_8h.html index 90f8119..e9e2e8e 100644 --- a/docs/html/vk__mem__alloc_8h.html +++ b/docs/html/vk__mem__alloc_8h.html @@ -173,6 +173,7 @@ Typedefs

    Enumerations

    enum  VmaAllocatorFlagBits { VMA_ALLOCATOR_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001, +VMA_ALLOCATOR_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002, VMA_ALLOCATOR_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } Flags for created VmaAllocator. More...
    @@ -189,7 +190,7 @@ Enumerations }   enum  VmaAllocationCreateFlagBits {
    -  VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT = 0x00000001, +  VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001, VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002, VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT = 0x00000004, VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008, diff --git a/docs/html/vk__mem__alloc_8h_source.html b/docs/html/vk__mem__alloc_8h_source.html index 41633d7..52340d8 100644 --- a/docs/html/vk__mem__alloc_8h_source.html +++ b/docs/html/vk__mem__alloc_8h_source.html @@ -62,152 +62,155 @@ $(function() {
    vk_mem_alloc.h
    -Go to the documentation of this file.
    1 //
    2 // Copyright (c) 2017 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    393 #include <vulkan/vulkan.h>
    394 
    396 
    400 VK_DEFINE_HANDLE(VmaAllocator)
    401 
    402 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    404  VmaAllocator allocator,
    405  uint32_t memoryType,
    406  VkDeviceMemory memory,
    407  VkDeviceSize size);
    409 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    410  VmaAllocator allocator,
    411  uint32_t memoryType,
    412  VkDeviceMemory memory,
    413  VkDeviceSize size);
    414 
    422 typedef struct VmaDeviceMemoryCallbacks {
    428 
    430 typedef enum VmaAllocatorFlagBits {
    436 
    439 typedef VkFlags VmaAllocatorFlags;
    440 
    445 typedef struct VmaVulkanFunctions {
    446  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    447  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    448  PFN_vkAllocateMemory vkAllocateMemory;
    449  PFN_vkFreeMemory vkFreeMemory;
    450  PFN_vkMapMemory vkMapMemory;
    451  PFN_vkUnmapMemory vkUnmapMemory;
    452  PFN_vkBindBufferMemory vkBindBufferMemory;
    453  PFN_vkBindImageMemory vkBindImageMemory;
    454  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    455  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    456  PFN_vkCreateBuffer vkCreateBuffer;
    457  PFN_vkDestroyBuffer vkDestroyBuffer;
    458  PFN_vkCreateImage vkCreateImage;
    459  PFN_vkDestroyImage vkDestroyImage;
    461 
    464 {
    466  VmaAllocatorFlags flags;
    468 
    469  VkPhysicalDevice physicalDevice;
    471 
    472  VkDevice device;
    474 
    477 
    480 
    481  const VkAllocationCallbacks* pAllocationCallbacks;
    483 
    498  uint32_t frameInUseCount;
    516  const VkDeviceSize* pHeapSizeLimit;
    530 
    532 VkResult vmaCreateAllocator(
    533  const VmaAllocatorCreateInfo* pCreateInfo,
    534  VmaAllocator* pAllocator);
    535 
    538  VmaAllocator allocator);
    539 
    545  VmaAllocator allocator,
    546  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    547 
    553  VmaAllocator allocator,
    554  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    555 
    563  VmaAllocator allocator,
    564  uint32_t memoryTypeIndex,
    565  VkMemoryPropertyFlags* pFlags);
    566 
    576  VmaAllocator allocator,
    577  uint32_t frameIndex);
    578 
    581 typedef struct VmaStatInfo
    582 {
    584  uint32_t blockCount;
    586  uint32_t allocationCount;
    590  VkDeviceSize usedBytes;
    592  VkDeviceSize unusedBytes;
    593  VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    594  VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
    595 } VmaStatInfo;
    596 
    598 typedef struct VmaStats
    599 {
    600  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    601  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    603 } VmaStats;
    604 
    606 void vmaCalculateStats(
    607  VmaAllocator allocator,
    608  VmaStats* pStats);
    609 
    610 #define VMA_STATS_STRING_ENABLED 1
    611 
    612 #if VMA_STATS_STRING_ENABLED
    613 
    615 
    618  VmaAllocator allocator,
    619  char** ppStatsString,
    620  VkBool32 detailedMap);
    621 
    622 void vmaFreeStatsString(
    623  VmaAllocator allocator,
    624  char* pStatsString);
    625 
    626 #endif // #if VMA_STATS_STRING_ENABLED
    627 
    630 
    635 VK_DEFINE_HANDLE(VmaPool)
    636 
    637 typedef enum VmaMemoryUsage
    638 {
    644 
    647 
    650 
    654 
    669 
    714 
    717 typedef VkFlags VmaAllocationCreateFlags;
    718 
    720 {
    722  VmaAllocationCreateFlags flags;
    733  VkMemoryPropertyFlags requiredFlags;
    739  VkMemoryPropertyFlags preferredFlags;
    741  void* pUserData;
    746  VmaPool pool;
    748 
    763 VkResult vmaFindMemoryTypeIndex(
    764  VmaAllocator allocator,
    765  uint32_t memoryTypeBits,
    766  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    767  uint32_t* pMemoryTypeIndex);
    768 
    771 
    776 typedef enum VmaPoolCreateFlagBits {
    805 
    808 typedef VkFlags VmaPoolCreateFlags;
    809 
    812 typedef struct VmaPoolCreateInfo {
    815  uint32_t memoryTypeIndex;
    818  VmaPoolCreateFlags flags;
    823  VkDeviceSize blockSize;
    850  uint32_t frameInUseCount;
    852 
    855 typedef struct VmaPoolStats {
    858  VkDeviceSize size;
    861  VkDeviceSize unusedSize;
    874  VkDeviceSize unusedRangeSizeMax;
    875 } VmaPoolStats;
    876 
    883 VkResult vmaCreatePool(
    884  VmaAllocator allocator,
    885  const VmaPoolCreateInfo* pCreateInfo,
    886  VmaPool* pPool);
    887 
    890 void vmaDestroyPool(
    891  VmaAllocator allocator,
    892  VmaPool pool);
    893 
    900 void vmaGetPoolStats(
    901  VmaAllocator allocator,
    902  VmaPool pool,
    903  VmaPoolStats* pPoolStats);
    904 
    912  VmaAllocator allocator,
    913  VmaPool pool,
    914  size_t* pLostAllocationCount);
    915 
    916 VK_DEFINE_HANDLE(VmaAllocation)
    917 
    918 
    920 typedef struct VmaAllocationInfo {
    925  uint32_t memoryType;
    934  VkDeviceMemory deviceMemory;
    939  VkDeviceSize offset;
    944  VkDeviceSize size;
    950  void* pMappedData;
    955  void* pUserData;
    957 
    968 VkResult vmaAllocateMemory(
    969  VmaAllocator allocator,
    970  const VkMemoryRequirements* pVkMemoryRequirements,
    971  const VmaAllocationCreateInfo* pCreateInfo,
    972  VmaAllocation* pAllocation,
    973  VmaAllocationInfo* pAllocationInfo);
    974 
    982  VmaAllocator allocator,
    983  VkBuffer buffer,
    984  const VmaAllocationCreateInfo* pCreateInfo,
    985  VmaAllocation* pAllocation,
    986  VmaAllocationInfo* pAllocationInfo);
    987 
    990  VmaAllocator allocator,
    991  VkImage image,
    992  const VmaAllocationCreateInfo* pCreateInfo,
    993  VmaAllocation* pAllocation,
    994  VmaAllocationInfo* pAllocationInfo);
    995 
    997 void vmaFreeMemory(
    998  VmaAllocator allocator,
    999  VmaAllocation allocation);
    1000 
    1003  VmaAllocator allocator,
    1004  VmaAllocation allocation,
    1005  VmaAllocationInfo* pAllocationInfo);
    1006 
    1009  VmaAllocator allocator,
    1010  VmaAllocation allocation,
    1011  void* pUserData);
    1012 
    1024  VmaAllocator allocator,
    1025  VmaAllocation* pAllocation);
    1026 
    1035 VkResult vmaMapMemory(
    1036  VmaAllocator allocator,
    1037  VmaAllocation allocation,
    1038  void** ppData);
    1039 
    1040 void vmaUnmapMemory(
    1041  VmaAllocator allocator,
    1042  VmaAllocation allocation);
    1043 
    1065 void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator);
    1066 
    1074 VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator);
    1075 
    1077 typedef struct VmaDefragmentationInfo {
    1082  VkDeviceSize maxBytesToMove;
    1089 
    1091 typedef struct VmaDefragmentationStats {
    1093  VkDeviceSize bytesMoved;
    1095  VkDeviceSize bytesFreed;
    1101 
    1172 VkResult vmaDefragment(
    1173  VmaAllocator allocator,
    1174  VmaAllocation* pAllocations,
    1175  size_t allocationCount,
    1176  VkBool32* pAllocationsChanged,
    1177  const VmaDefragmentationInfo *pDefragmentationInfo,
    1178  VmaDefragmentationStats* pDefragmentationStats);
    1179 
    1182 
    1205 VkResult vmaCreateBuffer(
    1206  VmaAllocator allocator,
    1207  const VkBufferCreateInfo* pBufferCreateInfo,
    1208  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    1209  VkBuffer* pBuffer,
    1210  VmaAllocation* pAllocation,
    1211  VmaAllocationInfo* pAllocationInfo);
    1212 
    1221 void vmaDestroyBuffer(
    1222  VmaAllocator allocator,
    1223  VkBuffer buffer,
    1224  VmaAllocation allocation);
    1225 
    1227 VkResult vmaCreateImage(
    1228  VmaAllocator allocator,
    1229  const VkImageCreateInfo* pImageCreateInfo,
    1230  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    1231  VkImage* pImage,
    1232  VmaAllocation* pAllocation,
    1233  VmaAllocationInfo* pAllocationInfo);
    1234 
    1243 void vmaDestroyImage(
    1244  VmaAllocator allocator,
    1245  VkImage image,
    1246  VmaAllocation allocation);
    1247 
    1250 #ifdef __cplusplus
    1251 }
    1252 #endif
    1253 
    1254 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    1255 
    1256 // For Visual Studio IntelliSense.
    1257 #ifdef __INTELLISENSE__
    1258 #define VMA_IMPLEMENTATION
    1259 #endif
    1260 
    1261 #ifdef VMA_IMPLEMENTATION
    1262 #undef VMA_IMPLEMENTATION
    1263 
    1264 #include <cstdint>
    1265 #include <cstdlib>
    1266 #include <cstring>
    1267 
    1268 /*******************************************************************************
    1269 CONFIGURATION SECTION
    1270 
1271 Define some of these macros before each #include of this header, or change them
1272 here, if you need behavior other than the default for your environment.
    1273 */
    1274 
    1275 /*
    1276 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    1277 internally, like:
    1278 
    1279  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    1280 
1281 Define to 0 if you are going to provide your own pointers to Vulkan functions via
    1282 VmaAllocatorCreateInfo::pVulkanFunctions.
    1283 */
    1284 #ifndef VMA_STATIC_VULKAN_FUNCTIONS
    1285 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    1286 #endif
    1287 
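// Illustrative usage, not part of the header: when loading Vulkan entry points
// dynamically, a typical setup (sketched under that assumption) is:
//
//     #define VMA_STATIC_VULKAN_FUNCTIONS 0
//     #define VMA_IMPLEMENTATION
//     #include "vk_mem_alloc.h"
//
// and then filling VmaAllocatorCreateInfo::pVulkanFunctions with your own
// pointers before calling vmaCreateAllocator().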
    1288 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    1289 //#define VMA_USE_STL_CONTAINERS 1
    1290 
1291 /* Set this macro to 1 to make the library include and use STL containers:
    1292 std::pair, std::vector, std::list, std::unordered_map.
    1293 
1294 Set it to 0 or leave it undefined to make the library use its own implementation of
    1295 the containers.
    1296 */
    1297 #if VMA_USE_STL_CONTAINERS
    1298  #define VMA_USE_STL_VECTOR 1
    1299  #define VMA_USE_STL_UNORDERED_MAP 1
    1300  #define VMA_USE_STL_LIST 1
    1301 #endif
    1302 
    1303 #if VMA_USE_STL_VECTOR
    1304  #include <vector>
    1305 #endif
    1306 
    1307 #if VMA_USE_STL_UNORDERED_MAP
    1308  #include <unordered_map>
    1309 #endif
    1310 
    1311 #if VMA_USE_STL_LIST
    1312  #include <list>
    1313 #endif
    1314 
    1315 /*
1316 The following headers are used in this CONFIGURATION section only, so feel free to
    1317 remove them if not needed.
    1318 */
    1319 #include <cassert> // for assert
    1320 #include <algorithm> // for min, max
    1321 #include <mutex> // for std::mutex
    1322 #include <atomic> // for std::atomic
    1323 
    1324 #if !defined(_WIN32)
    1325  #include <malloc.h> // for aligned_alloc()
    1326 #endif
    1327 
    1328 // Normal assert to check for programmer's errors, especially in Debug configuration.
    1329 #ifndef VMA_ASSERT
    1330  #ifdef _DEBUG
    1331  #define VMA_ASSERT(expr) assert(expr)
    1332  #else
    1333  #define VMA_ASSERT(expr)
    1334  #endif
    1335 #endif
    1336 
    1337 // Assert that will be called very often, like inside data structures e.g. operator[].
1338 // Making it non-empty can make the program slow.
    1339 #ifndef VMA_HEAVY_ASSERT
    1340  #ifdef _DEBUG
    1341  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    1342  #else
    1343  #define VMA_HEAVY_ASSERT(expr)
    1344  #endif
    1345 #endif
    1346 
    1347 #ifndef VMA_NULL
    1348  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    1349  #define VMA_NULL nullptr
    1350 #endif
    1351 
    1352 #ifndef VMA_ALIGN_OF
    1353  #define VMA_ALIGN_OF(type) (__alignof(type))
    1354 #endif
    1355 
    1356 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    1357  #if defined(_WIN32)
    1358  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    1359  #else
    1360  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    1361  #endif
    1362 #endif
    1363 
    1364 #ifndef VMA_SYSTEM_FREE
    1365  #if defined(_WIN32)
    1366  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    1367  #else
    1368  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    1369  #endif
    1370 #endif
    1371 
    1372 #ifndef VMA_MIN
    1373  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    1374 #endif
    1375 
    1376 #ifndef VMA_MAX
    1377  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    1378 #endif
    1379 
    1380 #ifndef VMA_SWAP
    1381  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    1382 #endif
    1383 
    1384 #ifndef VMA_SORT
    1385  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    1386 #endif
    1387 
    1388 #ifndef VMA_DEBUG_LOG
    1389  #define VMA_DEBUG_LOG(format, ...)
    1390  /*
    1391  #define VMA_DEBUG_LOG(format, ...) do { \
    1392  printf(format, __VA_ARGS__); \
    1393  printf("\n"); \
    1394  } while(false)
    1395  */
    1396 #endif
    1397 
    1398 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    1399 #if VMA_STATS_STRING_ENABLED
    1400  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    1401  {
    1402  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    1403  }
    1404  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    1405  {
    1406  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    1407  }
    1408  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    1409  {
    1410  snprintf(outStr, strLen, "%p", ptr);
    1411  }
    1412 #endif
    1413 
    1414 #ifndef VMA_MUTEX
    1415  class VmaMutex
    1416  {
    1417  public:
    1418  VmaMutex() { }
    1419  ~VmaMutex() { }
    1420  void Lock() { m_Mutex.lock(); }
    1421  void Unlock() { m_Mutex.unlock(); }
    1422  private:
    1423  std::mutex m_Mutex;
    1424  };
    1425  #define VMA_MUTEX VmaMutex
    1426 #endif
    1427 
    1428 /*
    1429 If providing your own implementation, you need to implement a subset of std::atomic:
    1430 
    1431 - Constructor(uint32_t desired)
    1432 - uint32_t load() const
    1433 - void store(uint32_t desired)
    1434 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    1435 */
    1436 #ifndef VMA_ATOMIC_UINT32
    1437  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    1438 #endif
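// Illustrative sketch, not part of the header: any replacement type only needs
// the std::atomic subset listed above. A trivial conforming wrapper:
//
//     class MyAtomicUint32
//     {
//     public:
//         MyAtomicUint32(uint32_t desired) : m_Value(desired) { }
//         uint32_t load() const { return m_Value.load(); }
//         void store(uint32_t desired) { m_Value.store(desired); }
//         bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
//             { return m_Value.compare_exchange_weak(expected, desired); }
//     private:
//         std::atomic<uint32_t> m_Value;
//     };
//     #define VMA_ATOMIC_UINT32 MyAtomicUint32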
    1439 
    1440 #ifndef VMA_BEST_FIT
    1441 
    1453  #define VMA_BEST_FIT (1)
    1454 #endif
    1455 
    1456 #ifndef VMA_DEBUG_ALWAYS_OWN_MEMORY
    1457 
    1461  #define VMA_DEBUG_ALWAYS_OWN_MEMORY (0)
    1462 #endif
    1463 
    1464 #ifndef VMA_DEBUG_ALIGNMENT
    1465 
    1469  #define VMA_DEBUG_ALIGNMENT (1)
    1470 #endif
    1471 
    1472 #ifndef VMA_DEBUG_MARGIN
    1473 
    1477  #define VMA_DEBUG_MARGIN (0)
    1478 #endif
    1479 
    1480 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    1481 
    1485  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    1486 #endif
    1487 
    1488 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    1489 
    1493  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    1494 #endif
    1495 
    1496 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    1497  #define VMA_SMALL_HEAP_MAX_SIZE (512 * 1024 * 1024)
    1499 #endif
    1500 
    1501 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    1502  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256 * 1024 * 1024)
    1504 #endif
    1505 
    1506 #ifndef VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE
    1507  #define VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE (64 * 1024 * 1024)
    1509 #endif
    1510 
    1511 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    1512 
    1513 /*******************************************************************************
    1514 END OF CONFIGURATION
    1515 */
    1516 
    1517 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    1518  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    1519 
    1520 // Returns number of bits set to 1 in (v).
    1521 static inline uint32_t CountBitsSet(uint32_t v)
    1522 {
    1523  uint32_t c = v - ((v >> 1) & 0x55555555);
    1524  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    1525  c = ((c >> 4) + c) & 0x0F0F0F0F;
    1526  c = ((c >> 8) + c) & 0x00FF00FF;
    1527  c = ((c >> 16) + c) & 0x0000FFFF;
    1528  return c;
    1529 }
    1530 
1531 // Aligns given value up to the nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
    1532 // Use types like uint32_t, uint64_t as T.
    1533 template <typename T>
    1534 static inline T VmaAlignUp(T val, T align)
    1535 {
    1536  return (val + align - 1) / align * align;
    1537 }
    1538 
    1539 // Division with mathematical rounding to nearest number.
    1540 template <typename T>
    1541 inline T VmaRoundDiv(T x, T y)
    1542 {
    1543  return (x + (y / (T)2)) / y;
    1544 }
    1545 
    1546 #ifndef VMA_SORT
    1547 
    1548 template<typename Iterator, typename Compare>
    1549 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
    1550 {
    1551  Iterator centerValue = end; --centerValue;
    1552  Iterator insertIndex = beg;
    1553  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    1554  {
    1555  if(cmp(*memTypeIndex, *centerValue))
    1556  {
    1557  if(insertIndex != memTypeIndex)
    1558  {
    1559  VMA_SWAP(*memTypeIndex, *insertIndex);
    1560  }
    1561  ++insertIndex;
    1562  }
    1563  }
    1564  if(insertIndex != centerValue)
    1565  {
    1566  VMA_SWAP(*insertIndex, *centerValue);
    1567  }
    1568  return insertIndex;
    1569 }
    1570 
    1571 template<typename Iterator, typename Compare>
    1572 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
    1573 {
    1574  if(beg < end)
    1575  {
    1576  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
    1577  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
    1578  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    1579  }
    1580 }
    1581 
    1582 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    1583 
    1584 #endif // #ifndef VMA_SORT
    1585 
    1586 /*
    1587 Returns true if two memory blocks occupy overlapping pages.
1588 ResourceA must be at a lower memory offset than ResourceB.
    1589 
    1590 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
    1591 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
    1592 */
    1593 static inline bool VmaBlocksOnSamePage(
    1594  VkDeviceSize resourceAOffset,
    1595  VkDeviceSize resourceASize,
    1596  VkDeviceSize resourceBOffset,
    1597  VkDeviceSize pageSize)
    1598 {
    1599  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    1600  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    1601  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    1602  VkDeviceSize resourceBStart = resourceBOffset;
    1603  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    1604  return resourceAEndPage == resourceBStartPage;
    1605 }
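// Illustrative check, not part of the header, with pageSize = bufferImageGranularity = 1024:
//
//     VmaBlocksOnSamePage(0, 512,  512, 1024) // true:  A ends at byte 511 (page 0), B starts at byte 512 (still page 0)
//     VmaBlocksOnSamePage(0, 512, 1024, 1024) // false: B starts at byte 1024, the beginning of the next page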
    1606 
    1607 enum VmaSuballocationType
    1608 {
    1609  VMA_SUBALLOCATION_TYPE_FREE = 0,
    1610  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    1611  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    1612  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    1613  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    1614  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    1615  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
    1616 };
    1617 
    1618 /*
    1619 Returns true if given suballocation types could conflict and must respect
1620 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is a buffer
1621 or linear image and the other one is an optimal image. If the type is unknown, behave
    1622 conservatively.
    1623 */
    1624 static inline bool VmaIsBufferImageGranularityConflict(
    1625  VmaSuballocationType suballocType1,
    1626  VmaSuballocationType suballocType2)
    1627 {
    1628  if(suballocType1 > suballocType2)
    1629  {
    1630  VMA_SWAP(suballocType1, suballocType2);
    1631  }
    1632 
    1633  switch(suballocType1)
    1634  {
    1635  case VMA_SUBALLOCATION_TYPE_FREE:
    1636  return false;
    1637  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
    1638  return true;
    1639  case VMA_SUBALLOCATION_TYPE_BUFFER:
    1640  return
    1641  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    1642  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    1643  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
    1644  return
    1645  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    1646  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
    1647  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    1648  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
    1649  return
    1650  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    1651  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
    1652  return false;
    1653  default:
    1654  VMA_ASSERT(0);
    1655  return true;
    1656  }
    1657 }
    1658 
    1659 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
    1660 struct VmaMutexLock
    1661 {
    1662 public:
    1663  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
    1664  m_pMutex(useMutex ? &mutex : VMA_NULL)
    1665  {
    1666  if(m_pMutex)
    1667  {
    1668  m_pMutex->Lock();
    1669  }
    1670  }
    1671 
    1672  ~VmaMutexLock()
    1673  {
    1674  if(m_pMutex)
    1675  {
    1676  m_pMutex->Unlock();
    1677  }
    1678  }
    1679 
    1680 private:
    1681  VMA_MUTEX* m_pMutex;
    1682 };
    1683 
    1684 #if VMA_DEBUG_GLOBAL_MUTEX
    1685  static VMA_MUTEX gDebugGlobalMutex;
    1686  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    1687 #else
    1688  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    1689 #endif
    1690 
    1691 // Minimum size of a free suballocation to register it in the free suballocation collection.
    1692 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    1693 
    1694 /*
    1695 Performs binary search and returns iterator to first element that is greater or
    1696 equal to (key), according to comparison (cmp).
    1697 
    1698 Cmp should return true if first argument is less than second argument.
    1699 
1700 Returned value is the found element, if present in the collection, or the place where
1701 a new element with value (key) should be inserted.
    1702 */
    1703 template <typename IterT, typename KeyT, typename CmpT>
    1704 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
    1705 {
    1706  size_t down = 0, up = (end - beg);
    1707  while(down < up)
    1708  {
    1709  const size_t mid = (down + up) / 2;
    1710  if(cmp(*(beg+mid), key))
    1711  {
    1712  down = mid + 1;
    1713  }
    1714  else
    1715  {
    1716  up = mid;
    1717  }
    1718  }
    1719  return beg + down;
    1720 }
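// Illustrative usage, not part of the header: find where 25 belongs in a sorted array.
//
//     const uint32_t sorted[] = { 10, 20, 30, 40 };
//     const uint32_t* it = VmaBinaryFindFirstNotLess(
//         sorted, sorted + 4, 25u,
//         [](uint32_t a, uint32_t b) { return a < b; });
//     // it points at 30: the first element not less than the key, which is also
//     // the position where 25 would be inserted.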
    1721 
    1723 // Memory allocation
    1724 
    1725 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
    1726 {
    1727  if((pAllocationCallbacks != VMA_NULL) &&
    1728  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    1729  {
    1730  return (*pAllocationCallbacks->pfnAllocation)(
    1731  pAllocationCallbacks->pUserData,
    1732  size,
    1733  alignment,
    1734  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    1735  }
    1736  else
    1737  {
    1738  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    1739  }
    1740 }
    1741 
    1742 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
    1743 {
    1744  if((pAllocationCallbacks != VMA_NULL) &&
    1745  (pAllocationCallbacks->pfnFree != VMA_NULL))
    1746  {
    1747  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    1748  }
    1749  else
    1750  {
    1751  VMA_SYSTEM_FREE(ptr);
    1752  }
    1753 }
    1754 
    1755 template<typename T>
    1756 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
    1757 {
    1758  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
    1759 }
    1760 
    1761 template<typename T>
    1762 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
    1763 {
    1764  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
    1765 }
    1766 
    1767 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
    1768 
    1769 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    1770 
    1771 template<typename T>
    1772 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
    1773 {
    1774  ptr->~T();
    1775  VmaFree(pAllocationCallbacks, ptr);
    1776 }
    1777 
    1778 template<typename T>
    1779 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    1780 {
    1781  if(ptr != VMA_NULL)
    1782  {
    1783  for(size_t i = count; i--; )
    1784  {
    1785  ptr[i].~T();
    1786  }
    1787  VmaFree(pAllocationCallbacks, ptr);
    1788  }
    1789 }
    1790 
    1791 // STL-compatible allocator.
    1792 template<typename T>
    1793 class VmaStlAllocator
    1794 {
    1795 public:
    1796  const VkAllocationCallbacks* const m_pCallbacks;
    1797  typedef T value_type;
    1798 
    1799  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    1800  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
    1801 
    1802  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    1803  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
    1804 
    1805  template<typename U>
    1806  bool operator==(const VmaStlAllocator<U>& rhs) const
    1807  {
    1808  return m_pCallbacks == rhs.m_pCallbacks;
    1809  }
    1810  template<typename U>
    1811  bool operator!=(const VmaStlAllocator<U>& rhs) const
    1812  {
    1813  return m_pCallbacks != rhs.m_pCallbacks;
    1814  }
    1815 
    1816  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
    1817 };
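// Illustrative usage, not part of the header: the same allocator type serves the
// internal VmaVector and standard containers, routing allocations through
// VkAllocationCallbacks (a null pointer falls back to VMA_SYSTEM_ALIGNED_MALLOC):
//
//     std::vector<int, VmaStlAllocator<int> > v(VmaStlAllocator<int>(VMA_NULL));
//     v.push_back(42);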
    1818 
    1819 #if VMA_USE_STL_VECTOR
    1820 
    1821 #define VmaVector std::vector
    1822 
    1823 template<typename T, typename allocatorT>
    1824 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
    1825 {
    1826  vec.insert(vec.begin() + index, item);
    1827 }
    1828 
    1829 template<typename T, typename allocatorT>
    1830 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
    1831 {
    1832  vec.erase(vec.begin() + index);
    1833 }
    1834 
    1835 #else // #if VMA_USE_STL_VECTOR
    1836 
1837 /* Class with interface compatible with a subset of std::vector.
    1838 T must be POD because constructors and destructors are not called and memcpy is
    1839 used for these objects. */
    1840 template<typename T, typename AllocatorT>
    1841 class VmaVector
    1842 {
    1843 public:
    1844  typedef T value_type;
    1845 
    1846  VmaVector(const AllocatorT& allocator) :
    1847  m_Allocator(allocator),
    1848  m_pArray(VMA_NULL),
    1849  m_Count(0),
    1850  m_Capacity(0)
    1851  {
    1852  }
    1853 
    1854  VmaVector(size_t count, const AllocatorT& allocator) :
    1855  m_Allocator(allocator),
    1856  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    1857  m_Count(count),
    1858  m_Capacity(count)
    1859  {
    1860  }
    1861 
    1862  VmaVector(const VmaVector<T, AllocatorT>& src) :
    1863  m_Allocator(src.m_Allocator),
    1864  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    1865  m_Count(src.m_Count),
    1866  m_Capacity(src.m_Count)
    1867  {
    1868  if(m_Count != 0)
    1869  {
    1870  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    1871  }
    1872  }
    1873 
    1874  ~VmaVector()
    1875  {
    1876  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    1877  }
    1878 
    1879  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    1880  {
    1881  if(&rhs != this)
    1882  {
    1883  resize(rhs.m_Count);
    1884  if(m_Count != 0)
    1885  {
    1886  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    1887  }
    1888  }
    1889  return *this;
    1890  }
    1891 
    1892  bool empty() const { return m_Count == 0; }
    1893  size_t size() const { return m_Count; }
    1894  T* data() { return m_pArray; }
    1895  const T* data() const { return m_pArray; }
    1896 
    1897  T& operator[](size_t index)
    1898  {
    1899  VMA_HEAVY_ASSERT(index < m_Count);
    1900  return m_pArray[index];
    1901  }
    1902  const T& operator[](size_t index) const
    1903  {
    1904  VMA_HEAVY_ASSERT(index < m_Count);
    1905  return m_pArray[index];
    1906  }
    1907 
    1908  T& front()
    1909  {
    1910  VMA_HEAVY_ASSERT(m_Count > 0);
    1911  return m_pArray[0];
    1912  }
    1913  const T& front() const
    1914  {
    1915  VMA_HEAVY_ASSERT(m_Count > 0);
    1916  return m_pArray[0];
    1917  }
    1918  T& back()
    1919  {
    1920  VMA_HEAVY_ASSERT(m_Count > 0);
    1921  return m_pArray[m_Count - 1];
    1922  }
    1923  const T& back() const
    1924  {
    1925  VMA_HEAVY_ASSERT(m_Count > 0);
    1926  return m_pArray[m_Count - 1];
    1927  }
    1928 
    1929  void reserve(size_t newCapacity, bool freeMemory = false)
    1930  {
    1931  newCapacity = VMA_MAX(newCapacity, m_Count);
    1932 
    1933  if((newCapacity < m_Capacity) && !freeMemory)
    1934  {
    1935  newCapacity = m_Capacity;
    1936  }
    1937 
    1938  if(newCapacity != m_Capacity)
    1939  {
1940  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    1941  if(m_Count != 0)
    1942  {
    1943  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    1944  }
    1945  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    1946  m_Capacity = newCapacity;
    1947  m_pArray = newArray;
    1948  }
    1949  }
    1950 
    1951  void resize(size_t newCount, bool freeMemory = false)
    1952  {
    1953  size_t newCapacity = m_Capacity;
    1954  if(newCount > m_Capacity)
    1955  {
    1956  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    1957  }
    1958  else if(freeMemory)
    1959  {
    1960  newCapacity = newCount;
    1961  }
    1962 
    1963  if(newCapacity != m_Capacity)
    1964  {
    1965  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    1966  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    1967  if(elementsToCopy != 0)
    1968  {
    1969  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    1970  }
    1971  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    1972  m_Capacity = newCapacity;
    1973  m_pArray = newArray;
    1974  }
    1975 
    1976  m_Count = newCount;
    1977  }
    1978 
    1979  void clear(bool freeMemory = false)
    1980  {
    1981  resize(0, freeMemory);
    1982  }
    1983 
    1984  void insert(size_t index, const T& src)
    1985  {
    1986  VMA_HEAVY_ASSERT(index <= m_Count);
    1987  const size_t oldCount = size();
    1988  resize(oldCount + 1);
    1989  if(index < oldCount)
    1990  {
    1991  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    1992  }
    1993  m_pArray[index] = src;
    1994  }
    1995 
    1996  void remove(size_t index)
    1997  {
    1998  VMA_HEAVY_ASSERT(index < m_Count);
    1999  const size_t oldCount = size();
    2000  if(index < oldCount - 1)
    2001  {
    2002  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    2003  }
    2004  resize(oldCount - 1);
    2005  }
    2006 
    2007  void push_back(const T& src)
    2008  {
    2009  const size_t newIndex = size();
    2010  resize(newIndex + 1);
    2011  m_pArray[newIndex] = src;
    2012  }
    2013 
    2014  void pop_back()
    2015  {
    2016  VMA_HEAVY_ASSERT(m_Count > 0);
    2017  resize(size() - 1);
    2018  }
    2019 
    2020  void push_front(const T& src)
    2021  {
    2022  insert(0, src);
    2023  }
    2024 
    2025  void pop_front()
    2026  {
    2027  VMA_HEAVY_ASSERT(m_Count > 0);
    2028  remove(0);
    2029  }
    2030 
    2031  typedef T* iterator;
    2032 
    2033  iterator begin() { return m_pArray; }
    2034  iterator end() { return m_pArray + m_Count; }
    2035 
    2036 private:
    2037  AllocatorT m_Allocator;
    2038  T* m_pArray;
    2039  size_t m_Count;
    2040  size_t m_Capacity;
    2041 };
    2042 
    2043 template<typename T, typename allocatorT>
    2044 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
    2045 {
    2046  vec.insert(index, item);
    2047 }
    2048 
    2049 template<typename T, typename allocatorT>
    2050 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
    2051 {
    2052  vec.remove(index);
    2053 }
    2054 
    2055 #endif // #if VMA_USE_STL_VECTOR
    2056 
    2057 template<typename CmpLess, typename VectorT>
    2058 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
    2059 {
    2060  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    2061  vector.data(),
    2062  vector.data() + vector.size(),
    2063  value,
    2064  CmpLess()) - vector.data();
    2065  VmaVectorInsert(vector, indexToInsert, value);
    2066  return indexToInsert;
    2067 }
    2068 
    2069 template<typename CmpLess, typename VectorT>
    2070 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
    2071 {
    2072  CmpLess comparator;
    2073  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
    2074  vector.begin(),
    2075  vector.end(),
    2076  value,
    2077  comparator);
    2078  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    2079  {
    2080  size_t indexToRemove = it - vector.begin();
    2081  VmaVectorRemove(vector, indexToRemove);
    2082  return true;
    2083  }
    2084  return false;
    2085 }
    2086 
    2087 template<typename CmpLess, typename VectorT>
    2088 size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
    2089 {
     2090  CmpLess comparator;
     2091  const typename VectorT::value_type* it = VmaBinaryFindFirstNotLess(
     2092  vector.data(),
     2093  vector.data() + vector.size(),
     2094  value,
     2095  comparator);
     2096  if((it != vector.data() + vector.size()) && !comparator(*it, value) && !comparator(value, *it))
     2097  {
     2098  return it - vector.data();
     2099  }
     2100  else
     2101  {
     2102  return vector.size();
     2103  }
    2104 }
    2105 
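// Illustrative sketch (not part of the library): how the sorted-vector helpers
// above fit together. UintLess is a hypothetical strict-weak-ordering comparator
// defined only for this example.
/*
struct UintLess
{
    bool operator()(uint32_t lhs, uint32_t rhs) const { return lhs < rhs; }
};

static void SortedVectorExample(const VkAllocationCallbacks* pAllocationCallbacks)
{
    const VmaStlAllocator<uint32_t> alloc(pAllocationCallbacks);
    VmaVector< uint32_t, VmaStlAllocator<uint32_t> > vec(alloc);
    VmaVectorInsertSorted<UintLess>(vec, 20); // vec = {20}
    VmaVectorInsertSorted<UintLess>(vec, 10); // vec = {10, 20}
    VmaVectorInsertSorted<UintLess>(vec, 30); // vec = {10, 20, 30}
    const size_t foundIndex = VmaVectorFindSorted<UintLess>(vec, 20); // foundIndex == 1
    VmaVectorRemoveSorted<UintLess>(vec, 10); // Returns true; vec = {20, 30}
}
*/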
    2107 // class VmaPoolAllocator
    2108 
    2109 /*
    2110 Allocator for objects of type T using a list of arrays (pools) to speed up
    2111 allocation. Number of elements that can be allocated is not bounded because
    2112 allocator can create multiple blocks.
    2113 */
    2114 template<typename T>
    2115 class VmaPoolAllocator
    2116 {
    2117 public:
    2118  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    2119  ~VmaPoolAllocator();
    2120  void Clear();
    2121  T* Alloc();
    2122  void Free(T* ptr);
    2123 
    2124 private:
    2125  union Item
    2126  {
    2127  uint32_t NextFreeIndex;
    2128  T Value;
    2129  };
    2130 
    2131  struct ItemBlock
    2132  {
    2133  Item* pItems;
    2134  uint32_t FirstFreeIndex;
    2135  };
    2136 
    2137  const VkAllocationCallbacks* m_pAllocationCallbacks;
    2138  size_t m_ItemsPerBlock;
    2139  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
    2140 
    2141  ItemBlock& CreateNewBlock();
    2142 };
    2143 
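// Illustrative sketch (not part of the library): typical VmaPoolAllocator usage.
// Alloc() returns raw storage from an internal block without calling T's
// constructor, and Free() returns the slot without calling its destructor,
// so it is shown here with a trivially constructible type.
/*
static void PoolAllocatorExample(const VkAllocationCallbacks* pAllocationCallbacks)
{
    VmaPoolAllocator<uint64_t> pool(pAllocationCallbacks, 32); // 32 items per internal block
    uint64_t* const p = pool.Alloc(); // O(1): pops the head of a block's free list.
    *p = 42;
    pool.Free(p); // O(number of blocks): finds the owning block, pushes the slot back.
}
*/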
    2144 template<typename T>
    2145 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    2146  m_pAllocationCallbacks(pAllocationCallbacks),
    2147  m_ItemsPerBlock(itemsPerBlock),
    2148  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
    2149 {
    2150  VMA_ASSERT(itemsPerBlock > 0);
    2151 }
    2152 
    2153 template<typename T>
    2154 VmaPoolAllocator<T>::~VmaPoolAllocator()
    2155 {
    2156  Clear();
    2157 }
    2158 
    2159 template<typename T>
    2160 void VmaPoolAllocator<T>::Clear()
    2161 {
    2162  for(size_t i = m_ItemBlocks.size(); i--; )
    2163  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    2164  m_ItemBlocks.clear();
    2165 }
    2166 
    2167 template<typename T>
    2168 T* VmaPoolAllocator<T>::Alloc()
    2169 {
    2170  for(size_t i = m_ItemBlocks.size(); i--; )
    2171  {
    2172  ItemBlock& block = m_ItemBlocks[i];
    2173  // This block has some free items: Use first one.
    2174  if(block.FirstFreeIndex != UINT32_MAX)
    2175  {
    2176  Item* const pItem = &block.pItems[block.FirstFreeIndex];
    2177  block.FirstFreeIndex = pItem->NextFreeIndex;
    2178  return &pItem->Value;
    2179  }
    2180  }
    2181 
     2182  // No block has a free item: Create a new one and use it.
    2183  ItemBlock& newBlock = CreateNewBlock();
    2184  Item* const pItem = &newBlock.pItems[0];
    2185  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    2186  return &pItem->Value;
    2187 }
    2188 
    2189 template<typename T>
    2190 void VmaPoolAllocator<T>::Free(T* ptr)
    2191 {
    2192  // Search all memory blocks to find ptr.
    2193  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    2194  {
    2195  ItemBlock& block = m_ItemBlocks[i];
    2196 
    2197  // Casting to union.
    2198  Item* pItemPtr;
    2199  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
    2200 
    2201  // Check if pItemPtr is in address range of this block.
    2202  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
    2203  {
    2204  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
    2205  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
    2206  block.FirstFreeIndex = index;
    2207  return;
    2208  }
    2209  }
    2210  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
    2211 }
    2212 
    2213 template<typename T>
    2214 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
    2215 {
    2216  ItemBlock newBlock = {
    2217  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
    2218 
    2219  m_ItemBlocks.push_back(newBlock);
    2220 
     2221  // Set up singly-linked list of all free items in this block.
    2222  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
    2223  newBlock.pItems[i].NextFreeIndex = i + 1;
    2224  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    2225  return m_ItemBlocks.back();
    2226 }
    2227 
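// Worked example (illustrative): with m_ItemsPerBlock == 4, a fresh block starts
// with FirstFreeIndex == 0 and the NextFreeIndex chain 0 -> 1 -> 2 -> 3 -> UINT32_MAX.
// Alloc() pops index 0 and sets FirstFreeIndex = 1; a later Free() of that item
// stores the old FirstFreeIndex in its NextFreeIndex and becomes the new head,
// so the free list behaves as a LIFO stack embedded in the items themselves.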
    2229 // class VmaRawList, VmaList
    2230 
    2231 #if VMA_USE_STL_LIST
    2232 
    2233 #define VmaList std::list
    2234 
    2235 #else // #if VMA_USE_STL_LIST
    2236 
    2237 template<typename T>
    2238 struct VmaListItem
    2239 {
    2240  VmaListItem* pPrev;
    2241  VmaListItem* pNext;
    2242  T Value;
    2243 };
    2244 
    2245 // Doubly linked list.
    2246 template<typename T>
    2247 class VmaRawList
    2248 {
    2249 public:
    2250  typedef VmaListItem<T> ItemType;
    2251 
    2252  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    2253  ~VmaRawList();
    2254  void Clear();
    2255 
    2256  size_t GetCount() const { return m_Count; }
    2257  bool IsEmpty() const { return m_Count == 0; }
    2258 
    2259  ItemType* Front() { return m_pFront; }
    2260  const ItemType* Front() const { return m_pFront; }
    2261  ItemType* Back() { return m_pBack; }
    2262  const ItemType* Back() const { return m_pBack; }
    2263 
    2264  ItemType* PushBack();
    2265  ItemType* PushFront();
    2266  ItemType* PushBack(const T& value);
    2267  ItemType* PushFront(const T& value);
    2268  void PopBack();
    2269  void PopFront();
    2270 
    2271  // Item can be null - it means PushBack.
    2272  ItemType* InsertBefore(ItemType* pItem);
    2273  // Item can be null - it means PushFront.
    2274  ItemType* InsertAfter(ItemType* pItem);
    2275 
    2276  ItemType* InsertBefore(ItemType* pItem, const T& value);
    2277  ItemType* InsertAfter(ItemType* pItem, const T& value);
    2278 
    2279  void Remove(ItemType* pItem);
    2280 
    2281 private:
    2282  const VkAllocationCallbacks* const m_pAllocationCallbacks;
    2283  VmaPoolAllocator<ItemType> m_ItemAllocator;
    2284  ItemType* m_pFront;
    2285  ItemType* m_pBack;
    2286  size_t m_Count;
    2287 
     2288  // Declared but not defined, to block the copy constructor and assignment operator.
    2289  VmaRawList(const VmaRawList<T>& src);
    2290  VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
    2291 };
    2292 
    2293 template<typename T>
    2294 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    2295  m_pAllocationCallbacks(pAllocationCallbacks),
    2296  m_ItemAllocator(pAllocationCallbacks, 128),
    2297  m_pFront(VMA_NULL),
    2298  m_pBack(VMA_NULL),
    2299  m_Count(0)
    2300 {
    2301 }
    2302 
    2303 template<typename T>
    2304 VmaRawList<T>::~VmaRawList()
    2305 {
     2306  // Intentionally not calling Clear, because that would spend unnecessary
     2307  // computation returning all items to m_ItemAllocator as free.
    2308 }
    2309 
    2310 template<typename T>
    2311 void VmaRawList<T>::Clear()
    2312 {
    2313  if(IsEmpty() == false)
    2314  {
    2315  ItemType* pItem = m_pBack;
    2316  while(pItem != VMA_NULL)
    2317  {
    2318  ItemType* const pPrevItem = pItem->pPrev;
    2319  m_ItemAllocator.Free(pItem);
    2320  pItem = pPrevItem;
    2321  }
    2322  m_pFront = VMA_NULL;
    2323  m_pBack = VMA_NULL;
    2324  m_Count = 0;
    2325  }
    2326 }
    2327 
    2328 template<typename T>
    2329 VmaListItem<T>* VmaRawList<T>::PushBack()
    2330 {
    2331  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    2332  pNewItem->pNext = VMA_NULL;
    2333  if(IsEmpty())
    2334  {
    2335  pNewItem->pPrev = VMA_NULL;
    2336  m_pFront = pNewItem;
    2337  m_pBack = pNewItem;
    2338  m_Count = 1;
    2339  }
    2340  else
    2341  {
    2342  pNewItem->pPrev = m_pBack;
    2343  m_pBack->pNext = pNewItem;
    2344  m_pBack = pNewItem;
    2345  ++m_Count;
    2346  }
    2347  return pNewItem;
    2348 }
    2349 
    2350 template<typename T>
    2351 VmaListItem<T>* VmaRawList<T>::PushFront()
    2352 {
    2353  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    2354  pNewItem->pPrev = VMA_NULL;
    2355  if(IsEmpty())
    2356  {
    2357  pNewItem->pNext = VMA_NULL;
    2358  m_pFront = pNewItem;
    2359  m_pBack = pNewItem;
    2360  m_Count = 1;
    2361  }
    2362  else
    2363  {
    2364  pNewItem->pNext = m_pFront;
    2365  m_pFront->pPrev = pNewItem;
    2366  m_pFront = pNewItem;
    2367  ++m_Count;
    2368  }
    2369  return pNewItem;
    2370 }
    2371 
    2372 template<typename T>
    2373 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
    2374 {
    2375  ItemType* const pNewItem = PushBack();
    2376  pNewItem->Value = value;
    2377  return pNewItem;
    2378 }
    2379 
    2380 template<typename T>
    2381 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
    2382 {
    2383  ItemType* const pNewItem = PushFront();
    2384  pNewItem->Value = value;
    2385  return pNewItem;
    2386 }
    2387 
    2388 template<typename T>
    2389 void VmaRawList<T>::PopBack()
    2390 {
    2391  VMA_HEAVY_ASSERT(m_Count > 0);
    2392  ItemType* const pBackItem = m_pBack;
    2393  ItemType* const pPrevItem = pBackItem->pPrev;
    2394  if(pPrevItem != VMA_NULL)
    2395  {
    2396  pPrevItem->pNext = VMA_NULL;
    2397  }
    2398  m_pBack = pPrevItem;
    2399  m_ItemAllocator.Free(pBackItem);
    2400  --m_Count;
    2401 }
    2402 
    2403 template<typename T>
    2404 void VmaRawList<T>::PopFront()
    2405 {
    2406  VMA_HEAVY_ASSERT(m_Count > 0);
    2407  ItemType* const pFrontItem = m_pFront;
    2408  ItemType* const pNextItem = pFrontItem->pNext;
    2409  if(pNextItem != VMA_NULL)
    2410  {
    2411  pNextItem->pPrev = VMA_NULL;
    2412  }
    2413  m_pFront = pNextItem;
    2414  m_ItemAllocator.Free(pFrontItem);
    2415  --m_Count;
    2416 }
    2417 
    2418 template<typename T>
    2419 void VmaRawList<T>::Remove(ItemType* pItem)
    2420 {
    2421  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    2422  VMA_HEAVY_ASSERT(m_Count > 0);
    2423 
    2424  if(pItem->pPrev != VMA_NULL)
    2425  {
    2426  pItem->pPrev->pNext = pItem->pNext;
    2427  }
    2428  else
    2429  {
    2430  VMA_HEAVY_ASSERT(m_pFront == pItem);
    2431  m_pFront = pItem->pNext;
    2432  }
    2433 
    2434  if(pItem->pNext != VMA_NULL)
    2435  {
    2436  pItem->pNext->pPrev = pItem->pPrev;
    2437  }
    2438  else
    2439  {
    2440  VMA_HEAVY_ASSERT(m_pBack == pItem);
    2441  m_pBack = pItem->pPrev;
    2442  }
    2443 
    2444  m_ItemAllocator.Free(pItem);
    2445  --m_Count;
    2446 }
    2447 
    2448 template<typename T>
    2449 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
    2450 {
    2451  if(pItem != VMA_NULL)
    2452  {
    2453  ItemType* const prevItem = pItem->pPrev;
    2454  ItemType* const newItem = m_ItemAllocator.Alloc();
    2455  newItem->pPrev = prevItem;
    2456  newItem->pNext = pItem;
    2457  pItem->pPrev = newItem;
    2458  if(prevItem != VMA_NULL)
    2459  {
    2460  prevItem->pNext = newItem;
    2461  }
    2462  else
    2463  {
    2464  VMA_HEAVY_ASSERT(m_pFront == pItem);
    2465  m_pFront = newItem;
    2466  }
    2467  ++m_Count;
    2468  return newItem;
    2469  }
    2470  else
    2471  return PushBack();
    2472 }
    2473 
    2474 template<typename T>
    2475 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
    2476 {
    2477  if(pItem != VMA_NULL)
    2478  {
    2479  ItemType* const nextItem = pItem->pNext;
    2480  ItemType* const newItem = m_ItemAllocator.Alloc();
    2481  newItem->pNext = nextItem;
    2482  newItem->pPrev = pItem;
    2483  pItem->pNext = newItem;
    2484  if(nextItem != VMA_NULL)
    2485  {
    2486  nextItem->pPrev = newItem;
    2487  }
    2488  else
    2489  {
    2490  VMA_HEAVY_ASSERT(m_pBack == pItem);
    2491  m_pBack = newItem;
    2492  }
    2493  ++m_Count;
    2494  return newItem;
    2495  }
    2496  else
    2497  return PushFront();
    2498 }
    2499 
    2500 template<typename T>
    2501 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
    2502 {
    2503  ItemType* const newItem = InsertBefore(pItem);
    2504  newItem->Value = value;
    2505  return newItem;
    2506 }
    2507 
    2508 template<typename T>
    2509 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
    2510 {
    2511  ItemType* const newItem = InsertAfter(pItem);
    2512  newItem->Value = value;
    2513  return newItem;
    2514 }
    2515 
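// Illustrative sketch (not part of the library): direct VmaRawList usage. Unlike
// the STL-style VmaList defined below, VmaRawList hands out its nodes (ItemType*).
/*
static void RawListExample(const VkAllocationCallbacks* pAllocationCallbacks)
{
    VmaRawList<int> list(pAllocationCallbacks);
    VmaListItem<int>* const first = list.PushBack(1); // list: 1
    list.PushBack(3);                                 // list: 1, 3
    list.InsertAfter(first, 2);                       // list: 1, 2, 3
    list.PopFront();                                  // list: 2, 3
    list.Clear();                                     // list: empty
}
*/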
    2516 template<typename T, typename AllocatorT>
    2517 class VmaList
    2518 {
    2519 public:
    2520  class iterator
    2521  {
    2522  public:
    2523  iterator() :
    2524  m_pList(VMA_NULL),
    2525  m_pItem(VMA_NULL)
    2526  {
    2527  }
    2528 
    2529  T& operator*() const
    2530  {
    2531  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2532  return m_pItem->Value;
    2533  }
    2534  T* operator->() const
    2535  {
    2536  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2537  return &m_pItem->Value;
    2538  }
    2539 
    2540  iterator& operator++()
    2541  {
    2542  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2543  m_pItem = m_pItem->pNext;
    2544  return *this;
    2545  }
    2546  iterator& operator--()
    2547  {
    2548  if(m_pItem != VMA_NULL)
    2549  {
    2550  m_pItem = m_pItem->pPrev;
    2551  }
    2552  else
    2553  {
     2554  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
    2555  m_pItem = m_pList->Back();
    2556  }
    2557  return *this;
    2558  }
    2559 
    2560  iterator operator++(int)
    2561  {
    2562  iterator result = *this;
    2563  ++*this;
    2564  return result;
    2565  }
    2566  iterator operator--(int)
    2567  {
    2568  iterator result = *this;
    2569  --*this;
    2570  return result;
    2571  }
    2572 
    2573  bool operator==(const iterator& rhs) const
    2574  {
    2575  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    2576  return m_pItem == rhs.m_pItem;
    2577  }
    2578  bool operator!=(const iterator& rhs) const
    2579  {
    2580  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    2581  return m_pItem != rhs.m_pItem;
    2582  }
    2583 
    2584  private:
    2585  VmaRawList<T>* m_pList;
    2586  VmaListItem<T>* m_pItem;
    2587 
    2588  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
    2589  m_pList(pList),
    2590  m_pItem(pItem)
    2591  {
    2592  }
    2593 
    2594  friend class VmaList<T, AllocatorT>;
    2595  };
    2596 
    2597  class const_iterator
    2598  {
    2599  public:
    2600  const_iterator() :
    2601  m_pList(VMA_NULL),
    2602  m_pItem(VMA_NULL)
    2603  {
    2604  }
    2605 
    2606  const_iterator(const iterator& src) :
    2607  m_pList(src.m_pList),
    2608  m_pItem(src.m_pItem)
    2609  {
    2610  }
    2611 
    2612  const T& operator*() const
    2613  {
    2614  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2615  return m_pItem->Value;
    2616  }
    2617  const T* operator->() const
    2618  {
    2619  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2620  return &m_pItem->Value;
    2621  }
    2622 
    2623  const_iterator& operator++()
    2624  {
    2625  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2626  m_pItem = m_pItem->pNext;
    2627  return *this;
    2628  }
    2629  const_iterator& operator--()
    2630  {
    2631  if(m_pItem != VMA_NULL)
    2632  {
    2633  m_pItem = m_pItem->pPrev;
    2634  }
    2635  else
    2636  {
    2637  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
    2638  m_pItem = m_pList->Back();
    2639  }
    2640  return *this;
    2641  }
    2642 
    2643  const_iterator operator++(int)
    2644  {
    2645  const_iterator result = *this;
    2646  ++*this;
    2647  return result;
    2648  }
    2649  const_iterator operator--(int)
    2650  {
    2651  const_iterator result = *this;
    2652  --*this;
    2653  return result;
    2654  }
    2655 
    2656  bool operator==(const const_iterator& rhs) const
    2657  {
    2658  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    2659  return m_pItem == rhs.m_pItem;
    2660  }
    2661  bool operator!=(const const_iterator& rhs) const
    2662  {
    2663  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    2664  return m_pItem != rhs.m_pItem;
    2665  }
    2666 
    2667  private:
    2668  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
    2669  m_pList(pList),
    2670  m_pItem(pItem)
    2671  {
    2672  }
    2673 
    2674  const VmaRawList<T>* m_pList;
    2675  const VmaListItem<T>* m_pItem;
    2676 
    2677  friend class VmaList<T, AllocatorT>;
    2678  };
    2679 
    2680  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
    2681 
    2682  bool empty() const { return m_RawList.IsEmpty(); }
    2683  size_t size() const { return m_RawList.GetCount(); }
    2684 
    2685  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    2686  iterator end() { return iterator(&m_RawList, VMA_NULL); }
    2687 
    2688  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    2689  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
    2690 
    2691  void clear() { m_RawList.Clear(); }
    2692  void push_back(const T& value) { m_RawList.PushBack(value); }
    2693  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    2694  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
    2695 
    2696 private:
    2697  VmaRawList<T> m_RawList;
    2698 };
    2699 
    2700 #endif // #if VMA_USE_STL_LIST
    2701 
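// Illustrative sketch (not part of the library): VmaList implements the subset of
// the std::list interface used in this file, so iteration follows the usual pattern.
/*
static void ListExample(const VkAllocationCallbacks* pAllocationCallbacks)
{
    typedef VmaList< int, VmaStlAllocator<int> > IntList;
    const VmaStlAllocator<int> alloc(pAllocationCallbacks);
    IntList list(alloc);
    list.push_back(10);
    list.push_back(20);
    for(IntList::iterator it = list.begin(); it != list.end(); ++it)
    {
        // *it visits 10, then 20.
    }
    list.clear();
}
*/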
    2703 // class VmaMap
    2704 
    2705 // Unused in this version.
    2706 #if 0
    2707 
    2708 #if VMA_USE_STL_UNORDERED_MAP
    2709 
    2710 #define VmaPair std::pair
    2711 
    2712 #define VMA_MAP_TYPE(KeyT, ValueT) \
    2713  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    2714 
    2715 #else // #if VMA_USE_STL_UNORDERED_MAP
    2716 
    2717 template<typename T1, typename T2>
    2718 struct VmaPair
    2719 {
    2720  T1 first;
    2721  T2 second;
    2722 
    2723  VmaPair() : first(), second() { }
    2724  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
    2725 };
    2726 
    2727 /* Class compatible with subset of interface of std::unordered_map.
    2728 KeyT, ValueT must be POD because they will be stored in VmaVector.
    2729 */
    2730 template<typename KeyT, typename ValueT>
    2731 class VmaMap
    2732 {
    2733 public:
    2734  typedef VmaPair<KeyT, ValueT> PairType;
    2735  typedef PairType* iterator;
    2736 
    2737  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
    2738 
    2739  iterator begin() { return m_Vector.begin(); }
    2740  iterator end() { return m_Vector.end(); }
    2741 
    2742  void insert(const PairType& pair);
    2743  iterator find(const KeyT& key);
    2744  void erase(iterator it);
    2745 
    2746 private:
    2747  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
    2748 };
    2749 
    2750 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    2751 
    2752 template<typename FirstT, typename SecondT>
    2753 struct VmaPairFirstLess
    2754 {
    2755  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    2756  {
    2757  return lhs.first < rhs.first;
    2758  }
    2759  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    2760  {
    2761  return lhs.first < rhsFirst;
    2762  }
    2763 };
    2764 
    2765 template<typename KeyT, typename ValueT>
    2766 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
    2767 {
    2768  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    2769  m_Vector.data(),
    2770  m_Vector.data() + m_Vector.size(),
    2771  pair,
    2772  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    2773  VmaVectorInsert(m_Vector, indexToInsert, pair);
    2774 }
    2775 
    2776 template<typename KeyT, typename ValueT>
    2777 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
    2778 {
    2779  PairType* it = VmaBinaryFindFirstNotLess(
    2780  m_Vector.data(),
    2781  m_Vector.data() + m_Vector.size(),
    2782  key,
    2783  VmaPairFirstLess<KeyT, ValueT>());
    2784  if((it != m_Vector.end()) && (it->first == key))
    2785  {
    2786  return it;
    2787  }
    2788  else
    2789  {
    2790  return m_Vector.end();
    2791  }
    2792 }
    2793 
    2794 template<typename KeyT, typename ValueT>
    2795 void VmaMap<KeyT, ValueT>::erase(iterator it)
    2796 {
    2797  VmaVectorRemove(m_Vector, it - m_Vector.begin());
    2798 }
    2799 
    2800 #endif // #if VMA_USE_STL_UNORDERED_MAP
    2801 
    2802 #endif // #if 0
    2803 
    2805 
    2806 class VmaDeviceMemoryBlock;
    2807 
    2808 enum VMA_BLOCK_VECTOR_TYPE
    2809 {
    2810  VMA_BLOCK_VECTOR_TYPE_UNMAPPED,
    2811  VMA_BLOCK_VECTOR_TYPE_MAPPED,
    2812  VMA_BLOCK_VECTOR_TYPE_COUNT
    2813 };
    2814 
    2815 static VMA_BLOCK_VECTOR_TYPE VmaAllocationCreateFlagsToBlockVectorType(VmaAllocationCreateFlags flags)
    2816 {
    2817  return (flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0 ?
    2818  VMA_BLOCK_VECTOR_TYPE_MAPPED :
    2819  VMA_BLOCK_VECTOR_TYPE_UNMAPPED;
    2820 }
    2821 
    2822 struct VmaAllocation_T
    2823 {
    2824 public:
    2825  enum ALLOCATION_TYPE
    2826  {
    2827  ALLOCATION_TYPE_NONE,
    2828  ALLOCATION_TYPE_BLOCK,
    2829  ALLOCATION_TYPE_OWN,
    2830  };
    2831 
    2832  VmaAllocation_T(uint32_t currentFrameIndex) :
    2833  m_Alignment(1),
    2834  m_Size(0),
    2835  m_pUserData(VMA_NULL),
    2836  m_Type(ALLOCATION_TYPE_NONE),
    2837  m_SuballocationType(VMA_SUBALLOCATION_TYPE_UNKNOWN),
    2838  m_LastUseFrameIndex(currentFrameIndex)
    2839  {
    2840  }
    2841 
    2842  void InitBlockAllocation(
    2843  VmaPool hPool,
    2844  VmaDeviceMemoryBlock* block,
    2845  VkDeviceSize offset,
    2846  VkDeviceSize alignment,
    2847  VkDeviceSize size,
    2848  VmaSuballocationType suballocationType,
    2849  void* pUserData,
    2850  bool canBecomeLost)
    2851  {
    2852  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    2853  VMA_ASSERT(block != VMA_NULL);
    2854  m_Type = ALLOCATION_TYPE_BLOCK;
    2855  m_Alignment = alignment;
    2856  m_Size = size;
    2857  m_pUserData = pUserData;
    2858  m_SuballocationType = suballocationType;
    2859  m_BlockAllocation.m_hPool = hPool;
    2860  m_BlockAllocation.m_Block = block;
    2861  m_BlockAllocation.m_Offset = offset;
    2862  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    2863  }
    2864 
    2865  void InitLost()
    2866  {
    2867  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    2868  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
    2869  m_Type = ALLOCATION_TYPE_BLOCK;
    2870  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
    2871  m_BlockAllocation.m_Block = VMA_NULL;
    2872  m_BlockAllocation.m_Offset = 0;
    2873  m_BlockAllocation.m_CanBecomeLost = true;
    2874  }
    2875 
    2876  void ChangeBlockAllocation(
    2877  VmaDeviceMemoryBlock* block,
    2878  VkDeviceSize offset)
    2879  {
    2880  VMA_ASSERT(block != VMA_NULL);
    2881  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    2882  m_BlockAllocation.m_Block = block;
    2883  m_BlockAllocation.m_Offset = offset;
    2884  }
    2885 
    2886  void InitOwnAllocation(
    2887  uint32_t memoryTypeIndex,
    2888  VkDeviceMemory hMemory,
    2889  VmaSuballocationType suballocationType,
    2890  bool persistentMap,
    2891  void* pMappedData,
    2892  VkDeviceSize size,
    2893  void* pUserData)
    2894  {
    2895  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    2896  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
    2897  m_Type = ALLOCATION_TYPE_OWN;
    2898  m_Alignment = 0;
    2899  m_Size = size;
    2900  m_pUserData = pUserData;
    2901  m_SuballocationType = suballocationType;
    2902  m_OwnAllocation.m_MemoryTypeIndex = memoryTypeIndex;
    2903  m_OwnAllocation.m_hMemory = hMemory;
    2904  m_OwnAllocation.m_PersistentMap = persistentMap;
    2905  m_OwnAllocation.m_pMappedData = pMappedData;
    2906  }
    2907 
    2908  ALLOCATION_TYPE GetType() const { return m_Type; }
    2909  VkDeviceSize GetAlignment() const { return m_Alignment; }
    2910  VkDeviceSize GetSize() const { return m_Size; }
    2911  void* GetUserData() const { return m_pUserData; }
    2912  void SetUserData(void* pUserData) { m_pUserData = pUserData; }
    2913  VmaSuballocationType GetSuballocationType() const { return m_SuballocationType; }
    2914 
    2915  VmaDeviceMemoryBlock* GetBlock() const
    2916  {
    2917  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    2918  return m_BlockAllocation.m_Block;
    2919  }
    2920  VkDeviceSize GetOffset() const;
    2921  VkDeviceMemory GetMemory() const;
    2922  uint32_t GetMemoryTypeIndex() const;
    2923  VMA_BLOCK_VECTOR_TYPE GetBlockVectorType() const;
    2924  void* GetMappedData() const;
    2925  bool CanBecomeLost() const;
    2926  VmaPool GetPool() const;
    2927 
    2928  VkResult OwnAllocMapPersistentlyMappedMemory(VmaAllocator hAllocator);
    2929  void OwnAllocUnmapPersistentlyMappedMemory(VmaAllocator hAllocator);
    2930 
    2931  uint32_t GetLastUseFrameIndex() const
    2932  {
    2933  return m_LastUseFrameIndex.load();
    2934  }
    2935  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    2936  {
    2937  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    2938  }
    2939  /*
    2940  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
    2941  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    2942  - Else, returns false.
    2943 
    2944  If hAllocation is already lost, assert - you should not call it then.
    2945  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    2946  */
    2947  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    2948 
    2949  void OwnAllocCalcStatsInfo(VmaStatInfo& outInfo)
    2950  {
    2951  VMA_ASSERT(m_Type == ALLOCATION_TYPE_OWN);
    2952  outInfo.blockCount = 1;
    2953  outInfo.allocationCount = 1;
    2954  outInfo.unusedRangeCount = 0;
    2955  outInfo.usedBytes = m_Size;
    2956  outInfo.unusedBytes = 0;
    2957  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
    2958  outInfo.unusedRangeSizeMin = UINT64_MAX;
    2959  outInfo.unusedRangeSizeMax = 0;
    2960  }
    2961 
    2962 private:
    2963  VkDeviceSize m_Alignment;
    2964  VkDeviceSize m_Size;
    2965  void* m_pUserData;
    2966  ALLOCATION_TYPE m_Type;
    2967  VmaSuballocationType m_SuballocationType;
    2968  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    2969 
    2970  // Allocation out of VmaDeviceMemoryBlock.
    2971  struct BlockAllocation
    2972  {
    2973  VmaPool m_hPool; // Null if belongs to general memory.
    2974  VmaDeviceMemoryBlock* m_Block;
    2975  VkDeviceSize m_Offset;
    2976  bool m_CanBecomeLost;
    2977  };
    2978 
    2979  // Allocation for an object that has its own private VkDeviceMemory.
    2980  struct OwnAllocation
    2981  {
    2982  uint32_t m_MemoryTypeIndex;
    2983  VkDeviceMemory m_hMemory;
    2984  bool m_PersistentMap;
    2985  void* m_pMappedData;
    2986  };
    2987 
    2988  union
    2989  {
    2990  // Allocation out of VmaDeviceMemoryBlock.
    2991  BlockAllocation m_BlockAllocation;
    2992  // Allocation for an object that has its own private VkDeviceMemory.
    2993  OwnAllocation m_OwnAllocation;
    2994  };
    2995 };
    2996 
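// A minimal sketch (an assumption for illustration - the real MakeLost is defined
// later in this file) of how the lost-allocation rule quoted above maps onto the
// compare-exchange helper:
/*
static bool MakeLost_Sketch(VmaAllocation_T& alloc, uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    uint32_t localLastUseFrameIndex = alloc.GetLastUseFrameIndex();
    for(;;)
    {
        if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            return false; // Possibly still used by a queued frame - cannot be made lost.
        }
        if(alloc.CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
        {
            return true; // This thread atomically made the allocation lost.
        }
        // CAS failed: another thread touched the frame index concurrently.
        // localLastUseFrameIndex now holds the fresh value - loop and re-check.
    }
}
*/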
    2997 /*
     2998 Represents a region of a VmaDeviceMemoryBlock that is either assigned to a
     2999 VmaAllocation and returned as allocated memory, or free.
    3000 */
    3001 struct VmaSuballocation
    3002 {
    3003  VkDeviceSize offset;
    3004  VkDeviceSize size;
    3005  VmaAllocation hAllocation;
    3006  VmaSuballocationType type;
    3007 };
    3008 
    3009 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
    3010 
    3011 // Cost of one additional allocation lost, as equivalent in bytes.
    3012 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    3013 
    3014 /*
    3015 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
    3016 
    3017 If canMakeOtherLost was false:
    3018 - item points to a FREE suballocation.
    3019 - itemsToMakeLostCount is 0.
    3020 
    3021 If canMakeOtherLost was true:
    3022 - item points to first of sequence of suballocations, which are either FREE,
    3023  or point to VmaAllocations that can become lost.
    3024 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
    3025  the requested allocation to succeed.
    3026 */
    3027 struct VmaAllocationRequest
    3028 {
    3029  VkDeviceSize offset;
    3030  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    3031  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    3032  VmaSuballocationList::iterator item;
    3033  size_t itemsToMakeLostCount;
    3034 
    3035  VkDeviceSize CalcCost() const
    3036  {
    3037  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    3038  }
    3039 };
    3040 
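// Worked example of CalcCost (illustrative): a request that must evict 3 lost-able
// allocations whose overlapping bytes total sumItemSize = 1048576 costs
// 1048576 + 3 * VMA_LOST_ALLOCATION_COST = 4 * 1048576, while a request satisfied
// purely from free space costs 0 - so, between candidate placements, the cheaper
// (less destructive) one wins.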
    3041 /*
    3042 Data structure used for bookkeeping of allocations and unused ranges of memory
    3043 in a single VkDeviceMemory block.
    3044 */
    3045 class VmaBlockMetadata
    3046 {
    3047 public:
    3048  VmaBlockMetadata(VmaAllocator hAllocator);
    3049  ~VmaBlockMetadata();
    3050  void Init(VkDeviceSize size);
    3051 
    3052  // Validates all data structures inside this object. If not valid, returns false.
    3053  bool Validate() const;
    3054  VkDeviceSize GetSize() const { return m_Size; }
    3055  size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    3056  VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    3057  VkDeviceSize GetUnusedRangeSizeMax() const;
     3058  // Returns true if this block is empty - contains only a single free suballocation.
    3059  bool IsEmpty() const;
    3060 
    3061  void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    3062  void AddPoolStats(VmaPoolStats& inoutStats) const;
    3063 
    3064 #if VMA_STATS_STRING_ENABLED
    3065  void PrintDetailedMap(class VmaJsonWriter& json) const;
    3066 #endif
    3067 
    3068  // Creates trivial request for case when block is empty.
    3069  void CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest);
    3070 
    3071  // Tries to find a place for suballocation with given parameters inside this block.
    3072  // If succeeded, fills pAllocationRequest and returns true.
    3073  // If failed, returns false.
    3074  bool CreateAllocationRequest(
    3075  uint32_t currentFrameIndex,
    3076  uint32_t frameInUseCount,
    3077  VkDeviceSize bufferImageGranularity,
    3078  VkDeviceSize allocSize,
    3079  VkDeviceSize allocAlignment,
    3080  VmaSuballocationType allocType,
    3081  bool canMakeOtherLost,
    3082  VmaAllocationRequest* pAllocationRequest);
    3083 
    3084  bool MakeRequestedAllocationsLost(
    3085  uint32_t currentFrameIndex,
    3086  uint32_t frameInUseCount,
    3087  VmaAllocationRequest* pAllocationRequest);
    3088 
    3089  uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    3090 
    3091  // Makes actual allocation based on request. Request must already be checked and valid.
    3092  void Alloc(
    3093  const VmaAllocationRequest& request,
    3094  VmaSuballocationType type,
    3095  VkDeviceSize allocSize,
    3096  VmaAllocation hAllocation);
    3097 
    3098  // Frees suballocation assigned to given memory region.
    3099  void Free(const VmaAllocation allocation);
    3100 
    3101 private:
    3102  VkDeviceSize m_Size;
    3103  uint32_t m_FreeCount;
    3104  VkDeviceSize m_SumFreeSize;
    3105  VmaSuballocationList m_Suballocations;
    3106  // Suballocations that are free and have size greater than certain threshold.
    3107  // Sorted by size, ascending.
    3108  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
    3109 
    3110  bool ValidateFreeSuballocationList() const;
    3111 
     3112  // Checks if requested suballocation with given parameters can be placed at given suballocItem.
    3113  // If yes, fills pOffset and returns true. If no, returns false.
    3114  bool CheckAllocation(
    3115  uint32_t currentFrameIndex,
    3116  uint32_t frameInUseCount,
    3117  VkDeviceSize bufferImageGranularity,
    3118  VkDeviceSize allocSize,
    3119  VkDeviceSize allocAlignment,
    3120  VmaSuballocationType allocType,
    3121  VmaSuballocationList::const_iterator suballocItem,
    3122  bool canMakeOtherLost,
    3123  VkDeviceSize* pOffset,
    3124  size_t* itemsToMakeLostCount,
    3125  VkDeviceSize* pSumFreeSize,
    3126  VkDeviceSize* pSumItemSize) const;
     3127  // Merges given free suballocation with the following one, which must also be free.
    3128  void MergeFreeWithNext(VmaSuballocationList::iterator item);
    3129  // Releases given suballocation, making it free.
    3130  // Merges it with adjacent free suballocations if applicable.
    3131  // Returns iterator to new free suballocation at this place.
    3132  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
     3133  // Inserts given free suballocation into the sorted list
     3134  // m_FreeSuballocationsBySize, if it's suitable.
    3135  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
     3136  // Removes given free suballocation from the sorted list
     3137  // m_FreeSuballocationsBySize, if it's suitable.
    3138  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
    3139 };
    3140 
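// A minimal sketch (an assumption for illustration - the real logic lives in
// CreateAllocationRequest, defined later) of how the sorted
// m_FreeSuballocationsBySize vector enables best-fit search: since it is sorted
// by size ascending, the smallest free suballocation that can hold allocSize is
// the first element not less than allocSize, found by binary search.
/*
struct SuballocationItemSizeLess // Hypothetical comparator, named for this sketch only.
{
    bool operator()(const VmaSuballocationList::iterator lhs, VkDeviceSize rhsSize) const
    {
        return lhs->size < rhsSize;
    }
};

// Usage inside the metadata class (sketch):
// VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
//     m_FreeSuballocationsBySize.data(),
//     m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
//     allocSize,
//     SuballocationItemSizeLess());
// // *it, if in range, is the best-fit free suballocation.
*/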
    3141 /*
    3142 Represents a single block of device memory (`VkDeviceMemory`) with all the
    3143 data about its regions (aka suballocations, `VmaAllocation`), assigned and free.
    3144 
    3145 Thread-safety: This class must be externally synchronized.
    3146 */
    3147 class VmaDeviceMemoryBlock
    3148 {
    3149 public:
    3150  uint32_t m_MemoryTypeIndex;
    3151  VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
    3152  VkDeviceMemory m_hMemory;
    3153  bool m_PersistentMap;
    3154  void* m_pMappedData;
    3155  VmaBlockMetadata m_Metadata;
    3156 
    3157  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
    3158 
    3159  ~VmaDeviceMemoryBlock()
    3160  {
    3161  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    3162  }
    3163 
    3164  // Always call after construction.
    3165  void Init(
    3166  uint32_t newMemoryTypeIndex,
    3167  VMA_BLOCK_VECTOR_TYPE newBlockVectorType,
    3168  VkDeviceMemory newMemory,
    3169  VkDeviceSize newSize,
    3170  bool persistentMap,
    3171  void* pMappedData);
    3172  // Always call before destruction.
    3173  void Destroy(VmaAllocator allocator);
    3174 
    3175  // Validates all data structures inside this object. If not valid, returns false.
    3176  bool Validate() const;
    3177 };
    3178 
    3179 struct VmaPointerLess
    3180 {
    3181  bool operator()(const void* lhs, const void* rhs) const
    3182  {
    3183  return lhs < rhs;
    3184  }
    3185 };
    3186 
    3187 class VmaDefragmentator;
    3188 
    3189 /*
    3190 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
    3191 Vulkan memory type.
    3192 
    3193 Synchronized internally with a mutex.
    3194 */
    3195 struct VmaBlockVector
    3196 {
    3197  VmaBlockVector(
    3198  VmaAllocator hAllocator,
    3199  uint32_t memoryTypeIndex,
    3200  VMA_BLOCK_VECTOR_TYPE blockVectorType,
    3201  VkDeviceSize preferredBlockSize,
    3202  size_t minBlockCount,
    3203  size_t maxBlockCount,
    3204  VkDeviceSize bufferImageGranularity,
    3205  uint32_t frameInUseCount,
    3206  bool isCustomPool);
    3207  ~VmaBlockVector();
    3208 
    3209  VkResult CreateMinBlocks();
    3210 
    3211  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    3212  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    3213  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    3214  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    3215  VMA_BLOCK_VECTOR_TYPE GetBlockVectorType() const { return m_BlockVectorType; }
    3216 
    3217  void GetPoolStats(VmaPoolStats* pStats);
    3218 
    3219  bool IsEmpty() const { return m_Blocks.empty(); }
    3220 
    3221  VkResult Allocate(
    3222  VmaPool hCurrentPool,
    3223  uint32_t currentFrameIndex,
    3224  const VkMemoryRequirements& vkMemReq,
    3225  const VmaAllocationCreateInfo& createInfo,
    3226  VmaSuballocationType suballocType,
    3227  VmaAllocation* pAllocation);
    3228 
    3229  void Free(
    3230  VmaAllocation hAllocation);
    3231 
    3232  // Adds statistics of this BlockVector to pStats.
    3233  void AddStats(VmaStats* pStats);
    3234 
    3235 #if VMA_STATS_STRING_ENABLED
    3236  void PrintDetailedMap(class VmaJsonWriter& json);
    3237 #endif
    3238 
    3239  void UnmapPersistentlyMappedMemory();
    3240  VkResult MapPersistentlyMappedMemory();
    3241 
    3242  void MakePoolAllocationsLost(
    3243  uint32_t currentFrameIndex,
    3244  size_t* pLostAllocationCount);
    3245 
    3246  VmaDefragmentator* EnsureDefragmentator(
    3247  VmaAllocator hAllocator,
    3248  uint32_t currentFrameIndex);
    3249 
    3250  VkResult Defragment(
    3251  VmaDefragmentationStats* pDefragmentationStats,
    3252  VkDeviceSize& maxBytesToMove,
    3253  uint32_t& maxAllocationsToMove);
    3254 
    3255  void DestroyDefragmentator();
    3256 
    3257 private:
    3258  friend class VmaDefragmentator;
    3259 
    3260  const VmaAllocator m_hAllocator;
    3261  const uint32_t m_MemoryTypeIndex;
    3262  const VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
    3263  const VkDeviceSize m_PreferredBlockSize;
    3264  const size_t m_MinBlockCount;
    3265  const size_t m_MaxBlockCount;
    3266  const VkDeviceSize m_BufferImageGranularity;
    3267  const uint32_t m_FrameInUseCount;
    3268  const bool m_IsCustomPool;
    3269  VMA_MUTEX m_Mutex;
    3270  // Incrementally sorted by sumFreeSize, ascending.
    3271  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
     3272  /* There can be at most one block that is completely empty - a
     3273  hysteresis to avoid the pessimistic case of alternating creation and
     3274  destruction of a VkDeviceMemory. */
    3275  bool m_HasEmptyBlock;
    3276  VmaDefragmentator* m_pDefragmentator;
    3277 
    3278  // Finds and removes given block from vector.
    3279  void Remove(VmaDeviceMemoryBlock* pBlock);
    3280 
    3281  // Performs single step in sorting m_Blocks. They may not be fully sorted
    3282  // after this call.
    3283  void IncrementallySortBlocks();
    3284 
    3285  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
    3286 };
    3287 
    3288 struct VmaPool_T
    3289 {
    3290 public:
    3291  VmaBlockVector m_BlockVector;
    3292 
    3293  // Takes ownership.
    3294  VmaPool_T(
    3295  VmaAllocator hAllocator,
    3296  const VmaPoolCreateInfo& createInfo);
    3297  ~VmaPool_T();
    3298 
    3299  VmaBlockVector& GetBlockVector() { return m_BlockVector; }
    3300 
    3301 #if VMA_STATS_STRING_ENABLED
    3302  //void PrintDetailedMap(class VmaStringBuilder& sb);
    3303 #endif
    3304 };
    3305 
    3306 class VmaDefragmentator
    3307 {
    3308  const VmaAllocator m_hAllocator;
    3309  VmaBlockVector* const m_pBlockVector;
    3310  uint32_t m_CurrentFrameIndex;
    3311  VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
    3312  VkDeviceSize m_BytesMoved;
    3313  uint32_t m_AllocationsMoved;
    3314 
    3315  struct AllocationInfo
    3316  {
    3317  VmaAllocation m_hAllocation;
    3318  VkBool32* m_pChanged;
    3319 
    3320  AllocationInfo() :
    3321  m_hAllocation(VK_NULL_HANDLE),
    3322  m_pChanged(VMA_NULL)
    3323  {
    3324  }
    3325  };
    3326 
    3327  struct AllocationInfoSizeGreater
    3328  {
    3329  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
    3330  {
    3331  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
    3332  }
    3333  };
    3334 
    3335  // Used between AddAllocation and Defragment.
    3336  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
    3337 
    3338  struct BlockInfo
    3339  {
    3340  VmaDeviceMemoryBlock* m_pBlock;
    3341  bool m_HasNonMovableAllocations;
    3342  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
    3343 
    3344  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
    3345  m_pBlock(VMA_NULL),
    3346  m_HasNonMovableAllocations(true),
    3347  m_Allocations(pAllocationCallbacks),
    3348  m_pMappedDataForDefragmentation(VMA_NULL)
    3349  {
    3350  }
    3351 
    3352  void CalcHasNonMovableAllocations()
    3353  {
    3354  const size_t blockAllocCount = m_pBlock->m_Metadata.GetAllocationCount();
    3355  const size_t defragmentAllocCount = m_Allocations.size();
    3356  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
    3357  }
    3358 
    3359  void SortAllocationsBySizeDescecnding()
    3360  {
    3361  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
    3362  }
    3363 
    3364  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
    3365  void Unmap(VmaAllocator hAllocator);
    3366 
    3367  private:
    3368  // Not null if mapped for defragmentation only, not persistently mapped.
    3369  void* m_pMappedDataForDefragmentation;
    3370  };
    3371 
    3372  struct BlockPointerLess
    3373  {
    3374  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
    3375  {
    3376  return pLhsBlockInfo->m_pBlock < pRhsBlock;
    3377  }
    3378  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
    3379  {
    3380  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
    3381  }
    3382  };
    3383 
    3384  // 1. Blocks with some non-movable allocations go first.
    3385  // 2. Blocks with smaller sumFreeSize go first.
    3386  struct BlockInfoCompareMoveDestination
    3387  {
    3388  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
    3389  {
    3390  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
    3391  {
    3392  return true;
    3393  }
    3394  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
    3395  {
    3396  return false;
    3397  }
    3398  if(pLhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize())
    3399  {
    3400  return true;
    3401  }
    3402  return false;
    3403  }
    3404  };
    3405 
    3406  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    3407  BlockInfoVector m_Blocks;
    3408 
    3409  VkResult DefragmentRound(
    3410  VkDeviceSize maxBytesToMove,
    3411  uint32_t maxAllocationsToMove);
    3412 
    3413  static bool MoveMakesSense(
    3414  size_t dstBlockIndex, VkDeviceSize dstOffset,
    3415  size_t srcBlockIndex, VkDeviceSize srcOffset);
    3416 
    3417 public:
    3418  VmaDefragmentator(
    3419  VmaAllocator hAllocator,
    3420  VmaBlockVector* pBlockVector,
    3421  uint32_t currentFrameIndex);
    3422 
    3423  ~VmaDefragmentator();
    3424 
    3425  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    3426  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
    3427 
    3428  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    3429 
    3430  VkResult Defragment(
    3431  VkDeviceSize maxBytesToMove,
    3432  uint32_t maxAllocationsToMove);
    3433 };
    3434 
    3435 // Main allocator object.
    3436 struct VmaAllocator_T
    3437 {
    3438  bool m_UseMutex;
    3439  VkDevice m_hDevice;
    3440  bool m_AllocationCallbacksSpecified;
    3441  VkAllocationCallbacks m_AllocationCallbacks;
    3442  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
    3443  // Non-zero when we are inside UnmapPersistentlyMappedMemory...MapPersistentlyMappedMemory.
    3444  // Counter to allow nested calls to these functions.
    3445  uint32_t m_UnmapPersistentlyMappedMemoryCounter;
    3446 
     3447  // Number of bytes free out of the limit, or VK_WHOLE_SIZE if there is no limit for that heap.
    3448  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    3449  VMA_MUTEX m_HeapSizeLimitMutex;
    3450 
    3451  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    3452  VkPhysicalDeviceMemoryProperties m_MemProps;
    3453 
    3454  // Default pools.
    3455  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];
    3456 
    3457  // Each vector is sorted by memory (handle value).
    3458  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    3459  AllocationVectorType* m_pOwnAllocations[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];
    3460  VMA_MUTEX m_OwnAllocationsMutex[VK_MAX_MEMORY_TYPES];
    3461 
    3462  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    3463  ~VmaAllocator_T();
    3464 
    3465  const VkAllocationCallbacks* GetAllocationCallbacks() const
    3466  {
    3467  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    3468  }
    3469  const VmaVulkanFunctions& GetVulkanFunctions() const
    3470  {
    3471  return m_VulkanFunctions;
    3472  }
    3473 
    3474  VkDeviceSize GetBufferImageGranularity() const
    3475  {
    3476  return VMA_MAX(
    3477  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
    3478  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    3479  }
    3480 
    3481  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    3482  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
    3483 
    3484  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    3485  {
    3486  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
    3487  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    3488  }
    3489 
    3490  // Main allocation function.
    3491  VkResult AllocateMemory(
    3492  const VkMemoryRequirements& vkMemReq,
    3493  const VmaAllocationCreateInfo& createInfo,
    3494  VmaSuballocationType suballocType,
    3495  VmaAllocation* pAllocation);
    3496 
    3497  // Main deallocation function.
    3498  void FreeMemory(const VmaAllocation allocation);
    3499 
    3500  void CalculateStats(VmaStats* pStats);
    3501 
    3502 #if VMA_STATS_STRING_ENABLED
    3503  void PrintDetailedMap(class VmaJsonWriter& json);
    3504 #endif
    3505 
    3506  void UnmapPersistentlyMappedMemory();
    3507  VkResult MapPersistentlyMappedMemory();
    3508 
    3509  VkResult Defragment(
    3510  VmaAllocation* pAllocations,
    3511  size_t allocationCount,
    3512  VkBool32* pAllocationsChanged,
    3513  const VmaDefragmentationInfo* pDefragmentationInfo,
    3514  VmaDefragmentationStats* pDefragmentationStats);
    3515 
    3516  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    3517 
    3518  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    3519  void DestroyPool(VmaPool pool);
    3520  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
    3521 
    3522  void SetCurrentFrameIndex(uint32_t frameIndex);
    3523 
    3524  void MakePoolAllocationsLost(
    3525  VmaPool hPool,
    3526  size_t* pLostAllocationCount);
    3527 
    3528  void CreateLostAllocation(VmaAllocation* pAllocation);
    3529 
    3530  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    3531  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
    3532 
    3533 private:
    3534  VkDeviceSize m_PreferredLargeHeapBlockSize;
    3535  VkDeviceSize m_PreferredSmallHeapBlockSize;
    3536 
    3537  VkPhysicalDevice m_PhysicalDevice;
    3538  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
    3539 
    3540  VMA_MUTEX m_PoolsMutex;
    3541  // Protected by m_PoolsMutex. Sorted by pointer value.
    3542  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    3543 
    3544  VmaVulkanFunctions m_VulkanFunctions;
    3545 
    3546  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
    3547 
    3548  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
    3549 
    3550  VkResult AllocateMemoryOfType(
    3551  const VkMemoryRequirements& vkMemReq,
    3552  const VmaAllocationCreateInfo& createInfo,
    3553  uint32_t memTypeIndex,
    3554  VmaSuballocationType suballocType,
    3555  VmaAllocation* pAllocation);
    3556 
    3557  // Allocates and registers new VkDeviceMemory specifically for single allocation.
    3558  VkResult AllocateOwnMemory(
    3559  VkDeviceSize size,
    3560  VmaSuballocationType suballocType,
    3561  uint32_t memTypeIndex,
    3562  bool map,
    3563  void* pUserData,
    3564  VmaAllocation* pAllocation);
    3565 
     3566  // Frees and unregisters an allocation that has its own private VkDeviceMemory.
    3567  void FreeOwnMemory(VmaAllocation allocation);
    3568 };
    3569 
    3571 // Memory allocation #2 after VmaAllocator_T definition
    3572 
    3573 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
    3574 {
    3575  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
    3576 }
    3577 
    3578 static void VmaFree(VmaAllocator hAllocator, void* ptr)
    3579 {
    3580  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
    3581 }
    3582 
    3583 template<typename T>
    3584 static T* VmaAllocate(VmaAllocator hAllocator)
    3585 {
    3586  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
    3587 }
    3588 
    3589 template<typename T>
    3590 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
    3591 {
    3592  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
    3593 }
    3594 
    3595 template<typename T>
    3596 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    3597 {
    3598  if(ptr != VMA_NULL)
    3599  {
    3600  ptr->~T();
    3601  VmaFree(hAllocator, ptr);
    3602  }
    3603 }
    3604 
    3605 template<typename T>
    3606 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    3607 {
    3608  if(ptr != VMA_NULL)
    3609  {
    3610  for(size_t i = count; i--; )
    3611  ptr[i].~T();
    3612  VmaFree(hAllocator, ptr);
    3613  }
    3614 }
    3615 
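// Illustrative sketch (not part of the library): the VmaAllocator-based helpers
// above mirror the VkAllocationCallbacks-based ones defined earlier, simply
// forwarding to the allocator's stored callbacks.
/*
static void AllocationHelpersExample(VmaAllocator hAllocator)
{
    uint32_t* const arr = VmaAllocateArray<uint32_t>(hAllocator, 16);
    // ... use arr[0..15] (POD, so no constructors/destructors are involved) ...
    VmaFree(hAllocator, arr);
}
*/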
    3617 // VmaStringBuilder
    3618 
    3619 #if VMA_STATS_STRING_ENABLED
    3620 
    3621 class VmaStringBuilder
    3622 {
    3623 public:
    3624  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    3625  size_t GetLength() const { return m_Data.size(); }
    3626  const char* GetData() const { return m_Data.data(); }
    3627 
    3628  void Add(char ch) { m_Data.push_back(ch); }
    3629  void Add(const char* pStr);
    3630  void AddNewLine() { Add('\n'); }
    3631  void AddNumber(uint32_t num);
    3632  void AddNumber(uint64_t num);
    3633  void AddPointer(const void* ptr);
    3634 
    3635 private:
    3636  VmaVector< char, VmaStlAllocator<char> > m_Data;
    3637 };
    3638 
    3639 void VmaStringBuilder::Add(const char* pStr)
    3640 {
    3641  const size_t strLen = strlen(pStr);
    3642  if(strLen > 0)
    3643  {
    3644  const size_t oldCount = m_Data.size();
    3645  m_Data.resize(oldCount + strLen);
    3646  memcpy(m_Data.data() + oldCount, pStr, strLen);
    3647  }
    3648 }
    3649 
    3650 void VmaStringBuilder::AddNumber(uint32_t num)
    3651 {
    3652  char buf[11];
    3653  VmaUint32ToStr(buf, sizeof(buf), num);
    3654  Add(buf);
    3655 }
    3656 
    3657 void VmaStringBuilder::AddNumber(uint64_t num)
    3658 {
    3659  char buf[21];
    3660  VmaUint64ToStr(buf, sizeof(buf), num);
    3661  Add(buf);
    3662 }
    3663 
    3664 void VmaStringBuilder::AddPointer(const void* ptr)
    3665 {
    3666  char buf[21];
    3667  VmaPtrToStr(buf, sizeof(buf), ptr);
    3668  Add(buf);
    3669 }
    3670 
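// Illustrative sketch (not part of the library): assembling a diagnostic line.
/*
static void StringBuilderExample(VmaAllocator hAllocator)
{
    VmaStringBuilder sb(hAllocator);
    sb.Add("Allocations: ");
    sb.AddNumber(42u);
    sb.AddNewLine();
    // sb.GetData() now begins with "Allocations: 42\n". Note the buffer is not
    // null-terminated - always pair GetData() with GetLength().
}
*/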
    3671 #endif // #if VMA_STATS_STRING_ENABLED
    3672 
    3674 // VmaJsonWriter
    3675 
    3676 #if VMA_STATS_STRING_ENABLED
    3677 
    3678 class VmaJsonWriter
    3679 {
    3680 public:
    3681  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    3682  ~VmaJsonWriter();
    3683 
    3684  void BeginObject(bool singleLine = false);
    3685  void EndObject();
    3686 
    3687  void BeginArray(bool singleLine = false);
    3688  void EndArray();
    3689 
    3690  void WriteString(const char* pStr);
    3691  void BeginString(const char* pStr = VMA_NULL);
    3692  void ContinueString(const char* pStr);
    3693  void ContinueString(uint32_t n);
    3694  void ContinueString(uint64_t n);
    3695  void EndString(const char* pStr = VMA_NULL);
    3696 
    3697  void WriteNumber(uint32_t n);
    3698  void WriteNumber(uint64_t n);
    3699  void WriteBool(bool b);
    3700  void WriteNull();
    3701 
    3702 private:
    3703  static const char* const INDENT;
    3704 
    3705  enum COLLECTION_TYPE
    3706  {
    3707  COLLECTION_TYPE_OBJECT,
    3708  COLLECTION_TYPE_ARRAY,
    3709  };
    3710  struct StackItem
    3711  {
    3712  COLLECTION_TYPE type;
    3713  uint32_t valueCount;
    3714  bool singleLineMode;
    3715  };
    3716 
    3717  VmaStringBuilder& m_SB;
    3718  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    3719  bool m_InsideString;
    3720 
    3721  void BeginValue(bool isString);
    3722  void WriteIndent(bool oneLess = false);
    3723 };
    3724 
    3725 const char* const VmaJsonWriter::INDENT = " ";
    3726 
    3727 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    3728  m_SB(sb),
    3729  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    3730  m_InsideString(false)
    3731 {
    3732 }
    3733 
    3734 VmaJsonWriter::~VmaJsonWriter()
    3735 {
    3736  VMA_ASSERT(!m_InsideString);
    3737  VMA_ASSERT(m_Stack.empty());
    3738 }
    3739 
    3740 void VmaJsonWriter::BeginObject(bool singleLine)
    3741 {
    3742  VMA_ASSERT(!m_InsideString);
    3743 
    3744  BeginValue(false);
    3745  m_SB.Add('{');
    3746 
    3747  StackItem item;
    3748  item.type = COLLECTION_TYPE_OBJECT;
    3749  item.valueCount = 0;
    3750  item.singleLineMode = singleLine;
    3751  m_Stack.push_back(item);
    3752 }
    3753 
    3754 void VmaJsonWriter::EndObject()
    3755 {
    3756  VMA_ASSERT(!m_InsideString);
    3757 
    3758  WriteIndent(true);
    3759  m_SB.Add('}');
    3760 
    3761  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    3762  m_Stack.pop_back();
    3763 }
    3764 
    3765 void VmaJsonWriter::BeginArray(bool singleLine)
    3766 {
    3767  VMA_ASSERT(!m_InsideString);
    3768 
    3769  BeginValue(false);
    3770  m_SB.Add('[');
    3771 
    3772  StackItem item;
    3773  item.type = COLLECTION_TYPE_ARRAY;
    3774  item.valueCount = 0;
    3775  item.singleLineMode = singleLine;
    3776  m_Stack.push_back(item);
    3777 }
    3778 
    3779 void VmaJsonWriter::EndArray()
    3780 {
    3781  VMA_ASSERT(!m_InsideString);
    3782 
    3783  WriteIndent(true);
    3784  m_SB.Add(']');
    3785 
    3786  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    3787  m_Stack.pop_back();
    3788 }
    3789 
    3790 void VmaJsonWriter::WriteString(const char* pStr)
    3791 {
    3792  BeginString(pStr);
    3793  EndString();
    3794 }
    3795 
    3796 void VmaJsonWriter::BeginString(const char* pStr)
    3797 {
    3798  VMA_ASSERT(!m_InsideString);
    3799 
    3800  BeginValue(true);
    3801  m_SB.Add('"');
    3802  m_InsideString = true;
    3803  if(pStr != VMA_NULL && pStr[0] != '\0')
    3804  {
    3805  ContinueString(pStr);
    3806  }
    3807 }
    3808 
    3809 void VmaJsonWriter::ContinueString(const char* pStr)
    3810 {
    3811  VMA_ASSERT(m_InsideString);
    3812 
    3813  const size_t strLen = strlen(pStr);
    3814  for(size_t i = 0; i < strLen; ++i)
    3815  {
    3816  char ch = pStr[i];
    3817  if(ch == '\\')
    3818  {
    3819  m_SB.Add("\\\\");
    3820  }
    3821  else if(ch == '"')
    3822  {
    3823  m_SB.Add("\\\"");
    3824  }
    3825  else if(ch >= 32)
    3826  {
    3827  m_SB.Add(ch);
    3828  }
    3829  else switch(ch)
    3830  {
    3831  case '\n':
    3832  m_SB.Add("\\n");
    3833  break;
    3834  case '\r':
    3835  m_SB.Add("\\r");
    3836  break;
    3837  case '\t':
    3838  m_SB.Add("\\t");
    3839  break;
    3840  default:
    3841  VMA_ASSERT(0 && "Character not currently supported.");
    3842  break;
    3843  }
    3844  }
    3845 }
    3846 
    3847 void VmaJsonWriter::ContinueString(uint32_t n)
    3848 {
    3849  VMA_ASSERT(m_InsideString);
    3850  m_SB.AddNumber(n);
    3851 }
    3852 
    3853 void VmaJsonWriter::ContinueString(uint64_t n)
    3854 {
    3855  VMA_ASSERT(m_InsideString);
    3856  m_SB.AddNumber(n);
    3857 }
    3858 
    3859 void VmaJsonWriter::EndString(const char* pStr)
    3860 {
    3861  VMA_ASSERT(m_InsideString);
    3862  if(pStr != VMA_NULL && pStr[0] != '\0')
    3863  {
    3864  ContinueString(pStr);
    3865  }
    3866  m_SB.Add('"');
    3867  m_InsideString = false;
    3868 }
    3869 
    3870 void VmaJsonWriter::WriteNumber(uint32_t n)
    3871 {
    3872  VMA_ASSERT(!m_InsideString);
    3873  BeginValue(false);
    3874  m_SB.AddNumber(n);
    3875 }
    3876 
    3877 void VmaJsonWriter::WriteNumber(uint64_t n)
    3878 {
    3879  VMA_ASSERT(!m_InsideString);
    3880  BeginValue(false);
    3881  m_SB.AddNumber(n);
    3882 }
    3883 
    3884 void VmaJsonWriter::WriteBool(bool b)
    3885 {
    3886  VMA_ASSERT(!m_InsideString);
    3887  BeginValue(false);
    3888  m_SB.Add(b ? "true" : "false");
    3889 }
    3890 
    3891 void VmaJsonWriter::WriteNull()
    3892 {
    3893  VMA_ASSERT(!m_InsideString);
    3894  BeginValue(false);
    3895  m_SB.Add("null");
    3896 }
    3897 
    3898 void VmaJsonWriter::BeginValue(bool isString)
    3899 {
    3900  if(!m_Stack.empty())
    3901  {
    3902  StackItem& currItem = m_Stack.back();
    3903  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    3904  currItem.valueCount % 2 == 0)
    3905  {
    3906  VMA_ASSERT(isString);
    3907  }
    3908 
    3909  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    3910  currItem.valueCount % 2 != 0)
    3911  {
    3912  m_SB.Add(": ");
    3913  }
    3914  else if(currItem.valueCount > 0)
    3915  {
    3916  m_SB.Add(", ");
    3917  WriteIndent();
    3918  }
    3919  else
    3920  {
    3921  WriteIndent();
    3922  }
    3923  ++currItem.valueCount;
    3924  }
    3925 }
    3926 
    3927 void VmaJsonWriter::WriteIndent(bool oneLess)
    3928 {
    3929  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    3930  {
    3931  m_SB.AddNewLine();
    3932 
    3933  size_t count = m_Stack.size();
    3934  if(count > 0 && oneLess)
    3935  {
    3936  --count;
    3937  }
    3938  for(size_t i = 0; i < count; ++i)
    3939  {
    3940  m_SB.Add(INDENT);
    3941  }
    3942  }
    3943 }
    3944 
    3945 #endif // #if VMA_STATS_STRING_ENABLED
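// Illustrative driving sequence for VmaJsonWriter (not part of the original
// source; `hAllocator` and `sb` are assumed from the VmaStringBuilder example
// above). Inside an object, calls must alternate name/value - BeginValue()
// asserts that every even-positioned value is a string:
//
//     {
//         VmaJsonWriter json(hAllocator->GetAllocationCallbacks(), sb);
//         json.BeginObject();
//         json.WriteString("Count");      // Name...
//         json.WriteNumber(2u);           // ...then value.
//         json.WriteString("Flags");
//         json.BeginArray(true);          // true = single-line array.
//         json.WriteBool(true);
//         json.WriteNull();
//         json.EndArray();
//         json.EndObject();
//     } // The destructor asserts that the object/array stack is empty.
//
// With the single-space INDENT defined above, this emits:
//
//     {
//      "Count": 2,
//      "Flags": [true, null]
//     }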
    3946 
    3948 // class VmaAllocation_T
    3949 VkDeviceSize VmaAllocation_T::GetOffset() const
    3950 {
    3951  switch(m_Type)
    3952  {
    3953  case ALLOCATION_TYPE_BLOCK:
    3954  return m_BlockAllocation.m_Offset;
    3955  case ALLOCATION_TYPE_OWN:
    3956  return 0;
    3957  default:
    3958  VMA_ASSERT(0);
    3959  return 0;
    3960  }
    3961 }
    3962 
    3963 VkDeviceMemory VmaAllocation_T::GetMemory() const
    3964 {
    3965  switch(m_Type)
    3966  {
    3967  case ALLOCATION_TYPE_BLOCK:
    3968  return m_BlockAllocation.m_Block->m_hMemory;
    3969  case ALLOCATION_TYPE_OWN:
    3970  return m_OwnAllocation.m_hMemory;
    3971  default:
    3972  VMA_ASSERT(0);
    3973  return VK_NULL_HANDLE;
    3974  }
    3975 }
    3976 
    3977 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    3978 {
    3979  switch(m_Type)
    3980  {
    3981  case ALLOCATION_TYPE_BLOCK:
    3982  return m_BlockAllocation.m_Block->m_MemoryTypeIndex;
    3983  case ALLOCATION_TYPE_OWN:
    3984  return m_OwnAllocation.m_MemoryTypeIndex;
    3985  default:
    3986  VMA_ASSERT(0);
    3987  return UINT32_MAX;
    3988  }
    3989 }
    3990 
    3991 VMA_BLOCK_VECTOR_TYPE VmaAllocation_T::GetBlockVectorType() const
    3992 {
    3993  switch(m_Type)
    3994  {
    3995  case ALLOCATION_TYPE_BLOCK:
    3996  return m_BlockAllocation.m_Block->m_BlockVectorType;
    3997  case ALLOCATION_TYPE_OWN:
    3998  return (m_OwnAllocation.m_PersistentMap ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED);
    3999  default:
    4000  VMA_ASSERT(0);
    4001  return VMA_BLOCK_VECTOR_TYPE_COUNT;
    4002  }
    4003 }
    4004 
    4005 void* VmaAllocation_T::GetMappedData() const
    4006 {
    4007  switch(m_Type)
    4008  {
    4009  case ALLOCATION_TYPE_BLOCK:
    4010  if(m_BlockAllocation.m_Block->m_pMappedData != VMA_NULL)
    4011  {
    4012  return (char*)m_BlockAllocation.m_Block->m_pMappedData + m_BlockAllocation.m_Offset;
    4013  }
    4014  else
    4015  {
    4016  return VMA_NULL;
    4017  }
    4018  break;
    4019  case ALLOCATION_TYPE_OWN:
    4020  return m_OwnAllocation.m_pMappedData;
    4021  default:
    4022  VMA_ASSERT(0);
    4023  return VMA_NULL;
    4024  }
    4025 }
    4026 
    4027 bool VmaAllocation_T::CanBecomeLost() const
    4028 {
    4029  switch(m_Type)
    4030  {
    4031  case ALLOCATION_TYPE_BLOCK:
    4032  return m_BlockAllocation.m_CanBecomeLost;
    4033  case ALLOCATION_TYPE_OWN:
    4034  return false;
    4035  default:
    4036  VMA_ASSERT(0);
    4037  return false;
    4038  }
    4039 }
    4040 
    4041 VmaPool VmaAllocation_T::GetPool() const
    4042 {
    4043  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    4044  return m_BlockAllocation.m_hPool;
    4045 }
    4046 
    4047 VkResult VmaAllocation_T::OwnAllocMapPersistentlyMappedMemory(VmaAllocator hAllocator)
    4048 {
    4049  VMA_ASSERT(m_Type == ALLOCATION_TYPE_OWN);
    4050  if(m_OwnAllocation.m_PersistentMap)
    4051  {
    4052  return (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    4053  hAllocator->m_hDevice,
    4054  m_OwnAllocation.m_hMemory,
    4055  0,
    4056  VK_WHOLE_SIZE,
    4057  0,
    4058  &m_OwnAllocation.m_pMappedData);
    4059  }
    4060  return VK_SUCCESS;
    4061 }
    4062 void VmaAllocation_T::OwnAllocUnmapPersistentlyMappedMemory(VmaAllocator hAllocator)
    4063 {
    4064  VMA_ASSERT(m_Type == ALLOCATION_TYPE_OWN);
    4065  if(m_OwnAllocation.m_pMappedData)
    4066  {
    4067  VMA_ASSERT(m_OwnAllocation.m_PersistentMap);
    4068  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_OwnAllocation.m_hMemory);
    4069  m_OwnAllocation.m_pMappedData = VMA_NULL;
    4070  }
    4071 }
    4072 
    4073 
    4074 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    4075 {
    4076  VMA_ASSERT(CanBecomeLost());
    4077 
    4078  /*
    4079  Warning: This is a carefully designed algorithm.
    4080  Do not modify unless you really know what you're doing :)
    4081  */
    4082  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    4083  for(;;)
    4084  {
    4085  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    4086  {
    4087  VMA_ASSERT(0);
    4088  return false;
    4089  }
    4090  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
    4091  {
    4092  return false;
    4093  }
    4094  else // Last use time earlier than current time.
    4095  {
    4096  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
    4097  {
    4098  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
    4099  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
    4100  return true;
    4101  }
    4102  }
    4103  }
    4104 }
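// A standalone sketch of the lock-free pattern used above (hypothetical code,
// not part of the library - the real counter lives inside VmaAllocation_T and
// is accessed through CompareExchangeLastUseFrameIndex()):
//
//     #include <atomic>
//     #include <cstdint>
//
//     std::atomic<uint32_t> lastUseFrameIndex;
//
//     bool TryMarkLost(uint32_t currentFrame, uint32_t framesInUse, uint32_t lostMarker)
//     {
//         uint32_t observed = lastUseFrameIndex.load();
//         for(;;)
//         {
//             if(observed == lostMarker)
//                 return false; // Another thread already marked it lost.
//             if(observed + framesInUse >= currentFrame)
//                 return false; // Still potentially in use on the GPU.
//             // Publish the marker only if no other thread changed the value
//             // in the meantime; on failure `observed` is refreshed and the
//             // checks above are repeated.
//             if(lastUseFrameIndex.compare_exchange_weak(observed, lostMarker))
//                 return true;
//         }
//     }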
    4105 
    4106 #if VMA_STATS_STRING_ENABLED
    4107 
    4108  // Corresponds to values of enum VmaSuballocationType.
    4109 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    4110  "FREE",
    4111  "UNKNOWN",
    4112  "BUFFER",
    4113  "IMAGE_UNKNOWN",
    4114  "IMAGE_LINEAR",
    4115  "IMAGE_OPTIMAL",
    4116 };
    4117 
    4118 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
    4119 {
    4120  json.BeginObject();
    4121 
    4122  json.WriteString("Blocks");
    4123  json.WriteNumber(stat.blockCount);
    4124 
    4125  json.WriteString("Allocations");
    4126  json.WriteNumber(stat.allocationCount);
    4127 
    4128  json.WriteString("UnusedRanges");
    4129  json.WriteNumber(stat.unusedRangeCount);
    4130 
    4131  json.WriteString("UsedBytes");
    4132  json.WriteNumber(stat.usedBytes);
    4133 
    4134  json.WriteString("UnusedBytes");
    4135  json.WriteNumber(stat.unusedBytes);
    4136 
    4137  if(stat.allocationCount > 1)
    4138  {
    4139  json.WriteString("AllocationSize");
    4140  json.BeginObject(true);
    4141  json.WriteString("Min");
    4142  json.WriteNumber(stat.allocationSizeMin);
    4143  json.WriteString("Avg");
    4144  json.WriteNumber(stat.allocationSizeAvg);
    4145  json.WriteString("Max");
    4146  json.WriteNumber(stat.allocationSizeMax);
    4147  json.EndObject();
    4148  }
    4149 
    4150  if(stat.unusedRangeCount > 1)
    4151  {
    4152  json.WriteString("UnusedRangeSize");
    4153  json.BeginObject(true);
    4154  json.WriteString("Min");
    4155  json.WriteNumber(stat.unusedRangeSizeMin);
    4156  json.WriteString("Avg");
    4157  json.WriteNumber(stat.unusedRangeSizeAvg);
    4158  json.WriteString("Max");
    4159  json.WriteNumber(stat.unusedRangeSizeMax);
    4160  json.EndObject();
    4161  }
    4162 
    4163  json.EndObject();
    4164 }
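// Example of the fragment VmaPrintStatInfo() produces, with illustrative
// values (2 allocations of 256 and 512 bytes, 2 unused ranges of 64 and 192):
//
//     {
//      "Blocks": 1,
//      "Allocations": 2,
//      "UnusedRanges": 2,
//      "UsedBytes": 768,
//      "UnusedBytes": 256,
//      "AllocationSize": {"Min": 256, "Avg": 384, "Max": 512},
//      "UnusedRangeSize": {"Min": 64, "Avg": 128, "Max": 192}
//     }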
    4165 
    4166 #endif // #if VMA_STATS_STRING_ENABLED
    4167 
    4168 struct VmaSuballocationItemSizeLess
    4169 {
    4170  bool operator()(
    4171  const VmaSuballocationList::iterator lhs,
    4172  const VmaSuballocationList::iterator rhs) const
    4173  {
    4174  return lhs->size < rhs->size;
    4175  }
    4176  bool operator()(
    4177  const VmaSuballocationList::iterator lhs,
    4178  VkDeviceSize rhsSize) const
    4179  {
    4180  return lhs->size < rhsSize;
    4181  }
    4182 };
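// The two operator() overloads make this comparator usable both for keeping
// m_FreeSuballocationsBySize sorted (iterator vs. iterator) and for binary
// search against a raw size (iterator vs. VkDeviceSize). Illustrative lookup,
// mirroring the call in CreateAllocationRequest below:
//
//     // First registered free suballocation with size >= 256:
//     VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
//         m_FreeSuballocationsBySize.data(),
//         m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
//         (VkDeviceSize)256,
//         VmaSuballocationItemSizeLess());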
    4183 
    4185 // class VmaBlockMetadata
    4186 
    4187 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    4188  m_Size(0),
    4189  m_FreeCount(0),
    4190  m_SumFreeSize(0),
    4191  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    4192  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
    4193 {
    4194 }
    4195 
    4196 VmaBlockMetadata::~VmaBlockMetadata()
    4197 {
    4198 }
    4199 
    4200 void VmaBlockMetadata::Init(VkDeviceSize size)
    4201 {
    4202  m_Size = size;
    4203  m_FreeCount = 1;
    4204  m_SumFreeSize = size;
    4205 
    4206  VmaSuballocation suballoc = {};
    4207  suballoc.offset = 0;
    4208  suballoc.size = size;
    4209  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    4210  suballoc.hAllocation = VK_NULL_HANDLE;
    4211 
    4212  m_Suballocations.push_back(suballoc);
    4213  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    4214  --suballocItem;
    4215  m_FreeSuballocationsBySize.push_back(suballocItem);
    4216 }
    4217 
    4218 bool VmaBlockMetadata::Validate() const
    4219 {
    4220  if(m_Suballocations.empty())
    4221  {
    4222  return false;
    4223  }
    4224 
    4225  // Expected offset of new suballocation as calculated from previous ones.
    4226  VkDeviceSize calculatedOffset = 0;
    4227  // Expected number of free suballocations as calculated from traversing their list.
    4228  uint32_t calculatedFreeCount = 0;
    4229  // Expected sum size of free suballocations as calculated from traversing their list.
    4230  VkDeviceSize calculatedSumFreeSize = 0;
    4231  // Expected number of free suballocations that should be registered in
    4232  // m_FreeSuballocationsBySize calculated from traversing their list.
    4233  size_t freeSuballocationsToRegister = 0;
    4234  // True if previously visited suballocation was free.
    4235  bool prevFree = false;
    4236 
    4237  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    4238  suballocItem != m_Suballocations.cend();
    4239  ++suballocItem)
    4240  {
    4241  const VmaSuballocation& subAlloc = *suballocItem;
    4242 
    4243  // Actual offset of this suballocation doesn't match the expected one.
    4244  if(subAlloc.offset != calculatedOffset)
    4245  {
    4246  return false;
    4247  }
    4248 
    4249  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
    4250  // Two adjacent free suballocations are invalid. They should be merged.
    4251  if(prevFree && currFree)
    4252  {
    4253  return false;
    4254  }
    4255  prevFree = currFree;
    4256 
    4257  if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
    4258  {
    4259  return false;
    4260  }
    4261 
    4262  if(currFree)
    4263  {
    4264  calculatedSumFreeSize += subAlloc.size;
    4265  ++calculatedFreeCount;
    4266  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    4267  {
    4268  ++freeSuballocationsToRegister;
    4269  }
    4270  }
    4271 
    4272  calculatedOffset += subAlloc.size;
    4273  }
    4274 
    4275  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    4276  // match expected one.
    4277  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
    4278  {
    4279  return false;
    4280  }
    4281 
    4282  VkDeviceSize lastSize = 0;
    4283  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    4284  {
    4285  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
    4286 
    4287  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
    4288  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
    4289  {
    4290  return false;
    4291  }
    4292  // They must be sorted by size ascending.
    4293  if(suballocItem->size < lastSize)
    4294  {
    4295  return false;
    4296  }
    4297 
    4298  lastSize = suballocItem->size;
    4299  }
    4300 
    4301  // Check if totals match calculated values.
    4302  return
    4303  ValidateFreeSuballocationList() &&
    4304  (calculatedOffset == m_Size) &&
    4305  (calculatedSumFreeSize == m_SumFreeSize) &&
    4306  (calculatedFreeCount == m_FreeCount);
    4307 }
    4308 
    4309 VkDeviceSize VmaBlockMetadata::GetUnusedRangeSizeMax() const
    4310 {
    4311  if(!m_FreeSuballocationsBySize.empty())
    4312  {
    4313  return m_FreeSuballocationsBySize.back()->size;
    4314  }
    4315  else
    4316  {
    4317  return 0;
    4318  }
    4319 }
    4320 
    4321 bool VmaBlockMetadata::IsEmpty() const
    4322 {
    4323  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
    4324 }
    4325 
    4326 void VmaBlockMetadata::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    4327 {
    4328  outInfo.blockCount = 1;
    4329 
    4330  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    4331  outInfo.allocationCount = rangeCount - m_FreeCount;
    4332  outInfo.unusedRangeCount = m_FreeCount;
    4333 
    4334  outInfo.unusedBytes = m_SumFreeSize;
    4335  outInfo.usedBytes = m_Size - outInfo.unusedBytes;
    4336 
    4337  outInfo.allocationSizeMin = UINT64_MAX;
    4338  outInfo.allocationSizeMax = 0;
    4339  outInfo.unusedRangeSizeMin = UINT64_MAX;
    4340  outInfo.unusedRangeSizeMax = 0;
    4341 
    4342  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    4343  suballocItem != m_Suballocations.cend();
    4344  ++suballocItem)
    4345  {
    4346  const VmaSuballocation& suballoc = *suballocItem;
    4347  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    4348  {
    4349  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    4350  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    4351  }
    4352  else
    4353  {
    4354  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
    4355  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
    4356  }
    4357  }
    4358 }
    4359 
    4360 void VmaBlockMetadata::AddPoolStats(VmaPoolStats& inoutStats) const
    4361 {
    4362  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    4363 
    4364  inoutStats.size += m_Size;
    4365  inoutStats.unusedSize += m_SumFreeSize;
    4366  inoutStats.allocationCount += rangeCount - m_FreeCount;
    4367  inoutStats.unusedRangeCount += m_FreeCount;
    4368  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    4369 }
    4370 
    4371 #if VMA_STATS_STRING_ENABLED
    4372 
    4373 void VmaBlockMetadata::PrintDetailedMap(class VmaJsonWriter& json) const
    4374 {
    4375  json.BeginObject();
    4376 
    4377  json.WriteString("TotalBytes");
    4378  json.WriteNumber(m_Size);
    4379 
    4380  json.WriteString("UnusedBytes");
    4381  json.WriteNumber(m_SumFreeSize);
    4382 
    4383  json.WriteString("Allocations");
    4384  json.WriteNumber(m_Suballocations.size() - m_FreeCount);
    4385 
    4386  json.WriteString("UnusedRanges");
    4387  json.WriteNumber(m_FreeCount);
    4388 
    4389  json.WriteString("Suballocations");
    4390  json.BeginArray();
    4391  size_t i = 0;
    4392  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    4393  suballocItem != m_Suballocations.cend();
    4394  ++suballocItem, ++i)
    4395  {
    4396  json.BeginObject(true);
    4397 
    4398  json.WriteString("Type");
    4399  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
    4400 
    4401  json.WriteString("Size");
    4402  json.WriteNumber(suballocItem->size);
    4403 
    4404  json.WriteString("Offset");
    4405  json.WriteNumber(suballocItem->offset);
    4406 
    4407  json.EndObject();
    4408  }
    4409  json.EndArray();
    4410 
    4411  json.EndObject();
    4412 }
    4413 
    4414 #endif // #if VMA_STATS_STRING_ENABLED
    4415 
    4416 /*
    4417 How many suitable free suballocations to analyze before choosing best one.
    4418 - Set to 1 to use First-Fit algorithm - first suitable free suballocation will
    4419  be chosen.
    4420 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
    4421  suballocations will be analyzed and the best one will be chosen.
    4422 - Any other value is also acceptable.
    4423 */
    4424 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
    4425 
    4426 void VmaBlockMetadata::CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest)
    4427 {
    4428  VMA_ASSERT(IsEmpty());
    4429  pAllocationRequest->offset = 0;
    4430  pAllocationRequest->sumFreeSize = m_SumFreeSize;
    4431  pAllocationRequest->sumItemSize = 0;
    4432  pAllocationRequest->item = m_Suballocations.begin();
    4433  pAllocationRequest->itemsToMakeLostCount = 0;
    4434 }
    4435 
    4436 bool VmaBlockMetadata::CreateAllocationRequest(
    4437  uint32_t currentFrameIndex,
    4438  uint32_t frameInUseCount,
    4439  VkDeviceSize bufferImageGranularity,
    4440  VkDeviceSize allocSize,
    4441  VkDeviceSize allocAlignment,
    4442  VmaSuballocationType allocType,
    4443  bool canMakeOtherLost,
    4444  VmaAllocationRequest* pAllocationRequest)
    4445 {
    4446  VMA_ASSERT(allocSize > 0);
    4447  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    4448  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    4449  VMA_HEAVY_ASSERT(Validate());
    4450 
    4451  // There is not enough total free space in this block to fulfill the request: Early return.
    4452  if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
    4453  {
    4454  return false;
    4455  }
    4456 
    4457  // New algorithm, efficiently searching freeSuballocationsBySize.
    4458  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    4459  if(freeSuballocCount > 0)
    4460  {
    4461  if(VMA_BEST_FIT)
    4462  {
    4463  // Find first free suballocation with size not less than allocSize.
    4464  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    4465  m_FreeSuballocationsBySize.data(),
    4466  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    4467  allocSize,
    4468  VmaSuballocationItemSizeLess());
    4469  size_t index = it - m_FreeSuballocationsBySize.data();
    4470  for(; index < freeSuballocCount; ++index)
    4471  {
    4472  if(CheckAllocation(
    4473  currentFrameIndex,
    4474  frameInUseCount,
    4475  bufferImageGranularity,
    4476  allocSize,
    4477  allocAlignment,
    4478  allocType,
    4479  m_FreeSuballocationsBySize[index],
    4480  false, // canMakeOtherLost
    4481  &pAllocationRequest->offset,
    4482  &pAllocationRequest->itemsToMakeLostCount,
    4483  &pAllocationRequest->sumFreeSize,
    4484  &pAllocationRequest->sumItemSize))
    4485  {
    4486  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    4487  return true;
    4488  }
    4489  }
    4490  }
    4491  else
    4492  {
    4493  // Search starting from biggest suballocations.
    4494  for(size_t index = freeSuballocCount; index--; )
    4495  {
    4496  if(CheckAllocation(
    4497  currentFrameIndex,
    4498  frameInUseCount,
    4499  bufferImageGranularity,
    4500  allocSize,
    4501  allocAlignment,
    4502  allocType,
    4503  m_FreeSuballocationsBySize[index],
    4504  false, // canMakeOtherLost
    4505  &pAllocationRequest->offset,
    4506  &pAllocationRequest->itemsToMakeLostCount,
    4507  &pAllocationRequest->sumFreeSize,
    4508  &pAllocationRequest->sumItemSize))
    4509  {
    4510  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    4511  return true;
    4512  }
    4513  }
    4514  }
    4515  }
    4516 
    4517  if(canMakeOtherLost)
    4518  {
    4519  // Brute-force algorithm. TODO: Come up with something better.
    4520 
    4521  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
    4522  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
    4523 
    4524  VmaAllocationRequest tmpAllocRequest = {};
    4525  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    4526  suballocIt != m_Suballocations.end();
    4527  ++suballocIt)
    4528  {
    4529  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    4530  suballocIt->hAllocation->CanBecomeLost())
    4531  {
    4532  if(CheckAllocation(
    4533  currentFrameIndex,
    4534  frameInUseCount,
    4535  bufferImageGranularity,
    4536  allocSize,
    4537  allocAlignment,
    4538  allocType,
    4539  suballocIt,
    4540  canMakeOtherLost,
    4541  &tmpAllocRequest.offset,
    4542  &tmpAllocRequest.itemsToMakeLostCount,
    4543  &tmpAllocRequest.sumFreeSize,
    4544  &tmpAllocRequest.sumItemSize))
    4545  {
    4546  tmpAllocRequest.item = suballocIt;
    4547 
    4548  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
    4549  {
    4550  *pAllocationRequest = tmpAllocRequest;
    4551  }
    4552  }
    4553  }
    4554  }
    4555 
    4556  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
    4557  {
    4558  return true;
    4559  }
    4560  }
    4561 
    4562  return false;
    4563 }
    4564 
    4565 bool VmaBlockMetadata::MakeRequestedAllocationsLost(
    4566  uint32_t currentFrameIndex,
    4567  uint32_t frameInUseCount,
    4568  VmaAllocationRequest* pAllocationRequest)
    4569 {
    4570  while(pAllocationRequest->itemsToMakeLostCount > 0)
    4571  {
    4572  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
    4573  {
    4574  ++pAllocationRequest->item;
    4575  }
    4576  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    4577  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
    4578  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
    4579  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    4580  {
    4581  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
    4582  --pAllocationRequest->itemsToMakeLostCount;
    4583  }
    4584  else
    4585  {
    4586  return false;
    4587  }
    4588  }
    4589 
    4590  VMA_HEAVY_ASSERT(Validate());
    4591  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    4592  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
    4593 
    4594  return true;
    4595 }
    4596 
    4597 uint32_t VmaBlockMetadata::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    4598 {
    4599  uint32_t lostAllocationCount = 0;
    4600  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    4601  it != m_Suballocations.end();
    4602  ++it)
    4603  {
    4604  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
    4605  it->hAllocation->CanBecomeLost() &&
    4606  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    4607  {
    4608  it = FreeSuballocation(it);
    4609  ++lostAllocationCount;
    4610  }
    4611  }
    4612  return lostAllocationCount;
    4613 }
    4614 
    4615 void VmaBlockMetadata::Alloc(
    4616  const VmaAllocationRequest& request,
    4617  VmaSuballocationType type,
    4618  VkDeviceSize allocSize,
    4619  VmaAllocation hAllocation)
    4620 {
    4621  VMA_ASSERT(request.item != m_Suballocations.end());
    4622  VmaSuballocation& suballoc = *request.item;
    4623  // Given suballocation is a free block.
    4624  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    4625  // Given offset is inside this suballocation.
    4626  VMA_ASSERT(request.offset >= suballoc.offset);
    4627  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    4628  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    4629  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
    4630 
    4631  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    4632  // it to become used.
    4633  UnregisterFreeSuballocation(request.item);
    4634 
    4635  suballoc.offset = request.offset;
    4636  suballoc.size = allocSize;
    4637  suballoc.type = type;
    4638  suballoc.hAllocation = hAllocation;
    4639 
    4640  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    4641  if(paddingEnd)
    4642  {
    4643  VmaSuballocation paddingSuballoc = {};
    4644  paddingSuballoc.offset = request.offset + allocSize;
    4645  paddingSuballoc.size = paddingEnd;
    4646  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    4647  VmaSuballocationList::iterator next = request.item;
    4648  ++next;
    4649  const VmaSuballocationList::iterator paddingEndItem =
    4650  m_Suballocations.insert(next, paddingSuballoc);
    4651  RegisterFreeSuballocation(paddingEndItem);
    4652  }
    4653 
    4654  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    4655  if(paddingBegin)
    4656  {
    4657  VmaSuballocation paddingSuballoc = {};
    4658  paddingSuballoc.offset = request.offset - paddingBegin;
    4659  paddingSuballoc.size = paddingBegin;
    4660  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    4661  const VmaSuballocationList::iterator paddingBeginItem =
    4662  m_Suballocations.insert(request.item, paddingSuballoc);
    4663  RegisterFreeSuballocation(paddingBeginItem);
    4664  }
    4665 
    4666  // Update totals.
    4667  m_FreeCount = m_FreeCount - 1;
    4668  if(paddingBegin > 0)
    4669  {
    4670  ++m_FreeCount;
    4671  }
    4672  if(paddingEnd > 0)
    4673  {
    4674  ++m_FreeCount;
    4675  }
    4676  m_SumFreeSize -= allocSize;
    4677 }
    4678 
    4679 void VmaBlockMetadata::Free(const VmaAllocation allocation)
    4680 {
    4681  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    4682  suballocItem != m_Suballocations.end();
    4683  ++suballocItem)
    4684  {
    4685  VmaSuballocation& suballoc = *suballocItem;
    4686  if(suballoc.hAllocation == allocation)
    4687  {
    4688  FreeSuballocation(suballocItem);
    4689  VMA_HEAVY_ASSERT(Validate());
    4690  return;
    4691  }
    4692  }
    4693  VMA_ASSERT(0 && "Not found!");
    4694 }
    4695 
    4696 bool VmaBlockMetadata::ValidateFreeSuballocationList() const
    4697 {
    4698  VkDeviceSize lastSize = 0;
    4699  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    4700  {
    4701  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    4702 
    4703  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    4704  {
    4705  VMA_ASSERT(0);
    4706  return false;
    4707  }
    4708  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    4709  {
    4710  VMA_ASSERT(0);
    4711  return false;
    4712  }
    4713  if(it->size < lastSize)
    4714  {
    4715  VMA_ASSERT(0);
    4716  return false;
    4717  }
    4718 
    4719  lastSize = it->size;
    4720  }
    4721  return true;
    4722 }
    4723 
    4724 bool VmaBlockMetadata::CheckAllocation(
    4725  uint32_t currentFrameIndex,
    4726  uint32_t frameInUseCount,
    4727  VkDeviceSize bufferImageGranularity,
    4728  VkDeviceSize allocSize,
    4729  VkDeviceSize allocAlignment,
    4730  VmaSuballocationType allocType,
    4731  VmaSuballocationList::const_iterator suballocItem,
    4732  bool canMakeOtherLost,
    4733  VkDeviceSize* pOffset,
    4734  size_t* itemsToMakeLostCount,
    4735  VkDeviceSize* pSumFreeSize,
    4736  VkDeviceSize* pSumItemSize) const
    4737 {
    4738  VMA_ASSERT(allocSize > 0);
    4739  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    4740  VMA_ASSERT(suballocItem != m_Suballocations.cend());
    4741  VMA_ASSERT(pOffset != VMA_NULL);
    4742 
    4743  *itemsToMakeLostCount = 0;
    4744  *pSumFreeSize = 0;
    4745  *pSumItemSize = 0;
    4746 
    4747  if(canMakeOtherLost)
    4748  {
    4749  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    4750  {
    4751  *pSumFreeSize = suballocItem->size;
    4752  }
    4753  else
    4754  {
    4755  if(suballocItem->hAllocation->CanBecomeLost() &&
    4756  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    4757  {
    4758  ++*itemsToMakeLostCount;
    4759  *pSumItemSize = suballocItem->size;
    4760  }
    4761  else
    4762  {
    4763  return false;
    4764  }
    4765  }
    4766 
    4767  // Remaining size is too small for this request: Early return.
    4768  if(m_Size - suballocItem->offset < allocSize)
    4769  {
    4770  return false;
    4771  }
    4772 
    4773  // Start from offset equal to beginning of this suballocation.
    4774  *pOffset = suballocItem->offset;
    4775 
    4776  // Apply VMA_DEBUG_MARGIN at the beginning.
    4777  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
    4778  {
    4779  *pOffset += VMA_DEBUG_MARGIN;
    4780  }
    4781 
    4782  // Apply alignment.
    4783  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
    4784  *pOffset = VmaAlignUp(*pOffset, alignment);
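// Illustrative arithmetic (not in the original source): with allocAlignment = 8
// and VMA_DEBUG_ALIGNMENT = 1, an initial *pOffset of 13 becomes
// VmaAlignUp(13, 8) = ((13 + 7) / 8) * 8 = 16.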
    4785 
    4786  // Check previous suballocations for BufferImageGranularity conflicts.
    4787  // Make bigger alignment if necessary.
    4788  if(bufferImageGranularity > 1)
    4789  {
    4790  bool bufferImageGranularityConflict = false;
    4791  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
    4792  while(prevSuballocItem != m_Suballocations.cbegin())
    4793  {
    4794  --prevSuballocItem;
    4795  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
    4796  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
    4797  {
    4798  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    4799  {
    4800  bufferImageGranularityConflict = true;
    4801  break;
    4802  }
    4803  }
    4804  else
    4805  // Already on previous page.
    4806  break;
    4807  }
    4808  if(bufferImageGranularityConflict)
    4809  {
    4810  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
    4811  }
    4812  }
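// Illustrative example (not in the original source): with
// bufferImageGranularity = 4096, an optimal-tiling image occupying bytes
// [0, 4000) ends on page 0 (3999 / 4096 == 0), so a buffer candidate at
// *pOffset = 4000 would share that page - a conflict - and *pOffset is
// bumped to VmaAlignUp(4000, 4096) = 4096, the start of the next page.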
    4813 
    4814  // Now that we have final *pOffset, check if we are past suballocItem.
    4815  // If yes, return false - this function should be called for another suballocItem as starting point.
    4816  if(*pOffset >= suballocItem->offset + suballocItem->size)
    4817  {
    4818  return false;
    4819  }
    4820 
    4821  // Calculate padding at the beginning based on current offset.
    4822  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
    4823 
    4824  // Calculate required margin at the end if this is not last suballocation.
    4825  VmaSuballocationList::const_iterator next = suballocItem;
    4826  ++next;
    4827  const VkDeviceSize requiredEndMargin =
    4828  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
    4829 
    4830  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
    4831  // Another early return check.
    4832  if(suballocItem->offset + totalSize > m_Size)
    4833  {
    4834  return false;
    4835  }
    4836 
    4837  // Advance lastSuballocItem until desired size is reached.
    4838  // Update itemsToMakeLostCount.
    4839  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
    4840  if(totalSize > suballocItem->size)
    4841  {
    4842  VkDeviceSize remainingSize = totalSize - suballocItem->size;
    4843  while(remainingSize > 0)
    4844  {
    4845  ++lastSuballocItem;
    4846  if(lastSuballocItem == m_Suballocations.cend())
    4847  {
    4848  return false;
    4849  }
    4850  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    4851  {
    4852  *pSumFreeSize += lastSuballocItem->size;
    4853  }
    4854  else
    4855  {
    4856  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
    4857  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
    4858  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    4859  {
    4860  ++*itemsToMakeLostCount;
    4861  *pSumItemSize += lastSuballocItem->size;
    4862  }
    4863  else
    4864  {
    4865  return false;
    4866  }
    4867  }
    4868  remainingSize = (lastSuballocItem->size < remainingSize) ?
    4869  remainingSize - lastSuballocItem->size : 0;
    4870  }
    4871  }
    4872 
    4873  // Check next suballocations for BufferImageGranularity conflicts.
    4874  // If conflict exists, we must mark more allocations lost or fail.
    4875  if(bufferImageGranularity > 1)
    4876  {
    4877  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
    4878  ++nextSuballocItem;
    4879  while(nextSuballocItem != m_Suballocations.cend())
    4880  {
    4881  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
    4882  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    4883  {
    4884  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    4885  {
    4886  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
    4887  if(nextSuballoc.hAllocation->CanBecomeLost() &&
    4888  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    4889  {
    4890  ++*itemsToMakeLostCount;
    4891  }
    4892  else
    4893  {
    4894  return false;
    4895  }
    4896  }
    4897  }
    4898  else
    4899  {
    4900  // Already on next page.
    4901  break;
    4902  }
    4903  ++nextSuballocItem;
    4904  }
    4905  }
    4906  }
    4907  else
    4908  {
    4909  const VmaSuballocation& suballoc = *suballocItem;
    4910  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    4911 
    4912  *pSumFreeSize = suballoc.size;
    4913 
    4914  // Size of this suballocation is too small for this request: Early return.
    4915  if(suballoc.size < allocSize)
    4916  {
    4917  return false;
    4918  }
    4919 
    4920  // Start from offset equal to beginning of this suballocation.
    4921  *pOffset = suballoc.offset;
    4922 
    4923  // Apply VMA_DEBUG_MARGIN at the beginning.
    4924  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
    4925  {
    4926  *pOffset += VMA_DEBUG_MARGIN;
    4927  }
    4928 
    4929  // Apply alignment.
    4930  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
    4931  *pOffset = VmaAlignUp(*pOffset, alignment);
    4932 
    4933  // Check previous suballocations for BufferImageGranularity conflicts.
    4934  // Make bigger alignment if necessary.
    4935  if(bufferImageGranularity > 1)
    4936  {
    4937  bool bufferImageGranularityConflict = false;
    4938  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
    4939  while(prevSuballocItem != m_Suballocations.cbegin())
    4940  {
    4941  --prevSuballocItem;
    4942  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
    4943  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
    4944  {
    4945  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    4946  {
    4947  bufferImageGranularityConflict = true;
    4948  break;
    4949  }
    4950  }
    4951  else
    4952  // Already on previous page.
    4953  break;
    4954  }
    4955  if(bufferImageGranularityConflict)
    4956  {
    4957  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
    4958  }
    4959  }
    4960 
    4961  // Calculate padding at the beginning based on current offset.
    4962  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
    4963 
    4964  // Calculate required margin at the end if this is not last suballocation.
    4965  VmaSuballocationList::const_iterator next = suballocItem;
    4966  ++next;
    4967  const VkDeviceSize requiredEndMargin =
    4968  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
    4969 
    4970  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
    4971  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
    4972  {
    4973  return false;
    4974  }
    4975 
    4976  // Check next suballocations for BufferImageGranularity conflicts.
    4977  // If conflict exists, allocation cannot be made here.
    4978  if(bufferImageGranularity > 1)
    4979  {
    4980  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
    4981  ++nextSuballocItem;
    4982  while(nextSuballocItem != m_Suballocations.cend())
    4983  {
    4984  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
    4985  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    4986  {
    4987  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    4988  {
    4989  return false;
    4990  }
    4991  }
    4992  else
    4993  {
    4994  // Already on next page.
    4995  break;
    4996  }
    4997  ++nextSuballocItem;
    4998  }
    4999  }
    5000  }
    5001 
    5002  // All tests passed: Success. pOffset is already filled.
    5003  return true;
    5004 }
    5005 
    5006 void VmaBlockMetadata::MergeFreeWithNext(VmaSuballocationList::iterator item)
    5007 {
    5008  VMA_ASSERT(item != m_Suballocations.end());
    5009  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    5010 
    5011  VmaSuballocationList::iterator nextItem = item;
    5012  ++nextItem;
    5013  VMA_ASSERT(nextItem != m_Suballocations.end());
    5014  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    5015 
    5016  item->size += nextItem->size;
    5017  --m_FreeCount;
    5018  m_Suballocations.erase(nextItem);
    5019 }
    5020 
    5021 VmaSuballocationList::iterator VmaBlockMetadata::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
    5022 {
    5023  // Change this suballocation to be marked as free.
    5024  VmaSuballocation& suballoc = *suballocItem;
    5025  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    5026  suballoc.hAllocation = VK_NULL_HANDLE;
    5027 
    5028  // Update totals.
    5029  ++m_FreeCount;
    5030  m_SumFreeSize += suballoc.size;
    5031 
    5032  // Merge with previous and/or next suballocation if it's also free.
    5033  bool mergeWithNext = false;
    5034  bool mergeWithPrev = false;
    5035 
    5036  VmaSuballocationList::iterator nextItem = suballocItem;
    5037  ++nextItem;
    5038  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    5039  {
    5040  mergeWithNext = true;
    5041  }
    5042 
    5043  VmaSuballocationList::iterator prevItem = suballocItem;
    5044  if(suballocItem != m_Suballocations.begin())
    5045  {
    5046  --prevItem;
    5047  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    5048  {
    5049  mergeWithPrev = true;
    5050  }
    5051  }
    5052 
    5053  if(mergeWithNext)
    5054  {
    5055  UnregisterFreeSuballocation(nextItem);
    5056  MergeFreeWithNext(suballocItem);
    5057  }
    5058 
    5059  if(mergeWithPrev)
    5060  {
    5061  UnregisterFreeSuballocation(prevItem);
    5062  MergeFreeWithNext(prevItem);
    5063  RegisterFreeSuballocation(prevItem);
    5064  return prevItem;
    5065  }
    5066  else
    5067  {
    5068  RegisterFreeSuballocation(suballocItem);
    5069  return suballocItem;
    5070  }
    5071 }
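// Illustrative before/after of the merging above (not in the original source):
//
//     before: [used][FREE 64][freed 128][FREE 32][used]
//     after:  [used][FREE 224][used]
//
// The freed item first absorbs its next free neighbor (128 + 32), then the
// previous free neighbor absorbs the combined range (64 + 160 = 224), and only
// the single surviving item is re-registered in m_FreeSuballocationsBySize.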
    5072 
    5073 void VmaBlockMetadata::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    5074 {
    5075  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    5076  VMA_ASSERT(item->size > 0);
    5077 
    5078  // You may want to enable this validation at the beginning or at the end of
    5079  // this function, depending on what you want to check.
    5080  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    5081 
    5082  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    5083  {
    5084  if(m_FreeSuballocationsBySize.empty())
    5085  {
    5086  m_FreeSuballocationsBySize.push_back(item);
    5087  }
    5088  else
    5089  {
    5090  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    5091  }
    5092  }
    5093 
    5094  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    5095 }
    5096 
    5097 
    5098 void VmaBlockMetadata::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
    5099 {
    5100  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    5101  VMA_ASSERT(item->size > 0);
    5102 
    5103  // You may want to enable this validation at the beginning or at the end of
    5104  // this function, depending on what you want to check.
    5105  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    5106 
    5107  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    5108  {
    5109  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    5110  m_FreeSuballocationsBySize.data(),
    5111  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
    5112  item,
    5113  VmaSuballocationItemSizeLess());
    5114  for(size_t index = it - m_FreeSuballocationsBySize.data();
    5115  index < m_FreeSuballocationsBySize.size();
    5116  ++index)
    5117  {
    5118  if(m_FreeSuballocationsBySize[index] == item)
    5119  {
    5120  VmaVectorRemove(m_FreeSuballocationsBySize, index);
    5121  return;
    5122  }
    5123  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
    5124  }
    5125  VMA_ASSERT(0 && "Not found.");
    5126  }
    5127 
    5128  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    5129 }
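// Illustrative note (not in the original source): the vector is sorted by size
// only, so several items may share one size. The binary search above lands on
// the first item with matching size and the loop scans forward comparing
// iterators until the exact item is found - e.g. for registered sizes
// [64, 128, 128, 128, 256] and item->size == 128, up to three candidates are
// inspected before VmaVectorRemove() is reached.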
    5130 
    5132 // class VmaDeviceMemoryBlock
    5133 
    5134 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    5135  m_MemoryTypeIndex(UINT32_MAX),
    5136  m_BlockVectorType(VMA_BLOCK_VECTOR_TYPE_COUNT),
    5137  m_hMemory(VK_NULL_HANDLE),
    5138  m_PersistentMap(false),
    5139  m_pMappedData(VMA_NULL),
    5140  m_Metadata(hAllocator)
    5141 {
    5142 }
    5143 
    5144 void VmaDeviceMemoryBlock::Init(
    5145  uint32_t newMemoryTypeIndex,
    5146  VMA_BLOCK_VECTOR_TYPE newBlockVectorType,
    5147  VkDeviceMemory newMemory,
    5148  VkDeviceSize newSize,
    5149  bool persistentMap,
    5150  void* pMappedData)
    5151 {
    5152  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    5153 
    5154  m_MemoryTypeIndex = newMemoryTypeIndex;
    5155  m_BlockVectorType = newBlockVectorType;
    5156  m_hMemory = newMemory;
    5157  m_PersistentMap = persistentMap;
    5158  m_pMappedData = pMappedData;
    5159 
    5160  m_Metadata.Init(newSize);
    5161 }
    5162 
    5163 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
    5164 {
    5165  // This is the most important assert in the entire library.
    5166  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    5167  VMA_ASSERT(m_Metadata.IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
    5168 
    5169  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    5170  if(m_pMappedData != VMA_NULL)
    5171  {
    5172  (allocator->GetVulkanFunctions().vkUnmapMemory)(allocator->m_hDevice, m_hMemory);
    5173  m_pMappedData = VMA_NULL;
    5174  }
    5175 
    5176  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Metadata.GetSize(), m_hMemory);
    5177  m_hMemory = VK_NULL_HANDLE;
    5178 }
    5179 
    5180 bool VmaDeviceMemoryBlock::Validate() const
    5181 {
    5182  if((m_hMemory == VK_NULL_HANDLE) ||
    5183  (m_Metadata.GetSize() == 0))
    5184  {
    5185  return false;
    5186  }
    5187 
    5188  return m_Metadata.Validate();
    5189 }
    5190 
    5191 static void InitStatInfo(VmaStatInfo& outInfo)
    5192 {
    5193  memset(&outInfo, 0, sizeof(outInfo));
    5194  outInfo.allocationSizeMin = UINT64_MAX;
    5195  outInfo.unusedRangeSizeMin = UINT64_MAX;
    5196 }
    5197 
    5198 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
    5199 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
    5200 {
    5201  inoutInfo.blockCount += srcInfo.blockCount;
    5202  inoutInfo.allocationCount += srcInfo.allocationCount;
    5203  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    5204  inoutInfo.usedBytes += srcInfo.usedBytes;
    5205  inoutInfo.unusedBytes += srcInfo.unusedBytes;
    5206  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    5207  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    5208  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    5209  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
    5210 }
    5211 
    5212 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    5213 {
    5214  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    5215  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    5216  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    5217  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    5218 }
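// Illustrative arithmetic (not in the original source), assuming VmaRoundDiv
// computes (x + y / 2) / y in integers: usedBytes = 1000 over
// allocationCount = 3 gives allocationSizeAvg = (1000 + 1) / 3 = 333, i.e.
// division rounded to nearest instead of truncated.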
    5219 
    5220 VmaPool_T::VmaPool_T(
    5221  VmaAllocator hAllocator,
    5222  const VmaPoolCreateInfo& createInfo) :
    5223  m_BlockVector(
    5224  hAllocator,
    5225  createInfo.memoryTypeIndex,
    5226  (createInfo.flags & VMA_POOL_CREATE_PERSISTENT_MAP_BIT) != 0 ?
    5227  VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED,
    5228  createInfo.blockSize,
    5229  createInfo.minBlockCount,
    5230  createInfo.maxBlockCount,
    5231  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
    5232  createInfo.frameInUseCount,
    5233  true) // isCustomPool
    5234 {
    5235 }
    5236 
    5237 VmaPool_T::~VmaPool_T()
    5238 {
    5239 }
    5240 
    5241 #if VMA_STATS_STRING_ENABLED
    5242 
    5243 #endif // #if VMA_STATS_STRING_ENABLED
    5244 
    5245 VmaBlockVector::VmaBlockVector(
    5246  VmaAllocator hAllocator,
    5247  uint32_t memoryTypeIndex,
    5248  VMA_BLOCK_VECTOR_TYPE blockVectorType,
    5249  VkDeviceSize preferredBlockSize,
    5250  size_t minBlockCount,
    5251  size_t maxBlockCount,
    5252  VkDeviceSize bufferImageGranularity,
    5253  uint32_t frameInUseCount,
    5254  bool isCustomPool) :
    5255  m_hAllocator(hAllocator),
    5256  m_MemoryTypeIndex(memoryTypeIndex),
    5257  m_BlockVectorType(blockVectorType),
    5258  m_PreferredBlockSize(preferredBlockSize),
    5259  m_MinBlockCount(minBlockCount),
    5260  m_MaxBlockCount(maxBlockCount),
    5261  m_BufferImageGranularity(bufferImageGranularity),
    5262  m_FrameInUseCount(frameInUseCount),
    5263  m_IsCustomPool(isCustomPool),
    5264  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    5265  m_HasEmptyBlock(false),
    5266  m_pDefragmentator(VMA_NULL)
    5267 {
    5268 }
    5269 
    5270 VmaBlockVector::~VmaBlockVector()
    5271 {
    5272  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
    5273 
    5274  for(size_t i = m_Blocks.size(); i--; )
    5275  {
    5276  m_Blocks[i]->Destroy(m_hAllocator);
    5277  vma_delete(m_hAllocator, m_Blocks[i]);
    5278  }
    5279 }
    5280 
    5281 VkResult VmaBlockVector::CreateMinBlocks()
    5282 {
    5283  for(size_t i = 0; i < m_MinBlockCount; ++i)
    5284  {
    5285  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    5286  if(res != VK_SUCCESS)
    5287  {
    5288  return res;
    5289  }
    5290  }
    5291  return VK_SUCCESS;
    5292 }
    5293 
    5294 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    5295 {
    5296  pStats->size = 0;
    5297  pStats->unusedSize = 0;
    5298  pStats->allocationCount = 0;
    5299  pStats->unusedRangeCount = 0;
    5300  pStats->unusedRangeSizeMax = 0;
    5301 
    5302  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5303 
    5304  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    5305  {
    5306  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    5307  VMA_ASSERT(pBlock);
    5308  VMA_HEAVY_ASSERT(pBlock->Validate());
    5309  pBlock->m_Metadata.AddPoolStats(*pStats);
    5310  }
    5311 }
    5312 
    5313 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    5314 
    5315 VkResult VmaBlockVector::Allocate(
    5316  VmaPool hCurrentPool,
    5317  uint32_t currentFrameIndex,
    5318  const VkMemoryRequirements& vkMemReq,
    5319  const VmaAllocationCreateInfo& createInfo,
    5320  VmaSuballocationType suballocType,
    5321  VmaAllocation* pAllocation)
    5322 {
    5323  // Validate flags.
    5324  if(createInfo.pool != VK_NULL_HANDLE &&
    5325  ((createInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0) != (m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED))
    5326  {
    5327  VMA_ASSERT(0 && "Usage of VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT must match VMA_POOL_CREATE_PERSISTENT_MAP_BIT.");
    5328  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    5329  }
    5330 
    5331  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5332 
    5333  // 1. Search existing allocations. Try to allocate without making other allocations lost.
    5334  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    5335  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    5336  {
    5337  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    5338  VMA_ASSERT(pCurrBlock);
    5339  VmaAllocationRequest currRequest = {};
    5340  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
    5341  currentFrameIndex,
    5342  m_FrameInUseCount,
    5343  m_BufferImageGranularity,
    5344  vkMemReq.size,
    5345  vkMemReq.alignment,
    5346  suballocType,
    5347  false, // canMakeOtherLost
    5348  &currRequest))
    5349  {
    5350  // Allocate from pCurrBlock.
    5351  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
    5352 
    5353  // We no longer have an empty block.
    5354  if(pCurrBlock->m_Metadata.IsEmpty())
    5355  {
    5356  m_HasEmptyBlock = false;
    5357  }
    5358 
    5359  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
    5360  pCurrBlock->m_Metadata.Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
    5361  (*pAllocation)->InitBlockAllocation(
    5362  hCurrentPool,
    5363  pCurrBlock,
    5364  currRequest.offset,
    5365  vkMemReq.alignment,
    5366  vkMemReq.size,
    5367  suballocType,
    5368  createInfo.pUserData,
    5369  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    5370  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
    5371  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
    5372  return VK_SUCCESS;
    5373  }
    5374  }
    5375 
    5376  const bool canCreateNewBlock =
    5377  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
    5378  (m_Blocks.size() < m_MaxBlockCount);
    5379 
    5380  // 2. Try to create new block.
    5381  if(canCreateNewBlock)
    5382  {
    5383  // 2.1. Start with full preferredBlockSize.
    5384  VkDeviceSize blockSize = m_PreferredBlockSize;
    5385  size_t newBlockIndex = 0;
    5386  VkResult res = CreateBlock(blockSize, &newBlockIndex);
    5387  // Allocating blocks of other sizes is allowed only in default pools.
    5388  // In custom pools block size is fixed.
    5389  if(res < 0 && m_IsCustomPool == false)
    5390  {
    5391  // 2.2. Try half the size.
    5392  blockSize /= 2;
    5393  if(blockSize >= vkMemReq.size)
    5394  {
    5395  res = CreateBlock(blockSize, &newBlockIndex);
    5396  if(res < 0)
    5397  {
    5398  // 2.3. Try quarter the size.
    5399  blockSize /= 2;
    5400  if(blockSize >= vkMemReq.size)
    5401  {
    5402  res = CreateBlock(blockSize, &newBlockIndex);
    5403  }
    5404  }
    5405  }
    5406  }
    5407  if(res == VK_SUCCESS)
    5408  {
    5409  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
    5410  VMA_ASSERT(pBlock->m_Metadata.GetSize() >= vkMemReq.size);
    5411 
    5412  // Allocate from pBlock. Because the block is empty, allocRequest can be trivially filled.
    5413  VmaAllocationRequest allocRequest;
    5414  pBlock->m_Metadata.CreateFirstAllocationRequest(&allocRequest);
    5415  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
    5416  pBlock->m_Metadata.Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
    5417  (*pAllocation)->InitBlockAllocation(
    5418  hCurrentPool,
    5419  pBlock,
    5420  allocRequest.offset,
    5421  vkMemReq.alignment,
    5422  vkMemReq.size,
    5423  suballocType,
    5424  createInfo.pUserData,
    5425  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    5426  VMA_HEAVY_ASSERT(pBlock->Validate());
    5427  VMA_DEBUG_LOG(" Created new allocation Size=%llu", blockSize);
    5428 
    5429  return VK_SUCCESS;
    5430  }
    5431  }
    5432 
    5433  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    5434 
    5435  // 3. Try to allocate from existing blocks with making other allocations lost.
    5436  if(canMakeOtherLost)
    5437  {
    5438  uint32_t tryIndex = 0;
    5439  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
    5440  {
    5441  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
    5442  VmaAllocationRequest bestRequest = {};
    5443  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
    5444 
    5445  // 1. Search existing allocations.
    5446  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    5447  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    5448  {
    5449  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    5450  VMA_ASSERT(pCurrBlock);
    5451  VmaAllocationRequest currRequest = {};
    5452  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
    5453  currentFrameIndex,
    5454  m_FrameInUseCount,
    5455  m_BufferImageGranularity,
    5456  vkMemReq.size,
    5457  vkMemReq.alignment,
    5458  suballocType,
    5459  canMakeOtherLost,
    5460  &currRequest))
    5461  {
    5462  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    5463  if(pBestRequestBlock == VMA_NULL ||
    5464  currRequestCost < bestRequestCost)
    5465  {
    5466  pBestRequestBlock = pCurrBlock;
    5467  bestRequest = currRequest;
    5468  bestRequestCost = currRequestCost;
    5469 
    5470  if(bestRequestCost == 0)
    5471  {
    5472  break;
    5473  }
    5474  }
    5475  }
    5476  }
    5477 
    5478  if(pBestRequestBlock != VMA_NULL)
    5479  {
    5480  if(pBestRequestBlock->m_Metadata.MakeRequestedAllocationsLost(
    5481  currentFrameIndex,
    5482  m_FrameInUseCount,
    5483  &bestRequest))
    5484  {
    5485  // We no longer have an empty Allocation.
    5486  if(pBestRequestBlock->m_Metadata.IsEmpty())
    5487  {
    5488  m_HasEmptyBlock = false;
    5489  }
    5490  // Allocate from this pBlock.
    5491  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
    5492  pBestRequestBlock->m_Metadata.Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
    5493  (*pAllocation)->InitBlockAllocation(
    5494  hCurrentPool,
    5495  pBestRequestBlock,
    5496  bestRequest.offset,
    5497  vkMemReq.alignment,
    5498  vkMemReq.size,
    5499  suballocType,
    5500  createInfo.pUserData,
    5501  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    5502  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
    5503  VMA_DEBUG_LOG(" Returned from existing allocation");
    5504  return VK_SUCCESS;
    5505  }
    5506  // else: Some allocations must have been touched while we are here. Next try.
    5507  }
    5508  else
    5509  {
    5510  // Could not find place in any of the blocks - break outer loop.
    5511  break;
    5512  }
    5513  }
    5514  /* Maximum number of tries exceeded - a very unlikely event, which happens when
    5515  many other threads are simultaneously touching allocations, making it impossible
    5516  to mark them as lost at the same time as we try to allocate. */
    5517  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
    5518  {
    5519  return VK_ERROR_TOO_MANY_OBJECTS;
    5520  }
    5521  }
    5522 
    5523  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    5524 }
    5525 
    5526 void VmaBlockVector::Free(
    5527  VmaAllocation hAllocation)
    5528 {
    5529  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
    5530 
    5531  // Scope for lock.
    5532  {
    5533  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5534 
    5535  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    5536 
    5537  pBlock->m_Metadata.Free(hAllocation);
    5538  VMA_HEAVY_ASSERT(pBlock->Validate());
    5539 
    5540  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
    5541 
    5542  // pBlock became empty after this deallocation.
    5543  if(pBlock->m_Metadata.IsEmpty())
    5544  {
    5545  // Already has empty Allocation. We don't want to have two, so delete this one.
    5546  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
    5547  {
    5548  pBlockToDelete = pBlock;
    5549  Remove(pBlock);
    5550  }
    5551  // We now have first empty Allocation.
    5552  else
    5553  {
    5554  m_HasEmptyBlock = true;
    5555  }
    5556  }
    5557  // pBlock didn't become empty, but we have another empty block - find and free that one.
    5558  // (This is optional, heuristics.)
    5559  else if(m_HasEmptyBlock)
    5560  {
    5561  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
    5562  if(pLastBlock->m_Metadata.IsEmpty() && m_Blocks.size() > m_MinBlockCount)
    5563  {
    5564  pBlockToDelete = pLastBlock;
    5565  m_Blocks.pop_back();
    5566  m_HasEmptyBlock = false;
    5567  }
    5568  }
    5569 
    5570  IncrementallySortBlocks();
    5571  }
    5572 
    5573  // Destruction of a free Allocation. Deferred until this point, outside of mutex
    5574  // lock, for performance reasons.
    5575  if(pBlockToDelete != VMA_NULL)
    5576  {
    5577  VMA_DEBUG_LOG(" Deleted empty allocation");
    5578  pBlockToDelete->Destroy(m_hAllocator);
    5579  vma_delete(m_hAllocator, pBlockToDelete);
    5580  }
    5581 }
    5582 
    5583 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    5584 {
    5585  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    5586  {
    5587  if(m_Blocks[blockIndex] == pBlock)
    5588  {
    5589  VmaVectorRemove(m_Blocks, blockIndex);
    5590  return;
    5591  }
    5592  }
    5593  VMA_ASSERT(0);
    5594 }
    5595 
    5596 void VmaBlockVector::IncrementallySortBlocks()
    5597 {
    5598  // Bubble sort only until first swap.
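 // At most one swap is performed per call, which amortizes the cost of keeping
 // m_Blocks ordered by ascending free space across many allocations and frees.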
    5599  for(size_t i = 1; i < m_Blocks.size(); ++i)
    5600  {
    5601  if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() > m_Blocks[i]->m_Metadata.GetSumFreeSize())
    5602  {
    5603  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    5604  return;
    5605  }
    5606  }
    5607 }
    5608 
    5609 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    5610 {
    5611  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    5612  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    5613  allocInfo.allocationSize = blockSize;
    5614  VkDeviceMemory mem = VK_NULL_HANDLE;
    5615  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    5616  if(res < 0)
    5617  {
    5618  return res;
    5619  }
    5620 
    5621  // New VkDeviceMemory successfully created.
    5622 
    5623  // Map memory if needed.
    5624  void* pMappedData = VMA_NULL;
    5625  const bool persistentMap = (m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED);
    5626  if(persistentMap && m_hAllocator->m_UnmapPersistentlyMappedMemoryCounter == 0)
    5627  {
    5628  res = (*m_hAllocator->GetVulkanFunctions().vkMapMemory)(
    5629  m_hAllocator->m_hDevice,
    5630  mem,
    5631  0,
    5632  VK_WHOLE_SIZE,
    5633  0,
    5634  &pMappedData);
    5635  if(res < 0)
    5636  {
    5637  VMA_DEBUG_LOG(" vkMapMemory FAILED");
    5638  m_hAllocator->FreeVulkanMemory(m_MemoryTypeIndex, blockSize, mem);
    5639  return res;
    5640  }
    5641  }
    5642 
    5643  // Create new Allocation for it.
    5644  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    5645  pBlock->Init(
    5646  m_MemoryTypeIndex,
    5647  (VMA_BLOCK_VECTOR_TYPE)m_BlockVectorType,
    5648  mem,
    5649  allocInfo.allocationSize,
    5650  persistentMap,
    5651  pMappedData);
    5652 
    5653  m_Blocks.push_back(pBlock);
    5654  if(pNewBlockIndex != VMA_NULL)
    5655  {
    5656  *pNewBlockIndex = m_Blocks.size() - 1;
    5657  }
    5658 
    5659  return VK_SUCCESS;
    5660 }
    5661 
    5662 #if VMA_STATS_STRING_ENABLED
    5663 
    5664 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
    5665 {
    5666  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5667 
    5668  json.BeginObject();
    5669 
    5670  if(m_IsCustomPool)
    5671  {
    5672  json.WriteString("MemoryTypeIndex");
    5673  json.WriteNumber(m_MemoryTypeIndex);
    5674 
    5675  if(m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
    5676  {
    5677  json.WriteString("Mapped");
    5678  json.WriteBool(true);
    5679  }
    5680 
    5681  json.WriteString("BlockSize");
    5682  json.WriteNumber(m_PreferredBlockSize);
    5683 
    5684  json.WriteString("BlockCount");
    5685  json.BeginObject(true);
    5686  if(m_MinBlockCount > 0)
    5687  {
    5688  json.WriteString("Min");
    5689  json.WriteNumber(m_MinBlockCount);
    5690  }
    5691  if(m_MaxBlockCount < SIZE_MAX)
    5692  {
    5693  json.WriteString("Max");
    5694  json.WriteNumber(m_MaxBlockCount);
    5695  }
    5696  json.WriteString("Cur");
    5697  json.WriteNumber(m_Blocks.size());
    5698  json.EndObject();
    5699 
    5700  if(m_FrameInUseCount > 0)
    5701  {
    5702  json.WriteString("FrameInUseCount");
    5703  json.WriteNumber(m_FrameInUseCount);
    5704  }
    5705  }
    5706  else
    5707  {
    5708  json.WriteString("PreferredBlockSize");
    5709  json.WriteNumber(m_PreferredBlockSize);
    5710  }
    5711 
    5712  json.WriteString("Blocks");
    5713  json.BeginArray();
    5714  for(size_t i = 0; i < m_Blocks.size(); ++i)
    5715  {
    5716  m_Blocks[i]->m_Metadata.PrintDetailedMap(json);
    5717  }
    5718  json.EndArray();
    5719 
    5720  json.EndObject();
    5721 }
    5722 
    5723 #endif // #if VMA_STATS_STRING_ENABLED
    5724 
    5725 void VmaBlockVector::UnmapPersistentlyMappedMemory()
    5726 {
    5727  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5728 
    5729  for(size_t i = m_Blocks.size(); i--; )
    5730  {
    5731  VmaDeviceMemoryBlock* pBlock = m_Blocks[i];
    5732  if(pBlock->m_pMappedData != VMA_NULL)
    5733  {
    5734  VMA_ASSERT(pBlock->m_PersistentMap);
    5735  (m_hAllocator->GetVulkanFunctions().vkUnmapMemory)(m_hAllocator->m_hDevice, pBlock->m_hMemory);
    5736  pBlock->m_pMappedData = VMA_NULL;
    5737  }
    5738  }
    5739 }
    5740 
    5741 VkResult VmaBlockVector::MapPersistentlyMappedMemory()
    5742 {
    5743  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5744 
    5745  VkResult finalResult = VK_SUCCESS;
    5746  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
    5747  {
    5748  VmaDeviceMemoryBlock* pBlock = m_Blocks[i];
    5749  if(pBlock->m_PersistentMap)
    5750  {
    5751  VMA_ASSERT(pBlock->m_pMappedData == VMA_NULL);
    5752  VkResult localResult = (*m_hAllocator->GetVulkanFunctions().vkMapMemory)(
    5753  m_hAllocator->m_hDevice,
    5754  pBlock->m_hMemory,
    5755  0,
    5756  VK_WHOLE_SIZE,
    5757  0,
    5758  &pBlock->m_pMappedData);
    5759  if(localResult != VK_SUCCESS)
    5760  {
    5761  finalResult = localResult;
    5762  }
    5763  }
    5764  }
    5765  return finalResult;
    5766 }
    5767 
    5768 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
    5769  VmaAllocator hAllocator,
    5770  uint32_t currentFrameIndex)
    5771 {
    5772  if(m_pDefragmentator == VMA_NULL)
    5773  {
    5774  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
    5775  hAllocator,
    5776  this,
    5777  currentFrameIndex);
    5778  }
    5779 
    5780  return m_pDefragmentator;
    5781 }
    5782 
    5783 VkResult VmaBlockVector::Defragment(
    5784  VmaDefragmentationStats* pDefragmentationStats,
    5785  VkDeviceSize& maxBytesToMove,
    5786  uint32_t& maxAllocationsToMove)
    5787 {
    5788  if(m_pDefragmentator == VMA_NULL)
    5789  {
    5790  return VK_SUCCESS;
    5791  }
    5792 
    5793  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5794 
    5795  // Defragment.
    5796  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
    5797 
    5798  // Accumulate statistics.
    5799  if(pDefragmentationStats != VMA_NULL)
    5800  {
    5801  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
    5802  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
    5803  pDefragmentationStats->bytesMoved += bytesMoved;
    5804  pDefragmentationStats->allocationsMoved += allocationsMoved;
    5805  VMA_ASSERT(bytesMoved <= maxBytesToMove);
    5806  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
    5807  maxBytesToMove -= bytesMoved;
    5808  maxAllocationsToMove -= allocationsMoved;
    5809  }
    5810 
    5811  // Free empty blocks.
    5812  m_HasEmptyBlock = false;
    5813  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    5814  {
    5815  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    5816  if(pBlock->m_Metadata.IsEmpty())
    5817  {
    5818  if(m_Blocks.size() > m_MinBlockCount)
    5819  {
    5820  if(pDefragmentationStats != VMA_NULL)
    5821  {
    5822  ++pDefragmentationStats->deviceMemoryBlocksFreed;
    5823  pDefragmentationStats->bytesFreed += pBlock->m_Metadata.GetSize();
    5824  }
    5825 
    5826  VmaVectorRemove(m_Blocks, blockIndex);
    5827  pBlock->Destroy(m_hAllocator);
    5828  vma_delete(m_hAllocator, pBlock);
    5829  }
    5830  else
    5831  {
    5832  m_HasEmptyBlock = true;
    5833  }
    5834  }
    5835  }
    5836 
    5837  return result;
    5838 }
    5839 
    5840 void VmaBlockVector::DestroyDefragmentator()
    5841 {
    5842  if(m_pDefragmentator != VMA_NULL)
    5843  {
    5844  vma_delete(m_hAllocator, m_pDefragmentator);
    5845  m_pDefragmentator = VMA_NULL;
    5846  }
    5847 }
    5848 
    5849 void VmaBlockVector::MakePoolAllocationsLost(
    5850  uint32_t currentFrameIndex,
    5851  size_t* pLostAllocationCount)
    5852 {
    5853  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5854 
    5855  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    5856  {
    5857  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    5858  VMA_ASSERT(pBlock);
    5859  pBlock->m_Metadata.MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    5860  }
    5861 }
    5862 
    5863 void VmaBlockVector::AddStats(VmaStats* pStats)
    5864 {
    5865  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    5866  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    5867 
    5868  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5869 
    5870  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    5871  {
    5872  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    5873  VMA_ASSERT(pBlock);
    5874  VMA_HEAVY_ASSERT(pBlock->Validate());
    5875  VmaStatInfo allocationStatInfo;
    5876  pBlock->m_Metadata.CalcAllocationStatInfo(allocationStatInfo);
    5877  VmaAddStatInfo(pStats->total, allocationStatInfo);
    5878  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    5879  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    5880  }
    5881 }
    5882 
    5883 ////////////////////////////////////////////////////////////////////////////////
    5884 // VmaDefragmentator members definition
    5885 
    5886 VmaDefragmentator::VmaDefragmentator(
    5887  VmaAllocator hAllocator,
    5888  VmaBlockVector* pBlockVector,
    5889  uint32_t currentFrameIndex) :
    5890  m_hAllocator(hAllocator),
    5891  m_pBlockVector(pBlockVector),
    5892  m_CurrentFrameIndex(currentFrameIndex),
    5893  m_BytesMoved(0),
    5894  m_AllocationsMoved(0),
    5895  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    5896  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
    5897 {
    5898 }
    5899 
    5900 VmaDefragmentator::~VmaDefragmentator()
    5901 {
    5902  for(size_t i = m_Blocks.size(); i--; )
    5903  {
    5904  vma_delete(m_hAllocator, m_Blocks[i]);
    5905  }
    5906 }
    5907 
    5908 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    5909 {
    5910  AllocationInfo allocInfo;
    5911  allocInfo.m_hAllocation = hAlloc;
    5912  allocInfo.m_pChanged = pChanged;
    5913  m_Allocations.push_back(allocInfo);
    5914 }
    5915 
    5916 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
    5917 {
    5918  // It has already been mapped for defragmentation.
    5919  if(m_pMappedDataForDefragmentation)
    5920  {
    5921  *ppMappedData = m_pMappedDataForDefragmentation;
    5922  return VK_SUCCESS;
    5923  }
    5924 
    5925  // It is persistently mapped.
    5926  if(m_pBlock->m_PersistentMap)
    5927  {
    5928  VMA_ASSERT(m_pBlock->m_pMappedData != VMA_NULL);
    5929  *ppMappedData = m_pBlock->m_pMappedData;
    5930  return VK_SUCCESS;
    5931  }
    5932 
    5933  // Map on first usage.
    5934  VkResult res = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    5935  hAllocator->m_hDevice,
    5936  m_pBlock->m_hMemory,
    5937  0,
    5938  VK_WHOLE_SIZE,
    5939  0,
    5940  &m_pMappedDataForDefragmentation);
    5941  *ppMappedData = m_pMappedDataForDefragmentation;
    5942  return res;
    5943 }
    5944 
    5945 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
    5946 {
    5947  if(m_pMappedDataForDefragmentation != VMA_NULL)
    5948  {
    5949  (hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_pBlock->m_hMemory);
    5950  }
    5951 }
    5952 
    5953 VkResult VmaDefragmentator::DefragmentRound(
    5954  VkDeviceSize maxBytesToMove,
    5955  uint32_t maxAllocationsToMove)
    5956 {
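 // One round walks allocations from the most "source" block (last in m_Blocks)
 // towards the most "destination" block (first), trying to move each allocation
 // into a preceding block. srcAllocIndex == SIZE_MAX means "not chosen yet -
 // start from the last allocation of the current block".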
    5957  if(m_Blocks.empty())
    5958  {
    5959  return VK_SUCCESS;
    5960  }
    5961 
    5962  size_t srcBlockIndex = m_Blocks.size() - 1;
    5963  size_t srcAllocIndex = SIZE_MAX;
    5964  for(;;)
    5965  {
    5966  // 1. Find next allocation to move.
    5967  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
    5968  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
    5969  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
    5970  {
    5971  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
    5972  {
    5973  // Finished: no more allocations to process.
    5974  if(srcBlockIndex == 0)
    5975  {
    5976  return VK_SUCCESS;
    5977  }
    5978  else
    5979  {
    5980  --srcBlockIndex;
    5981  srcAllocIndex = SIZE_MAX;
    5982  }
    5983  }
    5984  else
    5985  {
    5986  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
    5987  }
    5988  }
    5989 
    5990  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
    5991  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
    5992 
    5993  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
    5994  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
    5995  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
    5996  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
    5997 
    5998  // 2. Try to find new place for this allocation in preceding or current block.
    5999  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
    6000  {
    6001  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
    6002  VmaAllocationRequest dstAllocRequest;
    6003  if(pDstBlockInfo->m_pBlock->m_Metadata.CreateAllocationRequest(
    6004  m_CurrentFrameIndex,
    6005  m_pBlockVector->GetFrameInUseCount(),
    6006  m_pBlockVector->GetBufferImageGranularity(),
    6007  size,
    6008  alignment,
    6009  suballocType,
    6010  false, // canMakeOtherLost
    6011  &dstAllocRequest) &&
    6012  MoveMakesSense(
    6013  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
    6014  {
    6015  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
    6016 
    6017  // Stop with VK_INCOMPLETE if moving this allocation would exceed the limit on allocations or bytes to move.
    6018  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
    6019  (m_BytesMoved + size > maxBytesToMove))
    6020  {
    6021  return VK_INCOMPLETE;
    6022  }
    6023 
    6024  void* pDstMappedData = VMA_NULL;
    6025  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
    6026  if(res != VK_SUCCESS)
    6027  {
    6028  return res;
    6029  }
    6030 
    6031  void* pSrcMappedData = VMA_NULL;
    6032  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
    6033  if(res != VK_SUCCESS)
    6034  {
    6035  return res;
    6036  }
    6037 
    6038  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
    6039  memcpy(
    6040  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
    6041  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
    6042  static_cast<size_t>(size));
    6043 
    6044  pDstBlockInfo->m_pBlock->m_Metadata.Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
    6045  pSrcBlockInfo->m_pBlock->m_Metadata.Free(allocInfo.m_hAllocation);
    6046 
    6047  allocInfo.m_hAllocation->ChangeBlockAllocation(pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
    6048 
    6049  if(allocInfo.m_pChanged != VMA_NULL)
    6050  {
    6051  *allocInfo.m_pChanged = VK_TRUE;
    6052  }
    6053 
    6054  ++m_AllocationsMoved;
    6055  m_BytesMoved += size;
    6056 
    6057  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
    6058 
    6059  break;
    6060  }
    6061  }
    6062 
    6063  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
    6064 
    6065  if(srcAllocIndex > 0)
    6066  {
    6067  --srcAllocIndex;
    6068  }
    6069  else
    6070  {
    6071  if(srcBlockIndex > 0)
    6072  {
    6073  --srcBlockIndex;
    6074  srcAllocIndex = SIZE_MAX;
    6075  }
    6076  else
    6077  {
    6078  return VK_SUCCESS;
    6079  }
    6080  }
    6081  }
    6082 }
    6083 
    6084 VkResult VmaDefragmentator::Defragment(
    6085  VkDeviceSize maxBytesToMove,
    6086  uint32_t maxAllocationsToMove)
    6087 {
    6088  if(m_Allocations.empty())
    6089  {
    6090  return VK_SUCCESS;
    6091  }
    6092 
    6093  // Create block info for each block.
    6094  const size_t blockCount = m_pBlockVector->m_Blocks.size();
    6095  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    6096  {
    6097  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
    6098  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
    6099  m_Blocks.push_back(pBlockInfo);
    6100  }
    6101 
    6102  // Sort them by m_pBlock pointer value.
    6103  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
    6104 
    6105  // Move allocation infos from m_Allocations to appropriate m_Blocks[blockIndex].m_Allocations.
    6106  for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
    6107  {
    6108  AllocationInfo& allocInfo = m_Allocations[blockIndex];
    6109  // Now, as we are inside VmaBlockVector::m_Mutex, we can make a final check whether this allocation was not lost.
    6110  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
    6111  {
    6112  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
    6113  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
    6114  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
    6115  {
    6116  (*it)->m_Allocations.push_back(allocInfo);
    6117  }
    6118  else
    6119  {
    6120  VMA_ASSERT(0);
    6121  }
    6122  }
    6123  }
    6124  m_Allocations.clear();
    6125 
    6126  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    6127  {
    6128  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
    6129  pBlockInfo->CalcHasNonMovableAllocations();
    6130  pBlockInfo->SortAllocationsBySizeDescecnding();
    6131  }
    6132 
    6133  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
    6134  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
    6135 
    6136  // Execute defragmentation rounds (the main part).
    6137  VkResult result = VK_SUCCESS;
    6138  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
    6139  {
    6140  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
    6141  }
    6142 
    6143  // Unmap blocks that were mapped for defragmentation.
    6144  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    6145  {
    6146  m_Blocks[blockIndex]->Unmap(m_hAllocator);
    6147  }
    6148 
    6149  return result;
    6150 }
    6151 
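// A move "makes sense" only if it transports data strictly towards the front:
// to a block with a lower index, or to a lower offset within the same block.
// This guarantees that every move compacts the pool and the rounds terminate.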
    6152 bool VmaDefragmentator::MoveMakesSense(
    6153  size_t dstBlockIndex, VkDeviceSize dstOffset,
    6154  size_t srcBlockIndex, VkDeviceSize srcOffset)
    6155 {
    6156  if(dstBlockIndex < srcBlockIndex)
    6157  {
    6158  return true;
    6159  }
    6160  if(dstBlockIndex > srcBlockIndex)
    6161  {
    6162  return false;
    6163  }
    6164  if(dstOffset < srcOffset)
    6165  {
    6166  return true;
    6167  }
    6168  return false;
    6169 }
    6170 
    6171 ////////////////////////////////////////////////////////////////////////////////
    6172 // VmaAllocator_T
    6173 
    6174 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    6175  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    6176  m_PhysicalDevice(pCreateInfo->physicalDevice),
    6177  m_hDevice(pCreateInfo->device),
    6178  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    6179  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
    6180  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    6181  m_UnmapPersistentlyMappedMemoryCounter(0),
    6182  m_PreferredLargeHeapBlockSize(0),
    6183  m_PreferredSmallHeapBlockSize(0),
    6184  m_CurrentFrameIndex(0),
    6185  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
    6186 {
    6187  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
    6188 
    6189  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
    6190  memset(&m_MemProps, 0, sizeof(m_MemProps));
    6191  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    6192 
    6193  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    6194  memset(&m_pOwnAllocations, 0, sizeof(m_pOwnAllocations));
    6195 
    6196  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    6197  {
    6198  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    6199  }
    6200 
    6201  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    6202  {
    6203  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
    6204  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    6205  }
    6206 
    6207  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
    6208 
    6209  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    6210  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
    6211 
    6212  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
    6213  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    6214  m_PreferredSmallHeapBlockSize = (pCreateInfo->preferredSmallHeapBlockSize != 0) ?
    6215  pCreateInfo->preferredSmallHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE);
    6216 
    6217  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    6218  {
    6219  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
    6220  {
    6221  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
    6222  if(limit != VK_WHOLE_SIZE)
    6223  {
    6224  m_HeapSizeLimit[heapIndex] = limit;
    6225  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
    6226  {
    6227  m_MemProps.memoryHeaps[heapIndex].size = limit;
    6228  }
    6229  }
    6230  }
    6231  }
    6232 
    6233  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    6234  {
    6235  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
    6236 
    6237  for(size_t blockVectorTypeIndex = 0; blockVectorTypeIndex < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorTypeIndex)
    6238  {
    6239  m_pBlockVectors[memTypeIndex][blockVectorTypeIndex] = vma_new(this, VmaBlockVector)(
    6240  this,
    6241  memTypeIndex,
    6242  static_cast<VMA_BLOCK_VECTOR_TYPE>(blockVectorTypeIndex),
    6243  preferredBlockSize,
    6244  0,
    6245  SIZE_MAX,
    6246  GetBufferImageGranularity(),
    6247  pCreateInfo->frameInUseCount,
    6248  false); // isCustomPool
    6249  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
    6250  // because minBlockCount is 0.
    6251  m_pOwnAllocations[memTypeIndex][blockVectorTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    6252  }
    6253  }
    6254 }
    6255 
    6256 VmaAllocator_T::~VmaAllocator_T()
    6257 {
    6258  VMA_ASSERT(m_Pools.empty());
    6259 
    6260  for(size_t i = GetMemoryTypeCount(); i--; )
    6261  {
    6262  for(size_t j = VMA_BLOCK_VECTOR_TYPE_COUNT; j--; )
    6263  {
    6264  vma_delete(this, m_pOwnAllocations[i][j]);
    6265  vma_delete(this, m_pBlockVectors[i][j]);
    6266  }
    6267  }
    6268 }
    6269 
    6270 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
    6271 {
    6272 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
    6273  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    6274  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    6275  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    6276  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    6277  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    6278  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    6279  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    6280  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    6281  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    6282  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    6283  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    6284  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    6285  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    6286  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
    6287 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
    6288 
    6289  if(pVulkanFunctions != VMA_NULL)
    6290  {
    6291  m_VulkanFunctions = *pVulkanFunctions;
    6292  }
    6293 
    6294  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    6295  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    6296  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    6297  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    6298  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    6299  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    6300  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    6301  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    6302  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    6303  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    6304  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    6305  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    6306  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    6307  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    6308  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    6309  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
    6310 }
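// When not relying on statically linked Vulkan functions, fill the struct
// yourself before creating the allocator. A minimal sketch (how the pointers
// are obtained is up to your loader):
//
//   VmaVulkanFunctions funcs = {};
//   funcs.vkAllocateMemory = vkAllocateMemory;
//   // ... set every member asserted in ImportVulkanFunctions above ...
//   allocatorCreateInfo.pVulkanFunctions = &funcs; // VmaAllocatorCreateInfo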
    6311 
    6312 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    6313 {
    6314  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    6315  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    6316  return (heapSize <= VMA_SMALL_HEAP_MAX_SIZE) ?
    6317  m_PreferredSmallHeapBlockSize : m_PreferredLargeHeapBlockSize;
    6318 }
    6319 
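// Strategy for a single memory type: suballocate from the matching block
// vector when possible; fall back to a dedicated ("own") VkDeviceMemory when
// VMA_DEBUG_ALWAYS_OWN_MEMORY forces it, when the request exceeds half the
// preferred block size, or when allocation from the block vector fails.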
    6320 VkResult VmaAllocator_T::AllocateMemoryOfType(
    6321  const VkMemoryRequirements& vkMemReq,
    6322  const VmaAllocationCreateInfo& createInfo,
    6323  uint32_t memTypeIndex,
    6324  VmaSuballocationType suballocType,
    6325  VmaAllocation* pAllocation)
    6326 {
    6327  VMA_ASSERT(pAllocation != VMA_NULL);
    6328  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
    6329 
    6330  uint32_t blockVectorType = VmaAllocationCreateFlagsToBlockVectorType(createInfo.flags);
    6331  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
    6332  VMA_ASSERT(blockVector);
    6333 
    6334  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    6335 
    6336  if(VMA_DEBUG_ALWAYS_OWN_MEMORY)
    6337  {
    6338  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT;
    6339  }
    6340 
    6341  // Heuristics: Allocate own memory if the requested size is greater than half of the preferred block size.
    6342  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    6343  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    6344  vkMemReq.size > preferredBlockSize / 2)
    6345  {
    6346  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT;
    6347  }
    6348 
    6349  // If memory type is not HOST_VISIBLE, disable PERSISTENT_MAP.
    6350  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0 &&
    6351  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    6352  {
    6353  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT;
    6354  }
    6355 
    6356  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT) != 0)
    6357  {
    6358  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    6359  {
    6360  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6361  }
    6362  else
    6363  {
    6364  return AllocateOwnMemory(
    6365  vkMemReq.size,
    6366  suballocType,
    6367  memTypeIndex,
    6368  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0,
    6369  finalCreateInfo.pUserData,
    6370  pAllocation);
    6371  }
    6372  }
    6373  else
    6374  {
    6375  VkResult res = blockVector->Allocate(
    6376  VK_NULL_HANDLE, // hCurrentPool
    6377  m_CurrentFrameIndex.load(),
    6378  vkMemReq,
    6379  finalCreateInfo,
    6380  suballocType,
    6381  pAllocation);
    6382  if(res == VK_SUCCESS)
    6383  {
    6384  return res;
    6385  }
    6386 
    6387  // Allocation from the block vector failed. Try own memory as a fallback.
    6388  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    6389  {
    6390  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6391  }
    6392  else
    6393  {
    6394  res = AllocateOwnMemory(
    6395  vkMemReq.size,
    6396  suballocType,
    6397  memTypeIndex,
    6398  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0,
    6399  finalCreateInfo.pUserData,
    6400  pAllocation);
    6401  if(res == VK_SUCCESS)
    6402  {
    6403  // Succeeded: AllocateOwnMemory has already filled pAllocation, nothing more to do here.
    6404  VMA_DEBUG_LOG(" Allocated as OwnMemory");
    6405  return VK_SUCCESS;
    6406  }
    6407  else
    6408  {
    6409  // Everything failed: Return error code.
    6410  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    6411  return res;
    6412  }
    6413  }
    6414  }
    6415 }
    6416 
    6417 VkResult VmaAllocator_T::AllocateOwnMemory(
    6418  VkDeviceSize size,
    6419  VmaSuballocationType suballocType,
    6420  uint32_t memTypeIndex,
    6421  bool map,
    6422  void* pUserData,
    6423  VmaAllocation* pAllocation)
    6424 {
    6425  VMA_ASSERT(pAllocation);
    6426 
    6427  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    6428  allocInfo.memoryTypeIndex = memTypeIndex;
    6429  allocInfo.allocationSize = size;
    6430 
    6431  // Allocate VkDeviceMemory.
    6432  VkDeviceMemory hMemory = VK_NULL_HANDLE;
    6433  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    6434  if(res < 0)
    6435  {
    6436  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    6437  return res;
    6438  }
    6439 
    6440  void* pMappedData = VMA_NULL;
    6441  if(map)
    6442  {
    6443  if(m_UnmapPersistentlyMappedMemoryCounter == 0)
    6444  {
    6445  res = (*m_VulkanFunctions.vkMapMemory)(
    6446  m_hDevice,
    6447  hMemory,
    6448  0,
    6449  VK_WHOLE_SIZE,
    6450  0,
    6451  &pMappedData);
    6452  if(res < 0)
    6453  {
    6454  VMA_DEBUG_LOG(" vkMapMemory FAILED");
    6455  FreeVulkanMemory(memTypeIndex, size, hMemory);
    6456  return res;
    6457  }
    6458  }
    6459  }
    6460 
    6461  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load());
    6462  (*pAllocation)->InitOwnAllocation(memTypeIndex, hMemory, suballocType, map, pMappedData, size, pUserData);
    6463 
    6464  // Register it in m_pOwnAllocations.
    6465  {
    6466  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
    6467  AllocationVectorType* pOwnAllocations = m_pOwnAllocations[memTypeIndex][map ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED];
    6468  VMA_ASSERT(pOwnAllocations);
    6469  VmaVectorInsertSorted<VmaPointerLess>(*pOwnAllocations, *pAllocation);
    6470  }
    6471 
    6472  VMA_DEBUG_LOG(" Allocated OwnMemory MemoryTypeIndex=#%u", memTypeIndex);
    6473 
    6474  return VK_SUCCESS;
    6475 }
    6476 
    6477 VkResult VmaAllocator_T::AllocateMemory(
    6478  const VkMemoryRequirements& vkMemReq,
    6479  const VmaAllocationCreateInfo& createInfo,
    6480  VmaSuballocationType suballocType,
    6481  VmaAllocation* pAllocation)
    6482 {
    6483  if((createInfo.flags & VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT) != 0 &&
    6484  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    6485  {
    6486  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    6487  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6488  }
    6489  if((createInfo.pool != VK_NULL_HANDLE) &&
    6490  ((createInfo.flags & (VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT)) != 0))
    6491  {
    6492  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT when pool != null is invalid.");
    6493  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6494  }
    6495 
    6496  if(createInfo.pool != VK_NULL_HANDLE)
    6497  {
    6498  return createInfo.pool->m_BlockVector.Allocate(
    6499  createInfo.pool,
    6500  m_CurrentFrameIndex.load(),
    6501  vkMemReq,
    6502  createInfo,
    6503  suballocType,
    6504  pAllocation);
    6505  }
    6506  else
    6507  {
    6508  // Bit mask of Vulkan memory types acceptable for this allocation.
    6509  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    6510  uint32_t memTypeIndex = UINT32_MAX;
    6511  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    6512  if(res == VK_SUCCESS)
    6513  {
    6514  res = AllocateMemoryOfType(vkMemReq, createInfo, memTypeIndex, suballocType, pAllocation);
    6515  // Succeeded on first try.
    6516  if(res == VK_SUCCESS)
    6517  {
    6518  return res;
    6519  }
    6520  // Allocation from this memory type failed. Try other compatible memory types.
    6521  else
    6522  {
    6523  for(;;)
    6524  {
    6525  // Remove old memTypeIndex from list of possibilities.
    6526  memoryTypeBits &= ~(1u << memTypeIndex);
    6527  // Find alternative memTypeIndex.
    6528  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    6529  if(res == VK_SUCCESS)
    6530  {
    6531  res = AllocateMemoryOfType(vkMemReq, createInfo, memTypeIndex, suballocType, pAllocation);
    6532  // Allocation from this alternative memory type succeeded.
    6533  if(res == VK_SUCCESS)
    6534  {
    6535  return res;
    6536  }
    6537  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    6538  }
    6539  // No other matching memory type index could be found.
    6540  else
    6541  {
    6542  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    6543  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6544  }
    6545  }
    6546  }
    6547  }
    6548  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    6549  else
    6550  return res;
    6551  }
    6552 }
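// Illustrative call path from user code (a sketch, not part of this file's
// logic; `allocator` and the sizes are placeholders):
//
//   VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
//   bufInfo.size = 65536;
//   bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
//   VmaAllocationCreateInfo allocCreateInfo = {};
//   allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//   VkBuffer buf; VmaAllocation alloc;
//   vmaCreateBuffer(allocator, &bufInfo, &allocCreateInfo, &buf, &alloc, VMA_NULL);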
    6553 
    6554 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
    6555 {
    6556  VMA_ASSERT(allocation);
    6557 
    6558  if(allocation->CanBecomeLost() == false ||
    6559  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
    6560  {
    6561  switch(allocation->GetType())
    6562  {
    6563  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    6564  {
    6565  VmaBlockVector* pBlockVector = VMA_NULL;
    6566  VmaPool hPool = allocation->GetPool();
    6567  if(hPool != VK_NULL_HANDLE)
    6568  {
    6569  pBlockVector = &hPool->m_BlockVector;
    6570  }
    6571  else
    6572  {
    6573  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    6574  const VMA_BLOCK_VECTOR_TYPE blockVectorType = allocation->GetBlockVectorType();
    6575  pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
    6576  }
    6577  pBlockVector->Free(allocation);
    6578  }
    6579  break;
    6580  case VmaAllocation_T::ALLOCATION_TYPE_OWN:
    6581  FreeOwnMemory(allocation);
    6582  break;
    6583  default:
    6584  VMA_ASSERT(0);
    6585  }
    6586  }
    6587 
    6588  vma_delete(this, allocation);
    6589 }
    6590 
    6591 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
    6592 {
    6593  // Initialize.
    6594  InitStatInfo(pStats->total);
    6595  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
    6596  InitStatInfo(pStats->memoryType[i]);
    6597  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    6598  InitStatInfo(pStats->memoryHeap[i]);
    6599 
    6600  // Process default pools.
    6601  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    6602  {
    6603  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    6604  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
    6605  {
    6606  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
    6607  VMA_ASSERT(pBlockVector);
    6608  pBlockVector->AddStats(pStats);
    6609  }
    6610  }
    6611 
    6612  // Process custom pools.
    6613  {
    6614  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    6615  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    6616  {
    6617  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
    6618  }
    6619  }
    6620 
    6621  // Process own allocations.
    6622  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    6623  {
    6624  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    6625  VmaMutexLock ownAllocationsLock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
    6626  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
    6627  {
    6628  AllocationVectorType* const pOwnAllocVector = m_pOwnAllocations[memTypeIndex][blockVectorType];
    6629  VMA_ASSERT(pOwnAllocVector);
    6630  for(size_t allocIndex = 0, allocCount = pOwnAllocVector->size(); allocIndex < allocCount; ++allocIndex)
    6631  {
    6632  VmaStatInfo allocationStatInfo;
    6633  (*pOwnAllocVector)[allocIndex]->OwnAllocCalcStatsInfo(allocationStatInfo);
    6634  VmaAddStatInfo(pStats->total, allocationStatInfo);
    6635  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    6636  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    6637  }
    6638  }
    6639  }
    6640 
    6641  // Postprocess.
    6642  VmaPostprocessCalcStatInfo(pStats->total);
    6643  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    6644  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    6645  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
    6646  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
    6647 }
    6648 
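// PCI vendor ID of AMD: 4098 == 0x1002.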
    6649 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    6650 
    6651 void VmaAllocator_T::UnmapPersistentlyMappedMemory()
    6652 {
    6653  if(m_UnmapPersistentlyMappedMemoryCounter++ == 0)
    6654  {
    6655  if(m_PhysicalDeviceProperties.vendorID == VMA_VENDOR_ID_AMD)
    6656  {
    6657  for(uint32_t memTypeIndex = m_MemProps.memoryTypeCount; memTypeIndex--; )
    6658  {
    6659  const VkMemoryPropertyFlags memFlags = m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    6660  if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0 &&
    6661  (memFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
    6662  {
    6663  // Process OwnAllocations.
    6664  {
    6665  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
    6666  AllocationVectorType* pOwnAllocationsVector = m_pOwnAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
    6667  for(size_t ownAllocIndex = pOwnAllocationsVector->size(); ownAllocIndex--; )
    6668  {
    6669  VmaAllocation hAlloc = (*pOwnAllocationsVector)[ownAllocIndex];
    6670  hAlloc->OwnAllocUnmapPersistentlyMappedMemory(this);
    6671  }
    6672  }
    6673 
    6674  // Process normal Allocations.
    6675  {
    6676  VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
    6677  pBlockVector->UnmapPersistentlyMappedMemory();
    6678  }
    6679  }
    6680  }
    6681 
    6682  // Process custom pools.
    6683  {
    6684  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    6685  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    6686  {
    6687  m_Pools[poolIndex]->GetBlockVector().UnmapPersistentlyMappedMemory();
    6688  }
    6689  }
    6690  }
    6691  }
    6692 }
    6693 
    6694 VkResult VmaAllocator_T::MapPersistentlyMappedMemory()
    6695 {
    6696  VMA_ASSERT(m_UnmapPersistentlyMappedMemoryCounter > 0);
    6697  if(--m_UnmapPersistentlyMappedMemoryCounter == 0)
    6698  {
    6699  VkResult finalResult = VK_SUCCESS;
    6700  if(m_PhysicalDeviceProperties.vendorID == VMA_VENDOR_ID_AMD)
    6701  {
    6702  // Process custom pools.
    6703  {
    6704  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    6705  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    6706  {
    6707  m_Pools[poolIndex]->GetBlockVector().MapPersistentlyMappedMemory();
    6708  }
    6709  }
    6710 
    6711  for(uint32_t memTypeIndex = 0; memTypeIndex < m_MemProps.memoryTypeCount; ++memTypeIndex)
    6712  {
    6713  const VkMemoryPropertyFlags memFlags = m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    6714  if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0 &&
    6715  (memFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
    6716  {
    6717  // Process OwnAllocations.
    6718  {
    6719  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
    6720  AllocationVectorType* pAllocationsVector = m_pOwnAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
    6721  for(size_t ownAllocIndex = 0, ownAllocCount = pAllocationsVector->size(); ownAllocIndex < ownAllocCount; ++ownAllocIndex)
    6722  {
    6723  VmaAllocation hAlloc = (*pAllocationsVector)[ownAllocIndex];
    6724  hAlloc->OwnAllocMapPersistentlyMappedMemory(this);
    6725  }
    6726  }
    6727 
    6728  // Process normal Allocations.
    6729  {
    6730  VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
    6731  VkResult localResult = pBlockVector->MapPersistentlyMappedMemory();
    6732  if(localResult != VK_SUCCESS)
    6733  {
    6734  finalResult = localResult;
    6735  }
    6736  }
    6737  }
    6738  }
    6739  }
    6740  return finalResult;
    6741  }
    6742  else
    6743  return VK_SUCCESS;
    6744 }
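// The Unmap/Map pair above backs vmaUnmapPersistentlyMappedMemory() and
// vmaMapPersistentlyMappedMemory(). Intended usage pattern (a sketch) on AMD,
// where DEVICE_LOCAL + HOST_VISIBLE memory should not stay mapped while the
// application is inactive, e.g. across alt-tab on Windows:
//
//   vmaUnmapPersistentlyMappedMemory(allocator);
//   // ... handle the event ...
//   vmaMapPersistentlyMappedMemory(allocator);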
    6745 
    6746 VkResult VmaAllocator_T::Defragment(
    6747  VmaAllocation* pAllocations,
    6748  size_t allocationCount,
    6749  VkBool32* pAllocationsChanged,
    6750  const VmaDefragmentationInfo* pDefragmentationInfo,
    6751  VmaDefragmentationStats* pDefragmentationStats)
    6752 {
    6753  if(pAllocationsChanged != VMA_NULL)
    6754  {
    6755  memset(pAllocationsChanged, 0, allocationCount * sizeof(*pAllocationsChanged));
    6756  }
    6757  if(pDefragmentationStats != VMA_NULL)
    6758  {
    6759  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
    6760  }
    6761 
    6762  if(m_UnmapPersistentlyMappedMemoryCounter > 0)
    6763  {
    6764  VMA_DEBUG_LOG("ERROR: Cannot defragment when inside vmaUnmapPersistentlyMappedMemory.");
    6765  return VK_ERROR_MEMORY_MAP_FAILED;
    6766  }
    6767 
    6768  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
    6769 
    6770  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
    6771 
    6772  const size_t poolCount = m_Pools.size();
    6773 
    6774  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
    6775  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    6776  {
    6777  VmaAllocation hAlloc = pAllocations[allocIndex];
    6778  VMA_ASSERT(hAlloc);
    6779  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
    6780  // OwnAlloc cannot be defragmented.
    6781  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
    6782  // Only HOST_VISIBLE memory types can be defragmented.
    6783  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
    6784  // Lost allocation cannot be defragmented.
    6785  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
    6786  {
    6787  VmaBlockVector* pAllocBlockVector = VMA_NULL;
    6788 
    6789  const VmaPool hAllocPool = hAlloc->GetPool();
    6790  // This allocation belongs to a custom pool.
    6791  if(hAllocPool != VK_NULL_HANDLE)
    6792  {
    6793  pAllocBlockVector = &hAllocPool->GetBlockVector();
    6794  }
    6795  // This allocation belongs to the general pool.
    6796  else
    6797  {
    6798  pAllocBlockVector = m_pBlockVectors[memTypeIndex][hAlloc->GetBlockVectorType()];
    6799  }
    6800 
    6801  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
    6802 
    6803  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
    6804  &pAllocationsChanged[allocIndex] : VMA_NULL;
    6805  pDefragmentator->AddAllocation(hAlloc, pChanged);
    6806  }
    6807  }
    6808 
    6809  VkResult result = VK_SUCCESS;
    6810 
    6811  // ======== Main processing.
    6812 
    6813  VkDeviceSize maxBytesToMove = VK_WHOLE_SIZE;
    6814  uint32_t maxAllocationsToMove = UINT32_MAX;
    6815  if(pDefragmentationInfo != VMA_NULL)
    6816  {
    6817  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
    6818  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    6819  }
    6820 
    6821  // Process standard memory.
    6822  for(uint32_t memTypeIndex = 0;
    6823  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
    6824  ++memTypeIndex)
    6825  {
    6826  // Only HOST_VISIBLE memory types can be defragmented.
    6827  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    6828  {
    6829  for(uint32_t blockVectorType = 0;
    6830  (blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT) && (result == VK_SUCCESS);
    6831  ++blockVectorType)
    6832  {
    6833  result = m_pBlockVectors[memTypeIndex][blockVectorType]->Defragment(
    6834  pDefragmentationStats,
    6835  maxBytesToMove,
    6836  maxAllocationsToMove);
    6837  }
    6838  }
    6839  }
    6840 
    6841  // Process custom pools.
    6842  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
    6843  {
    6844  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
    6845  pDefragmentationStats,
    6846  maxBytesToMove,
    6847  maxAllocationsToMove);
    6848  }
    6849 
    6850  // ======== Destroy defragmentators.
    6851 
    6852  // Process custom pools.
    6853  for(size_t poolIndex = poolCount; poolIndex--; )
    6854  {
    6855  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
    6856  }
    6857 
    6858  // Process standard memory.
    6859  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
    6860  {
    6861  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    6862  {
    6863  for(size_t blockVectorType = VMA_BLOCK_VECTOR_TYPE_COUNT; blockVectorType--; )
    6864  {
    6865  m_pBlockVectors[memTypeIndex][blockVectorType]->DestroyDefragmentator();
    6866  }
    6867  }
    6868  }
    6869 
    6870  return result;
    6871 }
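// Illustrative call from user code via the public vmaDefragment() wrapper
// (a sketch; `allocations` and `count` are placeholders):
//
//   VmaDefragmentationStats stats = {};
//   vmaDefragment(allocator, allocations, count, VMA_NULL, VMA_NULL, &stats);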
    6872 
    6873 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
    6874 {
    6875  if(hAllocation->CanBecomeLost())
    6876  {
    6877  /*
    6878  Warning: This is a carefully designed algorithm.
    6879  Do not modify unless you really know what you're doing :)
    6880  */
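 // The loop below atomically bumps the allocation's last-use frame index to
 // the current frame via compare-exchange, unless the allocation has already
 // become lost - so querying its info also counts as using it.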
    6881  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    6882  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    6883  for(;;)
    6884  {
    6885  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    6886  {
    6887  pAllocationInfo->memoryType = UINT32_MAX;
    6888  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
    6889  pAllocationInfo->offset = 0;
    6890  pAllocationInfo->size = hAllocation->GetSize();
    6891  pAllocationInfo->pMappedData = VMA_NULL;
    6892  pAllocationInfo->pUserData = hAllocation->GetUserData();
    6893  return;
    6894  }
    6895  else if(localLastUseFrameIndex == localCurrFrameIndex)
    6896  {
    6897  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    6898  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    6899  pAllocationInfo->offset = hAllocation->GetOffset();
    6900  pAllocationInfo->size = hAllocation->GetSize();
    6901  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
    6902  pAllocationInfo->pUserData = hAllocation->GetUserData();
    6903  return;
    6904  }
    6905  else // Last use time earlier than current time.
    6906  {
    6907  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    6908  {
    6909  localLastUseFrameIndex = localCurrFrameIndex;
    6910  }
    6911  }
    6912  }
    6913  }
 6914  // We could use the same code as above here, but for performance reasons we skip the atomic access to the allocation's LastUseFrameIndex.
    6915  else
    6916  {
    6917  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    6918  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    6919  pAllocationInfo->offset = hAllocation->GetOffset();
    6920  pAllocationInfo->size = hAllocation->GetSize();
    6921  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
    6922  pAllocationInfo->pUserData = hAllocation->GetUserData();
    6923  }
    6924 }
    6925 
    6926 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    6927 {
    6928  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
    6929 
    6930  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    6931 
    6932  if(newCreateInfo.maxBlockCount == 0)
    6933  {
    6934  newCreateInfo.maxBlockCount = SIZE_MAX;
    6935  }
    6936  if(newCreateInfo.blockSize == 0)
    6937  {
    6938  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    6939  }
    6940 
    6941  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
    6942 
    6943  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    6944  if(res != VK_SUCCESS)
    6945  {
    6946  vma_delete(this, *pPool);
    6947  *pPool = VMA_NULL;
    6948  return res;
    6949  }
    6950 
    6951  // Add to m_Pools.
    6952  {
    6953  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    6954  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    6955  }
    6956 
    6957  return VK_SUCCESS;
    6958 }
    6959 
    6960 void VmaAllocator_T::DestroyPool(VmaPool pool)
    6961 {
    6962  // Remove from m_Pools.
    6963  {
    6964  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    6965  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    6966  VMA_ASSERT(success && "Pool not found in Allocator.");
    6967  }
    6968 
    6969  vma_delete(this, pool);
    6970 }
    6971 
    6972 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
    6973 {
    6974  pool->m_BlockVector.GetPoolStats(pPoolStats);
    6975 }
    6976 
    6977 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
    6978 {
    6979  m_CurrentFrameIndex.store(frameIndex);
    6980 }
    6981 
    6982 void VmaAllocator_T::MakePoolAllocationsLost(
    6983  VmaPool hPool,
    6984  size_t* pLostAllocationCount)
    6985 {
    6986  hPool->m_BlockVector.MakePoolAllocationsLost(
    6987  m_CurrentFrameIndex.load(),
    6988  pLostAllocationCount);
    6989 }
    6990 
    6991 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
    6992 {
    6993  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST);
    6994  (*pAllocation)->InitLost();
    6995 }
    6996 
    6997 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
    6998 {
    6999  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
    7000 
    7001  VkResult res;
    7002  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    7003  {
    7004  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    7005  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
    7006  {
    7007  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    7008  if(res == VK_SUCCESS)
    7009  {
    7010  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
    7011  }
    7012  }
    7013  else
    7014  {
    7015  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
    7016  }
    7017  }
    7018  else
    7019  {
    7020  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    7021  }
    7022 
    7023  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    7024  {
    7025  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    7026  }
    7027 
    7028  return res;
    7029 }
    7030 
    7031 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
    7032 {
    7033  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    7034  {
    7035  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    7036  }
    7037 
    7038  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
    7039 
    7040  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    7041  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    7042  {
    7043  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    7044  m_HeapSizeLimit[heapIndex] += size;
    7045  }
    7046 }
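/*
Illustrative sketch (not part of the library source): the per-heap budget
enforced above comes from VmaAllocatorCreateInfo::pHeapSizeLimit, an array of
VK_MAX_MEMORY_HEAPS entries where VK_WHOLE_SIZE means "no limit". For example:

    VkDeviceSize heapLimits[VK_MAX_MEMORY_HEAPS];
    std::fill(heapLimits, heapLimits + VK_MAX_MEMORY_HEAPS, VK_WHOLE_SIZE);
    heapLimits[0] = 256ull * 1024 * 1024; // cap heap 0 at 256 MiB

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.pHeapSizeLimit = heapLimits;
*/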
    7047 
    7048 void VmaAllocator_T::FreeOwnMemory(VmaAllocation allocation)
    7049 {
    7050  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_OWN);
    7051 
    7052  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    7053  {
    7054  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
    7055  AllocationVectorType* const pOwnAllocations = m_pOwnAllocations[memTypeIndex][allocation->GetBlockVectorType()];
    7056  VMA_ASSERT(pOwnAllocations);
    7057  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pOwnAllocations, allocation);
    7058  VMA_ASSERT(success);
    7059  }
    7060 
    7061  VkDeviceMemory hMemory = allocation->GetMemory();
    7062 
    7063  if(allocation->GetMappedData() != VMA_NULL)
    7064  {
    7065  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    7066  }
    7067 
    7068  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
    7069 
    7070  VMA_DEBUG_LOG(" Freed OwnMemory MemoryTypeIndex=%u", memTypeIndex);
    7071 }
    7072 
    7073 #if VMA_STATS_STRING_ENABLED
    7074 
    7075 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
    7076 {
    7077  bool ownAllocationsStarted = false;
    7078  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    7079  {
    7080  VmaMutexLock ownAllocationsLock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
    7081  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
    7082  {
    7083  AllocationVectorType* const pOwnAllocVector = m_pOwnAllocations[memTypeIndex][blockVectorType];
    7084  VMA_ASSERT(pOwnAllocVector);
    7085  if(pOwnAllocVector->empty() == false)
    7086  {
    7087  if(ownAllocationsStarted == false)
    7088  {
    7089  ownAllocationsStarted = true;
    7090  json.WriteString("OwnAllocations");
    7091  json.BeginObject();
    7092  }
    7093 
    7094  json.BeginString("Type ");
    7095  json.ContinueString(memTypeIndex);
    7096  if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
    7097  {
    7098  json.ContinueString(" Mapped");
    7099  }
    7100  json.EndString();
    7101 
    7102  json.BeginArray();
    7103 
    7104  for(size_t i = 0; i < pOwnAllocVector->size(); ++i)
    7105  {
    7106  const VmaAllocation hAlloc = (*pOwnAllocVector)[i];
    7107  json.BeginObject(true);
    7108 
    7109  json.WriteString("Size");
    7110  json.WriteNumber(hAlloc->GetSize());
    7111 
    7112  json.WriteString("Type");
    7113  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);
    7114 
    7115  json.EndObject();
    7116  }
    7117 
    7118  json.EndArray();
    7119  }
    7120  }
    7121  }
    7122  if(ownAllocationsStarted)
    7123  {
    7124  json.EndObject();
    7125  }
    7126 
    7127  {
    7128  bool allocationsStarted = false;
    7129  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    7130  {
    7131  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
    7132  {
    7133  if(m_pBlockVectors[memTypeIndex][blockVectorType]->IsEmpty() == false)
    7134  {
    7135  if(allocationsStarted == false)
    7136  {
    7137  allocationsStarted = true;
    7138  json.WriteString("DefaultPools");
    7139  json.BeginObject();
    7140  }
    7141 
    7142  json.BeginString("Type ");
    7143  json.ContinueString(memTypeIndex);
    7144  if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
    7145  {
    7146  json.ContinueString(" Mapped");
    7147  }
    7148  json.EndString();
    7149 
    7150  m_pBlockVectors[memTypeIndex][blockVectorType]->PrintDetailedMap(json);
    7151  }
    7152  }
    7153  }
    7154  if(allocationsStarted)
    7155  {
    7156  json.EndObject();
    7157  }
    7158  }
    7159 
    7160  {
    7161  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    7162  const size_t poolCount = m_Pools.size();
    7163  if(poolCount > 0)
    7164  {
    7165  json.WriteString("Pools");
    7166  json.BeginArray();
    7167  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
    7168  {
    7169  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
    7170  }
    7171  json.EndArray();
    7172  }
    7173  }
    7174 }
    7175 
    7176 #endif // #if VMA_STATS_STRING_ENABLED
    7177 
    7178 static VkResult AllocateMemoryForImage(
    7179  VmaAllocator allocator,
    7180  VkImage image,
    7181  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    7182  VmaSuballocationType suballocType,
    7183  VmaAllocation* pAllocation)
    7184 {
    7185  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
    7186 
    7187  VkMemoryRequirements vkMemReq = {};
    7188  (*allocator->GetVulkanFunctions().vkGetImageMemoryRequirements)(allocator->m_hDevice, image, &vkMemReq);
    7189 
    7190  return allocator->AllocateMemory(
    7191  vkMemReq,
    7192  *pAllocationCreateInfo,
    7193  suballocType,
    7194  pAllocation);
    7195 }
    7196 
 7197 ////////////////////////////////////////////////////////////////////////////////
 7198 // Public interface
    7199 
    7200 VkResult vmaCreateAllocator(
    7201  const VmaAllocatorCreateInfo* pCreateInfo,
    7202  VmaAllocator* pAllocator)
    7203 {
    7204  VMA_ASSERT(pCreateInfo && pAllocator);
    7205  VMA_DEBUG_LOG("vmaCreateAllocator");
    7206  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    7207  return VK_SUCCESS;
    7208 }
    7209 
    7210 void vmaDestroyAllocator(
    7211  VmaAllocator allocator)
    7212 {
    7213  if(allocator != VK_NULL_HANDLE)
    7214  {
    7215  VMA_DEBUG_LOG("vmaDestroyAllocator");
    7216  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
    7217  vma_delete(&allocationCallbacks, allocator);
    7218  }
    7219 }
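/*
Illustrative usage sketch (not part of the library source; physicalDevice and
device are placeholder handles obtained from regular Vulkan initialization):

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... create buffers, images and allocations ...
    vmaDestroyAllocator(allocator);
*/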
    7220 
 7221 void vmaGetPhysicalDeviceProperties(
 7222  VmaAllocator allocator,
    7223  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    7224 {
    7225  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    7226  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    7227 }
    7228 
 7229 void vmaGetMemoryProperties(
 7230  VmaAllocator allocator,
    7231  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    7232 {
    7233  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    7234  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    7235 }
    7236 
 7237 void vmaGetMemoryTypeProperties(
 7238  VmaAllocator allocator,
    7239  uint32_t memoryTypeIndex,
    7240  VkMemoryPropertyFlags* pFlags)
    7241 {
    7242  VMA_ASSERT(allocator && pFlags);
    7243  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    7244  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    7245 }
    7246 
 7247 void vmaSetCurrentFrameIndex(
 7248  VmaAllocator allocator,
    7249  uint32_t frameIndex)
    7250 {
    7251  VMA_ASSERT(allocator);
    7252  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    7253 
    7254  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7255 
    7256  allocator->SetCurrentFrameIndex(frameIndex);
    7257 }
    7258 
    7259 void vmaCalculateStats(
    7260  VmaAllocator allocator,
    7261  VmaStats* pStats)
    7262 {
    7263  VMA_ASSERT(allocator && pStats);
    7264  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7265  allocator->CalculateStats(pStats);
    7266 }
    7267 
    7268 #if VMA_STATS_STRING_ENABLED
    7269 
    7270 void vmaBuildStatsString(
    7271  VmaAllocator allocator,
    7272  char** ppStatsString,
    7273  VkBool32 detailedMap)
    7274 {
    7275  VMA_ASSERT(allocator && ppStatsString);
    7276  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7277 
    7278  VmaStringBuilder sb(allocator);
    7279  {
    7280  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
    7281  json.BeginObject();
    7282 
    7283  VmaStats stats;
    7284  allocator->CalculateStats(&stats);
    7285 
    7286  json.WriteString("Total");
    7287  VmaPrintStatInfo(json, stats.total);
    7288 
    7289  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
    7290  {
    7291  json.BeginString("Heap ");
    7292  json.ContinueString(heapIndex);
    7293  json.EndString();
    7294  json.BeginObject();
    7295 
    7296  json.WriteString("Size");
    7297  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
    7298 
    7299  json.WriteString("Flags");
    7300  json.BeginArray(true);
    7301  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
    7302  {
    7303  json.WriteString("DEVICE_LOCAL");
    7304  }
    7305  json.EndArray();
    7306 
    7307  if(stats.memoryHeap[heapIndex].blockCount > 0)
    7308  {
    7309  json.WriteString("Stats");
    7310  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
    7311  }
    7312 
    7313  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
    7314  {
    7315  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
    7316  {
    7317  json.BeginString("Type ");
    7318  json.ContinueString(typeIndex);
    7319  json.EndString();
    7320 
    7321  json.BeginObject();
    7322 
    7323  json.WriteString("Flags");
    7324  json.BeginArray(true);
    7325  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
    7326  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
    7327  {
    7328  json.WriteString("DEVICE_LOCAL");
    7329  }
    7330  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    7331  {
    7332  json.WriteString("HOST_VISIBLE");
    7333  }
    7334  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
    7335  {
    7336  json.WriteString("HOST_COHERENT");
    7337  }
    7338  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
    7339  {
    7340  json.WriteString("HOST_CACHED");
    7341  }
    7342  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
    7343  {
    7344  json.WriteString("LAZILY_ALLOCATED");
    7345  }
    7346  json.EndArray();
    7347 
    7348  if(stats.memoryType[typeIndex].blockCount > 0)
    7349  {
    7350  json.WriteString("Stats");
    7351  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
    7352  }
    7353 
    7354  json.EndObject();
    7355  }
    7356  }
    7357 
    7358  json.EndObject();
    7359  }
    7360  if(detailedMap == VK_TRUE)
    7361  {
    7362  allocator->PrintDetailedMap(json);
    7363  }
    7364 
    7365  json.EndObject();
    7366  }
    7367 
    7368  const size_t len = sb.GetLength();
    7369  char* const pChars = vma_new_array(allocator, char, len + 1);
    7370  if(len > 0)
    7371  {
    7372  memcpy(pChars, sb.GetData(), len);
    7373  }
    7374  pChars[len] = '\0';
    7375  *ppStatsString = pChars;
    7376 }
    7377 
    7378 void vmaFreeStatsString(
    7379  VmaAllocator allocator,
    7380  char* pStatsString)
    7381 {
    7382  if(pStatsString != VMA_NULL)
    7383  {
    7384  VMA_ASSERT(allocator);
    7385  size_t len = strlen(pStatsString);
    7386  vma_delete_array(allocator, pStatsString, len + 1);
    7387  }
    7388 }
    7389 
    7390 #endif // #if VMA_STATS_STRING_ENABLED
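/*
Illustrative sketch (not part of the library source): the statistics string is
JSON, owned by the library, and must be released with vmaFreeStatsString:

    char* statsString = VMA_NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map
    printf("%s\n", statsString);
    vmaFreeStatsString(allocator, statsString);
*/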
    7391 
    7394 VkResult vmaFindMemoryTypeIndex(
    7395  VmaAllocator allocator,
    7396  uint32_t memoryTypeBits,
    7397  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    7398  uint32_t* pMemoryTypeIndex)
    7399 {
    7400  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    7401  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    7402  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    7403 
    7404  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    7405  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    7406  if(preferredFlags == 0)
    7407  {
    7408  preferredFlags = requiredFlags;
    7409  }
    7410  // preferredFlags, if not 0, must be a superset of requiredFlags.
    7411  VMA_ASSERT((requiredFlags & ~preferredFlags) == 0);
    7412 
    7413  // Convert usage to requiredFlags and preferredFlags.
    7414  switch(pAllocationCreateInfo->usage)
    7415  {
 7416  case VMA_MEMORY_USAGE_UNKNOWN:
 7417  break;
 7418  case VMA_MEMORY_USAGE_GPU_ONLY:
 7419  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
 7420  break;
 7421  case VMA_MEMORY_USAGE_CPU_ONLY:
 7422  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
 7423  break;
 7424  case VMA_MEMORY_USAGE_CPU_TO_GPU:
 7425  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
 7426  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
 7427  break;
 7428  case VMA_MEMORY_USAGE_GPU_TO_CPU:
 7429  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
 7430  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
 7431  break;
    7432  default:
    7433  break;
    7434  }
    7435 
    7436  *pMemoryTypeIndex = UINT32_MAX;
    7437  uint32_t minCost = UINT32_MAX;
    7438  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    7439  memTypeIndex < allocator->GetMemoryTypeCount();
    7440  ++memTypeIndex, memTypeBit <<= 1)
    7441  {
    7442  // This memory type is acceptable according to memoryTypeBits bitmask.
    7443  if((memTypeBit & memoryTypeBits) != 0)
    7444  {
    7445  const VkMemoryPropertyFlags currFlags =
    7446  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    7447  // This memory type contains requiredFlags.
    7448  if((requiredFlags & ~currFlags) == 0)
    7449  {
    7450  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    7451  uint32_t currCost = CountBitsSet(preferredFlags & ~currFlags);
    7452  // Remember memory type with lowest cost.
    7453  if(currCost < minCost)
    7454  {
    7455  *pMemoryTypeIndex = memTypeIndex;
    7456  if(currCost == 0)
    7457  {
    7458  return VK_SUCCESS;
    7459  }
    7460  minCost = currCost;
    7461  }
    7462  }
    7463  }
    7464  }
    7465  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    7466 }
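/*
Illustrative sketch (not part of the library source): choosing a memory type
for a staging buffer, with memReq.memoryTypeBits previously queried via
vkGetBufferMemoryRequirements:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(
        allocator, memReq.memoryTypeBits, &allocCreateInfo, &memTypeIndex);
    // Returns VK_ERROR_FEATURE_NOT_PRESENT if no acceptable type carries all requiredFlags.
*/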
    7467 
    7468 VkResult vmaCreatePool(
    7469  VmaAllocator allocator,
    7470  const VmaPoolCreateInfo* pCreateInfo,
    7471  VmaPool* pPool)
    7472 {
    7473  VMA_ASSERT(allocator && pCreateInfo && pPool);
    7474 
    7475  VMA_DEBUG_LOG("vmaCreatePool");
    7476 
    7477  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7478 
    7479  return allocator->CreatePool(pCreateInfo, pPool);
    7480 }
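/*
Illustrative sketch (not part of the library source): creating a custom pool
and routing allocations into it; memTypeIndex is assumed to come from
vmaFindMemoryTypeIndex above:

    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memTypeIndex;
    poolInfo.blockSize = 0;     // 0 = use the default preferred block size
    poolInfo.maxBlockCount = 0; // 0 = no limit on number of blocks

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.pool = pool; // allocations made with this go into the pool
    // ... vmaCreateBuffer / vmaAllocateMemory with allocCreateInfo ...
    vmaDestroyPool(allocator, pool);
*/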
    7481 
    7482 void vmaDestroyPool(
    7483  VmaAllocator allocator,
    7484  VmaPool pool)
    7485 {
    7486  VMA_ASSERT(allocator && pool);
    7487 
    7488  VMA_DEBUG_LOG("vmaDestroyPool");
    7489 
    7490  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7491 
    7492  allocator->DestroyPool(pool);
    7493 }
    7494 
    7495 void vmaGetPoolStats(
    7496  VmaAllocator allocator,
    7497  VmaPool pool,
    7498  VmaPoolStats* pPoolStats)
    7499 {
    7500  VMA_ASSERT(allocator && pool && pPoolStats);
    7501 
    7502  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7503 
    7504  allocator->GetPoolStats(pool, pPoolStats);
    7505 }
    7506 
 7507 void vmaMakePoolAllocationsLost(
 7508  VmaAllocator allocator,
    7509  VmaPool pool,
    7510  size_t* pLostAllocationCount)
    7511 {
    7512  VMA_ASSERT(allocator && pool);
    7513 
    7514  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7515 
    7516  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    7517 }
    7518 
    7519 VkResult vmaAllocateMemory(
    7520  VmaAllocator allocator,
    7521  const VkMemoryRequirements* pVkMemoryRequirements,
    7522  const VmaAllocationCreateInfo* pCreateInfo,
    7523  VmaAllocation* pAllocation,
    7524  VmaAllocationInfo* pAllocationInfo)
    7525 {
    7526  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
    7527 
    7528  VMA_DEBUG_LOG("vmaAllocateMemory");
    7529 
    7530  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7531 
    7532  VkResult result = allocator->AllocateMemory(
    7533  *pVkMemoryRequirements,
    7534  *pCreateInfo,
    7535  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    7536  pAllocation);
    7537 
    7538  if(pAllocationInfo && result == VK_SUCCESS)
    7539  {
    7540  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    7541  }
    7542 
    7543  return result;
    7544 }
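/*
Illustrative sketch (not part of the library source): allocating raw memory
for requirements queried from an existing buffer:

    VkMemoryRequirements memReq;
    vkGetBufferMemoryRequirements(device, buffer, &memReq);

    VmaAllocationCreateInfo createInfo = {};
    createInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocation allocation;
    VmaAllocationInfo allocInfo;
    VkResult res = vmaAllocateMemory(allocator, &memReq, &createInfo, &allocation, &allocInfo);
    // ... vkBindBufferMemory(device, buffer, allocInfo.deviceMemory, allocInfo.offset) ...
    vmaFreeMemory(allocator, allocation);
*/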
    7545 
 7546 VkResult vmaAllocateMemoryForBuffer(
 7547  VmaAllocator allocator,
    7548  VkBuffer buffer,
    7549  const VmaAllocationCreateInfo* pCreateInfo,
    7550  VmaAllocation* pAllocation,
    7551  VmaAllocationInfo* pAllocationInfo)
    7552 {
    7553  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    7554 
    7555  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    7556 
    7557  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7558 
    7559  VkMemoryRequirements vkMemReq = {};
    7560  (*allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements)(allocator->m_hDevice, buffer, &vkMemReq);
    7561 
    7562  VkResult result = allocator->AllocateMemory(
    7563  vkMemReq,
    7564  *pCreateInfo,
    7565  VMA_SUBALLOCATION_TYPE_BUFFER,
    7566  pAllocation);
    7567 
    7568  if(pAllocationInfo && result == VK_SUCCESS)
    7569  {
    7570  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    7571  }
    7572 
    7573  return result;
    7574 }
    7575 
    7576 VkResult vmaAllocateMemoryForImage(
    7577  VmaAllocator allocator,
    7578  VkImage image,
    7579  const VmaAllocationCreateInfo* pCreateInfo,
    7580  VmaAllocation* pAllocation,
    7581  VmaAllocationInfo* pAllocationInfo)
    7582 {
    7583  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    7584 
    7585  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
    7586 
    7587  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7588 
    7589  VkResult result = AllocateMemoryForImage(
    7590  allocator,
    7591  image,
    7592  pCreateInfo,
    7593  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
    7594  pAllocation);
    7595 
    7596  if(pAllocationInfo && result == VK_SUCCESS)
    7597  {
    7598  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    7599  }
    7600 
    7601  return result;
    7602 }
    7603 
    7604 void vmaFreeMemory(
    7605  VmaAllocator allocator,
    7606  VmaAllocation allocation)
    7607 {
    7608  VMA_ASSERT(allocator && allocation);
    7609 
    7610  VMA_DEBUG_LOG("vmaFreeMemory");
    7611 
    7612  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7613 
    7614  allocator->FreeMemory(allocation);
    7615 }
    7616 
 7617 void vmaGetAllocationInfo(
 7618  VmaAllocator allocator,
    7619  VmaAllocation allocation,
    7620  VmaAllocationInfo* pAllocationInfo)
    7621 {
    7622  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    7623 
    7624  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7625 
    7626  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    7627 }
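/*
Illustrative sketch (not part of the library source): the usual per-frame
pattern for allocations created with the can-become-lost flag. Querying the
info also marks the allocation as used in the current frame:

    vmaSetCurrentFrameIndex(allocator, frameIndex);

    VmaAllocationInfo info;
    vmaGetAllocationInfo(allocator, allocation, &info);
    if(info.deviceMemory == VK_NULL_HANDLE)
    {
        // The allocation was lost - recreate the resource and its allocation.
    }
*/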
    7628 
 7629 void vmaSetAllocationUserData(
 7630  VmaAllocator allocator,
    7631  VmaAllocation allocation,
    7632  void* pUserData)
    7633 {
    7634  VMA_ASSERT(allocator && allocation);
    7635 
    7636  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7637 
    7638  allocation->SetUserData(pUserData);
    7639 }
    7640 
 7641 void vmaCreateLostAllocation(
 7642  VmaAllocator allocator,
    7643  VmaAllocation* pAllocation)
    7644 {
    7645  VMA_ASSERT(allocator && pAllocation);
    7646 
    7647  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
    7648 
    7649  allocator->CreateLostAllocation(pAllocation);
    7650 }
    7651 
    7652 VkResult vmaMapMemory(
    7653  VmaAllocator allocator,
    7654  VmaAllocation allocation,
    7655  void** ppData)
    7656 {
    7657  VMA_ASSERT(allocator && allocation && ppData);
    7658 
    7659  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7660 
    7661  return (*allocator->GetVulkanFunctions().vkMapMemory)(
    7662  allocator->m_hDevice,
    7663  allocation->GetMemory(),
    7664  allocation->GetOffset(),
    7665  allocation->GetSize(),
    7666  0,
    7667  ppData);
    7668 }
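/*
Illustrative sketch (not part of the library source): filling a HOST_VISIBLE
allocation with data. Note that Vulkan does not allow one VkDeviceMemory to be
mapped twice at the same time, so avoid keeping two allocations from the same
memory block mapped simultaneously:

    void* mappedData;
    if(vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
    {
        memcpy(mappedData, srcData, (size_t)srcDataSize); // srcData/srcDataSize are placeholders
        vmaUnmapMemory(allocator, allocation);
    }
*/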
    7669 
    7670 void vmaUnmapMemory(
    7671  VmaAllocator allocator,
    7672  VmaAllocation allocation)
    7673 {
    7674  VMA_ASSERT(allocator && allocation);
    7675 
    7676  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7677 
    7678  (*allocator->GetVulkanFunctions().vkUnmapMemory)(allocator->m_hDevice, allocation->GetMemory());
    7679 }
    7680 
    7681 void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator)
    7682 {
    7683  VMA_ASSERT(allocator);
    7684 
    7685  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7686 
    7687  allocator->UnmapPersistentlyMappedMemory();
    7688 }
    7689 
    7690 VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator)
    7691 {
    7692  VMA_ASSERT(allocator);
    7693 
    7694  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7695 
    7696  return allocator->MapPersistentlyMappedMemory();
    7697 }
    7698 
    7699 VkResult vmaDefragment(
    7700  VmaAllocator allocator,
    7701  VmaAllocation* pAllocations,
    7702  size_t allocationCount,
    7703  VkBool32* pAllocationsChanged,
    7704  const VmaDefragmentationInfo *pDefragmentationInfo,
    7705  VmaDefragmentationStats* pDefragmentationStats)
    7706 {
    7707  VMA_ASSERT(allocator && pAllocations);
    7708 
    7709  VMA_DEBUG_LOG("vmaDefragment");
    7710 
    7711  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7712 
    7713  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
    7714 }
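/*
Illustrative sketch (not part of the library source): defragmenting a set of
allocations; only allocations in HOST_VISIBLE memory can be moved. ALLOC_COUNT,
allocations[] and the resource recreation step are placeholders:

    VkBool32 changed[ALLOC_COUNT] = {};
    VmaDefragmentationStats stats = {};
    VkResult res = vmaDefragment(
        allocator, allocations, ALLOC_COUNT, changed, VMA_NULL, &stats);
    // For every i where changed[i] == VK_TRUE the allocation was moved:
    // destroy the old buffer/image, create a new one, and bind it at the
    // allocation's new memory and offset.
*/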
    7715 
    7716 VkResult vmaCreateBuffer(
    7717  VmaAllocator allocator,
    7718  const VkBufferCreateInfo* pBufferCreateInfo,
    7719  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    7720  VkBuffer* pBuffer,
    7721  VmaAllocation* pAllocation,
    7722  VmaAllocationInfo* pAllocationInfo)
    7723 {
    7724  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
    7725 
    7726  VMA_DEBUG_LOG("vmaCreateBuffer");
    7727 
    7728  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7729 
    7730  *pBuffer = VK_NULL_HANDLE;
    7731  *pAllocation = VK_NULL_HANDLE;
    7732 
    7733  // 1. Create VkBuffer.
    7734  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
    7735  allocator->m_hDevice,
    7736  pBufferCreateInfo,
    7737  allocator->GetAllocationCallbacks(),
    7738  pBuffer);
    7739  if(res >= 0)
    7740  {
    7741  // 2. vkGetBufferMemoryRequirements.
    7742  VkMemoryRequirements vkMemReq = {};
    7743  (*allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements)(allocator->m_hDevice, *pBuffer, &vkMemReq);
    7744 
    7745  // 3. Allocate memory using allocator.
    7746  res = allocator->AllocateMemory(
    7747  vkMemReq,
    7748  *pAllocationCreateInfo,
    7749  VMA_SUBALLOCATION_TYPE_BUFFER,
    7750  pAllocation);
    7751  if(res >= 0)
    7752  {
    7753  // 3. Bind buffer with memory.
    7754  res = (*allocator->GetVulkanFunctions().vkBindBufferMemory)(
    7755  allocator->m_hDevice,
    7756  *pBuffer,
    7757  (*pAllocation)->GetMemory(),
    7758  (*pAllocation)->GetOffset());
    7759  if(res >= 0)
    7760  {
    7761  // All steps succeeded.
    7762  if(pAllocationInfo != VMA_NULL)
    7763  {
    7764  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    7765  }
    7766  return VK_SUCCESS;
    7767  }
    7768  allocator->FreeMemory(*pAllocation);
    7769  *pAllocation = VK_NULL_HANDLE;
    7770  return res;
    7771  }
    7772  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    7773  *pBuffer = VK_NULL_HANDLE;
    7774  return res;
    7775  }
    7776  return res;
    7777 }
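/*
Illustrative sketch (not part of the library source): creating a buffer and
its memory in one call, and destroying both together:

    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = 65536;
    bufferInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(
        allocator, &bufferInfo, &allocCreateInfo, &buffer, &allocation, VMA_NULL);
    // ...
    vmaDestroyBuffer(allocator, buffer, allocation);
*/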
    7778 
    7779 void vmaDestroyBuffer(
    7780  VmaAllocator allocator,
    7781  VkBuffer buffer,
    7782  VmaAllocation allocation)
    7783 {
    7784  if(buffer != VK_NULL_HANDLE)
    7785  {
    7786  VMA_ASSERT(allocator);
    7787 
    7788  VMA_DEBUG_LOG("vmaDestroyBuffer");
    7789 
    7790  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7791 
    7792  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    7793 
    7794  allocator->FreeMemory(allocation);
    7795  }
    7796 }
    7797 
    7798 VkResult vmaCreateImage(
    7799  VmaAllocator allocator,
    7800  const VkImageCreateInfo* pImageCreateInfo,
    7801  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    7802  VkImage* pImage,
    7803  VmaAllocation* pAllocation,
    7804  VmaAllocationInfo* pAllocationInfo)
    7805 {
    7806  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
    7807 
    7808  VMA_DEBUG_LOG("vmaCreateImage");
    7809 
    7810  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7811 
    7812  *pImage = VK_NULL_HANDLE;
    7813  *pAllocation = VK_NULL_HANDLE;
    7814 
    7815  // 1. Create VkImage.
    7816  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
    7817  allocator->m_hDevice,
    7818  pImageCreateInfo,
    7819  allocator->GetAllocationCallbacks(),
    7820  pImage);
    7821  if(res >= 0)
    7822  {
    7823  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
    7824  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
    7825  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
    7826 
    7827  // 2. Allocate memory using allocator.
    7828  res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
    7829  if(res >= 0)
    7830  {
    7831  // 3. Bind image with memory.
    7832  res = (*allocator->GetVulkanFunctions().vkBindImageMemory)(
    7833  allocator->m_hDevice,
    7834  *pImage,
    7835  (*pAllocation)->GetMemory(),
    7836  (*pAllocation)->GetOffset());
    7837  if(res >= 0)
    7838  {
    7839  // All steps succeeded.
    7840  if(pAllocationInfo != VMA_NULL)
    7841  {
    7842  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    7843  }
    7844  return VK_SUCCESS;
    7845  }
    7846  allocator->FreeMemory(*pAllocation);
    7847  *pAllocation = VK_NULL_HANDLE;
    7848  return res;
    7849  }
    7850  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    7851  *pImage = VK_NULL_HANDLE;
    7852  return res;
    7853  }
    7854  return res;
    7855 }
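/*
Illustrative sketch (not part of the library source): creating a sampled 2D
image with OPTIMAL tiling in device-local memory:

    VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imageInfo.imageType = VK_IMAGE_TYPE_2D;
    imageInfo.extent = { 1024, 1024, 1 };
    imageInfo.mipLevels = 1;
    imageInfo.arrayLayers = 1;
    imageInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imageInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imageInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkImage image;
    VmaAllocation allocation;
    VkResult res = vmaCreateImage(
        allocator, &imageInfo, &allocCreateInfo, &image, &allocation, VMA_NULL);
    // ...
    vmaDestroyImage(allocator, image, allocation);
*/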
    7856 
    7857 void vmaDestroyImage(
    7858  VmaAllocator allocator,
    7859  VkImage image,
    7860  VmaAllocation allocation)
    7861 {
    7862  if(image != VK_NULL_HANDLE)
    7863  {
    7864  VMA_ASSERT(allocator);
    7865 
    7866  VMA_DEBUG_LOG("vmaDestroyImage");
    7867 
    7868  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7869 
    7870  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    7871 
    7872  allocator->FreeMemory(allocation);
    7873  }
    7874 }
    7875 
    7876 #endif // #ifdef VMA_IMPLEMENTATION
    1 //
    2 // Copyright (c) 2017 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    393 #include <vulkan/vulkan.h>
    394 
    396 
    400 VK_DEFINE_HANDLE(VmaAllocator)
    401 
    402 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    404  VmaAllocator allocator,
    405  uint32_t memoryType,
    406  VkDeviceMemory memory,
    407  VkDeviceSize size);
    409 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    410  VmaAllocator allocator,
    411  uint32_t memoryType,
    412  VkDeviceMemory memory,
    413  VkDeviceSize size);
    414 
    422 typedef struct VmaDeviceMemoryCallbacks {
 423  /// Optional, can be null.
 424  PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
 425  /// Optional, can be null.
 426  PFN_vmaFreeDeviceMemoryFunction pfnFree;
 427 } VmaDeviceMemoryCallbacks;
 428 
    430 typedef enum VmaAllocatorFlagBits {
    464 
    467 typedef VkFlags VmaAllocatorFlags;
    468 
    473 typedef struct VmaVulkanFunctions {
    474  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    475  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    476  PFN_vkAllocateMemory vkAllocateMemory;
    477  PFN_vkFreeMemory vkFreeMemory;
    478  PFN_vkMapMemory vkMapMemory;
    479  PFN_vkUnmapMemory vkUnmapMemory;
    480  PFN_vkBindBufferMemory vkBindBufferMemory;
    481  PFN_vkBindImageMemory vkBindImageMemory;
    482  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    483  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    484  PFN_vkCreateBuffer vkCreateBuffer;
    485  PFN_vkDestroyBuffer vkDestroyBuffer;
    486  PFN_vkCreateImage vkCreateImage;
    487  PFN_vkDestroyImage vkDestroyImage;
    488  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    489  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
 490 } VmaVulkanFunctions;
 491 
 493 typedef struct VmaAllocatorCreateInfo
 494 {
    496  VmaAllocatorFlags flags;
    498 
    499  VkPhysicalDevice physicalDevice;
    501 
    502  VkDevice device;
    504 
    507 
    510 
    511  const VkAllocationCallbacks* pAllocationCallbacks;
    513 
    528  uint32_t frameInUseCount;
    546  const VkDeviceSize* pHeapSizeLimit;
    560 
    562 VkResult vmaCreateAllocator(
    563  const VmaAllocatorCreateInfo* pCreateInfo,
    564  VmaAllocator* pAllocator);
    565 
 567 void vmaDestroyAllocator(
 568  VmaAllocator allocator);
    569 
 574 void vmaGetPhysicalDeviceProperties(
 575  VmaAllocator allocator,
    576  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    577 
 582 void vmaGetMemoryProperties(
 583  VmaAllocator allocator,
    584  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    585 
 592 void vmaGetMemoryTypeProperties(
 593  VmaAllocator allocator,
    594  uint32_t memoryTypeIndex,
    595  VkMemoryPropertyFlags* pFlags);
    596 
 605 void vmaSetCurrentFrameIndex(
 606  VmaAllocator allocator,
    607  uint32_t frameIndex);
    608 
    611 typedef struct VmaStatInfo
    612 {
    614  uint32_t blockCount;
    616  uint32_t allocationCount;
    620  VkDeviceSize usedBytes;
    622  VkDeviceSize unusedBytes;
    623  VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    624  VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
    625 } VmaStatInfo;
    626 
    628 typedef struct VmaStats
    629 {
    630  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    631  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
 632  VmaStatInfo total;
 633 } VmaStats;
    634 
    636 void vmaCalculateStats(
    637  VmaAllocator allocator,
    638  VmaStats* pStats);
    639 
    640 #define VMA_STATS_STRING_ENABLED 1
    641 
    642 #if VMA_STATS_STRING_ENABLED
    643 
    645 
 647 void vmaBuildStatsString(
 648  VmaAllocator allocator,
    649  char** ppStatsString,
    650  VkBool32 detailedMap);
    651 
    652 void vmaFreeStatsString(
    653  VmaAllocator allocator,
    654  char* pStatsString);
    655 
    656 #endif // #if VMA_STATS_STRING_ENABLED
    657 
    660 
    665 VK_DEFINE_HANDLE(VmaPool)
    666 
    667 typedef enum VmaMemoryUsage
    668 {
    674 
    677 
    680 
    684 
    699 
    744 
    747 typedef VkFlags VmaAllocationCreateFlags;
    748 
 749 typedef struct VmaAllocationCreateInfo
 750 {
    752  VmaAllocationCreateFlags flags;
    763  VkMemoryPropertyFlags requiredFlags;
    769  VkMemoryPropertyFlags preferredFlags;
    771  void* pUserData;
    776  VmaPool pool;
 777 } VmaAllocationCreateInfo;
 778 
    793 VkResult vmaFindMemoryTypeIndex(
    794  VmaAllocator allocator,
    795  uint32_t memoryTypeBits,
    796  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    797  uint32_t* pMemoryTypeIndex);
    798 
    801 
    806 typedef enum VmaPoolCreateFlagBits {
    835 
    838 typedef VkFlags VmaPoolCreateFlags;
    839 
    842 typedef struct VmaPoolCreateInfo {
    845  uint32_t memoryTypeIndex;
    848  VmaPoolCreateFlags flags;
    853  VkDeviceSize blockSize;
    880  uint32_t frameInUseCount;
 881 } VmaPoolCreateInfo;
 882 
    885 typedef struct VmaPoolStats {
    888  VkDeviceSize size;
    891  VkDeviceSize unusedSize;
    904  VkDeviceSize unusedRangeSizeMax;
    905 } VmaPoolStats;
    906 
    913 VkResult vmaCreatePool(
    914  VmaAllocator allocator,
    915  const VmaPoolCreateInfo* pCreateInfo,
    916  VmaPool* pPool);
    917 
    920 void vmaDestroyPool(
    921  VmaAllocator allocator,
    922  VmaPool pool);
    923 
    930 void vmaGetPoolStats(
    931  VmaAllocator allocator,
    932  VmaPool pool,
    933  VmaPoolStats* pPoolStats);
    934 
 941 void vmaMakePoolAllocationsLost(
 942  VmaAllocator allocator,
    943  VmaPool pool,
    944  size_t* pLostAllocationCount);
    945 
    946 VK_DEFINE_HANDLE(VmaAllocation)
    947 
    948 
    950 typedef struct VmaAllocationInfo {
    955  uint32_t memoryType;
    964  VkDeviceMemory deviceMemory;
    969  VkDeviceSize offset;
    974  VkDeviceSize size;
    980  void* pMappedData;
    985  void* pUserData;
 986 } VmaAllocationInfo;
 987 
    998 VkResult vmaAllocateMemory(
    999  VmaAllocator allocator,
    1000  const VkMemoryRequirements* pVkMemoryRequirements,
    1001  const VmaAllocationCreateInfo* pCreateInfo,
    1002  VmaAllocation* pAllocation,
    1003  VmaAllocationInfo* pAllocationInfo);
    1004 
 1011 VkResult vmaAllocateMemoryForBuffer(
 1012  VmaAllocator allocator,
    1013  VkBuffer buffer,
    1014  const VmaAllocationCreateInfo* pCreateInfo,
    1015  VmaAllocation* pAllocation,
    1016  VmaAllocationInfo* pAllocationInfo);
    1017 
    1019 VkResult vmaAllocateMemoryForImage(
    1020  VmaAllocator allocator,
    1021  VkImage image,
    1022  const VmaAllocationCreateInfo* pCreateInfo,
    1023  VmaAllocation* pAllocation,
    1024  VmaAllocationInfo* pAllocationInfo);
    1025 
    1027 void vmaFreeMemory(
    1028  VmaAllocator allocator,
    1029  VmaAllocation allocation);
    1030 
 1032 void vmaGetAllocationInfo(
 1033  VmaAllocator allocator,
    1034  VmaAllocation allocation,
    1035  VmaAllocationInfo* pAllocationInfo);
    1036 
 1038 void vmaSetAllocationUserData(
 1039  VmaAllocator allocator,
    1040  VmaAllocation allocation,
    1041  void* pUserData);
    1042 
 1053 void vmaCreateLostAllocation(
 1054  VmaAllocator allocator,
    1055  VmaAllocation* pAllocation);
    1056 
    1065 VkResult vmaMapMemory(
    1066  VmaAllocator allocator,
    1067  VmaAllocation allocation,
    1068  void** ppData);
    1069 
    1070 void vmaUnmapMemory(
    1071  VmaAllocator allocator,
    1072  VmaAllocation allocation);
    1073 
    1095 void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator);
    1096 
    1104 VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator);
    1105 
    1107 typedef struct VmaDefragmentationInfo {
    1112  VkDeviceSize maxBytesToMove;
 1117  uint32_t maxAllocationsToMove;
 1118 } VmaDefragmentationInfo;
 1119 
    1121 typedef struct VmaDefragmentationStats {
    1123  VkDeviceSize bytesMoved;
    1125  VkDeviceSize bytesFreed;
 1127  uint32_t allocationsMoved;
 1129  uint32_t deviceMemoryBlocksFreed;
 1130 } VmaDefragmentationStats;
 1131 
    1202 VkResult vmaDefragment(
    1203  VmaAllocator allocator,
    1204  VmaAllocation* pAllocations,
    1205  size_t allocationCount,
    1206  VkBool32* pAllocationsChanged,
    1207  const VmaDefragmentationInfo *pDefragmentationInfo,
    1208  VmaDefragmentationStats* pDefragmentationStats);
    1209 
    1212 
    1235 VkResult vmaCreateBuffer(
    1236  VmaAllocator allocator,
    1237  const VkBufferCreateInfo* pBufferCreateInfo,
    1238  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    1239  VkBuffer* pBuffer,
    1240  VmaAllocation* pAllocation,
    1241  VmaAllocationInfo* pAllocationInfo);
    1242 
    1251 void vmaDestroyBuffer(
    1252  VmaAllocator allocator,
    1253  VkBuffer buffer,
    1254  VmaAllocation allocation);
    1255 
    1257 VkResult vmaCreateImage(
    1258  VmaAllocator allocator,
    1259  const VkImageCreateInfo* pImageCreateInfo,
    1260  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    1261  VkImage* pImage,
    1262  VmaAllocation* pAllocation,
    1263  VmaAllocationInfo* pAllocationInfo);
    1264 
    1273 void vmaDestroyImage(
    1274  VmaAllocator allocator,
    1275  VkImage image,
    1276  VmaAllocation allocation);
    1277 
    1280 #ifdef __cplusplus
    1281 }
    1282 #endif
    1283 
    1284 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    1285 
    1286 // For Visual Studio IntelliSense.
    1287 #ifdef __INTELLISENSE__
    1288 #define VMA_IMPLEMENTATION
    1289 #endif
    1290 
    1291 #ifdef VMA_IMPLEMENTATION
    1292 #undef VMA_IMPLEMENTATION
    1293 
    1294 #include <cstdint>
 1295 #include <cstdlib>
 1296 #include <cstring>
 1297 #include <cstdio> // for snprintf, used when VMA_STATS_STRING_ENABLED
    1298 /*******************************************************************************
    1299 CONFIGURATION SECTION
    1300 
 1301 Define some of these macros before each #include of this header, or change them
 1302 here, if you need behavior other than the default for your environment.
    1303 */
    1304 
    1305 /*
    1306 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    1307 internally, like:
    1308 
    1309  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    1310 
 1311 Define to 0 if you are going to provide your own pointers to Vulkan functions via
    1312 VmaAllocatorCreateInfo::pVulkanFunctions.
    1313 */
    1314 #ifndef VMA_STATIC_VULKAN_FUNCTIONS
    1315 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    1316 #endif
    1317 
    1318 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    1319 //#define VMA_USE_STL_CONTAINERS 1
    1320 
 1321 /* Set this macro to 1 to make the library include and use STL containers:
 1322 std::pair, std::vector, std::list, std::unordered_map.
 1323 
 1324 Set it to 0 or leave it undefined to make the library use its own implementation
 1325 of the containers.
    1326 */
    1327 #if VMA_USE_STL_CONTAINERS
    1328  #define VMA_USE_STL_VECTOR 1
    1329  #define VMA_USE_STL_UNORDERED_MAP 1
    1330  #define VMA_USE_STL_LIST 1
    1331 #endif
    1332 
    1333 #if VMA_USE_STL_VECTOR
    1334  #include <vector>
    1335 #endif
    1336 
    1337 #if VMA_USE_STL_UNORDERED_MAP
    1338  #include <unordered_map>
    1339 #endif
    1340 
    1341 #if VMA_USE_STL_LIST
    1342  #include <list>
    1343 #endif
    1344 
    1345 /*
 1346 The following headers are used in this CONFIGURATION section only, so feel free
 1347 to remove them if they are not needed.
    1348 */
    1349 #include <cassert> // for assert
    1350 #include <algorithm> // for min, max
    1351 #include <mutex> // for std::mutex
    1352 #include <atomic> // for std::atomic
    1353 
    1354 #if !defined(_WIN32)
    1355  #include <malloc.h> // for aligned_alloc()
    1356 #endif
    1357 
    1358 // Normal assert to check for programmer's errors, especially in Debug configuration.
    1359 #ifndef VMA_ASSERT
    1360  #ifdef _DEBUG
    1361  #define VMA_ASSERT(expr) assert(expr)
    1362  #else
    1363  #define VMA_ASSERT(expr)
    1364  #endif
    1365 #endif
    1366 
 1367 // Assert that will be called very often, like inside data structures, e.g. operator[].
 1368 // Making it non-empty can make the program slow.
    1369 #ifndef VMA_HEAVY_ASSERT
    1370  #ifdef _DEBUG
    1371  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    1372  #else
    1373  #define VMA_HEAVY_ASSERT(expr)
    1374  #endif
    1375 #endif
    1376 
    1377 #ifndef VMA_NULL
    1378  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    1379  #define VMA_NULL nullptr
    1380 #endif
    1381 
    1382 #ifndef VMA_ALIGN_OF
    1383  #define VMA_ALIGN_OF(type) (__alignof(type))
    1384 #endif
    1385 
    1386 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    1387  #if defined(_WIN32)
    1388  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    1389  #else
    1390  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    1391  #endif
    1392 #endif
    1393 
    1394 #ifndef VMA_SYSTEM_FREE
    1395  #if defined(_WIN32)
    1396  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    1397  #else
    1398  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    1399  #endif
    1400 #endif
    1401 
    1402 #ifndef VMA_MIN
    1403  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    1404 #endif
    1405 
    1406 #ifndef VMA_MAX
    1407  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    1408 #endif
    1409 
    1410 #ifndef VMA_SWAP
    1411  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    1412 #endif
    1413 
    1414 #ifndef VMA_SORT
    1415  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    1416 #endif
    1417 
    1418 #ifndef VMA_DEBUG_LOG
    1419  #define VMA_DEBUG_LOG(format, ...)
    1420  /*
    1421  #define VMA_DEBUG_LOG(format, ...) do { \
    1422  printf(format, __VA_ARGS__); \
    1423  printf("\n"); \
    1424  } while(false)
    1425  */
    1426 #endif
    1427 
    1428 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    1429 #if VMA_STATS_STRING_ENABLED
    1430  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    1431  {
    1432  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    1433  }
    1434  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    1435  {
    1436  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    1437  }
    1438  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    1439  {
    1440  snprintf(outStr, strLen, "%p", ptr);
    1441  }
    1442 #endif
    1443 
    1444 #ifndef VMA_MUTEX
    1445  class VmaMutex
    1446  {
    1447  public:
    1448  VmaMutex() { }
    1449  ~VmaMutex() { }
    1450  void Lock() { m_Mutex.lock(); }
    1451  void Unlock() { m_Mutex.unlock(); }
    1452  private:
    1453  std::mutex m_Mutex;
    1454  };
    1455  #define VMA_MUTEX VmaMutex
    1456 #endif
    1457 
    1458 /*
    1459 If providing your own implementation, you need to implement a subset of std::atomic:
    1460 
    1461 - Constructor(uint32_t desired)
    1462 - uint32_t load() const
    1463 - void store(uint32_t desired)
    1464 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    1465 */
    1466 #ifndef VMA_ATOMIC_UINT32
    1467  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    1468 #endif
    1469 
    1470 #ifndef VMA_BEST_FIT
    1471 
    1483  #define VMA_BEST_FIT (1)
    1484 #endif
    1485 
    1486 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    1487 
    1491  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    1492 #endif
    1493 
    1494 #ifndef VMA_DEBUG_ALIGNMENT
    1495 
    1499  #define VMA_DEBUG_ALIGNMENT (1)
    1500 #endif
    1501 
    1502 #ifndef VMA_DEBUG_MARGIN
    1503 
    1507  #define VMA_DEBUG_MARGIN (0)
    1508 #endif
    1509 
    1510 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    1511 
    1515  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    1516 #endif
    1517 
    1518 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    1519 
    1523  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    1524 #endif
    1525 
    1526 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    1527  #define VMA_SMALL_HEAP_MAX_SIZE (512 * 1024 * 1024)
    1529 #endif
    1530 
    1531 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    1532  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256 * 1024 * 1024)
    1534 #endif
    1535 
    1536 #ifndef VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE
    1537  #define VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE (64 * 1024 * 1024)
    1539 #endif
    1540 
    1541 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    1542 
    1543 /*******************************************************************************
    1544 END OF CONFIGURATION
    1545 */
    1546 
    1547 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    1548  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    1549 
    1550 // Returns number of bits set to 1 in (v).
    1551 static inline uint32_t CountBitsSet(uint32_t v)
    1552 {
    1553  uint32_t c = v - ((v >> 1) & 0x55555555);
    1554  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    1555  c = ((c >> 4) + c) & 0x0F0F0F0F;
    1556  c = ((c >> 8) + c) & 0x00FF00FF;
    1557  c = ((c >> 16) + c) & 0x0000FFFF;
    1558  return c;
    1559 }
    1560 
 1561 // Aligns given value up to the nearest multiple of align. For example: VmaAlignUp(11, 8) = 16.
    1562 // Use types like uint32_t, uint64_t as T.
    1563 template <typename T>
    1564 static inline T VmaAlignUp(T val, T align)
    1565 {
    1566  return (val + align - 1) / align * align;
    1567 }
    1568 
 1569 // Integer division with mathematical rounding to the nearest integer.
    1570 template <typename T>
    1571 inline T VmaRoundDiv(T x, T y)
    1572 {
    1573  return (x + (y / (T)2)) / y;
    1574 }
    1575 
    1576 #ifndef VMA_SORT
    1577 
    1578 template<typename Iterator, typename Compare>
    1579 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
    1580 {
    1581  Iterator centerValue = end; --centerValue;
    1582  Iterator insertIndex = beg;
    1583  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    1584  {
    1585  if(cmp(*memTypeIndex, *centerValue))
    1586  {
    1587  if(insertIndex != memTypeIndex)
    1588  {
    1589  VMA_SWAP(*memTypeIndex, *insertIndex);
    1590  }
    1591  ++insertIndex;
    1592  }
    1593  }
    1594  if(insertIndex != centerValue)
    1595  {
    1596  VMA_SWAP(*insertIndex, *centerValue);
    1597  }
    1598  return insertIndex;
    1599 }
    1600 
    1601 template<typename Iterator, typename Compare>
    1602 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
    1603 {
    1604  if(beg < end)
    1605  {
    1606  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
    1607  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
    1608  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    1609  }
    1610 }
    1611 
    1612 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    1613 
    1614 #endif // #ifndef VMA_SORT
    1615 
    1616 /*
    1617 Returns true if two memory blocks occupy overlapping pages.
 1618 ResourceA must be at a lower memory offset than ResourceB.
    1619 
    1620 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
    1621 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
    1622 */
    1623 static inline bool VmaBlocksOnSamePage(
    1624  VkDeviceSize resourceAOffset,
    1625  VkDeviceSize resourceASize,
    1626  VkDeviceSize resourceBOffset,
    1627  VkDeviceSize pageSize)
    1628 {
    1629  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    1630  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    1631  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    1632  VkDeviceSize resourceBStart = resourceBOffset;
    1633  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    1634  return resourceAEndPage == resourceBStartPage;
    1635 }
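/*
Worked example (illustrative): with pageSize = 4096, a resource at offset 4000
with size 100 ends at byte 4099, which lies on page 1; a second resource
starting at offset 4100 also starts on page 1, so the function returns true
and bufferImageGranularity may have to be respected between the two.
*/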
    1636 
    1637 enum VmaSuballocationType
    1638 {
    1639  VMA_SUBALLOCATION_TYPE_FREE = 0,
    1640  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    1641  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    1642  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    1643  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    1644  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    1645  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
    1646 };
    1647 
    1648 /*
    1649 Returns true if given suballocation types could conflict and must respect
 1650 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is a buffer
 1651 or a linear image and the other is an optimal image. If the type is unknown,
 1652 behave conservatively.
    1653 */
    1654 static inline bool VmaIsBufferImageGranularityConflict(
    1655  VmaSuballocationType suballocType1,
    1656  VmaSuballocationType suballocType2)
    1657 {
    1658  if(suballocType1 > suballocType2)
    1659  {
    1660  VMA_SWAP(suballocType1, suballocType2);
    1661  }
    1662 
    1663  switch(suballocType1)
    1664  {
    1665  case VMA_SUBALLOCATION_TYPE_FREE:
    1666  return false;
    1667  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
    1668  return true;
    1669  case VMA_SUBALLOCATION_TYPE_BUFFER:
    1670  return
    1671  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    1672  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    1673  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
    1674  return
    1675  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    1676  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
    1677  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    1678  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
    1679  return
    1680  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    1681  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
    1682  return false;
    1683  default:
    1684  VMA_ASSERT(0);
    1685  return true;
    1686  }
    1687 }
    1688 
    1689 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
    1690 struct VmaMutexLock
    1691 {
    1692 public:
    1693  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
    1694  m_pMutex(useMutex ? &mutex : VMA_NULL)
    1695  {
    1696  if(m_pMutex)
    1697  {
    1698  m_pMutex->Lock();
    1699  }
    1700  }
    1701 
    1702  ~VmaMutexLock()
    1703  {
    1704  if(m_pMutex)
    1705  {
    1706  m_pMutex->Unlock();
    1707  }
    1708  }
    1709 
    1710 private:
    1711  VMA_MUTEX* m_pMutex;
    1712 };
    1713 
    1714 #if VMA_DEBUG_GLOBAL_MUTEX
    1715  static VMA_MUTEX gDebugGlobalMutex;
    1716  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    1717 #else
    1718  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    1719 #endif
    1720 
    1721 // Minimum size of a free suballocation to register it in the free suballocation collection.
    1722 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    1723 
    1724 /*
 1725 Performs binary search and returns an iterator to the first element that is
 1726 greater than or equal to (key), according to comparison (cmp).
 1727 
 1728 Cmp should return true if its first argument is less than its second argument.
 1729 
 1730 The returned iterator points to the found element if it is present in the
 1731 collection, or to the place where a new element with value (key) should be inserted.
    1732 */
    1733 template <typename IterT, typename KeyT, typename CmpT>
    1734 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
    1735 {
    1736  size_t down = 0, up = (end - beg);
    1737  while(down < up)
    1738  {
    1739  const size_t mid = (down + up) / 2;
    1740  if(cmp(*(beg+mid), key))
    1741  {
    1742  down = mid + 1;
    1743  }
    1744  else
    1745  {
    1746  up = mid;
    1747  }
    1748  }
    1749  return beg + down;
    1750 }
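
         // Illustrative example (not part of the library):
         //
         //   VkDeviceSize arr[] = { 16, 64, 256, 1024 }; // sorted ascending
         //   VkDeviceSize* it = VmaBinaryFindFirstNotLess(
         //       arr, arr + 4, (VkDeviceSize)100,
         //       [](VkDeviceSize a, VkDeviceSize b) { return a < b; });
         //   // it now points to 256 - the first element not less than 100.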
    1751 
    1752 ////////////////////////////////////////////////////////////////////////////////
    1753 // Memory allocation
    1754 
    1755 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
    1756 {
    1757  if((pAllocationCallbacks != VMA_NULL) &&
    1758  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    1759  {
    1760  return (*pAllocationCallbacks->pfnAllocation)(
    1761  pAllocationCallbacks->pUserData,
    1762  size,
    1763  alignment,
    1764  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    1765  }
    1766  else
    1767  {
    1768  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    1769  }
    1770 }
    1771 
    1772 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
    1773 {
    1774  if((pAllocationCallbacks != VMA_NULL) &&
    1775  (pAllocationCallbacks->pfnFree != VMA_NULL))
    1776  {
    1777  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    1778  }
    1779  else
    1780  {
    1781  VMA_SYSTEM_FREE(ptr);
    1782  }
    1783 }
    1784 
    1785 template<typename T>
    1786 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
    1787 {
    1788  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
    1789 }
    1790 
    1791 template<typename T>
    1792 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
    1793 {
    1794  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
    1795 }
    1796 
    1797 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
    1798 
    1799 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    1800 
    1801 template<typename T>
    1802 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
    1803 {
    1804  ptr->~T();
    1805  VmaFree(pAllocationCallbacks, ptr);
    1806 }
    1807 
    1808 template<typename T>
    1809 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    1810 {
    1811  if(ptr != VMA_NULL)
    1812  {
    1813  for(size_t i = count; i--; )
    1814  {
    1815  ptr[i].~T();
    1816  }
    1817  VmaFree(pAllocationCallbacks, ptr);
    1818  }
    1819 }
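
         // Usage sketch for the helpers above (MyType and constructorArg are
         // hypothetical): allocation and deallocation must be paired so that the
         // destructor runs before the memory is returned:
         //
         //   MyType* p = vma_new(pAllocationCallbacks, MyType)(constructorArg);
         //   vma_delete(pAllocationCallbacks, p); // calls ~MyType(), then VmaFree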
    1820 
    1821 // STL-compatible allocator.
    1822 template<typename T>
    1823 class VmaStlAllocator
    1824 {
    1825 public:
    1826  const VkAllocationCallbacks* const m_pCallbacks;
    1827  typedef T value_type;
    1828 
    1829  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    1830  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
    1831 
    1832  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    1833  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
    1834 
    1835  template<typename U>
    1836  bool operator==(const VmaStlAllocator<U>& rhs) const
    1837  {
    1838  return m_pCallbacks == rhs.m_pCallbacks;
    1839  }
    1840  template<typename U>
    1841  bool operator!=(const VmaStlAllocator<U>& rhs) const
    1842  {
    1843  return m_pCallbacks != rhs.m_pCallbacks;
    1844  }
    1845 
    1846  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
    1847 };
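
         // Usage sketch (illustrative): this allocator plugs into both the STL and
         // the custom containers defined below, e.g.:
         //
         //   VmaVector< int, VmaStlAllocator<int> > v(
         //       VmaStlAllocator<int>(pAllocationCallbacks));
         //   v.push_back(42);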
    1848 
    1849 #if VMA_USE_STL_VECTOR
    1850 
    1851 #define VmaVector std::vector
    1852 
    1853 template<typename T, typename allocatorT>
    1854 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
    1855 {
    1856  vec.insert(vec.begin() + index, item);
    1857 }
    1858 
    1859 template<typename T, typename allocatorT>
    1860 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
    1861 {
    1862  vec.erase(vec.begin() + index);
    1863 }
    1864 
    1865 #else // #if VMA_USE_STL_VECTOR
    1866 
    1867 /* Class with interface compatible with a subset of std::vector.
    1868 T must be POD because constructors and destructors are not called and memcpy is
    1869 used for these objects. */
    1870 template<typename T, typename AllocatorT>
    1871 class VmaVector
    1872 {
    1873 public:
    1874  typedef T value_type;
    1875 
    1876  VmaVector(const AllocatorT& allocator) :
    1877  m_Allocator(allocator),
    1878  m_pArray(VMA_NULL),
    1879  m_Count(0),
    1880  m_Capacity(0)
    1881  {
    1882  }
    1883 
    1884  VmaVector(size_t count, const AllocatorT& allocator) :
    1885  m_Allocator(allocator),
    1886  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    1887  m_Count(count),
    1888  m_Capacity(count)
    1889  {
    1890  }
    1891 
    1892  VmaVector(const VmaVector<T, AllocatorT>& src) :
    1893  m_Allocator(src.m_Allocator),
    1894  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    1895  m_Count(src.m_Count),
    1896  m_Capacity(src.m_Count)
    1897  {
    1898  if(m_Count != 0)
    1899  {
    1900  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    1901  }
    1902  }
    1903 
    1904  ~VmaVector()
    1905  {
    1906  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    1907  }
    1908 
    1909  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    1910  {
    1911  if(&rhs != this)
    1912  {
    1913  resize(rhs.m_Count);
    1914  if(m_Count != 0)
    1915  {
    1916  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    1917  }
    1918  }
    1919  return *this;
    1920  }
    1921 
    1922  bool empty() const { return m_Count == 0; }
    1923  size_t size() const { return m_Count; }
    1924  T* data() { return m_pArray; }
    1925  const T* data() const { return m_pArray; }
    1926 
    1927  T& operator[](size_t index)
    1928  {
    1929  VMA_HEAVY_ASSERT(index < m_Count);
    1930  return m_pArray[index];
    1931  }
    1932  const T& operator[](size_t index) const
    1933  {
    1934  VMA_HEAVY_ASSERT(index < m_Count);
    1935  return m_pArray[index];
    1936  }
    1937 
    1938  T& front()
    1939  {
    1940  VMA_HEAVY_ASSERT(m_Count > 0);
    1941  return m_pArray[0];
    1942  }
    1943  const T& front() const
    1944  {
    1945  VMA_HEAVY_ASSERT(m_Count > 0);
    1946  return m_pArray[0];
    1947  }
    1948  T& back()
    1949  {
    1950  VMA_HEAVY_ASSERT(m_Count > 0);
    1951  return m_pArray[m_Count - 1];
    1952  }
    1953  const T& back() const
    1954  {
    1955  VMA_HEAVY_ASSERT(m_Count > 0);
    1956  return m_pArray[m_Count - 1];
    1957  }
    1958 
    1959  void reserve(size_t newCapacity, bool freeMemory = false)
    1960  {
    1961  newCapacity = VMA_MAX(newCapacity, m_Count);
    1962 
    1963  if((newCapacity < m_Capacity) && !freeMemory)
    1964  {
    1965  newCapacity = m_Capacity;
    1966  }
    1967 
    1968  if(newCapacity != m_Capacity)
    1969  {
    1970  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    1971  if(m_Count != 0)
    1972  {
    1973  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    1974  }
    1975  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    1976  m_Capacity = newCapacity;
    1977  m_pArray = newArray;
    1978  }
    1979  }
    1980 
    1981  void resize(size_t newCount, bool freeMemory = false)
    1982  {
    1983  size_t newCapacity = m_Capacity;
    1984  if(newCount > m_Capacity)
    1985  {
    1986  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    1987  }
    1988  else if(freeMemory)
    1989  {
    1990  newCapacity = newCount;
    1991  }
    1992 
    1993  if(newCapacity != m_Capacity)
    1994  {
    1995  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    1996  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    1997  if(elementsToCopy != 0)
    1998  {
    1999  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    2000  }
    2001  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    2002  m_Capacity = newCapacity;
    2003  m_pArray = newArray;
    2004  }
    2005 
    2006  m_Count = newCount;
    2007  }
    2008 
    2009  void clear(bool freeMemory = false)
    2010  {
    2011  resize(0, freeMemory);
    2012  }
    2013 
    2014  void insert(size_t index, const T& src)
    2015  {
    2016  VMA_HEAVY_ASSERT(index <= m_Count);
    2017  const size_t oldCount = size();
    2018  resize(oldCount + 1);
    2019  if(index < oldCount)
    2020  {
    2021  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    2022  }
    2023  m_pArray[index] = src;
    2024  }
    2025 
    2026  void remove(size_t index)
    2027  {
    2028  VMA_HEAVY_ASSERT(index < m_Count);
    2029  const size_t oldCount = size();
    2030  if(index < oldCount - 1)
    2031  {
    2032  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    2033  }
    2034  resize(oldCount - 1);
    2035  }
    2036 
    2037  void push_back(const T& src)
    2038  {
    2039  const size_t newIndex = size();
    2040  resize(newIndex + 1);
    2041  m_pArray[newIndex] = src;
    2042  }
    2043 
    2044  void pop_back()
    2045  {
    2046  VMA_HEAVY_ASSERT(m_Count > 0);
    2047  resize(size() - 1);
    2048  }
    2049 
    2050  void push_front(const T& src)
    2051  {
    2052  insert(0, src);
    2053  }
    2054 
    2055  void pop_front()
    2056  {
    2057  VMA_HEAVY_ASSERT(m_Count > 0);
    2058  remove(0);
    2059  }
    2060 
    2061  typedef T* iterator;
    2062 
    2063  iterator begin() { return m_pArray; }
    2064  iterator end() { return m_pArray + m_Count; }
    2065 
    2066 private:
    2067  AllocatorT m_Allocator;
    2068  T* m_pArray;
    2069  size_t m_Count;
    2070  size_t m_Capacity;
    2071 };
    2072 
    2073 template<typename T, typename allocatorT>
    2074 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
    2075 {
    2076  vec.insert(index, item);
    2077 }
    2078 
    2079 template<typename T, typename allocatorT>
    2080 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
    2081 {
    2082  vec.remove(index);
    2083 }
    2084 
    2085 #endif // #if VMA_USE_STL_VECTOR
    2086 
    2087 template<typename CmpLess, typename VectorT>
    2088 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
    2089 {
    2090  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    2091  vector.data(),
    2092  vector.data() + vector.size(),
    2093  value,
    2094  CmpLess()) - vector.data();
    2095  VmaVectorInsert(vector, indexToInsert, value);
    2096  return indexToInsert;
    2097 }
    2098 
    2099 template<typename CmpLess, typename VectorT>
    2100 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
    2101 {
    2102  CmpLess comparator;
    2103  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
    2104  vector.begin(),
    2105  vector.end(),
    2106  value,
    2107  comparator);
    2108  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    2109  {
    2110  size_t indexToRemove = it - vector.begin();
    2111  VmaVectorRemove(vector, indexToRemove);
    2112  return true;
    2113  }
    2114  return false;
    2115 }
    2116 
    2117 template<typename CmpLess, typename VectorT>
    2118 size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
    2119 {
    2120  CmpLess comparator;
    2121  const typename VectorT::value_type* it = VmaBinaryFindFirstNotLess(
    2122  vector.data(),
    2123  vector.data() + vector.size(),
    2124  value,
    2125  comparator);
    2126  if((it != vector.data() + vector.size()) && !comparator(*it, value) && !comparator(value, *it))
    2127  {
    2128  return it - vector.data();
    2129  }
    2130  else
    2131  {
    2132  return vector.size();
    2133  }
    2134 }
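
         // Illustrative example (not part of the library): keeping a vector of
         // VkDeviceSize sorted with a plain less-than comparator:
         //
         //   struct SizeLess
         //   {
         //       bool operator()(VkDeviceSize lhs, VkDeviceSize rhs) const
         //       { return lhs < rhs; }
         //   };
         //   VmaVector< VkDeviceSize, VmaStlAllocator<VkDeviceSize> > vec(
         //       VmaStlAllocator<VkDeviceSize>(pAllocationCallbacks));
         //   VmaVectorInsertSorted<SizeLess>(vec, (VkDeviceSize)256);
         //   bool removed = VmaVectorRemoveSorted<SizeLess>(vec, (VkDeviceSize)256);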
    2135 
    2136 ////////////////////////////////////////////////////////////////////////////////
    2137 // class VmaPoolAllocator
    2138 
    2139 /*
    2140 Allocator for objects of type T using a list of arrays (pools) to speed up
    2141 allocation. The number of elements that can be allocated is not bounded,
    2142 because the allocator can create multiple blocks.
    2143 */
    2144 template<typename T>
    2145 class VmaPoolAllocator
    2146 {
    2147 public:
    2148  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    2149  ~VmaPoolAllocator();
    2150  void Clear();
    2151  T* Alloc();
    2152  void Free(T* ptr);
    2153 
    2154 private:
    2155  union Item
    2156  {
    2157  uint32_t NextFreeIndex;
    2158  T Value;
    2159  };
    2160 
    2161  struct ItemBlock
    2162  {
    2163  Item* pItems;
    2164  uint32_t FirstFreeIndex;
    2165  };
    2166 
    2167  const VkAllocationCallbacks* m_pAllocationCallbacks;
    2168  size_t m_ItemsPerBlock;
    2169  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
    2170 
    2171  ItemBlock& CreateNewBlock();
    2172 };
    2173 
    2174 template<typename T>
    2175 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    2176  m_pAllocationCallbacks(pAllocationCallbacks),
    2177  m_ItemsPerBlock(itemsPerBlock),
    2178  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
    2179 {
    2180  VMA_ASSERT(itemsPerBlock > 0);
    2181 }
    2182 
    2183 template<typename T>
    2184 VmaPoolAllocator<T>::~VmaPoolAllocator()
    2185 {
    2186  Clear();
    2187 }
    2188 
    2189 template<typename T>
    2190 void VmaPoolAllocator<T>::Clear()
    2191 {
    2192  for(size_t i = m_ItemBlocks.size(); i--; )
    2193  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    2194  m_ItemBlocks.clear();
    2195 }
    2196 
    2197 template<typename T>
    2198 T* VmaPoolAllocator<T>::Alloc()
    2199 {
    2200  for(size_t i = m_ItemBlocks.size(); i--; )
    2201  {
    2202  ItemBlock& block = m_ItemBlocks[i];
    2203  // This block has some free items: Use first one.
    2204  if(block.FirstFreeIndex != UINT32_MAX)
    2205  {
    2206  Item* const pItem = &block.pItems[block.FirstFreeIndex];
    2207  block.FirstFreeIndex = pItem->NextFreeIndex;
    2208  return &pItem->Value;
    2209  }
    2210  }
    2211 
    2212  // No block has free item: Create new one and use it.
    2213  ItemBlock& newBlock = CreateNewBlock();
    2214  Item* const pItem = &newBlock.pItems[0];
    2215  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    2216  return &pItem->Value;
    2217 }
    2218 
    2219 template<typename T>
    2220 void VmaPoolAllocator<T>::Free(T* ptr)
    2221 {
    2222  // Search all memory blocks to find ptr.
    2223  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    2224  {
    2225  ItemBlock& block = m_ItemBlocks[i];
    2226 
    2227  // Casting to union.
    2228  Item* pItemPtr;
    2229  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
    2230 
    2231  // Check if pItemPtr is in address range of this block.
    2232  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
    2233  {
    2234  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
    2235  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
    2236  block.FirstFreeIndex = index;
    2237  return;
    2238  }
    2239  }
    2240  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
    2241 }
    2242 
    2243 template<typename T>
    2244 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
    2245 {
    2246  ItemBlock newBlock = {
    2247  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
    2248 
    2249  m_ItemBlocks.push_back(newBlock);
    2250 
    2251  // Setup singly-linked list of all free items in this block.
    2252  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
    2253  newBlock.pItems[i].NextFreeIndex = i + 1;
    2254  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    2255  return m_ItemBlocks.back();
    2256 }
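
         // Illustrative example (MyItem is hypothetical): items are recycled through
         // the per-block free list, so an Alloc/Free/Alloc sequence reuses the same
         // slot without touching the system heap:
         //
         //   VmaPoolAllocator<MyItem> pool(pAllocationCallbacks, 128);
         //   MyItem* a = pool.Alloc(); // creates the first block of 128 items
         //   pool.Free(a);             // returns the slot to the block's free list
         //   MyItem* b = pool.Alloc(); // b == a - the freed slot is reused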
    2257 
    2258 ////////////////////////////////////////////////////////////////////////////////
    2259 // class VmaRawList, VmaList
    2260 
    2261 #if VMA_USE_STL_LIST
    2262 
    2263 #define VmaList std::list
    2264 
    2265 #else // #if VMA_USE_STL_LIST
    2266 
    2267 template<typename T>
    2268 struct VmaListItem
    2269 {
    2270  VmaListItem* pPrev;
    2271  VmaListItem* pNext;
    2272  T Value;
    2273 };
    2274 
    2275 // Doubly linked list.
    2276 template<typename T>
    2277 class VmaRawList
    2278 {
    2279 public:
    2280  typedef VmaListItem<T> ItemType;
    2281 
    2282  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    2283  ~VmaRawList();
    2284  void Clear();
    2285 
    2286  size_t GetCount() const { return m_Count; }
    2287  bool IsEmpty() const { return m_Count == 0; }
    2288 
    2289  ItemType* Front() { return m_pFront; }
    2290  const ItemType* Front() const { return m_pFront; }
    2291  ItemType* Back() { return m_pBack; }
    2292  const ItemType* Back() const { return m_pBack; }
    2293 
    2294  ItemType* PushBack();
    2295  ItemType* PushFront();
    2296  ItemType* PushBack(const T& value);
    2297  ItemType* PushFront(const T& value);
    2298  void PopBack();
    2299  void PopFront();
    2300 
    2301  // Item can be null - it means PushBack.
    2302  ItemType* InsertBefore(ItemType* pItem);
    2303  // Item can be null - it means PushFront.
    2304  ItemType* InsertAfter(ItemType* pItem);
    2305 
    2306  ItemType* InsertBefore(ItemType* pItem, const T& value);
    2307  ItemType* InsertAfter(ItemType* pItem, const T& value);
    2308 
    2309  void Remove(ItemType* pItem);
    2310 
    2311 private:
    2312  const VkAllocationCallbacks* const m_pAllocationCallbacks;
    2313  VmaPoolAllocator<ItemType> m_ItemAllocator;
    2314  ItemType* m_pFront;
    2315  ItemType* m_pBack;
    2316  size_t m_Count;
    2317 
    2318  // Declared but not defined, to block copy constructor and assignment operator.
    2319  VmaRawList(const VmaRawList<T>& src);
    2320  VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
    2321 };
    2322 
    2323 template<typename T>
    2324 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    2325  m_pAllocationCallbacks(pAllocationCallbacks),
    2326  m_ItemAllocator(pAllocationCallbacks, 128),
    2327  m_pFront(VMA_NULL),
    2328  m_pBack(VMA_NULL),
    2329  m_Count(0)
    2330 {
    2331 }
    2332 
    2333 template<typename T>
    2334 VmaRawList<T>::~VmaRawList()
    2335 {
    2336  // Intentionally not calling Clear: returning every item to m_ItemAllocator as
    2337  // free would be wasted computation, since the allocator frees all of its blocks on destruction anyway.
    2338 }
    2339 
    2340 template<typename T>
    2341 void VmaRawList<T>::Clear()
    2342 {
    2343  if(IsEmpty() == false)
    2344  {
    2345  ItemType* pItem = m_pBack;
    2346  while(pItem != VMA_NULL)
    2347  {
    2348  ItemType* const pPrevItem = pItem->pPrev;
    2349  m_ItemAllocator.Free(pItem);
    2350  pItem = pPrevItem;
    2351  }
    2352  m_pFront = VMA_NULL;
    2353  m_pBack = VMA_NULL;
    2354  m_Count = 0;
    2355  }
    2356 }
    2357 
    2358 template<typename T>
    2359 VmaListItem<T>* VmaRawList<T>::PushBack()
    2360 {
    2361  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    2362  pNewItem->pNext = VMA_NULL;
    2363  if(IsEmpty())
    2364  {
    2365  pNewItem->pPrev = VMA_NULL;
    2366  m_pFront = pNewItem;
    2367  m_pBack = pNewItem;
    2368  m_Count = 1;
    2369  }
    2370  else
    2371  {
    2372  pNewItem->pPrev = m_pBack;
    2373  m_pBack->pNext = pNewItem;
    2374  m_pBack = pNewItem;
    2375  ++m_Count;
    2376  }
    2377  return pNewItem;
    2378 }
    2379 
    2380 template<typename T>
    2381 VmaListItem<T>* VmaRawList<T>::PushFront()
    2382 {
    2383  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    2384  pNewItem->pPrev = VMA_NULL;
    2385  if(IsEmpty())
    2386  {
    2387  pNewItem->pNext = VMA_NULL;
    2388  m_pFront = pNewItem;
    2389  m_pBack = pNewItem;
    2390  m_Count = 1;
    2391  }
    2392  else
    2393  {
    2394  pNewItem->pNext = m_pFront;
    2395  m_pFront->pPrev = pNewItem;
    2396  m_pFront = pNewItem;
    2397  ++m_Count;
    2398  }
    2399  return pNewItem;
    2400 }
    2401 
    2402 template<typename T>
    2403 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
    2404 {
    2405  ItemType* const pNewItem = PushBack();
    2406  pNewItem->Value = value;
    2407  return pNewItem;
    2408 }
    2409 
    2410 template<typename T>
    2411 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
    2412 {
    2413  ItemType* const pNewItem = PushFront();
    2414  pNewItem->Value = value;
    2415  return pNewItem;
    2416 }
    2417 
    2418 template<typename T>
    2419 void VmaRawList<T>::PopBack()
    2420 {
    2421  VMA_HEAVY_ASSERT(m_Count > 0);
    2422  ItemType* const pBackItem = m_pBack;
    2423  ItemType* const pPrevItem = pBackItem->pPrev;
    2424  if(pPrevItem != VMA_NULL)
    2425  {
    2426  pPrevItem->pNext = VMA_NULL;
    2427  }
    2428  m_pBack = pPrevItem;
    2429  m_ItemAllocator.Free(pBackItem);
    2430  --m_Count;
    2431 }
    2432 
    2433 template<typename T>
    2434 void VmaRawList<T>::PopFront()
    2435 {
    2436  VMA_HEAVY_ASSERT(m_Count > 0);
    2437  ItemType* const pFrontItem = m_pFront;
    2438  ItemType* const pNextItem = pFrontItem->pNext;
    2439  if(pNextItem != VMA_NULL)
    2440  {
    2441  pNextItem->pPrev = VMA_NULL;
    2442  }
    2443  m_pFront = pNextItem;
    2444  m_ItemAllocator.Free(pFrontItem);
    2445  --m_Count;
    2446 }
    2447 
    2448 template<typename T>
    2449 void VmaRawList<T>::Remove(ItemType* pItem)
    2450 {
    2451  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    2452  VMA_HEAVY_ASSERT(m_Count > 0);
    2453 
    2454  if(pItem->pPrev != VMA_NULL)
    2455  {
    2456  pItem->pPrev->pNext = pItem->pNext;
    2457  }
    2458  else
    2459  {
    2460  VMA_HEAVY_ASSERT(m_pFront == pItem);
    2461  m_pFront = pItem->pNext;
    2462  }
    2463 
    2464  if(pItem->pNext != VMA_NULL)
    2465  {
    2466  pItem->pNext->pPrev = pItem->pPrev;
    2467  }
    2468  else
    2469  {
    2470  VMA_HEAVY_ASSERT(m_pBack == pItem);
    2471  m_pBack = pItem->pPrev;
    2472  }
    2473 
    2474  m_ItemAllocator.Free(pItem);
    2475  --m_Count;
    2476 }
    2477 
    2478 template<typename T>
    2479 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
    2480 {
    2481  if(pItem != VMA_NULL)
    2482  {
    2483  ItemType* const prevItem = pItem->pPrev;
    2484  ItemType* const newItem = m_ItemAllocator.Alloc();
    2485  newItem->pPrev = prevItem;
    2486  newItem->pNext = pItem;
    2487  pItem->pPrev = newItem;
    2488  if(prevItem != VMA_NULL)
    2489  {
    2490  prevItem->pNext = newItem;
    2491  }
    2492  else
    2493  {
    2494  VMA_HEAVY_ASSERT(m_pFront == pItem);
    2495  m_pFront = newItem;
    2496  }
    2497  ++m_Count;
    2498  return newItem;
    2499  }
    2500  else
    2501  return PushBack();
    2502 }
    2503 
    2504 template<typename T>
    2505 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
    2506 {
    2507  if(pItem != VMA_NULL)
    2508  {
    2509  ItemType* const nextItem = pItem->pNext;
    2510  ItemType* const newItem = m_ItemAllocator.Alloc();
    2511  newItem->pNext = nextItem;
    2512  newItem->pPrev = pItem;
    2513  pItem->pNext = newItem;
    2514  if(nextItem != VMA_NULL)
    2515  {
    2516  nextItem->pPrev = newItem;
    2517  }
    2518  else
    2519  {
    2520  VMA_HEAVY_ASSERT(m_pBack == pItem);
    2521  m_pBack = newItem;
    2522  }
    2523  ++m_Count;
    2524  return newItem;
    2525  }
    2526  else
    2527  return PushFront();
    2528 }
    2529 
    2530 template<typename T>
    2531 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
    2532 {
    2533  ItemType* const newItem = InsertBefore(pItem);
    2534  newItem->Value = value;
    2535  return newItem;
    2536 }
    2537 
    2538 template<typename T>
    2539 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
    2540 {
    2541  ItemType* const newItem = InsertAfter(pItem);
    2542  newItem->Value = value;
    2543  return newItem;
    2544 }
    2545 
    2546 template<typename T, typename AllocatorT>
    2547 class VmaList
    2548 {
    2549 public:
    2550  class iterator
    2551  {
    2552  public:
    2553  iterator() :
    2554  m_pList(VMA_NULL),
    2555  m_pItem(VMA_NULL)
    2556  {
    2557  }
    2558 
    2559  T& operator*() const
    2560  {
    2561  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2562  return m_pItem->Value;
    2563  }
    2564  T* operator->() const
    2565  {
    2566  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2567  return &m_pItem->Value;
    2568  }
    2569 
    2570  iterator& operator++()
    2571  {
    2572  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2573  m_pItem = m_pItem->pNext;
    2574  return *this;
    2575  }
    2576  iterator& operator--()
    2577  {
    2578  if(m_pItem != VMA_NULL)
    2579  {
    2580  m_pItem = m_pItem->pPrev;
    2581  }
    2582  else
    2583  {
    2584  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
    2585  m_pItem = m_pList->Back();
    2586  }
    2587  return *this;
    2588  }
    2589 
    2590  iterator operator++(int)
    2591  {
    2592  iterator result = *this;
    2593  ++*this;
    2594  return result;
    2595  }
    2596  iterator operator--(int)
    2597  {
    2598  iterator result = *this;
    2599  --*this;
    2600  return result;
    2601  }
    2602 
    2603  bool operator==(const iterator& rhs) const
    2604  {
    2605  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    2606  return m_pItem == rhs.m_pItem;
    2607  }
    2608  bool operator!=(const iterator& rhs) const
    2609  {
    2610  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    2611  return m_pItem != rhs.m_pItem;
    2612  }
    2613 
    2614  private:
    2615  VmaRawList<T>* m_pList;
    2616  VmaListItem<T>* m_pItem;
    2617 
    2618  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
    2619  m_pList(pList),
    2620  m_pItem(pItem)
    2621  {
    2622  }
    2623 
    2624  friend class VmaList<T, AllocatorT>;
    2625  };
    2626 
    2627  class const_iterator
    2628  {
    2629  public:
    2630  const_iterator() :
    2631  m_pList(VMA_NULL),
    2632  m_pItem(VMA_NULL)
    2633  {
    2634  }
    2635 
    2636  const_iterator(const iterator& src) :
    2637  m_pList(src.m_pList),
    2638  m_pItem(src.m_pItem)
    2639  {
    2640  }
    2641 
    2642  const T& operator*() const
    2643  {
    2644  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2645  return m_pItem->Value;
    2646  }
    2647  const T* operator->() const
    2648  {
    2649  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2650  return &m_pItem->Value;
    2651  }
    2652 
    2653  const_iterator& operator++()
    2654  {
    2655  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2656  m_pItem = m_pItem->pNext;
    2657  return *this;
    2658  }
    2659  const_iterator& operator--()
    2660  {
    2661  if(m_pItem != VMA_NULL)
    2662  {
    2663  m_pItem = m_pItem->pPrev;
    2664  }
    2665  else
    2666  {
    2667  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
    2668  m_pItem = m_pList->Back();
    2669  }
    2670  return *this;
    2671  }
    2672 
    2673  const_iterator operator++(int)
    2674  {
    2675  const_iterator result = *this;
    2676  ++*this;
    2677  return result;
    2678  }
    2679  const_iterator operator--(int)
    2680  {
    2681  const_iterator result = *this;
    2682  --*this;
    2683  return result;
    2684  }
    2685 
    2686  bool operator==(const const_iterator& rhs) const
    2687  {
    2688  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    2689  return m_pItem == rhs.m_pItem;
    2690  }
    2691  bool operator!=(const const_iterator& rhs) const
    2692  {
    2693  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    2694  return m_pItem != rhs.m_pItem;
    2695  }
    2696 
    2697  private:
    2698  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
    2699  m_pList(pList),
    2700  m_pItem(pItem)
    2701  {
    2702  }
    2703 
    2704  const VmaRawList<T>* m_pList;
    2705  const VmaListItem<T>* m_pItem;
    2706 
    2707  friend class VmaList<T, AllocatorT>;
    2708  };
    2709 
    2710  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
    2711 
    2712  bool empty() const { return m_RawList.IsEmpty(); }
    2713  size_t size() const { return m_RawList.GetCount(); }
    2714 
    2715  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    2716  iterator end() { return iterator(&m_RawList, VMA_NULL); }
    2717 
    2718  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    2719  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
    2720 
    2721  void clear() { m_RawList.Clear(); }
    2722  void push_back(const T& value) { m_RawList.PushBack(value); }
    2723  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    2724  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
    2725 
    2726 private:
    2727  VmaRawList<T> m_RawList;
    2728 };
    2729 
    2730 #endif // #if VMA_USE_STL_LIST
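
         // Usage sketch (illustrative): VmaList mirrors the subset of std::list that
         // the library needs:
         //
         //   VmaList< int, VmaStlAllocator<int> > list(
         //       VmaStlAllocator<int>(pAllocationCallbacks));
         //   list.push_back(1);
         //   list.insert(list.begin(), 0); // inserts before the first element
         //   list.erase(list.begin());     // removes it again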
    2731 
    2732 ////////////////////////////////////////////////////////////////////////////////
    2733 // class VmaMap
    2734 
    2735 // Unused in this version.
    2736 #if 0
    2737 
    2738 #if VMA_USE_STL_UNORDERED_MAP
    2739 
    2740 #define VmaPair std::pair
    2741 
    2742 #define VMA_MAP_TYPE(KeyT, ValueT) \
    2743  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    2744 
    2745 #else // #if VMA_USE_STL_UNORDERED_MAP
    2746 
    2747 template<typename T1, typename T2>
    2748 struct VmaPair
    2749 {
    2750  T1 first;
    2751  T2 second;
    2752 
    2753  VmaPair() : first(), second() { }
    2754  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
    2755 };
    2756 
    2757 /* Class compatible with subset of interface of std::unordered_map.
    2758 KeyT, ValueT must be POD because they will be stored in VmaVector.
    2759 */
    2760 template<typename KeyT, typename ValueT>
    2761 class VmaMap
    2762 {
    2763 public:
    2764  typedef VmaPair<KeyT, ValueT> PairType;
    2765  typedef PairType* iterator;
    2766 
    2767  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
    2768 
    2769  iterator begin() { return m_Vector.begin(); }
    2770  iterator end() { return m_Vector.end(); }
    2771 
    2772  void insert(const PairType& pair);
    2773  iterator find(const KeyT& key);
    2774  void erase(iterator it);
    2775 
    2776 private:
    2777  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
    2778 };
    2779 
    2780 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    2781 
    2782 template<typename FirstT, typename SecondT>
    2783 struct VmaPairFirstLess
    2784 {
    2785  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    2786  {
    2787  return lhs.first < rhs.first;
    2788  }
    2789  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    2790  {
    2791  return lhs.first < rhsFirst;
    2792  }
    2793 };
    2794 
    2795 template<typename KeyT, typename ValueT>
    2796 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
    2797 {
    2798  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    2799  m_Vector.data(),
    2800  m_Vector.data() + m_Vector.size(),
    2801  pair,
    2802  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    2803  VmaVectorInsert(m_Vector, indexToInsert, pair);
    2804 }
    2805 
    2806 template<typename KeyT, typename ValueT>
    2807 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
    2808 {
    2809  PairType* it = VmaBinaryFindFirstNotLess(
    2810  m_Vector.data(),
    2811  m_Vector.data() + m_Vector.size(),
    2812  key,
    2813  VmaPairFirstLess<KeyT, ValueT>());
    2814  if((it != m_Vector.end()) && (it->first == key))
    2815  {
    2816  return it;
    2817  }
    2818  else
    2819  {
    2820  return m_Vector.end();
    2821  }
    2822 }
    2823 
    2824 template<typename KeyT, typename ValueT>
    2825 void VmaMap<KeyT, ValueT>::erase(iterator it)
    2826 {
    2827  VmaVectorRemove(m_Vector, it - m_Vector.begin());
    2828 }
    2829 
    2830 #endif // #if VMA_USE_STL_UNORDERED_MAP
    2831 
    2832 #endif // #if 0
    2833 
    2834 ////////////////////////////////////////////////////////////////////////////////
    2835 
    2836 class VmaDeviceMemoryBlock;
    2837 
    2838 enum VMA_BLOCK_VECTOR_TYPE
    2839 {
    2840  VMA_BLOCK_VECTOR_TYPE_UNMAPPED,
    2841  VMA_BLOCK_VECTOR_TYPE_MAPPED,
    2842  VMA_BLOCK_VECTOR_TYPE_COUNT
    2843 };
    2844 
    2845 static VMA_BLOCK_VECTOR_TYPE VmaAllocationCreateFlagsToBlockVectorType(VmaAllocationCreateFlags flags)
    2846 {
    2847  return (flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0 ?
    2848  VMA_BLOCK_VECTOR_TYPE_MAPPED :
    2849  VMA_BLOCK_VECTOR_TYPE_UNMAPPED;
    2850 }
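
         // For example (illustrative):
         //
         //   VmaAllocationCreateFlagsToBlockVectorType(
         //       VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT); // VMA_BLOCK_VECTOR_TYPE_MAPPED
         //   VmaAllocationCreateFlagsToBlockVectorType(0);  // VMA_BLOCK_VECTOR_TYPE_UNMAPPED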
    2851 
    2852 struct VmaAllocation_T
    2853 {
    2854 public:
    2855  enum ALLOCATION_TYPE
    2856  {
    2857  ALLOCATION_TYPE_NONE,
    2858  ALLOCATION_TYPE_BLOCK,
    2859  ALLOCATION_TYPE_DEDICATED,
    2860  };
    2861 
    2862  VmaAllocation_T(uint32_t currentFrameIndex) :
    2863  m_Alignment(1),
    2864  m_Size(0),
    2865  m_pUserData(VMA_NULL),
    2866  m_Type(ALLOCATION_TYPE_NONE),
    2867  m_SuballocationType(VMA_SUBALLOCATION_TYPE_UNKNOWN),
    2868  m_LastUseFrameIndex(currentFrameIndex)
    2869  {
    2870  }
    2871 
    2872  void InitBlockAllocation(
    2873  VmaPool hPool,
    2874  VmaDeviceMemoryBlock* block,
    2875  VkDeviceSize offset,
    2876  VkDeviceSize alignment,
    2877  VkDeviceSize size,
    2878  VmaSuballocationType suballocationType,
    2879  void* pUserData,
    2880  bool canBecomeLost)
    2881  {
    2882  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    2883  VMA_ASSERT(block != VMA_NULL);
    2884  m_Type = ALLOCATION_TYPE_BLOCK;
    2885  m_Alignment = alignment;
    2886  m_Size = size;
    2887  m_pUserData = pUserData;
    2888  m_SuballocationType = suballocationType;
    2889  m_BlockAllocation.m_hPool = hPool;
    2890  m_BlockAllocation.m_Block = block;
    2891  m_BlockAllocation.m_Offset = offset;
    2892  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    2893  }
    2894 
    2895  void InitLost()
    2896  {
    2897  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    2898  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
    2899  m_Type = ALLOCATION_TYPE_BLOCK;
    2900  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
    2901  m_BlockAllocation.m_Block = VMA_NULL;
    2902  m_BlockAllocation.m_Offset = 0;
    2903  m_BlockAllocation.m_CanBecomeLost = true;
    2904  }
    2905 
    2906  void ChangeBlockAllocation(
    2907  VmaDeviceMemoryBlock* block,
    2908  VkDeviceSize offset)
    2909  {
    2910  VMA_ASSERT(block != VMA_NULL);
    2911  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    2912  m_BlockAllocation.m_Block = block;
    2913  m_BlockAllocation.m_Offset = offset;
    2914  }
    2915 
    2916  void InitDedicatedAllocation(
    2917  uint32_t memoryTypeIndex,
    2918  VkDeviceMemory hMemory,
    2919  VmaSuballocationType suballocationType,
    2920  bool persistentMap,
    2921  void* pMappedData,
    2922  VkDeviceSize size,
    2923  void* pUserData)
    2924  {
    2925  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    2926  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
    2927  m_Type = ALLOCATION_TYPE_DEDICATED;
    2928  m_Alignment = 0;
    2929  m_Size = size;
    2930  m_pUserData = pUserData;
    2931  m_SuballocationType = suballocationType;
    2932  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
    2933  m_DedicatedAllocation.m_hMemory = hMemory;
    2934  m_DedicatedAllocation.m_PersistentMap = persistentMap;
    2935  m_DedicatedAllocation.m_pMappedData = pMappedData;
    2936  }
    2937 
    2938  ALLOCATION_TYPE GetType() const { return m_Type; }
    2939  VkDeviceSize GetAlignment() const { return m_Alignment; }
    2940  VkDeviceSize GetSize() const { return m_Size; }
    2941  void* GetUserData() const { return m_pUserData; }
    2942  void SetUserData(void* pUserData) { m_pUserData = pUserData; }
    2943  VmaSuballocationType GetSuballocationType() const { return m_SuballocationType; }
    2944 
    2945  VmaDeviceMemoryBlock* GetBlock() const
    2946  {
    2947  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    2948  return m_BlockAllocation.m_Block;
    2949  }
    2950  VkDeviceSize GetOffset() const;
    2951  VkDeviceMemory GetMemory() const;
    2952  uint32_t GetMemoryTypeIndex() const;
    2953  VMA_BLOCK_VECTOR_TYPE GetBlockVectorType() const;
    2954  void* GetMappedData() const;
    2955  bool CanBecomeLost() const;
    2956  VmaPool GetPool() const;
    2957 
    2958  VkResult DedicatedAllocMapPersistentlyMappedMemory(VmaAllocator hAllocator);
    2959  void DedicatedAllocUnmapPersistentlyMappedMemory(VmaAllocator hAllocator);
    2960 
    2961  uint32_t GetLastUseFrameIndex() const
    2962  {
    2963  return m_LastUseFrameIndex.load();
    2964  }
    2965  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    2966  {
    2967  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    2968  }
    2969  /*
    2970  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
    2971  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    2972  - Else, returns false.
    2973 
    2974  If hAllocation is already lost, assert - you should not call it then.
    2975  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    2976  */
    2977  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    2978 
    2979  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    2980  {
    2981  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
    2982  outInfo.blockCount = 1;
    2983  outInfo.allocationCount = 1;
    2984  outInfo.unusedRangeCount = 0;
    2985  outInfo.usedBytes = m_Size;
    2986  outInfo.unusedBytes = 0;
    2987  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
    2988  outInfo.unusedRangeSizeMin = UINT64_MAX;
    2989  outInfo.unusedRangeSizeMax = 0;
    2990  }
    2991 
    2992 private:
    2993  VkDeviceSize m_Alignment;
    2994  VkDeviceSize m_Size;
    2995  void* m_pUserData;
    2996  ALLOCATION_TYPE m_Type;
    2997  VmaSuballocationType m_SuballocationType;
    2998  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    2999 
    3000  // Allocation out of VmaDeviceMemoryBlock.
    3001  struct BlockAllocation
    3002  {
    3003  VmaPool m_hPool; // Null if belongs to general memory.
    3004  VmaDeviceMemoryBlock* m_Block;
    3005  VkDeviceSize m_Offset;
    3006  bool m_CanBecomeLost;
    3007  };
    3008 
    3009  // Allocation for an object that has its own private VkDeviceMemory.
    3010  struct DedicatedAllocation
    3011  {
    3012  uint32_t m_MemoryTypeIndex;
    3013  VkDeviceMemory m_hMemory;
    3014  bool m_PersistentMap;
    3015  void* m_pMappedData;
    3016  };
    3017 
    3018  union
    3019  {
    3020  // Allocation out of VmaDeviceMemoryBlock.
    3021  BlockAllocation m_BlockAllocation;
    3022  // Allocation for an object that has its own private VkDeviceMemory.
    3023  DedicatedAllocation m_DedicatedAllocation;
    3024  };
    3025 };
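
         // Usage sketch (hAlloc is a hypothetical VmaAllocation): the union above is
         // discriminated by m_Type, so code dispatches on GetType() before touching
         // either member:
         //
         //   VmaDeviceMemoryBlock* block = VMA_NULL;
         //   if(hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK)
         //   {
         //       block = hAlloc->GetBlock(); // GetBlock() asserts the type internally
         //   }
         //   // A dedicated allocation instead owns its whole VkDeviceMemory.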
    3026 
    3027 /*
    3028 Represents a region of a VmaDeviceMemoryBlock that is either assigned and
    3029 returned as an allocated memory block, or free.
    3030 */
    3031 struct VmaSuballocation
    3032 {
    3033  VkDeviceSize offset;
    3034  VkDeviceSize size;
    3035  VmaAllocation hAllocation;
    3036  VmaSuballocationType type;
    3037 };
    3038 
    3039 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
    3040 
    3041 // Cost of making one additional allocation lost, expressed as an equivalent number of bytes.
    3042 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    3043 
    3044 /*
    3045 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
    3046 
    3047 If canMakeOtherLost was false:
    3048 - item points to a FREE suballocation.
    3049 - itemsToMakeLostCount is 0.
    3050 
    3051 If canMakeOtherLost was true:
    3052 - item points to first of sequence of suballocations, which are either FREE,
    3053  or point to VmaAllocations that can become lost.
    3054 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
    3055  the requested allocation to succeed.
    3056 */
    3057 struct VmaAllocationRequest
    3058 {
    3059  VkDeviceSize offset;
    3060  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    3061  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    3062  VmaSuballocationList::iterator item;
    3063  size_t itemsToMakeLostCount;
    3064 
    3065  VkDeviceSize CalcCost() const
    3066  {
    3067  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    3068  }
    3069 };
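
         // Worked example (illustrative): with sumItemSize = 262144 and
         // itemsToMakeLostCount = 2, CalcCost() returns
         // 262144 + 2 * 1048576 = 2359296, so requests that sacrifice fewer existing
         // allocations compare as cheaper.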
    3070 
    3071 /*
    3072 Data structure used for bookkeeping of allocations and unused ranges of memory
    3073 in a single VkDeviceMemory block.
    3074 */
    3075 class VmaBlockMetadata
    3076 {
    3077 public:
    3078  VmaBlockMetadata(VmaAllocator hAllocator);
    3079  ~VmaBlockMetadata();
    3080  void Init(VkDeviceSize size);
    3081 
    3082  // Validates all data structures inside this object. If not valid, returns false.
    3083  bool Validate() const;
    3084  VkDeviceSize GetSize() const { return m_Size; }
    3085  size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    3086  VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    3087  VkDeviceSize GetUnusedRangeSizeMax() const;
    3088  // Returns true if this block is empty - contains only a single free suballocation.
    3089  bool IsEmpty() const;
    3090 
    3091  void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    3092  void AddPoolStats(VmaPoolStats& inoutStats) const;
    3093 
    3094 #if VMA_STATS_STRING_ENABLED
    3095  void PrintDetailedMap(class VmaJsonWriter& json) const;
    3096 #endif
    3097 
    3098  // Creates trivial request for case when block is empty.
    3099  void CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest);
    3100 
    3101  // Tries to find a place for suballocation with given parameters inside this block.
    3102  // If succeeded, fills pAllocationRequest and returns true.
    3103  // If failed, returns false.
    3104  bool CreateAllocationRequest(
    3105  uint32_t currentFrameIndex,
    3106  uint32_t frameInUseCount,
    3107  VkDeviceSize bufferImageGranularity,
    3108  VkDeviceSize allocSize,
    3109  VkDeviceSize allocAlignment,
    3110  VmaSuballocationType allocType,
    3111  bool canMakeOtherLost,
    3112  VmaAllocationRequest* pAllocationRequest);
    3113 
    3114  bool MakeRequestedAllocationsLost(
    3115  uint32_t currentFrameIndex,
    3116  uint32_t frameInUseCount,
    3117  VmaAllocationRequest* pAllocationRequest);
    3118 
    3119  uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    3120 
    3121  // Makes actual allocation based on request. Request must already be checked and valid.
    3122  void Alloc(
    3123  const VmaAllocationRequest& request,
    3124  VmaSuballocationType type,
    3125  VkDeviceSize allocSize,
    3126  VmaAllocation hAllocation);
    3127 
    3128  // Frees suballocation assigned to given memory region.
    3129  void Free(const VmaAllocation allocation);
    3130 
    3131 private:
    3132  VkDeviceSize m_Size;
    3133  uint32_t m_FreeCount;
    3134  VkDeviceSize m_SumFreeSize;
    3135  VmaSuballocationList m_Suballocations;
    3136  // Suballocations that are free and have size greater than certain threshold.
    3137  // Sorted by size, ascending.
    3138  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
    3139 
    3140  bool ValidateFreeSuballocationList() const;
    3141 
    3142  // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    3143  // If yes, fills pOffset and returns true. If no, returns false.
    3144  bool CheckAllocation(
    3145  uint32_t currentFrameIndex,
    3146  uint32_t frameInUseCount,
    3147  VkDeviceSize bufferImageGranularity,
    3148  VkDeviceSize allocSize,
    3149  VkDeviceSize allocAlignment,
    3150  VmaSuballocationType allocType,
    3151  VmaSuballocationList::const_iterator suballocItem,
    3152  bool canMakeOtherLost,
    3153  VkDeviceSize* pOffset,
    3154  size_t* itemsToMakeLostCount,
    3155  VkDeviceSize* pSumFreeSize,
    3156  VkDeviceSize* pSumItemSize) const;
    3157  // Given a free suballocation, merges it with the following one, which must also be free.
    3158  void MergeFreeWithNext(VmaSuballocationList::iterator item);
    3159  // Releases given suballocation, making it free.
    3160  // Merges it with adjacent free suballocations if applicable.
    3161  // Returns iterator to new free suballocation at this place.
    3162  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    3163  // Given a free suballocation, inserts it into the sorted list
    3164  // m_FreeSuballocationsBySize, if it's suitable.
    3165  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    3166  // Given a free suballocation, removes it from the sorted list
    3167  // m_FreeSuballocationsBySize, if it's suitable.
    3168  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
    3169 };
    3170 
    3171 /*
    3172 Represents a single block of device memory (`VkDeviceMemory`) with all the
    3173 data about its regions (aka suballocations, `VmaAllocation`), assigned and free.
    3174 
    3175 Thread-safety: This class must be externally synchronized.
    3176 */
    3177 class VmaDeviceMemoryBlock
    3178 {
    3179 public:
    3180  uint32_t m_MemoryTypeIndex;
    3181  VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
    3182  VkDeviceMemory m_hMemory;
    3183  bool m_PersistentMap;
    3184  void* m_pMappedData;
    3185  VmaBlockMetadata m_Metadata;
    3186 
    3187  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
    3188 
    3189  ~VmaDeviceMemoryBlock()
    3190  {
    3191  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    3192  }
    3193 
    3194  // Always call after construction.
    3195  void Init(
    3196  uint32_t newMemoryTypeIndex,
    3197  VMA_BLOCK_VECTOR_TYPE newBlockVectorType,
    3198  VkDeviceMemory newMemory,
    3199  VkDeviceSize newSize,
    3200  bool persistentMap,
    3201  void* pMappedData);
    3202  // Always call before destruction.
    3203  void Destroy(VmaAllocator allocator);
    3204 
    3205  // Validates all data structures inside this object. If not valid, returns false.
    3206  bool Validate() const;
    3207 };
    3208 
    3209 struct VmaPointerLess
    3210 {
    3211  bool operator()(const void* lhs, const void* rhs) const
    3212  {
    3213  return lhs < rhs;
    3214  }
    3215 };
    3216 
    3217 class VmaDefragmentator;
    3218 
    3219 /*
    3220 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
    3221 Vulkan memory type.
    3222 
    3223 Synchronized internally with a mutex.
    3224 */
    3225 struct VmaBlockVector
    3226 {
    3227  VmaBlockVector(
    3228  VmaAllocator hAllocator,
    3229  uint32_t memoryTypeIndex,
    3230  VMA_BLOCK_VECTOR_TYPE blockVectorType,
    3231  VkDeviceSize preferredBlockSize,
    3232  size_t minBlockCount,
    3233  size_t maxBlockCount,
    3234  VkDeviceSize bufferImageGranularity,
    3235  uint32_t frameInUseCount,
    3236  bool isCustomPool);
    3237  ~VmaBlockVector();
    3238 
    3239  VkResult CreateMinBlocks();
    3240 
    3241  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    3242  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    3243  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    3244  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    3245  VMA_BLOCK_VECTOR_TYPE GetBlockVectorType() const { return m_BlockVectorType; }
    3246 
    3247  void GetPoolStats(VmaPoolStats* pStats);
    3248 
    3249  bool IsEmpty() const { return m_Blocks.empty(); }
    3250 
    3251  VkResult Allocate(
    3252  VmaPool hCurrentPool,
    3253  uint32_t currentFrameIndex,
    3254  const VkMemoryRequirements& vkMemReq,
    3255  const VmaAllocationCreateInfo& createInfo,
    3256  VmaSuballocationType suballocType,
    3257  VmaAllocation* pAllocation);
    3258 
    3259  void Free(
    3260  VmaAllocation hAllocation);
    3261 
    3262  // Adds statistics of this BlockVector to pStats.
    3263  void AddStats(VmaStats* pStats);
    3264 
    3265 #if VMA_STATS_STRING_ENABLED
    3266  void PrintDetailedMap(class VmaJsonWriter& json);
    3267 #endif
    3268 
    3269  void UnmapPersistentlyMappedMemory();
    3270  VkResult MapPersistentlyMappedMemory();
    3271 
    3272  void MakePoolAllocationsLost(
    3273  uint32_t currentFrameIndex,
    3274  size_t* pLostAllocationCount);
    3275 
    3276  VmaDefragmentator* EnsureDefragmentator(
    3277  VmaAllocator hAllocator,
    3278  uint32_t currentFrameIndex);
    3279 
    3280  VkResult Defragment(
    3281  VmaDefragmentationStats* pDefragmentationStats,
    3282  VkDeviceSize& maxBytesToMove,
    3283  uint32_t& maxAllocationsToMove);
    3284 
    3285  void DestroyDefragmentator();
    3286 
    3287 private:
    3288  friend class VmaDefragmentator;
    3289 
    3290  const VmaAllocator m_hAllocator;
    3291  const uint32_t m_MemoryTypeIndex;
    3292  const VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
    3293  const VkDeviceSize m_PreferredBlockSize;
    3294  const size_t m_MinBlockCount;
    3295  const size_t m_MaxBlockCount;
    3296  const VkDeviceSize m_BufferImageGranularity;
    3297  const uint32_t m_FrameInUseCount;
    3298  const bool m_IsCustomPool;
    3299  VMA_MUTEX m_Mutex;
    3300  // Incrementally sorted by sumFreeSize, ascending.
    3301  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    3302  /* There can be at most one block that is completely empty - a form of
    3303  hysteresis to avoid the pessimistic case of alternating creation and
    3304  destruction of a VkDeviceMemory. */
    3305  bool m_HasEmptyBlock;
    3306  VmaDefragmentator* m_pDefragmentator;
    3307 
    3308  // Finds and removes given block from vector.
    3309  void Remove(VmaDeviceMemoryBlock* pBlock);
    3310 
    3311  // Performs single step in sorting m_Blocks. They may not be fully sorted
    3312  // after this call.
    3313  void IncrementallySortBlocks();
    3314 
    3315  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
    3316 };
    3317 
    3318 struct VmaPool_T
    3319 {
    3320 public:
    3321  VmaBlockVector m_BlockVector;
    3322 
    3323  // Takes ownership.
    3324  VmaPool_T(
    3325  VmaAllocator hAllocator,
    3326  const VmaPoolCreateInfo& createInfo);
    3327  ~VmaPool_T();
    3328 
    3329  VmaBlockVector& GetBlockVector() { return m_BlockVector; }
    3330 
    3331 #if VMA_STATS_STRING_ENABLED
    3332  //void PrintDetailedMap(class VmaStringBuilder& sb);
    3333 #endif
    3334 };
    3335 
    3336 class VmaDefragmentator
    3337 {
    3338  const VmaAllocator m_hAllocator;
    3339  VmaBlockVector* const m_pBlockVector;
    3340  uint32_t m_CurrentFrameIndex;
    3341  VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
    3342  VkDeviceSize m_BytesMoved;
    3343  uint32_t m_AllocationsMoved;
    3344 
    3345  struct AllocationInfo
    3346  {
    3347  VmaAllocation m_hAllocation;
    3348  VkBool32* m_pChanged;
    3349 
    3350  AllocationInfo() :
    3351  m_hAllocation(VK_NULL_HANDLE),
    3352  m_pChanged(VMA_NULL)
    3353  {
    3354  }
    3355  };
    3356 
    3357  struct AllocationInfoSizeGreater
    3358  {
    3359  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
    3360  {
    3361  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
    3362  }
    3363  };
    3364 
    3365  // Used between AddAllocation and Defragment.
    3366  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
    3367 
    3368  struct BlockInfo
    3369  {
    3370  VmaDeviceMemoryBlock* m_pBlock;
    3371  bool m_HasNonMovableAllocations;
    3372  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
    3373 
    3374  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
    3375  m_pBlock(VMA_NULL),
    3376  m_HasNonMovableAllocations(true),
    3377  m_Allocations(pAllocationCallbacks),
    3378  m_pMappedDataForDefragmentation(VMA_NULL)
    3379  {
    3380  }
    3381 
    3382  void CalcHasNonMovableAllocations()
    3383  {
    3384  const size_t blockAllocCount = m_pBlock->m_Metadata.GetAllocationCount();
    3385  const size_t defragmentAllocCount = m_Allocations.size();
    3386  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
    3387  }
    3388 
    3389  void SortAllocationsBySizeDescecnding()
    3390  {
    3391  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
    3392  }
    3393 
    3394  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
    3395  void Unmap(VmaAllocator hAllocator);
    3396 
    3397  private:
    3398  // Not null if mapped for defragmentation only, not persistently mapped.
    3399  void* m_pMappedDataForDefragmentation;
    3400  };
    3401 
    3402  struct BlockPointerLess
    3403  {
    3404  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
    3405  {
    3406  return pLhsBlockInfo->m_pBlock < pRhsBlock;
    3407  }
    3408  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
    3409  {
    3410  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
    3411  }
    3412  };
    3413 
    3414  // 1. Blocks with some non-movable allocations go first.
    3415  // 2. Blocks with smaller sumFreeSize go first.
    3416  struct BlockInfoCompareMoveDestination
    3417  {
    3418  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
    3419  {
    3420  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
    3421  {
    3422  return true;
    3423  }
    3424  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
    3425  {
    3426  return false;
    3427  }
    3428  if(pLhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize())
    3429  {
    3430  return true;
    3431  }
    3432  return false;
    3433  }
    3434  };
    3435 
    3436  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    3437  BlockInfoVector m_Blocks;
    3438 
    3439  VkResult DefragmentRound(
    3440  VkDeviceSize maxBytesToMove,
    3441  uint32_t maxAllocationsToMove);
    3442 
    3443  static bool MoveMakesSense(
    3444  size_t dstBlockIndex, VkDeviceSize dstOffset,
    3445  size_t srcBlockIndex, VkDeviceSize srcOffset);
    3446 
    3447 public:
    3448  VmaDefragmentator(
    3449  VmaAllocator hAllocator,
    3450  VmaBlockVector* pBlockVector,
    3451  uint32_t currentFrameIndex);
    3452 
    3453  ~VmaDefragmentator();
    3454 
    3455  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    3456  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
    3457 
    3458  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    3459 
    3460  VkResult Defragment(
    3461  VkDeviceSize maxBytesToMove,
    3462  uint32_t maxAllocationsToMove);
    3463 };
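/*
A sketch of how this internal class is driven, based only on the interface
above (allocCount, pAllocations and pAllocationsChanged are illustrative
names; the real call site lives elsewhere in this file):

    VmaDefragmentator defragmentator(hAllocator, pBlockVector, currentFrameIndex);
    for(size_t i = 0; i < allocCount; ++i)
    {
        defragmentator.AddAllocation(pAllocations[i], pAllocationsChanged + i);
    }
    VkResult res = defragmentator.Defragment(maxBytesToMove, maxAllocationsToMove);
    // On success, GetBytesMoved() and GetAllocationsMoved() report the work done.
*/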
    3464 
    3465 // Main allocator object.
    3466 struct VmaAllocator_T
    3467 {
    3468  bool m_UseMutex;
    3469  bool m_UseKhrDedicatedAllocation;
    3470  VkDevice m_hDevice;
    3471  bool m_AllocationCallbacksSpecified;
    3472  VkAllocationCallbacks m_AllocationCallbacks;
    3473  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
    3474  // Non-zero when we are inside UnmapPersistentlyMappedMemory...MapPersistentlyMappedMemory.
    3475  // Counter to allow nested calls to these functions.
    3476  uint32_t m_UnmapPersistentlyMappedMemoryCounter;
    3477 
    3478  // Number of bytes free out of limit, or VK_WHOLE_SIZE if there is no limit for that heap.
    3479  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    3480  VMA_MUTEX m_HeapSizeLimitMutex;
    3481 
    3482  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    3483  VkPhysicalDeviceMemoryProperties m_MemProps;
    3484 
    3485  // Default pools.
    3486  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];
    3487 
    3488  // Each vector is sorted by memory (handle value).
    3489  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    3490  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];
    3491  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
    3492 
    3493  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    3494  ~VmaAllocator_T();
    3495 
    3496  const VkAllocationCallbacks* GetAllocationCallbacks() const
    3497  {
    3498  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    3499  }
    3500  const VmaVulkanFunctions& GetVulkanFunctions() const
    3501  {
    3502  return m_VulkanFunctions;
    3503  }
    3504 
    3505  VkDeviceSize GetBufferImageGranularity() const
    3506  {
    3507  return VMA_MAX(
    3508  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
    3509  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    3510  }
    3511 
    3512  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    3513  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
    3514 
    3515  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    3516  {
    3517  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
    3518  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    3519  }
    3520 
    3521  void GetBufferMemoryRequirements(
    3522  VkBuffer hBuffer,
    3523  VkMemoryRequirements& memReq,
    3524  bool& dedicatedAllocation) const;
    3525  void GetImageMemoryRequirements(
    3526  VkImage hImage,
    3527  VkMemoryRequirements& memReq,
    3528  bool& dedicatedAllocation) const;
    3529 
    3530  // Main allocation function.
    3531  VkResult AllocateMemory(
    3532  const VkMemoryRequirements& vkMemReq,
    3533  bool dedicatedAllocation,
    3534  const VmaAllocationCreateInfo& createInfo,
    3535  VmaSuballocationType suballocType,
    3536  VmaAllocation* pAllocation);
    3537 
    3538  // Main deallocation function.
    3539  void FreeMemory(const VmaAllocation allocation);
    3540 
    3541  void CalculateStats(VmaStats* pStats);
    3542 
    3543 #if VMA_STATS_STRING_ENABLED
    3544  void PrintDetailedMap(class VmaJsonWriter& json);
    3545 #endif
    3546 
    3547  void UnmapPersistentlyMappedMemory();
    3548  VkResult MapPersistentlyMappedMemory();
    3549 
    3550  VkResult Defragment(
    3551  VmaAllocation* pAllocations,
    3552  size_t allocationCount,
    3553  VkBool32* pAllocationsChanged,
    3554  const VmaDefragmentationInfo* pDefragmentationInfo,
    3555  VmaDefragmentationStats* pDefragmentationStats);
    3556 
    3557  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    3558 
    3559  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    3560  void DestroyPool(VmaPool pool);
    3561  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
    3562 
    3563  void SetCurrentFrameIndex(uint32_t frameIndex);
    3564 
    3565  void MakePoolAllocationsLost(
    3566  VmaPool hPool,
    3567  size_t* pLostAllocationCount);
    3568 
    3569  void CreateLostAllocation(VmaAllocation* pAllocation);
    3570 
    3571  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    3572  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
    3573 
    3574 private:
    3575  VkDeviceSize m_PreferredLargeHeapBlockSize;
    3576  VkDeviceSize m_PreferredSmallHeapBlockSize;
    3577 
    3578  VkPhysicalDevice m_PhysicalDevice;
    3579  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
    3580 
    3581  VMA_MUTEX m_PoolsMutex;
    3582  // Protected by m_PoolsMutex. Sorted by pointer value.
    3583  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    3584 
    3585  VmaVulkanFunctions m_VulkanFunctions;
    3586 
    3587  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
    3588 
    3589  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
    3590 
    3591  VkResult AllocateMemoryOfType(
    3592  const VkMemoryRequirements& vkMemReq,
    3593  bool dedicatedAllocation,
    3594  const VmaAllocationCreateInfo& createInfo,
    3595  uint32_t memTypeIndex,
    3596  VmaSuballocationType suballocType,
    3597  VmaAllocation* pAllocation);
    3598 
    3599  // Allocates and registers new VkDeviceMemory specifically for a single allocation.
    3600  VkResult AllocateDedicatedMemory(
    3601  VkDeviceSize size,
    3602  VmaSuballocationType suballocType,
    3603  uint32_t memTypeIndex,
    3604  bool map,
    3605  void* pUserData,
    3606  VmaAllocation* pAllocation);
    3607 
    3608  // Frees an allocation made as dedicated memory and unregisters it.
    3609  void FreeDedicatedMemory(VmaAllocation allocation);
    3610 };
    3611 
    3613 // Memory allocation #2 after VmaAllocator_T definition
    3614 
    3615 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
    3616 {
    3617  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
    3618 }
    3619 
    3620 static void VmaFree(VmaAllocator hAllocator, void* ptr)
    3621 {
    3622  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
    3623 }
    3624 
    3625 template<typename T>
    3626 static T* VmaAllocate(VmaAllocator hAllocator)
    3627 {
    3628  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
    3629 }
    3630 
    3631 template<typename T>
    3632 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
    3633 {
    3634  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
    3635 }
    3636 
    3637 template<typename T>
    3638 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    3639 {
    3640  if(ptr != VMA_NULL)
    3641  {
    3642  ptr->~T();
    3643  VmaFree(hAllocator, ptr);
    3644  }
    3645 }
    3646 
    3647 template<typename T>
    3648 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    3649 {
    3650  if(ptr != VMA_NULL)
    3651  {
    3652  for(size_t i = count; i--; )
    3653  ptr[i].~T();
    3654  VmaFree(hAllocator, ptr);
    3655  }
    3656 }
    3657 
    3659 // VmaStringBuilder
    3660 
    3661 #if VMA_STATS_STRING_ENABLED
    3662 
    3663 class VmaStringBuilder
    3664 {
    3665 public:
    3666  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    3667  size_t GetLength() const { return m_Data.size(); }
    3668  const char* GetData() const { return m_Data.data(); }
    3669 
    3670  void Add(char ch) { m_Data.push_back(ch); }
    3671  void Add(const char* pStr);
    3672  void AddNewLine() { Add('\n'); }
    3673  void AddNumber(uint32_t num);
    3674  void AddNumber(uint64_t num);
    3675  void AddPointer(const void* ptr);
    3676 
    3677 private:
    3678  VmaVector< char, VmaStlAllocator<char> > m_Data;
    3679 };
    3680 
    3681 void VmaStringBuilder::Add(const char* pStr)
    3682 {
    3683  const size_t strLen = strlen(pStr);
    3684  if(strLen > 0)
    3685  {
    3686  const size_t oldCount = m_Data.size();
    3687  m_Data.resize(oldCount + strLen);
    3688  memcpy(m_Data.data() + oldCount, pStr, strLen);
    3689  }
    3690 }
    3691 
    3692 void VmaStringBuilder::AddNumber(uint32_t num)
    3693 {
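    // Max uint32_t is 4294967295: 10 digits plus terminating null.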
    3694  char buf[11];
    3695  VmaUint32ToStr(buf, sizeof(buf), num);
    3696  Add(buf);
    3697 }
    3698 
    3699 void VmaStringBuilder::AddNumber(uint64_t num)
    3700 {
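    // Max uint64_t is 18446744073709551615: 20 digits plus terminating null.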
    3701  char buf[21];
    3702  VmaUint64ToStr(buf, sizeof(buf), num);
    3703  Add(buf);
    3704 }
    3705 
    3706 void VmaStringBuilder::AddPointer(const void* ptr)
    3707 {
    3708  char buf[21];
    3709  VmaPtrToStr(buf, sizeof(buf), ptr);
    3710  Add(buf);
    3711 }
    3712 
    3713 #endif // #if VMA_STATS_STRING_ENABLED
    3714 
    3716 // VmaJsonWriter
    3717 
    3718 #if VMA_STATS_STRING_ENABLED
    3719 
    3720 class VmaJsonWriter
    3721 {
    3722 public:
    3723  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    3724  ~VmaJsonWriter();
    3725 
    3726  void BeginObject(bool singleLine = false);
    3727  void EndObject();
    3728 
    3729  void BeginArray(bool singleLine = false);
    3730  void EndArray();
    3731 
    3732  void WriteString(const char* pStr);
    3733  void BeginString(const char* pStr = VMA_NULL);
    3734  void ContinueString(const char* pStr);
    3735  void ContinueString(uint32_t n);
    3736  void ContinueString(uint64_t n);
    3737  void EndString(const char* pStr = VMA_NULL);
    3738 
    3739  void WriteNumber(uint32_t n);
    3740  void WriteNumber(uint64_t n);
    3741  void WriteBool(bool b);
    3742  void WriteNull();
    3743 
    3744 private:
    3745  static const char* const INDENT;
    3746 
    3747  enum COLLECTION_TYPE
    3748  {
    3749  COLLECTION_TYPE_OBJECT,
    3750  COLLECTION_TYPE_ARRAY,
    3751  };
    3752  struct StackItem
    3753  {
    3754  COLLECTION_TYPE type;
    3755  uint32_t valueCount;
    3756  bool singleLineMode;
    3757  };
    3758 
    3759  VmaStringBuilder& m_SB;
    3760  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    3761  bool m_InsideString;
    3762 
    3763  void BeginValue(bool isString);
    3764  void WriteIndent(bool oneLess = false);
    3765 };
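/*
Usage sketch (hypothetical values): emitting {"Name": "value", "Count": 2}:

    VmaJsonWriter json(pAllocationCallbacks, sb);
    json.BeginObject();
    json.WriteString("Name");   // key
    json.WriteString("value");  // value
    json.WriteString("Count");
    json.WriteNumber(2u);
    json.EndObject();

Inside an object, keys and values alternate; BeginValue() below asserts that
every value at an even index is a string, i.e. a key.
*/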
    3766 
    3767 const char* const VmaJsonWriter::INDENT = "  ";
    3768 
    3769 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    3770  m_SB(sb),
    3771  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    3772  m_InsideString(false)
    3773 {
    3774 }
    3775 
    3776 VmaJsonWriter::~VmaJsonWriter()
    3777 {
    3778  VMA_ASSERT(!m_InsideString);
    3779  VMA_ASSERT(m_Stack.empty());
    3780 }
    3781 
    3782 void VmaJsonWriter::BeginObject(bool singleLine)
    3783 {
    3784  VMA_ASSERT(!m_InsideString);
    3785 
    3786  BeginValue(false);
    3787  m_SB.Add('{');
    3788 
    3789  StackItem item;
    3790  item.type = COLLECTION_TYPE_OBJECT;
    3791  item.valueCount = 0;
    3792  item.singleLineMode = singleLine;
    3793  m_Stack.push_back(item);
    3794 }
    3795 
    3796 void VmaJsonWriter::EndObject()
    3797 {
    3798  VMA_ASSERT(!m_InsideString);
    3799 
    3800  WriteIndent(true);
    3801  m_SB.Add('}');
    3802 
    3803  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    3804  m_Stack.pop_back();
    3805 }
    3806 
    3807 void VmaJsonWriter::BeginArray(bool singleLine)
    3808 {
    3809  VMA_ASSERT(!m_InsideString);
    3810 
    3811  BeginValue(false);
    3812  m_SB.Add('[');
    3813 
    3814  StackItem item;
    3815  item.type = COLLECTION_TYPE_ARRAY;
    3816  item.valueCount = 0;
    3817  item.singleLineMode = singleLine;
    3818  m_Stack.push_back(item);
    3819 }
    3820 
    3821 void VmaJsonWriter::EndArray()
    3822 {
    3823  VMA_ASSERT(!m_InsideString);
    3824 
    3825  WriteIndent(true);
    3826  m_SB.Add(']');
    3827 
    3828  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    3829  m_Stack.pop_back();
    3830 }
    3831 
    3832 void VmaJsonWriter::WriteString(const char* pStr)
    3833 {
    3834  BeginString(pStr);
    3835  EndString();
    3836 }
    3837 
    3838 void VmaJsonWriter::BeginString(const char* pStr)
    3839 {
    3840  VMA_ASSERT(!m_InsideString);
    3841 
    3842  BeginValue(true);
    3843  m_SB.Add('"');
    3844  m_InsideString = true;
    3845  if(pStr != VMA_NULL && pStr[0] != '\0')
    3846  {
    3847  ContinueString(pStr);
    3848  }
    3849 }
    3850 
    3851 void VmaJsonWriter::ContinueString(const char* pStr)
    3852 {
    3853  VMA_ASSERT(m_InsideString);
    3854 
    3855  const size_t strLen = strlen(pStr);
    3856  for(size_t i = 0; i < strLen; ++i)
    3857  {
    3858  char ch = pStr[i];
    3859  if(ch == '\\')
    3860  {
    3861  m_SB.Add("\\\\");
    3862  }
    3863  else if(ch == '"')
    3864  {
    3865  m_SB.Add("\\\"");
    3866  }
    3867  else if(ch >= 32)
    3868  {
    3869  m_SB.Add(ch);
    3870  }
    3871  else switch(ch)
    3872  {
    3873  case '\n':
    3874  m_SB.Add("\\n");
    3875  break;
    3876  case '\r':
    3877  m_SB.Add("\\r");
    3878  break;
    3879  case '\t':
    3880  m_SB.Add("\\t");
    3881  break;
    3882  default:
    3883  VMA_ASSERT(0 && "Character not currently supported.");
    3884  break;
    3885  }
    3886  }
    3887 }
    3888 
    3889 void VmaJsonWriter::ContinueString(uint32_t n)
    3890 {
    3891  VMA_ASSERT(m_InsideString);
    3892  m_SB.AddNumber(n);
    3893 }
    3894 
    3895 void VmaJsonWriter::ContinueString(uint64_t n)
    3896 {
    3897  VMA_ASSERT(m_InsideString);
    3898  m_SB.AddNumber(n);
    3899 }
    3900 
    3901 void VmaJsonWriter::EndString(const char* pStr)
    3902 {
    3903  VMA_ASSERT(m_InsideString);
    3904  if(pStr != VMA_NULL && pStr[0] != '\0')
    3905  {
    3906  ContinueString(pStr);
    3907  }
    3908  m_SB.Add('"');
    3909  m_InsideString = false;
    3910 }
    3911 
    3912 void VmaJsonWriter::WriteNumber(uint32_t n)
    3913 {
    3914  VMA_ASSERT(!m_InsideString);
    3915  BeginValue(false);
    3916  m_SB.AddNumber(n);
    3917 }
    3918 
    3919 void VmaJsonWriter::WriteNumber(uint64_t n)
    3920 {
    3921  VMA_ASSERT(!m_InsideString);
    3922  BeginValue(false);
    3923  m_SB.AddNumber(n);
    3924 }
    3925 
    3926 void VmaJsonWriter::WriteBool(bool b)
    3927 {
    3928  VMA_ASSERT(!m_InsideString);
    3929  BeginValue(false);
    3930  m_SB.Add(b ? "true" : "false");
    3931 }
    3932 
    3933 void VmaJsonWriter::WriteNull()
    3934 {
    3935  VMA_ASSERT(!m_InsideString);
    3936  BeginValue(false);
    3937  m_SB.Add("null");
    3938 }
    3939 
    3940 void VmaJsonWriter::BeginValue(bool isString)
    3941 {
    3942  if(!m_Stack.empty())
    3943  {
    3944  StackItem& currItem = m_Stack.back();
    3945  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    3946  currItem.valueCount % 2 == 0)
    3947  {
    3948  VMA_ASSERT(isString);
    3949  }
    3950 
    3951  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    3952  currItem.valueCount % 2 != 0)
    3953  {
    3954  m_SB.Add(": ");
    3955  }
    3956  else if(currItem.valueCount > 0)
    3957  {
    3958  m_SB.Add(", ");
    3959  WriteIndent();
    3960  }
    3961  else
    3962  {
    3963  WriteIndent();
    3964  }
    3965  ++currItem.valueCount;
    3966  }
    3967 }
    3968 
    3969 void VmaJsonWriter::WriteIndent(bool oneLess)
    3970 {
    3971  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    3972  {
    3973  m_SB.AddNewLine();
    3974 
    3975  size_t count = m_Stack.size();
    3976  if(count > 0 && oneLess)
    3977  {
    3978  --count;
    3979  }
    3980  for(size_t i = 0; i < count; ++i)
    3981  {
    3982  m_SB.Add(INDENT);
    3983  }
    3984  }
    3985 }
    3986 
    3987 #endif // #if VMA_STATS_STRING_ENABLED
    3988 
    3990 
    3991 VkDeviceSize VmaAllocation_T::GetOffset() const
    3992 {
    3993  switch(m_Type)
    3994  {
    3995  case ALLOCATION_TYPE_BLOCK:
    3996  return m_BlockAllocation.m_Offset;
    3997  case ALLOCATION_TYPE_DEDICATED:
    3998  return 0;
    3999  default:
    4000  VMA_ASSERT(0);
    4001  return 0;
    4002  }
    4003 }
    4004 
    4005 VkDeviceMemory VmaAllocation_T::GetMemory() const
    4006 {
    4007  switch(m_Type)
    4008  {
    4009  case ALLOCATION_TYPE_BLOCK:
    4010  return m_BlockAllocation.m_Block->m_hMemory;
    4011  case ALLOCATION_TYPE_DEDICATED:
    4012  return m_DedicatedAllocation.m_hMemory;
    4013  default:
    4014  VMA_ASSERT(0);
    4015  return VK_NULL_HANDLE;
    4016  }
    4017 }
    4018 
    4019 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    4020 {
    4021  switch(m_Type)
    4022  {
    4023  case ALLOCATION_TYPE_BLOCK:
    4024  return m_BlockAllocation.m_Block->m_MemoryTypeIndex;
    4025  case ALLOCATION_TYPE_DEDICATED:
    4026  return m_DedicatedAllocation.m_MemoryTypeIndex;
    4027  default:
    4028  VMA_ASSERT(0);
    4029  return UINT32_MAX;
    4030  }
    4031 }
    4032 
    4033 VMA_BLOCK_VECTOR_TYPE VmaAllocation_T::GetBlockVectorType() const
    4034 {
    4035  switch(m_Type)
    4036  {
    4037  case ALLOCATION_TYPE_BLOCK:
    4038  return m_BlockAllocation.m_Block->m_BlockVectorType;
    4039  case ALLOCATION_TYPE_DEDICATED:
    4040  return (m_DedicatedAllocation.m_PersistentMap ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED);
    4041  default:
    4042  VMA_ASSERT(0);
    4043  return VMA_BLOCK_VECTOR_TYPE_COUNT;
    4044  }
    4045 }
    4046 
    4047 void* VmaAllocation_T::GetMappedData() const
    4048 {
    4049  switch(m_Type)
    4050  {
    4051  case ALLOCATION_TYPE_BLOCK:
    4052  if(m_BlockAllocation.m_Block->m_pMappedData != VMA_NULL)
    4053  {
    4054  return (char*)m_BlockAllocation.m_Block->m_pMappedData + m_BlockAllocation.m_Offset;
    4055  }
    4056  else
    4057  {
    4058  return VMA_NULL;
    4059  }
    4060  break;
    4061  case ALLOCATION_TYPE_DEDICATED:
    4062  return m_DedicatedAllocation.m_pMappedData;
    4063  default:
    4064  VMA_ASSERT(0);
    4065  return VMA_NULL;
    4066  }
    4067 }
    4068 
    4069 bool VmaAllocation_T::CanBecomeLost() const
    4070 {
    4071  switch(m_Type)
    4072  {
    4073  case ALLOCATION_TYPE_BLOCK:
    4074  return m_BlockAllocation.m_CanBecomeLost;
    4075  case ALLOCATION_TYPE_DEDICATED:
    4076  return false;
    4077  default:
    4078  VMA_ASSERT(0);
    4079  return false;
    4080  }
    4081 }
    4082 
    4083 VmaPool VmaAllocation_T::GetPool() const
    4084 {
    4085  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    4086  return m_BlockAllocation.m_hPool;
    4087 }
    4088 
    4089 VkResult VmaAllocation_T::DedicatedAllocMapPersistentlyMappedMemory(VmaAllocator hAllocator)
    4090 {
    4091  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
    4092  if(m_DedicatedAllocation.m_PersistentMap)
    4093  {
    4094  return (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    4095  hAllocator->m_hDevice,
    4096  m_DedicatedAllocation.m_hMemory,
    4097  0,
    4098  VK_WHOLE_SIZE,
    4099  0,
    4100  &m_DedicatedAllocation.m_pMappedData);
    4101  }
    4102  return VK_SUCCESS;
    4103 }
    4104 void VmaAllocation_T::DedicatedAllocUnmapPersistentlyMappedMemory(VmaAllocator hAllocator)
    4105 {
    4106  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
    4107  if(m_DedicatedAllocation.m_pMappedData)
    4108  {
    4109  VMA_ASSERT(m_DedicatedAllocation.m_PersistentMap);
    4110  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_DedicatedAllocation.m_hMemory);
    4111  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
    4112  }
    4113 }
    4114 
    4115 
    4116 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    4117 {
    4118  VMA_ASSERT(CanBecomeLost());
    4119 
    4120  /*
    4121  Warning: This is a carefully designed algorithm.
    4122  Do not modify unless you really know what you're doing :)
    4123  */
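    // Standard compare-exchange retry loop: CompareExchangeLastUseFrameIndex
    // refreshes localLastUseFrameIndex when it fails, so each iteration either
    // observes the allocation already lost, observes it used too recently to
    // reclaim, or wins the race to swap the index to VMA_FRAME_INDEX_LOST.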
    4124  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    4125  for(;;)
    4126  {
    4127  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    4128  {
    4129  VMA_ASSERT(0);
    4130  return false;
    4131  }
    4132  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
    4133  {
    4134  return false;
    4135  }
    4136  else // Last use time earlier than current time.
    4137  {
    4138  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
    4139  {
    4140  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
    4141  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
    4142  return true;
    4143  }
    4144  }
    4145  }
    4146 }
    4147 
    4148 #if VMA_STATS_STRING_ENABLED
    4149 
    4150 // Names correspond to values of enum VmaSuballocationType.
    4151 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    4152  "FREE",
    4153  "UNKNOWN",
    4154  "BUFFER",
    4155  "IMAGE_UNKNOWN",
    4156  "IMAGE_LINEAR",
    4157  "IMAGE_OPTIMAL",
    4158 };
    4159 
    4160 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
    4161 {
    4162  json.BeginObject();
    4163 
    4164  json.WriteString("Blocks");
    4165  json.WriteNumber(stat.blockCount);
    4166 
    4167  json.WriteString("Allocations");
    4168  json.WriteNumber(stat.allocationCount);
    4169 
    4170  json.WriteString("UnusedRanges");
    4171  json.WriteNumber(stat.unusedRangeCount);
    4172 
    4173  json.WriteString("UsedBytes");
    4174  json.WriteNumber(stat.usedBytes);
    4175 
    4176  json.WriteString("UnusedBytes");
    4177  json.WriteNumber(stat.unusedBytes);
    4178 
    4179  if(stat.allocationCount > 1)
    4180  {
    4181  json.WriteString("AllocationSize");
    4182  json.BeginObject(true);
    4183  json.WriteString("Min");
    4184  json.WriteNumber(stat.allocationSizeMin);
    4185  json.WriteString("Avg");
    4186  json.WriteNumber(stat.allocationSizeAvg);
    4187  json.WriteString("Max");
    4188  json.WriteNumber(stat.allocationSizeMax);
    4189  json.EndObject();
    4190  }
    4191 
    4192  if(stat.unusedRangeCount > 1)
    4193  {
    4194  json.WriteString("UnusedRangeSize");
    4195  json.BeginObject(true);
    4196  json.WriteString("Min");
    4197  json.WriteNumber(stat.unusedRangeSizeMin);
    4198  json.WriteString("Avg");
    4199  json.WriteNumber(stat.unusedRangeSizeAvg);
    4200  json.WriteString("Max");
    4201  json.WriteNumber(stat.unusedRangeSizeMax);
    4202  json.EndObject();
    4203  }
    4204 
    4205  json.EndObject();
    4206 }
    4207 
    4208 #endif // #if VMA_STATS_STRING_ENABLED
    4209 
    4210 struct VmaSuballocationItemSizeLess
    4211 {
    4212  bool operator()(
    4213  const VmaSuballocationList::iterator lhs,
    4214  const VmaSuballocationList::iterator rhs) const
    4215  {
    4216  return lhs->size < rhs->size;
    4217  }
    4218  bool operator()(
    4219  const VmaSuballocationList::iterator lhs,
    4220  VkDeviceSize rhsSize) const
    4221  {
    4222  return lhs->size < rhsSize;
    4223  }
    4224 };
    4225 
    4227 // class VmaBlockMetadata
    4228 
    4229 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    4230  m_Size(0),
    4231  m_FreeCount(0),
    4232  m_SumFreeSize(0),
    4233  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    4234  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
    4235 {
    4236 }
    4237 
    4238 VmaBlockMetadata::~VmaBlockMetadata()
    4239 {
    4240 }
    4241 
    4242 void VmaBlockMetadata::Init(VkDeviceSize size)
    4243 {
    4244  m_Size = size;
    4245  m_FreeCount = 1;
    4246  m_SumFreeSize = size;
    4247 
    4248  VmaSuballocation suballoc = {};
    4249  suballoc.offset = 0;
    4250  suballoc.size = size;
    4251  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    4252  suballoc.hAllocation = VK_NULL_HANDLE;
    4253 
    4254  m_Suballocations.push_back(suballoc);
    4255  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    4256  --suballocItem;
    4257  m_FreeSuballocationsBySize.push_back(suballocItem);
    4258 }
    4259 
    4260 bool VmaBlockMetadata::Validate() const
    4261 {
    4262  if(m_Suballocations.empty())
    4263  {
    4264  return false;
    4265  }
    4266 
    4267  // Expected offset of new suballocation as calculated from previous ones.
    4268  VkDeviceSize calculatedOffset = 0;
    4269  // Expected number of free suballocations as calculated from traversing their list.
    4270  uint32_t calculatedFreeCount = 0;
    4271  // Expected sum size of free suballocations as calculated from traversing their list.
    4272  VkDeviceSize calculatedSumFreeSize = 0;
    4273  // Expected number of free suballocations that should be registered in
    4274  // m_FreeSuballocationsBySize calculated from traversing their list.
    4275  size_t freeSuballocationsToRegister = 0;
    4276  // True if previous visited suballocation was free.
    4277  bool prevFree = false;
    4278 
    4279  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    4280  suballocItem != m_Suballocations.cend();
    4281  ++suballocItem)
    4282  {
    4283  const VmaSuballocation& subAlloc = *suballocItem;
    4284 
    4285  // Actual offset of this suballocation doesn't match expected one.
    4286  if(subAlloc.offset != calculatedOffset)
    4287  {
    4288  return false;
    4289  }
    4290 
    4291  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
    4292  // Two adjacent free suballocations are invalid. They should be merged.
    4293  if(prevFree && currFree)
    4294  {
    4295  return false;
    4296  }
    4297  prevFree = currFree;
    4298 
    4299  if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
    4300  {
    4301  return false;
    4302  }
    4303 
    4304  if(currFree)
    4305  {
    4306  calculatedSumFreeSize += subAlloc.size;
    4307  ++calculatedFreeCount;
    4308  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    4309  {
    4310  ++freeSuballocationsToRegister;
    4311  }
    4312  }
    4313 
    4314  calculatedOffset += subAlloc.size;
    4315  }
    4316 
    4317  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    4318  // match expected one.
    4319  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
    4320  {
    4321  return false;
    4322  }
    4323 
    4324  VkDeviceSize lastSize = 0;
    4325  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    4326  {
    4327  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
    4328 
    4329  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
    4330  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
    4331  {
    4332  return false;
    4333  }
    4334  // They must be sorted by size ascending.
    4335  if(suballocItem->size < lastSize)
    4336  {
    4337  return false;
    4338  }
    4339 
    4340  lastSize = suballocItem->size;
    4341  }
    4342 
    4343  // Check if totals match calculated values.
    4344  return
    4345  ValidateFreeSuballocationList() &&
    4346  (calculatedOffset == m_Size) &&
    4347  (calculatedSumFreeSize == m_SumFreeSize) &&
    4348  (calculatedFreeCount == m_FreeCount);
    4349 }
    4350 
    4351 VkDeviceSize VmaBlockMetadata::GetUnusedRangeSizeMax() const
    4352 {
    4353  if(!m_FreeSuballocationsBySize.empty())
    4354  {
    4355  return m_FreeSuballocationsBySize.back()->size;
    4356  }
    4357  else
    4358  {
    4359  return 0;
    4360  }
    4361 }
    4362 
    4363 bool VmaBlockMetadata::IsEmpty() const
    4364 {
    4365  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
    4366 }
    4367 
    4368 void VmaBlockMetadata::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    4369 {
    4370  outInfo.blockCount = 1;
    4371 
    4372  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    4373  outInfo.allocationCount = rangeCount - m_FreeCount;
    4374  outInfo.unusedRangeCount = m_FreeCount;
    4375 
    4376  outInfo.unusedBytes = m_SumFreeSize;
    4377  outInfo.usedBytes = m_Size - outInfo.unusedBytes;
    4378 
    4379  outInfo.allocationSizeMin = UINT64_MAX;
    4380  outInfo.allocationSizeMax = 0;
    4381  outInfo.unusedRangeSizeMin = UINT64_MAX;
    4382  outInfo.unusedRangeSizeMax = 0;
    4383 
    4384  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    4385  suballocItem != m_Suballocations.cend();
    4386  ++suballocItem)
    4387  {
    4388  const VmaSuballocation& suballoc = *suballocItem;
    4389  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    4390  {
    4391  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    4392  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    4393  }
    4394  else
    4395  {
    4396  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
    4397  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
    4398  }
    4399  }
    4400 }
    4401 
    4402 void VmaBlockMetadata::AddPoolStats(VmaPoolStats& inoutStats) const
    4403 {
    4404  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    4405 
    4406  inoutStats.size += m_Size;
    4407  inoutStats.unusedSize += m_SumFreeSize;
    4408  inoutStats.allocationCount += rangeCount - m_FreeCount;
    4409  inoutStats.unusedRangeCount += m_FreeCount;
    4410  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    4411 }
    4412 
    4413 #if VMA_STATS_STRING_ENABLED
    4414 
    4415 void VmaBlockMetadata::PrintDetailedMap(class VmaJsonWriter& json) const
    4416 {
    4417  json.BeginObject();
    4418 
    4419  json.WriteString("TotalBytes");
    4420  json.WriteNumber(m_Size);
    4421 
    4422  json.WriteString("UnusedBytes");
    4423  json.WriteNumber(m_SumFreeSize);
    4424 
    4425  json.WriteString("Allocations");
    4426  json.WriteNumber(m_Suballocations.size() - m_FreeCount);
    4427 
    4428  json.WriteString("UnusedRanges");
    4429  json.WriteNumber(m_FreeCount);
    4430 
    4431  json.WriteString("Suballocations");
    4432  json.BeginArray();
    4433  size_t i = 0;
    4434  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    4435  suballocItem != m_Suballocations.cend();
    4436  ++suballocItem, ++i)
    4437  {
    4438  json.BeginObject(true);
    4439 
    4440  json.WriteString("Type");
    4441  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
    4442 
    4443  json.WriteString("Size");
    4444  json.WriteNumber(suballocItem->size);
    4445 
    4446  json.WriteString("Offset");
    4447  json.WriteNumber(suballocItem->offset);
    4448 
    4449  json.EndObject();
    4450  }
    4451  json.EndArray();
    4452 
    4453  json.EndObject();
    4454 }
    4455 
    4456 #endif // #if VMA_STATS_STRING_ENABLED
    4457 
    4458 /*
    4459 How many suitable free suballocations to analyze before choosing the best one.
    4460 - Set to 1 to use First-Fit algorithm - the first suitable free suballocation
    4461  will be chosen.
    4462 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
    4463  suballocations will be analyzed and the best one will be chosen.
    4464 - Any other value is also acceptable.
    4465 */
    4466 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
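// Example (illustrative numbers): with free suballocations of sizes
// {16, 64, 256} KiB kept sorted ascending, a 60 KiB request under Best-Fit
// binary-searches to the first size >= 60 KiB and tries the 64 KiB range
// first, moving on to 256 KiB only if the alignment or granularity checks in
// CheckAllocation() fail there.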
    4467 
    4468 void VmaBlockMetadata::CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest)
    4469 {
    4470  VMA_ASSERT(IsEmpty());
    4471  pAllocationRequest->offset = 0;
    4472  pAllocationRequest->sumFreeSize = m_SumFreeSize;
    4473  pAllocationRequest->sumItemSize = 0;
    4474  pAllocationRequest->item = m_Suballocations.begin();
    4475  pAllocationRequest->itemsToMakeLostCount = 0;
    4476 }
    4477 
    4478 bool VmaBlockMetadata::CreateAllocationRequest(
    4479  uint32_t currentFrameIndex,
    4480  uint32_t frameInUseCount,
    4481  VkDeviceSize bufferImageGranularity,
    4482  VkDeviceSize allocSize,
    4483  VkDeviceSize allocAlignment,
    4484  VmaSuballocationType allocType,
    4485  bool canMakeOtherLost,
    4486  VmaAllocationRequest* pAllocationRequest)
    4487 {
    4488  VMA_ASSERT(allocSize > 0);
    4489  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    4490  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    4491  VMA_HEAVY_ASSERT(Validate());
    4492 
    4493  // There is not enough total free space in this block to fulfill the request: Early return.
    4494  if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
    4495  {
    4496  return false;
    4497  }
    4498 
    4499  // Fast path: search m_FreeSuballocationsBySize, which is kept sorted by size ascending.
    4500  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    4501  if(freeSuballocCount > 0)
    4502  {
    4503  if(VMA_BEST_FIT)
    4504  {
    4505  // Find first free suballocation with size not less than allocSize.
    4506  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    4507  m_FreeSuballocationsBySize.data(),
    4508  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    4509  allocSize,
    4510  VmaSuballocationItemSizeLess());
    4511  size_t index = it - m_FreeSuballocationsBySize.data();
    4512  for(; index < freeSuballocCount; ++index)
    4513  {
    4514  if(CheckAllocation(
    4515  currentFrameIndex,
    4516  frameInUseCount,
    4517  bufferImageGranularity,
    4518  allocSize,
    4519  allocAlignment,
    4520  allocType,
    4521  m_FreeSuballocationsBySize[index],
    4522  false, // canMakeOtherLost
    4523  &pAllocationRequest->offset,
    4524  &pAllocationRequest->itemsToMakeLostCount,
    4525  &pAllocationRequest->sumFreeSize,
    4526  &pAllocationRequest->sumItemSize))
    4527  {
    4528  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    4529  return true;
    4530  }
    4531  }
    4532  }
    4533  else
    4534  {
    4535  // Search starting from the biggest suballocations (Worst-Fit).
    4536  for(size_t index = freeSuballocCount; index--; )
    4537  {
    4538  if(CheckAllocation(
    4539  currentFrameIndex,
    4540  frameInUseCount,
    4541  bufferImageGranularity,
    4542  allocSize,
    4543  allocAlignment,
    4544  allocType,
    4545  m_FreeSuballocationsBySize[index],
    4546  false, // canMakeOtherLost
    4547  &pAllocationRequest->offset,
    4548  &pAllocationRequest->itemsToMakeLostCount,
    4549  &pAllocationRequest->sumFreeSize,
    4550  &pAllocationRequest->sumItemSize))
    4551  {
    4552  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    4553  return true;
    4554  }
    4555  }
    4556  }
    4557  }
    4558 
    4559  if(canMakeOtherLost)
    4560  {
    4561  // Brute-force algorithm. TODO: Come up with something better.
    4562 
    4563  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
    4564  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
    4565 
    4566  VmaAllocationRequest tmpAllocRequest = {};
    4567  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    4568  suballocIt != m_Suballocations.end();
    4569  ++suballocIt)
    4570  {
    4571  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    4572  suballocIt->hAllocation->CanBecomeLost())
    4573  {
    4574  if(CheckAllocation(
    4575  currentFrameIndex,
    4576  frameInUseCount,
    4577  bufferImageGranularity,
    4578  allocSize,
    4579  allocAlignment,
    4580  allocType,
    4581  suballocIt,
    4582  canMakeOtherLost,
    4583  &tmpAllocRequest.offset,
    4584  &tmpAllocRequest.itemsToMakeLostCount,
    4585  &tmpAllocRequest.sumFreeSize,
    4586  &tmpAllocRequest.sumItemSize))
    4587  {
    4588  tmpAllocRequest.item = suballocIt;
    4589 
    4590  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
    4591  {
    4592  *pAllocationRequest = tmpAllocRequest;
    4593  }
    4594  }
    4595  }
    4596  }
    4597 
    4598  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
    4599  {
    4600  return true;
    4601  }
    4602  }
    4603 
    4604  return false;
    4605 }
    4606 
    4607 bool VmaBlockMetadata::MakeRequestedAllocationsLost(
    4608  uint32_t currentFrameIndex,
    4609  uint32_t frameInUseCount,
    4610  VmaAllocationRequest* pAllocationRequest)
    4611 {
    4612  while(pAllocationRequest->itemsToMakeLostCount > 0)
    4613  {
    4614  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
    4615  {
    4616  ++pAllocationRequest->item;
    4617  }
    4618  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    4619  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
    4620  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
    4621  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    4622  {
    4623  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
    4624  --pAllocationRequest->itemsToMakeLostCount;
    4625  }
    4626  else
    4627  {
    4628  return false;
    4629  }
    4630  }
    4631 
    4632  VMA_HEAVY_ASSERT(Validate());
    4633  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    4634  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
    4635 
    4636  return true;
    4637 }
    4638 
    4639 uint32_t VmaBlockMetadata::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    4640 {
    4641  uint32_t lostAllocationCount = 0;
    4642  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    4643  it != m_Suballocations.end();
    4644  ++it)
    4645  {
    4646  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
    4647  it->hAllocation->CanBecomeLost() &&
    4648  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    4649  {
    4650  it = FreeSuballocation(it);
    4651  ++lostAllocationCount;
    4652  }
    4653  }
    4654  return lostAllocationCount;
    4655 }
    4656 
    4657 void VmaBlockMetadata::Alloc(
    4658  const VmaAllocationRequest& request,
    4659  VmaSuballocationType type,
    4660  VkDeviceSize allocSize,
    4661  VmaAllocation hAllocation)
    4662 {
    4663  VMA_ASSERT(request.item != m_Suballocations.end());
    4664  VmaSuballocation& suballoc = *request.item;
    4665  // Given suballocation is a free block.
    4666  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    4667  // Given offset is inside this suballocation.
    4668  VMA_ASSERT(request.offset >= suballoc.offset);
    4669  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    4670  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    4671  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
    4672 
    4673  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    4674  // it to become used.
    4675  UnregisterFreeSuballocation(request.item);
    4676 
    4677  suballoc.offset = request.offset;
    4678  suballoc.size = allocSize;
    4679  suballoc.type = type;
    4680  suballoc.hAllocation = hAllocation;
    4681 
    4682  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    4683  if(paddingEnd)
    4684  {
    4685  VmaSuballocation paddingSuballoc = {};
    4686  paddingSuballoc.offset = request.offset + allocSize;
    4687  paddingSuballoc.size = paddingEnd;
    4688  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    4689  VmaSuballocationList::iterator next = request.item;
    4690  ++next;
    4691  const VmaSuballocationList::iterator paddingEndItem =
    4692  m_Suballocations.insert(next, paddingSuballoc);
    4693  RegisterFreeSuballocation(paddingEndItem);
    4694  }
    4695 
    4696  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    4697  if(paddingBegin)
    4698  {
    4699  VmaSuballocation paddingSuballoc = {};
    4700  paddingSuballoc.offset = request.offset - paddingBegin;
    4701  paddingSuballoc.size = paddingBegin;
    4702  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    4703  const VmaSuballocationList::iterator paddingBeginItem =
    4704  m_Suballocations.insert(request.item, paddingSuballoc);
    4705  RegisterFreeSuballocation(paddingBeginItem);
    4706  }
    4707 
    4708  // Update totals.
    4709  m_FreeCount = m_FreeCount - 1;
    4710  if(paddingBegin > 0)
    4711  {
    4712  ++m_FreeCount;
    4713  }
    4714  if(paddingEnd > 0)
    4715  {
    4716  ++m_FreeCount;
    4717  }
    4718  m_SumFreeSize -= allocSize;
    4719 }
    4720 
    4721 void VmaBlockMetadata::Free(const VmaAllocation allocation)
    4722 {
    4723  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    4724  suballocItem != m_Suballocations.end();
    4725  ++suballocItem)
    4726  {
    4727  VmaSuballocation& suballoc = *suballocItem;
    4728  if(suballoc.hAllocation == allocation)
    4729  {
    4730  FreeSuballocation(suballocItem);
    4731  VMA_HEAVY_ASSERT(Validate());
    4732  return;
    4733  }
    4734  }
    4735  VMA_ASSERT(0 && "Not found!");
    4736 }
    4737 
    4738 bool VmaBlockMetadata::ValidateFreeSuballocationList() const
    4739 {
    4740  VkDeviceSize lastSize = 0;
    4741  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    4742  {
    4743  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    4744 
    4745  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    4746  {
    4747  VMA_ASSERT(0);
    4748  return false;
    4749  }
    4750  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    4751  {
    4752  VMA_ASSERT(0);
    4753  return false;
    4754  }
    4755  if(it->size < lastSize)
    4756  {
    4757  VMA_ASSERT(0);
    4758  return false;
    4759  }
    4760 
    4761  lastSize = it->size;
    4762  }
    4763  return true;
    4764 }
    4765 
    4766 bool VmaBlockMetadata::CheckAllocation(
    4767  uint32_t currentFrameIndex,
    4768  uint32_t frameInUseCount,
    4769  VkDeviceSize bufferImageGranularity,
    4770  VkDeviceSize allocSize,
    4771  VkDeviceSize allocAlignment,
    4772  VmaSuballocationType allocType,
    4773  VmaSuballocationList::const_iterator suballocItem,
    4774  bool canMakeOtherLost,
    4775  VkDeviceSize* pOffset,
    4776  size_t* itemsToMakeLostCount,
    4777  VkDeviceSize* pSumFreeSize,
    4778  VkDeviceSize* pSumItemSize) const
    4779 {
    4780  VMA_ASSERT(allocSize > 0);
    4781  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    4782  VMA_ASSERT(suballocItem != m_Suballocations.cend());
    4783  VMA_ASSERT(pOffset != VMA_NULL);
    4784 
    4785  *itemsToMakeLostCount = 0;
    4786  *pSumFreeSize = 0;
    4787  *pSumItemSize = 0;
    4788 
    4789  if(canMakeOtherLost)
    4790  {
    4791  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    4792  {
    4793  *pSumFreeSize = suballocItem->size;
    4794  }
    4795  else
    4796  {
    4797  if(suballocItem->hAllocation->CanBecomeLost() &&
    4798  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    4799  {
    4800  ++*itemsToMakeLostCount;
    4801  *pSumItemSize = suballocItem->size;
    4802  }
    4803  else
    4804  {
    4805  return false;
    4806  }
    4807  }
    4808 
    4809  // Remaining size is too small for this request: Early return.
    4810  if(m_Size - suballocItem->offset < allocSize)
    4811  {
    4812  return false;
    4813  }
    4814 
    4815  // Start from offset equal to beginning of this suballocation.
    4816  *pOffset = suballocItem->offset;
    4817 
    4818  // Apply VMA_DEBUG_MARGIN at the beginning.
    4819  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
    4820  {
    4821  *pOffset += VMA_DEBUG_MARGIN;
    4822  }
    4823 
    4824  // Apply alignment.
    4825  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
    4826  *pOffset = VmaAlignUp(*pOffset, alignment);
    4827 
    4828  // Check previous suballocations for BufferImageGranularity conflicts.
    4829  // Make bigger alignment if necessary.
    4830  if(bufferImageGranularity > 1)
    4831  {
    4832  bool bufferImageGranularityConflict = false;
    4833  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
    4834  while(prevSuballocItem != m_Suballocations.cbegin())
    4835  {
    4836  --prevSuballocItem;
    4837  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
    4838  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
    4839  {
    4840  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    4841  {
    4842  bufferImageGranularityConflict = true;
    4843  break;
    4844  }
    4845  }
    4846  else
    4847  // Already on previous page.
    4848  break;
    4849  }
    4850  if(bufferImageGranularityConflict)
    4851  {
    4852  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
    4853  }
    4854  }
    4855 
    4856  // Now that we have final *pOffset, check if we are past suballocItem.
    4857  // If yes, return false - this function should be called for another suballocItem as starting point.
    4858  if(*pOffset >= suballocItem->offset + suballocItem->size)
    4859  {
    4860  return false;
    4861  }
    4862 
    4863  // Calculate padding at the beginning based on current offset.
    4864  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
    4865 
    4866  // Calculate required margin at the end if this is not last suballocation.
    4867  VmaSuballocationList::const_iterator next = suballocItem;
    4868  ++next;
    4869  const VkDeviceSize requiredEndMargin =
    4870  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
    4871 
    4872  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
    4873  // Another early return check.
    4874  if(suballocItem->offset + totalSize > m_Size)
    4875  {
    4876  return false;
    4877  }
    4878 
    4879  // Advance lastSuballocItem until desired size is reached.
    4880  // Update itemsToMakeLostCount.
    4881  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
    4882  if(totalSize > suballocItem->size)
    4883  {
    4884  VkDeviceSize remainingSize = totalSize - suballocItem->size;
    4885  while(remainingSize > 0)
    4886  {
    4887  ++lastSuballocItem;
    4888  if(lastSuballocItem == m_Suballocations.cend())
    4889  {
    4890  return false;
    4891  }
    4892  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    4893  {
    4894  *pSumFreeSize += lastSuballocItem->size;
    4895  }
    4896  else
    4897  {
    4898  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
    4899  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
    4900  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    4901  {
    4902  ++*itemsToMakeLostCount;
    4903  *pSumItemSize += lastSuballocItem->size;
    4904  }
    4905  else
    4906  {
    4907  return false;
    4908  }
    4909  }
    4910  remainingSize = (lastSuballocItem->size < remainingSize) ?
    4911  remainingSize - lastSuballocItem->size : 0;
    4912  }
    4913  }
    4914 
    4915  // Check next suballocations for BufferImageGranularity conflicts.
    4916  // If conflict exists, we must mark more allocations lost or fail.
    4917  if(bufferImageGranularity > 1)
    4918  {
    4919  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
    4920  ++nextSuballocItem;
    4921  while(nextSuballocItem != m_Suballocations.cend())
    4922  {
    4923  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
    4924  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    4925  {
    4926  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    4927  {
    4928  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
    4929  if(nextSuballoc.hAllocation->CanBecomeLost() &&
    4930  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    4931  {
    4932  ++*itemsToMakeLostCount;
    4933  }
    4934  else
    4935  {
    4936  return false;
    4937  }
    4938  }
    4939  }
    4940  else
    4941  {
    4942  // Already on next page.
    4943  break;
    4944  }
    4945  ++nextSuballocItem;
    4946  }
    4947  }
    4948  }
    4949  else
    4950  {
    4951  const VmaSuballocation& suballoc = *suballocItem;
    4952  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    4953 
    4954  *pSumFreeSize = suballoc.size;
    4955 
    4956  // Size of this suballocation is too small for this request: Early return.
    4957  if(suballoc.size < allocSize)
    4958  {
    4959  return false;
    4960  }
    4961 
    4962  // Start from offset equal to beginning of this suballocation.
    4963  *pOffset = suballoc.offset;
    4964 
    4965  // Apply VMA_DEBUG_MARGIN at the beginning.
    4966  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
    4967  {
    4968  *pOffset += VMA_DEBUG_MARGIN;
    4969  }
    4970 
    4971  // Apply alignment.
    4972  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
    4973  *pOffset = VmaAlignUp(*pOffset, alignment);
    4974 
    4975  // Check previous suballocations for BufferImageGranularity conflicts.
    4976  // Make bigger alignment if necessary.
    4977  if(bufferImageGranularity > 1)
    4978  {
    4979  bool bufferImageGranularityConflict = false;
    4980  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
    4981  while(prevSuballocItem != m_Suballocations.cbegin())
    4982  {
    4983  --prevSuballocItem;
    4984  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
    4985  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
    4986  {
    4987  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    4988  {
    4989  bufferImageGranularityConflict = true;
    4990  break;
    4991  }
    4992  }
    4993  else
    4994  // Already on previous page.
    4995  break;
    4996  }
    4997  if(bufferImageGranularityConflict)
    4998  {
    4999  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
    5000  }
    5001  }
    5002 
    5003  // Calculate padding at the beginning based on current offset.
    5004  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
    5005 
    5006  // Calculate required margin at the end if this is not last suballocation.
    5007  VmaSuballocationList::const_iterator next = suballocItem;
    5008  ++next;
    5009  const VkDeviceSize requiredEndMargin =
    5010  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
    5011 
    5012  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
    5013  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
    5014  {
    5015  return false;
    5016  }
    5017 
    5018  // Check next suballocations for BufferImageGranularity conflicts.
    5019  // If conflict exists, allocation cannot be made here.
    5020  if(bufferImageGranularity > 1)
    5021  {
    5022  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
    5023  ++nextSuballocItem;
    5024  while(nextSuballocItem != m_Suballocations.cend())
    5025  {
    5026  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
    5027  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    5028  {
    5029  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    5030  {
    5031  return false;
    5032  }
    5033  }
    5034  else
    5035  {
    5036  // Already on next page.
    5037  break;
    5038  }
    5039  ++nextSuballocItem;
    5040  }
    5041  }
    5042  }
    5043 
    5044  // All tests passed: Success. pOffset is already filled.
    5045  return true;
    5046 }
    5047 
    5048 void VmaBlockMetadata::MergeFreeWithNext(VmaSuballocationList::iterator item)
    5049 {
    5050  VMA_ASSERT(item != m_Suballocations.end());
    5051  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    5052 
    5053  VmaSuballocationList::iterator nextItem = item;
    5054  ++nextItem;
    5055  VMA_ASSERT(nextItem != m_Suballocations.end());
    5056  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    5057 
    5058  item->size += nextItem->size;
    5059  --m_FreeCount;
    5060  m_Suballocations.erase(nextItem);
    5061 }
    5062 
    5063 VmaSuballocationList::iterator VmaBlockMetadata::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
    5064 {
    5065  // Change this suballocation to be marked as free.
    5066  VmaSuballocation& suballoc = *suballocItem;
    5067  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    5068  suballoc.hAllocation = VK_NULL_HANDLE;
    5069 
    5070  // Update totals.
    5071  ++m_FreeCount;
    5072  m_SumFreeSize += suballoc.size;
    5073 
    5074  // Merge with previous and/or next suballocation if it's also free.
    5075  bool mergeWithNext = false;
    5076  bool mergeWithPrev = false;
    5077 
    5078  VmaSuballocationList::iterator nextItem = suballocItem;
    5079  ++nextItem;
    5080  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    5081  {
    5082  mergeWithNext = true;
    5083  }
    5084 
    5085  VmaSuballocationList::iterator prevItem = suballocItem;
    5086  if(suballocItem != m_Suballocations.begin())
    5087  {
    5088  --prevItem;
    5089  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    5090  {
    5091  mergeWithPrev = true;
    5092  }
    5093  }
    5094 
    5095  if(mergeWithNext)
    5096  {
    5097  UnregisterFreeSuballocation(nextItem);
    5098  MergeFreeWithNext(suballocItem);
    5099  }
    5100 
    5101  if(mergeWithPrev)
    5102  {
    5103  UnregisterFreeSuballocation(prevItem);
    5104  MergeFreeWithNext(prevItem);
    5105  RegisterFreeSuballocation(prevItem);
    5106  return prevItem;
    5107  }
    5108  else
    5109  {
    5110  RegisterFreeSuballocation(suballocItem);
    5111  return suballocItem;
    5112  }
    5113 }
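// Merge example: freeing the middle item of [free A][used B][free C] first
// merges B with C (MergeFreeWithNext on B), then merges A with the combined
// range, leaving one free suballocation that is re-registered under its new size.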
    5114 
    5115 void VmaBlockMetadata::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    5116 {
    5117  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    5118  VMA_ASSERT(item->size > 0);
    5119 
    5120  // You may want to enable this validation at the beginning or at the end of
    5121  // this function, depending on what you want to check.
    5122  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    5123 
    5124  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    5125  {
    5126  if(m_FreeSuballocationsBySize.empty())
    5127  {
    5128  m_FreeSuballocationsBySize.push_back(item);
    5129  }
    5130  else
    5131  {
    5132  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    5133  }
    5134  }
    5135 
    5136  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    5137 }
    5138 
    5139 
    5140 void VmaBlockMetadata::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
    5141 {
    5142  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    5143  VMA_ASSERT(item->size > 0);
    5144 
 5145  // You may want to enable this validation at the beginning or at the end of
 5146  // this function, depending on what you want to check.
    5147  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    5148 
    5149  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    5150  {
    5151  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    5152  m_FreeSuballocationsBySize.data(),
    5153  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
    5154  item,
    5155  VmaSuballocationItemSizeLess());
    5156  for(size_t index = it - m_FreeSuballocationsBySize.data();
    5157  index < m_FreeSuballocationsBySize.size();
    5158  ++index)
    5159  {
    5160  if(m_FreeSuballocationsBySize[index] == item)
    5161  {
    5162  VmaVectorRemove(m_FreeSuballocationsBySize, index);
    5163  return;
    5164  }
    5165  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
    5166  }
    5167  VMA_ASSERT(0 && "Not found.");
    5168  }
    5169 
    5170  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    5171 }
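
// The lookup pattern used above, restated: because m_FreeSuballocationsBySize is
// sorted by size only, a binary search jumps to the first entry of equal size and
// a short linear scan over that run of equal sizes locates the exact iterator.
// A minimal lower-bound over a raw array, equivalent in spirit to
// VmaBinaryFindFirstNotLess (hypothetical helper, illustration only):
static inline uint32_t* VmaExampleLowerBound(uint32_t* beg, uint32_t* end, uint32_t key)
{
    size_t count = end - beg;
    while(count > 0)
    {
        const size_t step = count / 2;
        uint32_t* it = beg + step;
        if(*it < key)
        {
            beg = it + 1;
            count -= step + 1;
        }
        else
        {
            count = step;
        }
    }
    return beg; // First element not less than key, or end if none exists.
}
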
    5172 
    5174 // class VmaDeviceMemoryBlock
    5175 
    5176 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    5177  m_MemoryTypeIndex(UINT32_MAX),
    5178  m_BlockVectorType(VMA_BLOCK_VECTOR_TYPE_COUNT),
    5179  m_hMemory(VK_NULL_HANDLE),
    5180  m_PersistentMap(false),
    5181  m_pMappedData(VMA_NULL),
    5182  m_Metadata(hAllocator)
    5183 {
    5184 }
    5185 
    5186 void VmaDeviceMemoryBlock::Init(
    5187  uint32_t newMemoryTypeIndex,
    5188  VMA_BLOCK_VECTOR_TYPE newBlockVectorType,
    5189  VkDeviceMemory newMemory,
    5190  VkDeviceSize newSize,
    5191  bool persistentMap,
    5192  void* pMappedData)
    5193 {
    5194  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    5195 
    5196  m_MemoryTypeIndex = newMemoryTypeIndex;
    5197  m_BlockVectorType = newBlockVectorType;
    5198  m_hMemory = newMemory;
    5199  m_PersistentMap = persistentMap;
    5200  m_pMappedData = pMappedData;
    5201 
    5202  m_Metadata.Init(newSize);
    5203 }
    5204 
    5205 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
    5206 {
    5207  // This is the most important assert in the entire library.
    5208  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    5209  VMA_ASSERT(m_Metadata.IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
    5210 
    5211  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    5212  if(m_pMappedData != VMA_NULL)
    5213  {
    5214  (allocator->GetVulkanFunctions().vkUnmapMemory)(allocator->m_hDevice, m_hMemory);
    5215  m_pMappedData = VMA_NULL;
    5216  }
    5217 
    5218  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Metadata.GetSize(), m_hMemory);
    5219  m_hMemory = VK_NULL_HANDLE;
    5220 }
    5221 
    5222 bool VmaDeviceMemoryBlock::Validate() const
    5223 {
    5224  if((m_hMemory == VK_NULL_HANDLE) ||
    5225  (m_Metadata.GetSize() == 0))
    5226  {
    5227  return false;
    5228  }
    5229 
    5230  return m_Metadata.Validate();
    5231 }
    5232 
    5233 static void InitStatInfo(VmaStatInfo& outInfo)
    5234 {
    5235  memset(&outInfo, 0, sizeof(outInfo));
    5236  outInfo.allocationSizeMin = UINT64_MAX;
    5237  outInfo.unusedRangeSizeMin = UINT64_MAX;
    5238 }
    5239 
    5240 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
    5241 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
    5242 {
    5243  inoutInfo.blockCount += srcInfo.blockCount;
    5244  inoutInfo.allocationCount += srcInfo.allocationCount;
    5245  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    5246  inoutInfo.usedBytes += srcInfo.usedBytes;
    5247  inoutInfo.unusedBytes += srcInfo.unusedBytes;
    5248  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    5249  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    5250  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    5251  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
    5252 }
    5253 
    5254 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    5255 {
    5256  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    5257  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    5258  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    5259  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    5260 }
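
// Worked example of the rounded averages computed above (illustration only):
// VmaRoundDiv<T>(x, y), defined earlier in this file, evaluates (x + y / 2) / y
// in integer arithmetic, i.e. division rounded to nearest:
//   VmaRoundDiv<VkDeviceSize>(1000, 3) == (1000 + 1) / 3 == 333
//   VmaRoundDiv<VkDeviceSize>(1001, 3) == (1001 + 1) / 3 == 334
// The two ternaries guard against allocationCount == 0 or unusedRangeCount == 0,
// yielding an average of 0 instead of dividing by zero.
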
    5261 
    5262 VmaPool_T::VmaPool_T(
    5263  VmaAllocator hAllocator,
    5264  const VmaPoolCreateInfo& createInfo) :
    5265  m_BlockVector(
    5266  hAllocator,
    5267  createInfo.memoryTypeIndex,
    5268  (createInfo.flags & VMA_POOL_CREATE_PERSISTENT_MAP_BIT) != 0 ?
    5269  VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED,
    5270  createInfo.blockSize,
    5271  createInfo.minBlockCount,
    5272  createInfo.maxBlockCount,
    5273  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
    5274  createInfo.frameInUseCount,
    5275  true) // isCustomPool
    5276 {
    5277 }
    5278 
    5279 VmaPool_T::~VmaPool_T()
    5280 {
    5281 }
    5282 
    5283 #if VMA_STATS_STRING_ENABLED
    5284 
    5285 #endif // #if VMA_STATS_STRING_ENABLED
    5286 
    5287 VmaBlockVector::VmaBlockVector(
    5288  VmaAllocator hAllocator,
    5289  uint32_t memoryTypeIndex,
    5290  VMA_BLOCK_VECTOR_TYPE blockVectorType,
    5291  VkDeviceSize preferredBlockSize,
    5292  size_t minBlockCount,
    5293  size_t maxBlockCount,
    5294  VkDeviceSize bufferImageGranularity,
    5295  uint32_t frameInUseCount,
    5296  bool isCustomPool) :
    5297  m_hAllocator(hAllocator),
    5298  m_MemoryTypeIndex(memoryTypeIndex),
    5299  m_BlockVectorType(blockVectorType),
    5300  m_PreferredBlockSize(preferredBlockSize),
    5301  m_MinBlockCount(minBlockCount),
    5302  m_MaxBlockCount(maxBlockCount),
    5303  m_BufferImageGranularity(bufferImageGranularity),
    5304  m_FrameInUseCount(frameInUseCount),
    5305  m_IsCustomPool(isCustomPool),
    5306  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    5307  m_HasEmptyBlock(false),
    5308  m_pDefragmentator(VMA_NULL)
    5309 {
    5310 }
    5311 
    5312 VmaBlockVector::~VmaBlockVector()
    5313 {
    5314  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
    5315 
    5316  for(size_t i = m_Blocks.size(); i--; )
    5317  {
    5318  m_Blocks[i]->Destroy(m_hAllocator);
    5319  vma_delete(m_hAllocator, m_Blocks[i]);
    5320  }
    5321 }
    5322 
    5323 VkResult VmaBlockVector::CreateMinBlocks()
    5324 {
    5325  for(size_t i = 0; i < m_MinBlockCount; ++i)
    5326  {
    5327  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    5328  if(res != VK_SUCCESS)
    5329  {
    5330  return res;
    5331  }
    5332  }
    5333  return VK_SUCCESS;
    5334 }
    5335 
    5336 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    5337 {
    5338  pStats->size = 0;
    5339  pStats->unusedSize = 0;
    5340  pStats->allocationCount = 0;
    5341  pStats->unusedRangeCount = 0;
    5342  pStats->unusedRangeSizeMax = 0;
    5343 
    5344  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5345 
    5346  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    5347  {
    5348  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    5349  VMA_ASSERT(pBlock);
    5350  VMA_HEAVY_ASSERT(pBlock->Validate());
    5351  pBlock->m_Metadata.AddPoolStats(*pStats);
    5352  }
    5353 }
    5354 
    5355 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    5356 
    5357 VkResult VmaBlockVector::Allocate(
    5358  VmaPool hCurrentPool,
    5359  uint32_t currentFrameIndex,
    5360  const VkMemoryRequirements& vkMemReq,
    5361  const VmaAllocationCreateInfo& createInfo,
    5362  VmaSuballocationType suballocType,
    5363  VmaAllocation* pAllocation)
    5364 {
    5365  // Validate flags.
    5366  if(createInfo.pool != VK_NULL_HANDLE &&
    5367  ((createInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0) != (m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED))
    5368  {
    5369  VMA_ASSERT(0 && "Usage of VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT must match VMA_POOL_CREATE_PERSISTENT_MAP_BIT.");
    5370  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    5371  }
    5372 
    5373  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5374 
    5375  // 1. Search existing allocations. Try to allocate without making other allocations lost.
    5376  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    5377  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    5378  {
    5379  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    5380  VMA_ASSERT(pCurrBlock);
    5381  VmaAllocationRequest currRequest = {};
    5382  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
    5383  currentFrameIndex,
    5384  m_FrameInUseCount,
    5385  m_BufferImageGranularity,
    5386  vkMemReq.size,
    5387  vkMemReq.alignment,
    5388  suballocType,
    5389  false, // canMakeOtherLost
    5390  &currRequest))
    5391  {
    5392  // Allocate from pCurrBlock.
    5393  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
    5394 
 5395  // We no longer have an empty block.
    5396  if(pCurrBlock->m_Metadata.IsEmpty())
    5397  {
    5398  m_HasEmptyBlock = false;
    5399  }
    5400 
    5401  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
    5402  pCurrBlock->m_Metadata.Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
    5403  (*pAllocation)->InitBlockAllocation(
    5404  hCurrentPool,
    5405  pCurrBlock,
    5406  currRequest.offset,
    5407  vkMemReq.alignment,
    5408  vkMemReq.size,
    5409  suballocType,
    5410  createInfo.pUserData,
    5411  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    5412  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
 5413  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    5414  return VK_SUCCESS;
    5415  }
    5416  }
    5417 
    5418  const bool canCreateNewBlock =
    5419  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
    5420  (m_Blocks.size() < m_MaxBlockCount);
    5421 
    5422  // 2. Try to create new block.
    5423  if(canCreateNewBlock)
    5424  {
    5425  // 2.1. Start with full preferredBlockSize.
    5426  VkDeviceSize blockSize = m_PreferredBlockSize;
    5427  size_t newBlockIndex = 0;
    5428  VkResult res = CreateBlock(blockSize, &newBlockIndex);
    5429  // Allocating blocks of other sizes is allowed only in default pools.
    5430  // In custom pools block size is fixed.
    5431  if(res < 0 && m_IsCustomPool == false)
    5432  {
    5433  // 2.2. Try half the size.
    5434  blockSize /= 2;
    5435  if(blockSize >= vkMemReq.size)
    5436  {
    5437  res = CreateBlock(blockSize, &newBlockIndex);
    5438  if(res < 0)
    5439  {
    5440  // 2.3. Try quarter the size.
    5441  blockSize /= 2;
    5442  if(blockSize >= vkMemReq.size)
    5443  {
    5444  res = CreateBlock(blockSize, &newBlockIndex);
    5445  }
    5446  }
    5447  }
    5448  }
    5449  if(res == VK_SUCCESS)
    5450  {
    5451  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
    5452  VMA_ASSERT(pBlock->m_Metadata.GetSize() >= vkMemReq.size);
    5453 
 5454  // Allocate from pBlock. Because it is empty, allocRequest can be filled trivially.
    5455  VmaAllocationRequest allocRequest;
    5456  pBlock->m_Metadata.CreateFirstAllocationRequest(&allocRequest);
    5457  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
    5458  pBlock->m_Metadata.Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
    5459  (*pAllocation)->InitBlockAllocation(
    5460  hCurrentPool,
    5461  pBlock,
    5462  allocRequest.offset,
    5463  vkMemReq.alignment,
    5464  vkMemReq.size,
    5465  suballocType,
    5466  createInfo.pUserData,
    5467  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    5468  VMA_HEAVY_ASSERT(pBlock->Validate());
 5469  VMA_DEBUG_LOG(" Created new block Size=%llu", blockSize);
    5470 
    5471  return VK_SUCCESS;
    5472  }
    5473  }
    5474 
    5475  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    5476 
    5477  // 3. Try to allocate from existing blocks with making other allocations lost.
    5478  if(canMakeOtherLost)
    5479  {
    5480  uint32_t tryIndex = 0;
    5481  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
    5482  {
    5483  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
    5484  VmaAllocationRequest bestRequest = {};
    5485  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
    5486 
    5487  // 1. Search existing allocations.
    5488  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    5489  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    5490  {
    5491  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    5492  VMA_ASSERT(pCurrBlock);
    5493  VmaAllocationRequest currRequest = {};
    5494  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
    5495  currentFrameIndex,
    5496  m_FrameInUseCount,
    5497  m_BufferImageGranularity,
    5498  vkMemReq.size,
    5499  vkMemReq.alignment,
    5500  suballocType,
    5501  canMakeOtherLost,
    5502  &currRequest))
    5503  {
    5504  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    5505  if(pBestRequestBlock == VMA_NULL ||
    5506  currRequestCost < bestRequestCost)
    5507  {
    5508  pBestRequestBlock = pCurrBlock;
    5509  bestRequest = currRequest;
    5510  bestRequestCost = currRequestCost;
    5511 
    5512  if(bestRequestCost == 0)
    5513  {
    5514  break;
    5515  }
    5516  }
    5517  }
    5518  }
    5519 
    5520  if(pBestRequestBlock != VMA_NULL)
    5521  {
    5522  if(pBestRequestBlock->m_Metadata.MakeRequestedAllocationsLost(
    5523  currentFrameIndex,
    5524  m_FrameInUseCount,
    5525  &bestRequest))
    5526  {
 5527  // We no longer have an empty block.
    5528  if(pBestRequestBlock->m_Metadata.IsEmpty())
    5529  {
    5530  m_HasEmptyBlock = false;
    5531  }
 5532  // Allocate from pBestRequestBlock.
    5533  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
    5534  pBestRequestBlock->m_Metadata.Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
    5535  (*pAllocation)->InitBlockAllocation(
    5536  hCurrentPool,
    5537  pBestRequestBlock,
    5538  bestRequest.offset,
    5539  vkMemReq.alignment,
    5540  vkMemReq.size,
    5541  suballocType,
    5542  createInfo.pUserData,
    5543  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
 5544  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
 5545  VMA_DEBUG_LOG(" Returned from existing block");
    5546  return VK_SUCCESS;
    5547  }
    5548  // else: Some allocations must have been touched while we are here. Next try.
    5549  }
    5550  else
    5551  {
    5552  // Could not find place in any of the blocks - break outer loop.
    5553  break;
    5554  }
    5555  }
 5556  /* Maximum number of tries exceeded - a very unlikely event that can occur only
 5557  when many other threads are simultaneously touching allocations, making it
 5558  impossible to mark them as lost at the same time as we try to allocate. */
    5559  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
    5560  {
    5561  return VK_ERROR_TOO_MANY_OBJECTS;
    5562  }
    5563  }
    5564 
    5565  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    5566 }
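
// Condensed outline of the strategy above (illustration only):
//   1. Walk existing blocks (kept sorted by ascending free space) and take the
//      first one that can satisfy the request without making anything lost.
//   2. If allowed, create a new block; on failure retry at 1/2 and then 1/4 of
//      the preferred size, but never below the requested size (custom pools have
//      a fixed block size, so they skip this fallback).
//   3. With CAN_MAKE_OTHER_LOST, pick the block whose request would lose the
//      fewest bytes of other allocations, try to actually make them lost, and
//      retry up to VMA_ALLOCATION_TRY_COUNT times if other threads interfere.
// The size fallback of step 2 in isolation, as a hypothetical helper:
static inline VkDeviceSize VmaExampleNextBlockSize(VkDeviceSize blockSize, VkDeviceSize requiredSize)
{
    const VkDeviceSize halved = blockSize / 2;
    // Returns 0 when no smaller block could still fit the allocation.
    return (halved >= requiredSize) ? halved : 0;
}
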
    5567 
    5568 void VmaBlockVector::Free(
    5569  VmaAllocation hAllocation)
    5570 {
    5571  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
    5572 
    5573  // Scope for lock.
    5574  {
    5575  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5576 
    5577  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    5578 
    5579  pBlock->m_Metadata.Free(hAllocation);
    5580  VMA_HEAVY_ASSERT(pBlock->Validate());
    5581 
 5582  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
    5583 
    5584  // pBlock became empty after this deallocation.
    5585  if(pBlock->m_Metadata.IsEmpty())
    5586  {
 5587  // We already have an empty block. We don't want two, so delete this one.
    5588  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
    5589  {
    5590  pBlockToDelete = pBlock;
    5591  Remove(pBlock);
    5592  }
 5593  // We now have our first empty block.
    5594  else
    5595  {
    5596  m_HasEmptyBlock = true;
    5597  }
    5598  }
    5599  // pBlock didn't become empty, but we have another empty block - find and free that one.
 5600  // (This is optional - just a heuristic.)
    5601  else if(m_HasEmptyBlock)
    5602  {
    5603  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
    5604  if(pLastBlock->m_Metadata.IsEmpty() && m_Blocks.size() > m_MinBlockCount)
    5605  {
    5606  pBlockToDelete = pLastBlock;
    5607  m_Blocks.pop_back();
    5608  m_HasEmptyBlock = false;
    5609  }
    5610  }
    5611 
    5612  IncrementallySortBlocks();
    5613  }
    5614 
 5615  // Destruction of an empty block. Deferred until this point, outside of the
 5616  // mutex lock, for performance reasons.
    5617  if(pBlockToDelete != VMA_NULL)
    5618  {
 5619  VMA_DEBUG_LOG(" Deleted empty block");
    5620  pBlockToDelete->Destroy(m_hAllocator);
    5621  vma_delete(m_hAllocator, pBlockToDelete);
    5622  }
    5623 }
    5624 
    5625 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    5626 {
    5627  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    5628  {
    5629  if(m_Blocks[blockIndex] == pBlock)
    5630  {
    5631  VmaVectorRemove(m_Blocks, blockIndex);
    5632  return;
    5633  }
    5634  }
    5635  VMA_ASSERT(0);
    5636 }
    5637 
    5638 void VmaBlockVector::IncrementallySortBlocks()
    5639 {
    5640  // Bubble sort only until first swap.
    5641  for(size_t i = 1; i < m_Blocks.size(); ++i)
    5642  {
    5643  if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() > m_Blocks[i]->m_Metadata.GetSumFreeSize())
    5644  {
    5645  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    5646  return;
    5647  }
    5648  }
    5649 }
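
// The amortized-sorting idea above as a self-contained sketch (hypothetical
// helper): each call performs at most one adjacent swap, so the sequence drifts
// toward ascending order across many Free() calls instead of paying for a full
// sort on every deallocation.
static inline void VmaExampleIncrementalSortStep(VkDeviceSize* values, size_t count)
{
    for(size_t i = 1; i < count; ++i)
    {
        if(values[i - 1] > values[i])
        {
            const VkDeviceSize tmp = values[i - 1];
            values[i - 1] = values[i];
            values[i] = tmp;
            return; // At most one swap per call.
        }
    }
}
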
    5650 
    5651 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    5652 {
    5653  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    5654  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    5655  allocInfo.allocationSize = blockSize;
    5656  VkDeviceMemory mem = VK_NULL_HANDLE;
    5657  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    5658  if(res < 0)
    5659  {
    5660  return res;
    5661  }
    5662 
    5663  // New VkDeviceMemory successfully created.
    5664 
    5665  // Map memory if needed.
    5666  void* pMappedData = VMA_NULL;
    5667  const bool persistentMap = (m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED);
    5668  if(persistentMap && m_hAllocator->m_UnmapPersistentlyMappedMemoryCounter == 0)
    5669  {
    5670  res = (*m_hAllocator->GetVulkanFunctions().vkMapMemory)(
    5671  m_hAllocator->m_hDevice,
    5672  mem,
    5673  0,
    5674  VK_WHOLE_SIZE,
    5675  0,
    5676  &pMappedData);
    5677  if(res < 0)
    5678  {
    5679  VMA_DEBUG_LOG(" vkMapMemory FAILED");
    5680  m_hAllocator->FreeVulkanMemory(m_MemoryTypeIndex, blockSize, mem);
    5681  return res;
    5682  }
    5683  }
    5684 
 5685  // Create a new block object for it.
    5686  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    5687  pBlock->Init(
    5688  m_MemoryTypeIndex,
    5689  (VMA_BLOCK_VECTOR_TYPE)m_BlockVectorType,
    5690  mem,
    5691  allocInfo.allocationSize,
    5692  persistentMap,
    5693  pMappedData);
    5694 
    5695  m_Blocks.push_back(pBlock);
    5696  if(pNewBlockIndex != VMA_NULL)
    5697  {
    5698  *pNewBlockIndex = m_Blocks.size() - 1;
    5699  }
    5700 
    5701  return VK_SUCCESS;
    5702 }
    5703 
    5704 #if VMA_STATS_STRING_ENABLED
    5705 
    5706 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
    5707 {
    5708  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5709 
    5710  json.BeginObject();
    5711 
    5712  if(m_IsCustomPool)
    5713  {
    5714  json.WriteString("MemoryTypeIndex");
    5715  json.WriteNumber(m_MemoryTypeIndex);
    5716 
    5717  if(m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
    5718  {
    5719  json.WriteString("Mapped");
    5720  json.WriteBool(true);
    5721  }
    5722 
    5723  json.WriteString("BlockSize");
    5724  json.WriteNumber(m_PreferredBlockSize);
    5725 
    5726  json.WriteString("BlockCount");
    5727  json.BeginObject(true);
    5728  if(m_MinBlockCount > 0)
    5729  {
    5730  json.WriteString("Min");
    5731  json.WriteNumber(m_MinBlockCount);
    5732  }
    5733  if(m_MaxBlockCount < SIZE_MAX)
    5734  {
    5735  json.WriteString("Max");
    5736  json.WriteNumber(m_MaxBlockCount);
    5737  }
    5738  json.WriteString("Cur");
    5739  json.WriteNumber(m_Blocks.size());
    5740  json.EndObject();
    5741 
    5742  if(m_FrameInUseCount > 0)
    5743  {
    5744  json.WriteString("FrameInUseCount");
    5745  json.WriteNumber(m_FrameInUseCount);
    5746  }
    5747  }
    5748  else
    5749  {
    5750  json.WriteString("PreferredBlockSize");
    5751  json.WriteNumber(m_PreferredBlockSize);
    5752  }
    5753 
    5754  json.WriteString("Blocks");
    5755  json.BeginArray();
    5756  for(size_t i = 0; i < m_Blocks.size(); ++i)
    5757  {
    5758  m_Blocks[i]->m_Metadata.PrintDetailedMap(json);
    5759  }
    5760  json.EndArray();
    5761 
    5762  json.EndObject();
    5763 }
    5764 
    5765 #endif // #if VMA_STATS_STRING_ENABLED
    5766 
    5767 void VmaBlockVector::UnmapPersistentlyMappedMemory()
    5768 {
    5769  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5770 
    5771  for(size_t i = m_Blocks.size(); i--; )
    5772  {
    5773  VmaDeviceMemoryBlock* pBlock = m_Blocks[i];
    5774  if(pBlock->m_pMappedData != VMA_NULL)
    5775  {
 5776  VMA_ASSERT(pBlock->m_PersistentMap);
    5777  (m_hAllocator->GetVulkanFunctions().vkUnmapMemory)(m_hAllocator->m_hDevice, pBlock->m_hMemory);
    5778  pBlock->m_pMappedData = VMA_NULL;
    5779  }
    5780  }
    5781 }
    5782 
    5783 VkResult VmaBlockVector::MapPersistentlyMappedMemory()
    5784 {
    5785  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5786 
    5787  VkResult finalResult = VK_SUCCESS;
    5788  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
    5789  {
    5790  VmaDeviceMemoryBlock* pBlock = m_Blocks[i];
    5791  if(pBlock->m_PersistentMap)
    5792  {
 5793  VMA_ASSERT(pBlock->m_pMappedData == VMA_NULL);
    5794  VkResult localResult = (*m_hAllocator->GetVulkanFunctions().vkMapMemory)(
    5795  m_hAllocator->m_hDevice,
    5796  pBlock->m_hMemory,
    5797  0,
    5798  VK_WHOLE_SIZE,
    5799  0,
    5800  &pBlock->m_pMappedData);
    5801  if(localResult != VK_SUCCESS)
    5802  {
    5803  finalResult = localResult;
    5804  }
    5805  }
    5806  }
    5807  return finalResult;
    5808 }
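
// Typical intended use of this pair through the public API (a sketch, assuming
// the vmaUnmapPersistentlyMappedMemory / vmaMapPersistentlyMappedMemory functions
// declared earlier in this header):
//
//   vmaUnmapPersistentlyMappedMemory(allocator);
//   // ... e.g. the application is minimized and device memory may be migrated ...
//   VkResult res = vmaMapPersistentlyMappedMemory(allocator);
//
// As implemented above, the returned value reflects the last vkMapMemory failure,
// if any, while all other blocks are still re-mapped.
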
    5809 
    5810 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
    5811  VmaAllocator hAllocator,
    5812  uint32_t currentFrameIndex)
    5813 {
    5814  if(m_pDefragmentator == VMA_NULL)
    5815  {
    5816  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
    5817  hAllocator,
    5818  this,
    5819  currentFrameIndex);
    5820  }
    5821 
    5822  return m_pDefragmentator;
    5823 }
    5824 
    5825 VkResult VmaBlockVector::Defragment(
    5826  VmaDefragmentationStats* pDefragmentationStats,
    5827  VkDeviceSize& maxBytesToMove,
    5828  uint32_t& maxAllocationsToMove)
    5829 {
    5830  if(m_pDefragmentator == VMA_NULL)
    5831  {
    5832  return VK_SUCCESS;
    5833  }
    5834 
    5835  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5836 
    5837  // Defragment.
    5838  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
    5839 
    5840  // Accumulate statistics.
    5841  if(pDefragmentationStats != VMA_NULL)
    5842  {
    5843  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
    5844  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
    5845  pDefragmentationStats->bytesMoved += bytesMoved;
    5846  pDefragmentationStats->allocationsMoved += allocationsMoved;
    5847  VMA_ASSERT(bytesMoved <= maxBytesToMove);
    5848  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
    5849  maxBytesToMove -= bytesMoved;
    5850  maxAllocationsToMove -= allocationsMoved;
    5851  }
    5852 
    5853  // Free empty blocks.
    5854  m_HasEmptyBlock = false;
    5855  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    5856  {
    5857  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    5858  if(pBlock->m_Metadata.IsEmpty())
    5859  {
    5860  if(m_Blocks.size() > m_MinBlockCount)
    5861  {
    5862  if(pDefragmentationStats != VMA_NULL)
    5863  {
    5864  ++pDefragmentationStats->deviceMemoryBlocksFreed;
    5865  pDefragmentationStats->bytesFreed += pBlock->m_Metadata.GetSize();
    5866  }
    5867 
    5868  VmaVectorRemove(m_Blocks, blockIndex);
    5869  pBlock->Destroy(m_hAllocator);
    5870  vma_delete(m_hAllocator, pBlock);
    5871  }
    5872  else
    5873  {
    5874  m_HasEmptyBlock = true;
    5875  }
    5876  }
    5877  }
    5878 
    5879  return result;
    5880 }
    5881 
    5882 void VmaBlockVector::DestroyDefragmentator()
    5883 {
    5884  if(m_pDefragmentator != VMA_NULL)
    5885  {
    5886  vma_delete(m_hAllocator, m_pDefragmentator);
    5887  m_pDefragmentator = VMA_NULL;
    5888  }
    5889 }
    5890 
    5891 void VmaBlockVector::MakePoolAllocationsLost(
    5892  uint32_t currentFrameIndex,
    5893  size_t* pLostAllocationCount)
    5894 {
    5895  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5896 
    5897  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    5898  {
    5899  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    5900  VMA_ASSERT(pBlock);
    5901  pBlock->m_Metadata.MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    5902  }
    5903 }
    5904 
    5905 void VmaBlockVector::AddStats(VmaStats* pStats)
    5906 {
    5907  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    5908  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    5909 
    5910  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5911 
    5912  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    5913  {
    5914  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    5915  VMA_ASSERT(pBlock);
    5916  VMA_HEAVY_ASSERT(pBlock->Validate());
    5917  VmaStatInfo allocationStatInfo;
    5918  pBlock->m_Metadata.CalcAllocationStatInfo(allocationStatInfo);
    5919  VmaAddStatInfo(pStats->total, allocationStatInfo);
    5920  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    5921  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    5922  }
    5923 }
    5924 
    5926 // VmaDefragmentator members definition
    5927 
    5928 VmaDefragmentator::VmaDefragmentator(
    5929  VmaAllocator hAllocator,
    5930  VmaBlockVector* pBlockVector,
    5931  uint32_t currentFrameIndex) :
    5932  m_hAllocator(hAllocator),
    5933  m_pBlockVector(pBlockVector),
    5934  m_CurrentFrameIndex(currentFrameIndex),
    5935  m_BytesMoved(0),
    5936  m_AllocationsMoved(0),
    5937  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    5938  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
    5939 {
    5940 }
    5941 
    5942 VmaDefragmentator::~VmaDefragmentator()
    5943 {
    5944  for(size_t i = m_Blocks.size(); i--; )
    5945  {
    5946  vma_delete(m_hAllocator, m_Blocks[i]);
    5947  }
    5948 }
    5949 
    5950 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    5951 {
    5952  AllocationInfo allocInfo;
    5953  allocInfo.m_hAllocation = hAlloc;
    5954  allocInfo.m_pChanged = pChanged;
    5955  m_Allocations.push_back(allocInfo);
    5956 }
    5957 
    5958 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
    5959 {
    5960  // It has already been mapped for defragmentation.
    5961  if(m_pMappedDataForDefragmentation)
    5962  {
    5963  *ppMappedData = m_pMappedDataForDefragmentation;
    5964  return VK_SUCCESS;
    5965  }
    5966 
    5967  // It is persistently mapped.
    5968  if(m_pBlock->m_PersistentMap)
    5969  {
    5970  VMA_ASSERT(m_pBlock->m_pMappedData != VMA_NULL);
    5971  *ppMappedData = m_pBlock->m_pMappedData;
    5972  return VK_SUCCESS;
    5973  }
    5974 
    5975  // Map on first usage.
    5976  VkResult res = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    5977  hAllocator->m_hDevice,
    5978  m_pBlock->m_hMemory,
    5979  0,
    5980  VK_WHOLE_SIZE,
    5981  0,
    5982  &m_pMappedDataForDefragmentation);
    5983  *ppMappedData = m_pMappedDataForDefragmentation;
    5984  return res;
    5985 }
    5986 
    5987 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
    5988 {
    5989  if(m_pMappedDataForDefragmentation != VMA_NULL)
    5990  {
    5991  (hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_pBlock->m_hMemory);
    5992  }
    5993 }
    5994 
    5995 VkResult VmaDefragmentator::DefragmentRound(
    5996  VkDeviceSize maxBytesToMove,
    5997  uint32_t maxAllocationsToMove)
    5998 {
    5999  if(m_Blocks.empty())
    6000  {
    6001  return VK_SUCCESS;
    6002  }
    6003 
    6004  size_t srcBlockIndex = m_Blocks.size() - 1;
    6005  size_t srcAllocIndex = SIZE_MAX;
    6006  for(;;)
    6007  {
    6008  // 1. Find next allocation to move.
    6009  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
    6010  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
    6011  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
    6012  {
    6013  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
    6014  {
    6015  // Finished: no more allocations to process.
    6016  if(srcBlockIndex == 0)
    6017  {
    6018  return VK_SUCCESS;
    6019  }
    6020  else
    6021  {
    6022  --srcBlockIndex;
    6023  srcAllocIndex = SIZE_MAX;
    6024  }
    6025  }
    6026  else
    6027  {
    6028  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
    6029  }
    6030  }
    6031 
    6032  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
    6033  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
    6034 
    6035  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
    6036  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
    6037  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
    6038  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
    6039 
    6040  // 2. Try to find new place for this allocation in preceding or current block.
    6041  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
    6042  {
    6043  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
    6044  VmaAllocationRequest dstAllocRequest;
    6045  if(pDstBlockInfo->m_pBlock->m_Metadata.CreateAllocationRequest(
    6046  m_CurrentFrameIndex,
    6047  m_pBlockVector->GetFrameInUseCount(),
    6048  m_pBlockVector->GetBufferImageGranularity(),
    6049  size,
    6050  alignment,
    6051  suballocType,
    6052  false, // canMakeOtherLost
    6053  &dstAllocRequest) &&
    6054  MoveMakesSense(
    6055  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
    6056  {
    6057  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
    6058 
    6059  // Reached limit on number of allocations or bytes to move.
    6060  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
    6061  (m_BytesMoved + size > maxBytesToMove))
    6062  {
    6063  return VK_INCOMPLETE;
    6064  }
    6065 
    6066  void* pDstMappedData = VMA_NULL;
    6067  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
    6068  if(res != VK_SUCCESS)
    6069  {
    6070  return res;
    6071  }
    6072 
    6073  void* pSrcMappedData = VMA_NULL;
    6074  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
    6075  if(res != VK_SUCCESS)
    6076  {
    6077  return res;
    6078  }
    6079 
    6080  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
    6081  memcpy(
    6082  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
    6083  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
    6084  static_cast<size_t>(size));
    6085 
    6086  pDstBlockInfo->m_pBlock->m_Metadata.Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
    6087  pSrcBlockInfo->m_pBlock->m_Metadata.Free(allocInfo.m_hAllocation);
    6088 
    6089  allocInfo.m_hAllocation->ChangeBlockAllocation(pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
    6090 
    6091  if(allocInfo.m_pChanged != VMA_NULL)
    6092  {
    6093  *allocInfo.m_pChanged = VK_TRUE;
    6094  }
    6095 
    6096  ++m_AllocationsMoved;
    6097  m_BytesMoved += size;
    6098 
    6099  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
    6100 
    6101  break;
    6102  }
    6103  }
    6104 
 6105  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for the next round.
    6106 
    6107  if(srcAllocIndex > 0)
    6108  {
    6109  --srcAllocIndex;
    6110  }
    6111  else
    6112  {
    6113  if(srcBlockIndex > 0)
    6114  {
    6115  --srcBlockIndex;
    6116  srcAllocIndex = SIZE_MAX;
    6117  }
    6118  else
    6119  {
    6120  return VK_SUCCESS;
    6121  }
    6122  }
    6123  }
    6124 }
    6125 
    6126 VkResult VmaDefragmentator::Defragment(
    6127  VkDeviceSize maxBytesToMove,
    6128  uint32_t maxAllocationsToMove)
    6129 {
    6130  if(m_Allocations.empty())
    6131  {
    6132  return VK_SUCCESS;
    6133  }
    6134 
    6135  // Create block info for each block.
    6136  const size_t blockCount = m_pBlockVector->m_Blocks.size();
    6137  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    6138  {
    6139  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
    6140  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
    6141  m_Blocks.push_back(pBlockInfo);
    6142  }
    6143 
    6144  // Sort them by m_pBlock pointer value.
    6145  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
    6146 
 6147  // Move allocation infos from m_Allocations to the m_Allocations list of the appropriate BlockInfo in m_Blocks.
 6148  for(size_t allocIndex = 0, allocCount = m_Allocations.size(); allocIndex < allocCount; ++allocIndex)
 6149  {
 6150  AllocationInfo& allocInfo = m_Allocations[allocIndex];
 6151  // Now that we are inside VmaBlockVector::m_Mutex, we can make the final check that this allocation was not lost.
    6152  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
    6153  {
    6154  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
    6155  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
    6156  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
    6157  {
    6158  (*it)->m_Allocations.push_back(allocInfo);
    6159  }
    6160  else
    6161  {
    6162  VMA_ASSERT(0);
    6163  }
    6164  }
    6165  }
    6166  m_Allocations.clear();
    6167 
    6168  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    6169  {
    6170  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
    6171  pBlockInfo->CalcHasNonMovableAllocations();
    6172  pBlockInfo->SortAllocationsBySizeDescecnding();
    6173  }
    6174 
 6175  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
    6176  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
    6177 
    6178  // Execute defragmentation rounds (the main part).
    6179  VkResult result = VK_SUCCESS;
    6180  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
    6181  {
    6182  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
    6183  }
    6184 
    6185  // Unmap blocks that were mapped for defragmentation.
    6186  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    6187  {
    6188  m_Blocks[blockIndex]->Unmap(m_hAllocator);
    6189  }
    6190 
    6191  return result;
    6192 }
    6193 
    6194 bool VmaDefragmentator::MoveMakesSense(
    6195  size_t dstBlockIndex, VkDeviceSize dstOffset,
    6196  size_t srcBlockIndex, VkDeviceSize srcOffset)
    6197 {
    6198  if(dstBlockIndex < srcBlockIndex)
    6199  {
    6200  return true;
    6201  }
    6202  if(dstBlockIndex > srcBlockIndex)
    6203  {
    6204  return false;
    6205  }
    6206  if(dstOffset < srcOffset)
    6207  {
    6208  return true;
    6209  }
    6210  return false;
    6211 }
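
// Equivalent formulation of the test above (illustration only): a move makes
// sense exactly when the destination precedes the source in the lexicographic
// order (blockIndex, offset). Data thus only ever flows toward the front of the
// block list and toward offset 0 within a block, which is what compacts memory.
static inline bool VmaExampleMoveMakesSense(
    size_t dstBlockIndex, VkDeviceSize dstOffset,
    size_t srcBlockIndex, VkDeviceSize srcOffset)
{
    return (dstBlockIndex < srcBlockIndex) ||
        (dstBlockIndex == srcBlockIndex && dstOffset < srcOffset);
}
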
    6212 
    6214 // VmaAllocator_T
    6215 
    6216 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    6217  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    6218  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    6219  m_PhysicalDevice(pCreateInfo->physicalDevice),
    6220  m_hDevice(pCreateInfo->device),
    6221  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    6222  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
    6223  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    6224  m_UnmapPersistentlyMappedMemoryCounter(0),
    6225  m_PreferredLargeHeapBlockSize(0),
    6226  m_PreferredSmallHeapBlockSize(0),
    6227  m_CurrentFrameIndex(0),
    6228  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
    6229 {
    6230  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
    6231 
    6232  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
    6233  memset(&m_MemProps, 0, sizeof(m_MemProps));
    6234  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    6235 
    6236  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    6237  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
    6238 
    6239  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    6240  {
    6241  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    6242  }
    6243 
    6244  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    6245  {
    6246  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
    6247  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    6248  }
    6249 
    6250  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
    6251 
    6252  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    6253  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
    6254 
    6255  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
    6256  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    6257  m_PreferredSmallHeapBlockSize = (pCreateInfo->preferredSmallHeapBlockSize != 0) ?
    6258  pCreateInfo->preferredSmallHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE);
    6259 
    6260  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    6261  {
    6262  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
    6263  {
    6264  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
    6265  if(limit != VK_WHOLE_SIZE)
    6266  {
    6267  m_HeapSizeLimit[heapIndex] = limit;
    6268  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
    6269  {
    6270  m_MemProps.memoryHeaps[heapIndex].size = limit;
    6271  }
    6272  }
    6273  }
    6274  }
    6275 
    6276  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    6277  {
    6278  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
    6279 
    6280  for(size_t blockVectorTypeIndex = 0; blockVectorTypeIndex < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorTypeIndex)
    6281  {
    6282  m_pBlockVectors[memTypeIndex][blockVectorTypeIndex] = vma_new(this, VmaBlockVector)(
    6283  this,
    6284  memTypeIndex,
    6285  static_cast<VMA_BLOCK_VECTOR_TYPE>(blockVectorTypeIndex),
    6286  preferredBlockSize,
    6287  0,
    6288  SIZE_MAX,
    6289  GetBufferImageGranularity(),
    6290  pCreateInfo->frameInUseCount,
    6291  false); // isCustomPool
 6292  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
 6293  // because minBlockCount is 0.
    6294  m_pDedicatedAllocations[memTypeIndex][blockVectorTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    6295  }
    6296  }
    6297 }
    6298 
    6299 VmaAllocator_T::~VmaAllocator_T()
    6300 {
    6301  VMA_ASSERT(m_Pools.empty());
    6302 
    6303  for(size_t i = GetMemoryTypeCount(); i--; )
    6304  {
    6305  for(size_t j = VMA_BLOCK_VECTOR_TYPE_COUNT; j--; )
    6306  {
    6307  vma_delete(this, m_pDedicatedAllocations[i][j]);
    6308  vma_delete(this, m_pBlockVectors[i][j]);
    6309  }
    6310  }
    6311 }
    6312 
    6313 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
    6314 {
    6315 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
    6316  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    6317  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    6318  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    6319  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    6320  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    6321  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    6322  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    6323  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    6324  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    6325  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    6326  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    6327  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    6328  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    6329  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
    6330  // Ignoring vkGetBufferMemoryRequirements2KHR.
    6331  // Ignoring vkGetImageMemoryRequirements2KHR.
    6332 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
    6333 
    6334 #define VMA_COPY_IF_NOT_NULL(funcName) \
    6335  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
    6336 
    6337  if(pVulkanFunctions != VMA_NULL)
    6338  {
    6339  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
    6340  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
    6341  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
    6342  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
    6343  VMA_COPY_IF_NOT_NULL(vkMapMemory);
    6344  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
    6345  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
    6346  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
    6347  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
    6348  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
    6349  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
    6350  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
    6351  VMA_COPY_IF_NOT_NULL(vkCreateImage);
    6352  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
    6353  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
    6354  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
    6355  }
    6356 
    6357 #undef VMA_COPY_IF_NOT_NULL
    6358 
    6359  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    6360  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    6361  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    6362  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    6363  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    6364  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    6365  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    6366  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    6367  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    6368  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    6369  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    6370  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    6371  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    6372  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    6373  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    6374  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
    6375  if(m_UseKhrDedicatedAllocation)
    6376  {
    6377  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
    6378  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    6379  }
    6380 }
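
// User-side counterpart of the import above, as a sketch. When
// VMA_STATIC_VULKAN_FUNCTIONS is 0, every member must be filled manually; the
// my* names below are hypothetical pointers obtained from your loader:
//
//   VmaVulkanFunctions funcs = {};
//   funcs.vkGetPhysicalDeviceProperties = myGetPhysicalDeviceProperties;
//   // ... all remaining members likewise ...
//   funcs.vkGetBufferMemoryRequirements2KHR = myGetBufferMemoryRequirements2KHR;
//   funcs.vkGetImageMemoryRequirements2KHR = myGetImageMemoryRequirements2KHR;
//
//   VmaAllocatorCreateInfo allocatorInfo = {};
//   allocatorInfo.flags |= VMA_ALLOCATOR_KHR_DEDICATED_ALLOCATION_BIT;
//   allocatorInfo.pVulkanFunctions = &funcs;
//
// The *2KHR members may stay null unless the allocator is created with
// VMA_ALLOCATOR_KHR_DEDICATED_ALLOCATION_BIT, as the asserts above encode.
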
    6381 
    6382 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    6383 {
    6384  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    6385  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    6386  return (heapSize <= VMA_SMALL_HEAP_MAX_SIZE) ?
    6387  m_PreferredSmallHeapBlockSize : m_PreferredLargeHeapBlockSize;
    6388 }
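
// Worked example of the heuristic above, assuming the default constants defined
// earlier in this file (VMA_SMALL_HEAP_MAX_SIZE = 512 MB,
// VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE = 256 MB, VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE = 64 MB):
// a 256 MB heap gets 64 MB blocks so that one block cannot monopolize it, while
// an 8 GB heap gets 256 MB blocks to keep the block count low.
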
    6389 
    6390 VkResult VmaAllocator_T::AllocateMemoryOfType(
    6391  const VkMemoryRequirements& vkMemReq,
    6392  bool dedicatedAllocation,
    6393  const VmaAllocationCreateInfo& createInfo,
    6394  uint32_t memTypeIndex,
    6395  VmaSuballocationType suballocType,
    6396  VmaAllocation* pAllocation)
    6397 {
    6398  VMA_ASSERT(pAllocation != VMA_NULL);
    6399  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
    6400 
    6401  uint32_t blockVectorType = VmaAllocationCreateFlagsToBlockVectorType(createInfo.flags);
    6402  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
    6403  VMA_ASSERT(blockVector);
    6404 
    6405  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    6406 
    6407  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    6408  bool preferDedicatedMemory =
    6409  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    6410  dedicatedAllocation ||
 6411  // Heuristics: Allocate dedicated memory if the requested size is greater than half of the preferred block size.
    6412  vkMemReq.size > preferredBlockSize / 2;
    6413 
    6414  if(preferDedicatedMemory &&
    6415  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    6416  finalCreateInfo.pool == VK_NULL_HANDLE)
    6417  {
 6418  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
 6419  }
    6420 
    6421  // If memory type is not HOST_VISIBLE, disable PERSISTENT_MAP.
    6422  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0 &&
    6423  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    6424  {
    6425  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT;
    6426  }
    6427 
    6428  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    6429  {
    6430  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    6431  {
    6432  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6433  }
    6434  else
    6435  {
    6436  return AllocateDedicatedMemory(
    6437  vkMemReq.size,
    6438  suballocType,
    6439  memTypeIndex,
    6440  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0,
    6441  finalCreateInfo.pUserData,
    6442  pAllocation);
    6443  }
    6444  }
    6445  else
    6446  {
    6447  VkResult res = blockVector->Allocate(
    6448  VK_NULL_HANDLE, // hCurrentPool
    6449  m_CurrentFrameIndex.load(),
    6450  vkMemReq,
    6451  finalCreateInfo,
    6452  suballocType,
    6453  pAllocation);
    6454  if(res == VK_SUCCESS)
    6455  {
    6456  return res;
    6457  }
    6458 
 6459  // Block allocation failed. Try dedicated memory.
    6460  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    6461  {
    6462  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6463  }
    6464  else
    6465  {
    6466  res = AllocateDedicatedMemory(
    6467  vkMemReq.size,
    6468  suballocType,
    6469  memTypeIndex,
    6470  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0,
    6471  finalCreateInfo.pUserData,
    6472  pAllocation);
    6473  if(res == VK_SUCCESS)
    6474  {
 6475  // Succeeded: AllocateDedicatedMemory function already filled pAllocation, nothing more to do here.
    6476  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    6477  return VK_SUCCESS;
    6478  }
    6479  else
    6480  {
    6481  // Everything failed: Return error code.
    6482  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    6483  return res;
    6484  }
    6485  }
    6486  }
    6487 }
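
// User-side sketch of forcing the dedicated path above explicitly, e.g. for
// large, long-lived resources such as render targets:
//
//   VmaAllocationCreateInfo allocCreateInfo = {};
//   allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//   allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
//
// As validated in AllocateMemory below, this flag cannot be combined with
// VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT or with a custom pool.
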
    6488 
    6489 VkResult VmaAllocator_T::AllocateDedicatedMemory(
    6490  VkDeviceSize size,
    6491  VmaSuballocationType suballocType,
    6492  uint32_t memTypeIndex,
    6493  bool map,
    6494  void* pUserData,
    6495  VmaAllocation* pAllocation)
    6496 {
    6497  VMA_ASSERT(pAllocation);
    6498 
    6499  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    6500  allocInfo.memoryTypeIndex = memTypeIndex;
    6501  allocInfo.allocationSize = size;
    6502 
    6503  // Allocate VkDeviceMemory.
    6504  VkDeviceMemory hMemory = VK_NULL_HANDLE;
    6505  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    6506  if(res < 0)
    6507  {
    6508  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    6509  return res;
    6510  }
    6511 
 6512  void* pMappedData = VMA_NULL;
    6513  if(map)
    6514  {
    6515  if(m_UnmapPersistentlyMappedMemoryCounter == 0)
    6516  {
    6517  res = (*m_VulkanFunctions.vkMapMemory)(
    6518  m_hDevice,
    6519  hMemory,
    6520  0,
    6521  VK_WHOLE_SIZE,
    6522  0,
    6523  &pMappedData);
    6524  if(res < 0)
    6525  {
    6526  VMA_DEBUG_LOG(" vkMapMemory FAILED");
    6527  FreeVulkanMemory(memTypeIndex, size, hMemory);
    6528  return res;
    6529  }
    6530  }
    6531  }
    6532 
    6533  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load());
    6534  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, map, pMappedData, size, pUserData);
    6535 
    6536  // Register it in m_pDedicatedAllocations.
    6537  {
    6538  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    6539  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex][map ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED];
    6540  VMA_ASSERT(pDedicatedAllocations);
    6541  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    6542  }
    6543 
    6544  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
    6545 
    6546  return VK_SUCCESS;
    6547 }
    6548 
    6549 void VmaAllocator_T::GetBufferMemoryRequirements(
    6550  VkBuffer hBuffer,
    6551  VkMemoryRequirements& memReq,
    6552  bool& dedicatedAllocation) const
    6553 {
    6554  if(m_UseKhrDedicatedAllocation)
    6555  {
    6556  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
    6557  memReqInfo.buffer = hBuffer;
    6558 
    6559  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    6560 
    6561  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    6562  memReq2.pNext = &memDedicatedReq;
    6563 
    6564  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    6565 
    6566  memReq = memReq2.memoryRequirements;
    6567  dedicatedAllocation =
    6568  (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE) ||
    6569  (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    6570  }
    6571  else
    6572  {
    6573  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
    6574  dedicatedAllocation = false;
    6575  }
    6576 }
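
// Note for users (sketch): the *2KHR query above requires that the device was
// created with the prerequisite extensions enabled, e.g.:
//
//   const char* extensionNames[] = {
//       VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME,
//       VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME,
//   };
//   // ... passed via VkDeviceCreateInfo::ppEnabledExtensionNames, after which the
//   // allocator can be created with VMA_ALLOCATOR_KHR_DEDICATED_ALLOCATION_BIT.
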
    6577 
    6578 void VmaAllocator_T::GetImageMemoryRequirements(
    6579  VkImage hImage,
    6580  VkMemoryRequirements& memReq,
    6581  bool& dedicatedAllocation) const
    6582 {
    6583  if(m_UseKhrDedicatedAllocation)
    6584  {
    6585  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
    6586  memReqInfo.image = hImage;
    6587 
    6588  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    6589 
    6590  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    6591  memReq2.pNext = &memDedicatedReq;
    6592 
    6593  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    6594 
    6595  memReq = memReq2.memoryRequirements;
    6596  dedicatedAllocation =
    6597  (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE) ||
    6598  (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    6599  }
    6600  else
    6601  {
    6602  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
    6603  dedicatedAllocation = false;
    6604  }
    6605 }
    6606 
    6607 VkResult VmaAllocator_T::AllocateMemory(
    6608  const VkMemoryRequirements& vkMemReq,
    6609  bool dedicatedAllocation,
    6610  const VmaAllocationCreateInfo& createInfo,
    6611  VmaSuballocationType suballocType,
    6612  VmaAllocation* pAllocation)
    6613 {
    6614  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
    6615  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    6616  {
    6617  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    6618  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6619  }
    6620  if((createInfo.pool != VK_NULL_HANDLE) &&
    6621  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    6622  {
    6623  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
    6624  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6625  }
    6626 
    6627  if(createInfo.pool != VK_NULL_HANDLE)
    6628  {
    6629  return createInfo.pool->m_BlockVector.Allocate(
    6630  createInfo.pool,
    6631  m_CurrentFrameIndex.load(),
    6632  vkMemReq,
    6633  createInfo,
    6634  suballocType,
    6635  pAllocation);
    6636  }
    6637  else
    6638  {
    6639  // Bit mask of Vulkan memory types acceptable for this allocation.
    6640  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    6641  uint32_t memTypeIndex = UINT32_MAX;
    6642  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    6643  if(res == VK_SUCCESS)
    6644  {
    6645  res = AllocateMemoryOfType(vkMemReq, dedicatedAllocation, createInfo, memTypeIndex, suballocType, pAllocation);
    6646  // Succeeded on first try.
    6647  if(res == VK_SUCCESS)
    6648  {
    6649  return res;
    6650  }
    6651  // Allocation from this memory type failed. Try other compatible memory types.
    6652  else
    6653  {
    6654  for(;;)
    6655  {
    6656  // Remove old memTypeIndex from list of possibilities.
    6657  memoryTypeBits &= ~(1u << memTypeIndex);
    6658  // Find alternative memTypeIndex.
    6659  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    6660  if(res == VK_SUCCESS)
    6661  {
    6662  res = AllocateMemoryOfType(vkMemReq, dedicatedAllocation, createInfo, memTypeIndex, suballocType, pAllocation);
    6663  // Allocation from this alternative memory type succeeded.
    6664  if(res == VK_SUCCESS)
    6665  {
    6666  return res;
    6667  }
    6668  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    6669  }
    6670  // No other matching memory type index could be found.
    6671  else
    6672  {
    6673  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    6674  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6675  }
    6676  }
    6677  }
    6678  }
    6679  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    6680  else
    6681  return res;
    6682  }
    6683 }
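// Illustrative sketch: the fallback loop above is transparent to callers. A
// raw allocation looks like this (`allocator` and `memReq`, e.g. from
// vkGetBufferMemoryRequirements, are assumed); if every compatible memory
// type fails, the final result is VK_ERROR_OUT_OF_DEVICE_MEMORY:
/*
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation allocation;
VmaAllocationInfo allocationInfo;
VkResult res = vmaAllocateMemory(allocator, &memReq, &allocCreateInfo, &allocation, &allocationInfo);
*/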
    6684 
    6685 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
    6686 {
    6687  VMA_ASSERT(allocation);
    6688 
    6689  if(allocation->CanBecomeLost() == false ||
    6690  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
    6691  {
    6692  switch(allocation->GetType())
    6693  {
    6694  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    6695  {
    6696  VmaBlockVector* pBlockVector = VMA_NULL;
    6697  VmaPool hPool = allocation->GetPool();
    6698  if(hPool != VK_NULL_HANDLE)
    6699  {
    6700  pBlockVector = &hPool->m_BlockVector;
    6701  }
    6702  else
    6703  {
    6704  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    6705  const VMA_BLOCK_VECTOR_TYPE blockVectorType = allocation->GetBlockVectorType();
    6706  pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
    6707  }
    6708  pBlockVector->Free(allocation);
    6709  }
    6710  break;
    6711  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    6712  FreeDedicatedMemory(allocation);
    6713  break;
    6714  default:
    6715  VMA_ASSERT(0);
    6716  }
    6717  }
    6718 
    6719  vma_delete(this, allocation);
    6720 }
    6721 
    6722 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
    6723 {
    6724  // Initialize.
    6725  InitStatInfo(pStats->total);
    6726  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
    6727  InitStatInfo(pStats->memoryType[i]);
    6728  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    6729  InitStatInfo(pStats->memoryHeap[i]);
    6730 
    6731  // Process default pools.
    6732  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    6733  {
    6734  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    6735  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
    6736  {
    6737  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
    6738  VMA_ASSERT(pBlockVector);
    6739  pBlockVector->AddStats(pStats);
    6740  }
    6741  }
    6742 
    6743  // Process custom pools.
    6744  {
    6745  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    6746  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    6747  {
    6748  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
    6749  }
    6750  }
    6751 
    6752  // Process dedicated allocations.
    6753  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    6754  {
    6755  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    6756  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    6757  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
    6758  {
    6759  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex][blockVectorType];
    6760  VMA_ASSERT(pDedicatedAllocVector);
    6761  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
    6762  {
    6763  VmaStatInfo allocationStatInfo;
    6764  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
    6765  VmaAddStatInfo(pStats->total, allocationStatInfo);
    6766  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    6767  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    6768  }
    6769  }
    6770  }
    6771 
    6772  // Postprocess.
    6773  VmaPostprocessCalcStatInfo(pStats->total);
    6774  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    6775  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    6776  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
    6777  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
    6778 }
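// Illustrative sketch: reading the aggregated statistics computed above.
// Member names follow VmaStatInfo as declared earlier in this header:
/*
VmaStats stats;
vmaCalculateStats(allocator, &stats);
printf("Used: %llu B, unused: %llu B in %u blocks\n",
    (unsigned long long)stats.total.usedBytes,
    (unsigned long long)stats.total.unusedBytes,
    stats.total.blockCount);
*/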
    6779 
    6780 static const uint32_t VMA_VENDOR_ID_AMD = 4098; // = 0x1002
    6781 
    6782 void VmaAllocator_T::UnmapPersistentlyMappedMemory()
    6783 {
    6784  if(m_UnmapPersistentlyMappedMemoryCounter++ == 0)
    6785  {
    6786  if(m_PhysicalDeviceProperties.vendorID == VMA_VENDOR_ID_AMD)
    6787  {
    6788  for(uint32_t memTypeIndex = m_MemProps.memoryTypeCount; memTypeIndex--; )
    6789  {
    6790  const VkMemoryPropertyFlags memFlags = m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    6791  if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0 &&
    6792  (memFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
    6793  {
    6794  // Process DedicatedAllocations.
    6795  {
    6796  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    6797  AllocationVectorType* pDedicatedAllocationsVector = m_pDedicatedAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
    6798  for(size_t dedicatedAllocIndex = pDedicatedAllocationsVector->size(); dedicatedAllocIndex--; )
    6799  {
    6800  VmaAllocation hAlloc = (*pDedicatedAllocationsVector)[dedicatedAllocIndex];
    6801  hAlloc->DedicatedAllocUnmapPersistentlyMappedMemory(this);
    6802  }
    6803  }
    6804 
    6805  // Process normal Allocations.
    6806  {
    6807  VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
    6808  pBlockVector->UnmapPersistentlyMappedMemory();
    6809  }
    6810  }
    6811  }
    6812 
    6813  // Process custom pools.
    6814  {
    6815  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    6816  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    6817  {
    6818  m_Pools[poolIndex]->GetBlockVector().UnmapPersistentlyMappedMemory();
    6819  }
    6820  }
    6821  }
    6822  }
    6823 }
    6824 
    6825 VkResult VmaAllocator_T::MapPersistentlyMappedMemory()
    6826 {
    6827  VMA_ASSERT(m_UnmapPersistentlyMappedMemoryCounter > 0);
    6828  if(--m_UnmapPersistentlyMappedMemoryCounter == 0)
    6829  {
    6830  VkResult finalResult = VK_SUCCESS;
    6831  if(m_PhysicalDeviceProperties.vendorID == VMA_VENDOR_ID_AMD)
    6832  {
    6833  // Process custom pools.
    6834  {
    6835  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    6836  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    6837  {
    6838  m_Pools[poolIndex]->GetBlockVector().MapPersistentlyMappedMemory();
    6839  }
    6840  }
    6841 
    6842  for(uint32_t memTypeIndex = 0; memTypeIndex < m_MemProps.memoryTypeCount; ++memTypeIndex)
    6843  {
    6844  const VkMemoryPropertyFlags memFlags = m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    6845  if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0 &&
    6846  (memFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
    6847  {
    6848  // Process DedicatedAllocations.
    6849  {
    6850  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    6851  AllocationVectorType* pAllocationsVector = m_pDedicatedAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
    6852  for(size_t dedicatedAllocIndex = 0, dedicatedAllocCount = pAllocationsVector->size(); dedicatedAllocIndex < dedicatedAllocCount; ++dedicatedAllocIndex)
    6853  {
    6854  VmaAllocation hAlloc = (*pAllocationsVector)[dedicatedAllocIndex];
    6855  hAlloc->DedicatedAllocMapPersistentlyMappedMemory(this);
    6856  }
    6857  }
    6858 
    6859  // Process normal Allocations.
    6860  {
    6861  VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
    6862  VkResult localResult = pBlockVector->MapPersistentlyMappedMemory();
    6863  if(localResult != VK_SUCCESS)
    6864  {
    6865  finalResult = localResult;
    6866  }
    6867  }
    6868  }
    6869  }
    6870  }
    6871  return finalResult;
    6872  }
    6873  else
    6874  return VK_SUCCESS;
    6875 }
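// Illustrative sketch: Unmap/Map form a counter-based pair; each call to
// vmaUnmapPersistentlyMappedMemory() must eventually be matched by
// vmaMapPersistentlyMappedMemory(), and only the outermost pair does real
// work (and only on AMD hardware, per the vendorID check above):
/*
vmaUnmapPersistentlyMappedMemory(allocator);
// ... persistently mapped pointers are invalid within this scope ...
VkResult res = vmaMapPersistentlyMappedMemory(allocator);
// Re-query pMappedData via vmaGetAllocationInfo(): the pointer may have changed.
*/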
    6876 
    6877 VkResult VmaAllocator_T::Defragment(
    6878  VmaAllocation* pAllocations,
    6879  size_t allocationCount,
    6880  VkBool32* pAllocationsChanged,
    6881  const VmaDefragmentationInfo* pDefragmentationInfo,
    6882  VmaDefragmentationStats* pDefragmentationStats)
    6883 {
    6884  if(pAllocationsChanged != VMA_NULL)
    6885  {
    6886  memset(pAllocationsChanged, 0, allocationCount * sizeof(*pAllocationsChanged));
    6887  }
    6888  if(pDefragmentationStats != VMA_NULL)
    6889  {
    6890  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
    6891  }
    6892 
    6893  if(m_UnmapPersistentlyMappedMemoryCounter > 0)
    6894  {
    6895  VMA_DEBUG_LOG("ERROR: Cannot defragment when inside vmaUnmapPersistentlyMappedMemory.");
    6896  return VK_ERROR_MEMORY_MAP_FAILED;
    6897  }
    6898 
    6899  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
    6900 
    6901  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
    6902 
    6903  const size_t poolCount = m_Pools.size();
    6904 
    6905  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
    6906  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    6907  {
    6908  VmaAllocation hAlloc = pAllocations[allocIndex];
    6909  VMA_ASSERT(hAlloc);
    6910  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
    6911  // DedicatedAlloc cannot be defragmented.
    6912  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
    6913  // Only HOST_VISIBLE memory types can be defragmented.
    6914  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
    6915  // Lost allocation cannot be defragmented.
    6916  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
    6917  {
    6918  VmaBlockVector* pAllocBlockVector = VMA_NULL;
    6919 
    6920  const VmaPool hAllocPool = hAlloc->GetPool();
    6921  // This allocation belongs to custom pool.
    6922  if(hAllocPool != VK_NULL_HANDLE)
    6923  {
    6924  pAllocBlockVector = &hAllocPool->GetBlockVector();
    6925  }
    6926  // This allocation belongs to general pool.
    6927  else
    6928  {
    6929  pAllocBlockVector = m_pBlockVectors[memTypeIndex][hAlloc->GetBlockVectorType()];
    6930  }
    6931 
    6932  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
    6933 
    6934  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
    6935  &pAllocationsChanged[allocIndex] : VMA_NULL;
    6936  pDefragmentator->AddAllocation(hAlloc, pChanged);
    6937  }
    6938  }
    6939 
    6940  VkResult result = VK_SUCCESS;
    6941 
    6942  // ======== Main processing.
    6943 
    6944  VkDeviceSize maxBytesToMove = UINT64_MAX;
    6945  uint32_t maxAllocationsToMove = UINT32_MAX;
    6946  if(pDefragmentationInfo != VMA_NULL)
    6947  {
    6948  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
    6949  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    6950  }
    6951 
    6952  // Process standard memory.
    6953  for(uint32_t memTypeIndex = 0;
    6954  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
    6955  ++memTypeIndex)
    6956  {
    6957  // Only HOST_VISIBLE memory types can be defragmented.
    6958  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    6959  {
    6960  for(uint32_t blockVectorType = 0;
    6961  (blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT) && (result == VK_SUCCESS);
    6962  ++blockVectorType)
    6963  {
    6964  result = m_pBlockVectors[memTypeIndex][blockVectorType]->Defragment(
    6965  pDefragmentationStats,
    6966  maxBytesToMove,
    6967  maxAllocationsToMove);
    6968  }
    6969  }
    6970  }
    6971 
    6972  // Process custom pools.
    6973  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
    6974  {
    6975  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
    6976  pDefragmentationStats,
    6977  maxBytesToMove,
    6978  maxAllocationsToMove);
    6979  }
    6980 
    6981  // ======== Destroy defragmentators.
    6982 
    6983  // Process custom pools.
    6984  for(size_t poolIndex = poolCount; poolIndex--; )
    6985  {
    6986  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
    6987  }
    6988 
    6989  // Process standard memory.
    6990  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
    6991  {
    6992  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    6993  {
    6994  for(size_t blockVectorType = VMA_BLOCK_VECTOR_TYPE_COUNT; blockVectorType--; )
    6995  {
    6996  m_pBlockVectors[memTypeIndex][blockVectorType]->DestroyDefragmentator();
    6997  }
    6998  }
    6999  }
    7000 
    7001  return result;
    7002 }
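// Illustrative sketch: typical use of the defragmentation entry point. As
// checked above, only block-based, HOST_VISIBLE, non-lost allocations are
// moved. The caller must recreate and rebind resources whose allocations
// changed; RecreateBuffer() below is a hypothetical application helper.
// Assumes <vector> plus `allocations`/`allocationCount` gathered by the app.
/*
std::vector<VkBool32> changed(allocationCount);

VmaDefragmentationInfo defragInfo = {};
defragInfo.maxBytesToMove = UINT64_MAX;
defragInfo.maxAllocationsToMove = UINT32_MAX;

VmaDefragmentationStats defragStats = {};
VkResult res = vmaDefragment(allocator, allocations, allocationCount,
    changed.data(), &defragInfo, &defragStats);

for(size_t i = 0; i < allocationCount; ++i)
    if(changed[i] != VK_FALSE)
        RecreateBuffer(allocations[i]); // destroy old VkBuffer, create and bind a new one
*/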
    7003 
    7004 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
    7005 {
    7006  if(hAllocation->CanBecomeLost())
    7007  {
    7008  /*
    7009  Warning: This is a carefully designed algorithm.
    7010  Do not modify unless you really know what you're doing :)
    7011  */
    7012  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    7013  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    7014  for(;;)
    7015  {
    7016  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    7017  {
    7018  pAllocationInfo->memoryType = UINT32_MAX;
    7019  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
    7020  pAllocationInfo->offset = 0;
    7021  pAllocationInfo->size = hAllocation->GetSize();
    7022  pAllocationInfo->pMappedData = VMA_NULL;
    7023  pAllocationInfo->pUserData = hAllocation->GetUserData();
    7024  return;
    7025  }
    7026  else if(localLastUseFrameIndex == localCurrFrameIndex)
    7027  {
    7028  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    7029  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    7030  pAllocationInfo->offset = hAllocation->GetOffset();
    7031  pAllocationInfo->size = hAllocation->GetSize();
    7032  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
    7033  pAllocationInfo->pUserData = hAllocation->GetUserData();
    7034  return;
    7035  }
    7036  else // Last use time earlier than current time.
    7037  {
    7038  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    7039  {
    7040  localLastUseFrameIndex = localCurrFrameIndex;
    7041  }
    7042  }
    7043  }
    7044  }
    7045  // We could reuse the code above here, but for performance reasons we skip the hAllocation->LastUseFrameIndex atomic: an allocation that cannot become lost never changes it, so a plain read is sufficient.
    7046  else
    7047  {
    7048  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    7049  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    7050  pAllocationInfo->offset = hAllocation->GetOffset();
    7051  pAllocationInfo->size = hAllocation->GetSize();
    7052  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
    7053  pAllocationInfo->pUserData = hAllocation->GetUserData();
    7054  }
    7055 }
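// Illustrative sketch: an allocation created with
// VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT can be polled every frame; per
// the code above, a lost allocation reports deviceMemory == VK_NULL_HANDLE
// and memoryType == UINT32_MAX:
/*
VmaAllocationInfo allocInfo;
vmaGetAllocationInfo(allocator, allocation, &allocInfo);
if(allocInfo.deviceMemory == VK_NULL_HANDLE)
{
    // Allocation was lost: destroy the resource and allocate a new one.
}
*/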
    7056 
    7057 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    7058 {
    7059  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
    7060 
    7061  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    7062 
    7063  if(newCreateInfo.maxBlockCount == 0)
    7064  {
    7065  newCreateInfo.maxBlockCount = SIZE_MAX;
    7066  }
    7067  if(newCreateInfo.blockSize == 0)
    7068  {
    7069  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    7070  }
    7071 
    7072  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
    7073 
    7074  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    7075  if(res != VK_SUCCESS)
    7076  {
    7077  vma_delete(this, *pPool);
    7078  *pPool = VMA_NULL;
    7079  return res;
    7080  }
    7081 
    7082  // Add to m_Pools.
    7083  {
    7084  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    7085  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    7086  }
    7087 
    7088  return VK_SUCCESS;
    7089 }
    7090 
    7091 void VmaAllocator_T::DestroyPool(VmaPool pool)
    7092 {
    7093  // Remove from m_Pools.
    7094  {
    7095  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    7096  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    7097  VMA_ASSERT(success && "Pool not found in Allocator.");
    7098  }
    7099 
    7100  vma_delete(this, pool);
    7101 }
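// Illustrative sketch: creating and using a custom pool. As CreatePool()
// shows above, maxBlockCount == 0 means unlimited and blockSize == 0 selects
// the preferred block size for the heap:
/*
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex; // e.g. from vmaFindMemoryTypeIndex()
poolCreateInfo.blockSize = 0;                  // default block size
poolCreateInfo.minBlockCount = 1;
poolCreateInfo.maxBlockCount = 0;              // unlimited

VmaPool pool;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.pool = pool; // route allocations into the custom pool
// ... allocate buffers/images, then finally:
vmaDestroyPool(allocator, pool);
*/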
    7102 
    7103 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
    7104 {
    7105  pool->m_BlockVector.GetPoolStats(pPoolStats);
    7106 }
    7107 
    7108 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
    7109 {
    7110  m_CurrentFrameIndex.store(frameIndex);
    7111 }
    7112 
    7113 void VmaAllocator_T::MakePoolAllocationsLost(
    7114  VmaPool hPool,
    7115  size_t* pLostAllocationCount)
    7116 {
    7117  hPool->m_BlockVector.MakePoolAllocationsLost(
    7118  m_CurrentFrameIndex.load(),
    7119  pLostAllocationCount);
    7120 }
    7121 
    7122 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
    7123 {
    7124  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST);
    7125  (*pAllocation)->InitLost();
    7126 }
    7127 
    7128 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
    7129 {
    7130  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
    7131 
    7132  VkResult res;
    7133  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    7134  {
    7135  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    7136  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
    7137  {
    7138  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    7139  if(res == VK_SUCCESS)
    7140  {
    7141  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
    7142  }
    7143  }
    7144  else
    7145  {
    7146  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
    7147  }
    7148  }
    7149  else
    7150  {
    7151  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    7152  }
    7153 
    7154  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    7155  {
    7156  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    7157  }
    7158 
    7159  return res;
    7160 }
    7161 
    7162 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
    7163 {
    7164  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    7165  {
    7166  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    7167  }
    7168 
    7169  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
    7170 
    7171  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    7172  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    7173  {
    7174  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    7175  m_HeapSizeLimit[heapIndex] += size;
    7176  }
    7177 }
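// Illustrative sketch: the pfnAllocate/pfnFree hooks invoked above can log or
// budget every VkDeviceMemory block. The pfnAllocate signature is assumed
// symmetric to PFN_vmaFreeDeviceMemoryFunction, and pDeviceMemoryCallbacks is
// assumed to be the VmaAllocatorCreateInfo member carrying these callbacks:
/*
static void VKAPI_PTR OnDeviceMemAlloc(VmaAllocator allocator,
    uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
{
    printf("Allocated %llu B from memory type %u\n", (unsigned long long)size, memoryType);
}
static void VKAPI_PTR OnDeviceMemFree(VmaAllocator allocator,
    uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
{
    printf("Freed %llu B from memory type %u\n", (unsigned long long)size, memoryType);
}

VmaDeviceMemoryCallbacks deviceMemoryCallbacks = { OnDeviceMemAlloc, OnDeviceMemFree };
VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.pDeviceMemoryCallbacks = &deviceMemoryCallbacks;
*/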
    7178 
    7179 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
    7180 {
    7181  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
    7182 
    7183  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    7184  {
    7185  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    7186  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex][allocation->GetBlockVectorType()];
    7187  VMA_ASSERT(pDedicatedAllocations);
    7188  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
    7189  VMA_ASSERT(success);
    7190  }
    7191 
    7192  VkDeviceMemory hMemory = allocation->GetMemory();
    7193 
    7194  if(allocation->GetMappedData() != VMA_NULL)
    7195  {
    7196  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    7197  }
    7198 
    7199  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
    7200 
    7201  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
    7202 }
    7203 
    7204 #if VMA_STATS_STRING_ENABLED
    7205 
    7206 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
    7207 {
    7208  bool dedicatedAllocationsStarted = false;
    7209  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    7210  {
    7211  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    7212  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
    7213  {
    7214  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex][blockVectorType];
    7215  VMA_ASSERT(pDedicatedAllocVector);
    7216  if(pDedicatedAllocVector->empty() == false)
    7217  {
    7218  if(dedicatedAllocationsStarted == false)
    7219  {
    7220  dedicatedAllocationsStarted = true;
    7221  json.WriteString("DedicatedAllocations");
    7222  json.BeginObject();
    7223  }
    7224 
    7225  json.BeginString("Type ");
    7226  json.ContinueString(memTypeIndex);
    7227  if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
    7228  {
    7229  json.ContinueString(" Mapped");
    7230  }
    7231  json.EndString();
    7232 
    7233  json.BeginArray();
    7234 
    7235  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
    7236  {
    7237  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
    7238  json.BeginObject(true);
    7239 
    7240  json.WriteString("Size");
    7241  json.WriteNumber(hAlloc->GetSize());
    7242 
    7243  json.WriteString("Type");
    7244  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);
    7245 
    7246  json.EndObject();
    7247  }
    7248 
    7249  json.EndArray();
    7250  }
    7251  }
    7252  }
    7253  if(dedicatedAllocationsStarted)
    7254  {
    7255  json.EndObject();
    7256  }
    7257 
    7258  {
    7259  bool allocationsStarted = false;
    7260  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    7261  {
    7262  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
    7263  {
    7264  if(m_pBlockVectors[memTypeIndex][blockVectorType]->IsEmpty() == false)
    7265  {
    7266  if(allocationsStarted == false)
    7267  {
    7268  allocationsStarted = true;
    7269  json.WriteString("DefaultPools");
    7270  json.BeginObject();
    7271  }
    7272 
    7273  json.BeginString("Type ");
    7274  json.ContinueString(memTypeIndex);
    7275  if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
    7276  {
    7277  json.ContinueString(" Mapped");
    7278  }
    7279  json.EndString();
    7280 
    7281  m_pBlockVectors[memTypeIndex][blockVectorType]->PrintDetailedMap(json);
    7282  }
    7283  }
    7284  }
    7285  if(allocationsStarted)
    7286  {
    7287  json.EndObject();
    7288  }
    7289  }
    7290 
    7291  {
    7292  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    7293  const size_t poolCount = m_Pools.size();
    7294  if(poolCount > 0)
    7295  {
    7296  json.WriteString("Pools");
    7297  json.BeginArray();
    7298  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
    7299  {
    7300  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
    7301  }
    7302  json.EndArray();
    7303  }
    7304  }
    7305 }
    7306 
    7307 #endif // #if VMA_STATS_STRING_ENABLED
    7308 
    7309 static VkResult AllocateMemoryForImage(
    7310  VmaAllocator allocator,
    7311  VkImage image,
    7312  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    7313  VmaSuballocationType suballocType,
    7314  VmaAllocation* pAllocation)
    7315 {
    7316  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
    7317 
    7318  VkMemoryRequirements vkMemReq = {};
    7319  bool dedicatedAllocation = false;
    7320  allocator->GetImageMemoryRequirements(image, vkMemReq, dedicatedAllocation);
    7321 
    7322  return allocator->AllocateMemory(
    7323  vkMemReq,
    7324  dedicatedAllocation,
    7325  *pAllocationCreateInfo,
    7326  suballocType,
    7327  pAllocation);
    7328 }
    7329 
    7330 ////////////////////////////////////////////////////////////////////////////////
    7331 // Public interface
    7332 
    7333 VkResult vmaCreateAllocator(
    7334  const VmaAllocatorCreateInfo* pCreateInfo,
    7335  VmaAllocator* pAllocator)
    7336 {
    7337  VMA_ASSERT(pCreateInfo && pAllocator);
    7338  VMA_DEBUG_LOG("vmaCreateAllocator");
    7339  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    7340  return VK_SUCCESS;
    7341 }
    7342 
    7343 void vmaDestroyAllocator(
    7344  VmaAllocator allocator)
    7345 {
    7346  if(allocator != VK_NULL_HANDLE)
    7347  {
    7348  VMA_DEBUG_LOG("vmaDestroyAllocator");
    7349  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
    7350  vma_delete(&allocationCallbacks, allocator);
    7351  }
    7352 }
    7353 
    7354 void vmaGetPhysicalDeviceProperties(
    7355  VmaAllocator allocator,
    7356  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    7357 {
    7358  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    7359  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    7360 }
    7361 
    7362 void vmaGetMemoryProperties(
    7363  VmaAllocator allocator,
    7364  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    7365 {
    7366  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    7367  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    7368 }
    7369 
    7370 void vmaGetMemoryTypeProperties(
    7371  VmaAllocator allocator,
    7372  uint32_t memoryTypeIndex,
    7373  VkMemoryPropertyFlags* pFlags)
    7374 {
    7375  VMA_ASSERT(allocator && pFlags);
    7376  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    7377  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    7378 }
    7379 
    7380 void vmaSetCurrentFrameIndex(
    7381  VmaAllocator allocator,
    7382  uint32_t frameIndex)
    7383 {
    7384  VMA_ASSERT(allocator);
    7385  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    7386 
    7387  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7388 
    7389  allocator->SetCurrentFrameIndex(frameIndex);
    7390 }
    7391 
    7392 void vmaCalculateStats(
    7393  VmaAllocator allocator,
    7394  VmaStats* pStats)
    7395 {
    7396  VMA_ASSERT(allocator && pStats);
    7397  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7398  allocator->CalculateStats(pStats);
    7399 }
    7400 
    7401 #if VMA_STATS_STRING_ENABLED
    7402 
    7403 void vmaBuildStatsString(
    7404  VmaAllocator allocator,
    7405  char** ppStatsString,
    7406  VkBool32 detailedMap)
    7407 {
    7408  VMA_ASSERT(allocator && ppStatsString);
    7409  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7410 
    7411  VmaStringBuilder sb(allocator);
    7412  {
    7413  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
    7414  json.BeginObject();
    7415 
    7416  VmaStats stats;
    7417  allocator->CalculateStats(&stats);
    7418 
    7419  json.WriteString("Total");
    7420  VmaPrintStatInfo(json, stats.total);
    7421 
    7422  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
    7423  {
    7424  json.BeginString("Heap ");
    7425  json.ContinueString(heapIndex);
    7426  json.EndString();
    7427  json.BeginObject();
    7428 
    7429  json.WriteString("Size");
    7430  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
    7431 
    7432  json.WriteString("Flags");
    7433  json.BeginArray(true);
    7434  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
    7435  {
    7436  json.WriteString("DEVICE_LOCAL");
    7437  }
    7438  json.EndArray();
    7439 
    7440  if(stats.memoryHeap[heapIndex].blockCount > 0)
    7441  {
    7442  json.WriteString("Stats");
    7443  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
    7444  }
    7445 
    7446  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
    7447  {
    7448  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
    7449  {
    7450  json.BeginString("Type ");
    7451  json.ContinueString(typeIndex);
    7452  json.EndString();
    7453 
    7454  json.BeginObject();
    7455 
    7456  json.WriteString("Flags");
    7457  json.BeginArray(true);
    7458  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
    7459  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
    7460  {
    7461  json.WriteString("DEVICE_LOCAL");
    7462  }
    7463  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    7464  {
    7465  json.WriteString("HOST_VISIBLE");
    7466  }
    7467  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
    7468  {
    7469  json.WriteString("HOST_COHERENT");
    7470  }
    7471  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
    7472  {
    7473  json.WriteString("HOST_CACHED");
    7474  }
    7475  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
    7476  {
    7477  json.WriteString("LAZILY_ALLOCATED");
    7478  }
    7479  json.EndArray();
    7480 
    7481  if(stats.memoryType[typeIndex].blockCount > 0)
    7482  {
    7483  json.WriteString("Stats");
    7484  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
    7485  }
    7486 
    7487  json.EndObject();
    7488  }
    7489  }
    7490 
    7491  json.EndObject();
    7492  }
    7493  if(detailedMap == VK_TRUE)
    7494  {
    7495  allocator->PrintDetailedMap(json);
    7496  }
    7497 
    7498  json.EndObject();
    7499  }
    7500 
    7501  const size_t len = sb.GetLength();
    7502  char* const pChars = vma_new_array(allocator, char, len + 1);
    7503  if(len > 0)
    7504  {
    7505  memcpy(pChars, sb.GetData(), len);
    7506  }
    7507  pChars[len] = '\0';
    7508  *ppStatsString = pChars;
    7509 }
    7510 
    7511 void vmaFreeStatsString(
    7512  VmaAllocator allocator,
    7513  char* pStatsString)
    7514 {
    7515  if(pStatsString != VMA_NULL)
    7516  {
    7517  VMA_ASSERT(allocator);
    7518  size_t len = strlen(pStatsString);
    7519  vma_delete_array(allocator, pStatsString, len + 1);
    7520  }
    7521 }
    7522 
    7523 #endif // #if VMA_STATS_STRING_ENABLED
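// Illustrative sketch: dumping the JSON built above and releasing it with the
// matching free function (VK_TRUE requests the detailed map):
/*
char* statsString = VMA_NULL;
vmaBuildStatsString(allocator, &statsString, VK_TRUE);
printf("%s\n", statsString);
vmaFreeStatsString(allocator, statsString);
*/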
    7524 
    7527 VkResult vmaFindMemoryTypeIndex(
    7528  VmaAllocator allocator,
    7529  uint32_t memoryTypeBits,
    7530  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    7531  uint32_t* pMemoryTypeIndex)
    7532 {
    7533  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    7534  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    7535  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    7536 
    7537  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    7538  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    7539  if(preferredFlags == 0)
    7540  {
    7541  preferredFlags = requiredFlags;
    7542  }
    7543  // preferredFlags, if not 0, must be a superset of requiredFlags.
    7544  VMA_ASSERT((requiredFlags & ~preferredFlags) == 0);
    7545 
    7546  // Convert usage to requiredFlags and preferredFlags.
    7547  switch(pAllocationCreateInfo->usage)
    7548  {
    7549  case VMA_MEMORY_USAGE_UNKNOWN:
    7550  break;
    7551  case VMA_MEMORY_USAGE_GPU_ONLY:
    7552  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    7553  break;
    7554  case VMA_MEMORY_USAGE_CPU_ONLY:
    7555  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    7556  break;
    7557  case VMA_MEMORY_USAGE_CPU_TO_GPU:
    7558  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    7559  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    7560  break;
    7561  case VMA_MEMORY_USAGE_GPU_TO_CPU:
    7562  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    7563  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    7564  break;
    7565  default:
    7566  break;
    7567  }
    7568 
    7569  *pMemoryTypeIndex = UINT32_MAX;
    7570  uint32_t minCost = UINT32_MAX;
    7571  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    7572  memTypeIndex < allocator->GetMemoryTypeCount();
    7573  ++memTypeIndex, memTypeBit <<= 1)
    7574  {
    7575  // This memory type is acceptable according to memoryTypeBits bitmask.
    7576  if((memTypeBit & memoryTypeBits) != 0)
    7577  {
    7578  const VkMemoryPropertyFlags currFlags =
    7579  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    7580  // This memory type contains requiredFlags.
    7581  if((requiredFlags & ~currFlags) == 0)
    7582  {
    7583  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    7584  uint32_t currCost = CountBitsSet(preferredFlags & ~currFlags);
    7585  // Remember memory type with lowest cost.
    7586  if(currCost < minCost)
    7587  {
    7588  *pMemoryTypeIndex = memTypeIndex;
    7589  if(currCost == 0)
    7590  {
    7591  return VK_SUCCESS;
    7592  }
    7593  minCost = currCost;
    7594  }
    7595  }
    7596  }
    7597  }
    7598  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    7599 }
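// Illustrative sketch: choosing a memory type for a CPU-side staging buffer;
// the switch above translates the usage value into
// HOST_VISIBLE | HOST_COHERENT required flags (`memReq` is assumed):
/*
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

uint32_t memTypeIndex = UINT32_MAX;
VkResult res = vmaFindMemoryTypeIndex(allocator, memReq.memoryTypeBits, &allocCreateInfo, &memTypeIndex);
// VK_ERROR_FEATURE_NOT_PRESENT means no compatible memory type exists.
*/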
    7600 
    7601 VkResult vmaCreatePool(
    7602  VmaAllocator allocator,
    7603  const VmaPoolCreateInfo* pCreateInfo,
    7604  VmaPool* pPool)
    7605 {
    7606  VMA_ASSERT(allocator && pCreateInfo && pPool);
    7607 
    7608  VMA_DEBUG_LOG("vmaCreatePool");
    7609 
    7610  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7611 
    7612  return allocator->CreatePool(pCreateInfo, pPool);
    7613 }
    7614 
    7615 void vmaDestroyPool(
    7616  VmaAllocator allocator,
    7617  VmaPool pool)
    7618 {
    7619  VMA_ASSERT(allocator && pool);
    7620 
    7621  VMA_DEBUG_LOG("vmaDestroyPool");
    7622 
    7623  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7624 
    7625  allocator->DestroyPool(pool);
    7626 }
    7627 
    7628 void vmaGetPoolStats(
    7629  VmaAllocator allocator,
    7630  VmaPool pool,
    7631  VmaPoolStats* pPoolStats)
    7632 {
    7633  VMA_ASSERT(allocator && pool && pPoolStats);
    7634 
    7635  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7636 
    7637  allocator->GetPoolStats(pool, pPoolStats);
    7638 }
    7639 
    7640 void vmaMakePoolAllocationsLost(
    7641  VmaAllocator allocator,
    7642  VmaPool pool,
    7643  size_t* pLostAllocationCount)
    7644 {
    7645  VMA_ASSERT(allocator && pool);
    7646 
    7647  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7648 
    7649  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    7650 }
    7651 
    7652 VkResult vmaAllocateMemory(
    7653  VmaAllocator allocator,
    7654  const VkMemoryRequirements* pVkMemoryRequirements,
    7655  const VmaAllocationCreateInfo* pCreateInfo,
    7656  VmaAllocation* pAllocation,
    7657  VmaAllocationInfo* pAllocationInfo)
    7658 {
    7659  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
    7660 
    7661  VMA_DEBUG_LOG("vmaAllocateMemory");
    7662 
    7663  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7664 
    7665  VkResult result = allocator->AllocateMemory(
    7666  *pVkMemoryRequirements,
    7667  false, // dedicatedAllocation
    7668  *pCreateInfo,
    7669  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    7670  pAllocation);
    7671 
    7672  if(pAllocationInfo && result == VK_SUCCESS)
    7673  {
    7674  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    7675  }
    7676 
    7677  return result;
    7678 }
    7679 
    7680 VkResult vmaAllocateMemoryForBuffer(
    7681  VmaAllocator allocator,
    7682  VkBuffer buffer,
    7683  const VmaAllocationCreateInfo* pCreateInfo,
    7684  VmaAllocation* pAllocation,
    7685  VmaAllocationInfo* pAllocationInfo)
    7686 {
    7687  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    7688 
    7689  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    7690 
    7691  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7692 
    7693  VkMemoryRequirements vkMemReq = {};
    7694  bool dedicatedAllocation = false;
    7695  allocator->GetBufferMemoryRequirements(buffer, vkMemReq, dedicatedAllocation);
    7696 
    7697  VkResult result = allocator->AllocateMemory(
    7698  vkMemReq,
    7699  dedicatedAllocation,
    7700  *pCreateInfo,
    7701  VMA_SUBALLOCATION_TYPE_BUFFER,
    7702  pAllocation);
    7703 
    7704  if(pAllocationInfo && result == VK_SUCCESS)
    7705  {
    7706  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    7707  }
    7708 
    7709  return result;
    7710 }
    7711 
    7712 VkResult vmaAllocateMemoryForImage(
    7713  VmaAllocator allocator,
    7714  VkImage image,
    7715  const VmaAllocationCreateInfo* pCreateInfo,
    7716  VmaAllocation* pAllocation,
    7717  VmaAllocationInfo* pAllocationInfo)
    7718 {
    7719  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    7720 
    7721  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
    7722 
    7723  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7724 
    7725  VkResult result = AllocateMemoryForImage(
    7726  allocator,
    7727  image,
    7728  pCreateInfo,
    7729  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
    7730  pAllocation);
    7731 
    7732  if(pAllocationInfo && result == VK_SUCCESS)
    7733  {
    7734  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    7735  }
    7736 
    7737  return result;
    7738 }
    7739 
    7740 void vmaFreeMemory(
    7741  VmaAllocator allocator,
    7742  VmaAllocation allocation)
    7743 {
    7744  VMA_ASSERT(allocator && allocation);
    7745 
    7746  VMA_DEBUG_LOG("vmaFreeMemory");
    7747 
    7748  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7749 
    7750  allocator->FreeMemory(allocation);
    7751 }
    7752 
    7753 void vmaGetAllocationInfo(
    7754  VmaAllocator allocator,
    7755  VmaAllocation allocation,
    7756  VmaAllocationInfo* pAllocationInfo)
    7757 {
    7758  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    7759 
    7760  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7761 
    7762  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    7763 }
    7764 
    7765 void vmaSetAllocationUserData(
    7766  VmaAllocator allocator,
    7767  VmaAllocation allocation,
    7768  void* pUserData)
    7769 {
    7770  VMA_ASSERT(allocator && allocation);
    7771 
    7772  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7773 
    7774  allocation->SetUserData(pUserData);
    7775 }
    7776 
    7777 void vmaCreateLostAllocation(
    7778  VmaAllocator allocator,
    7779  VmaAllocation* pAllocation)
    7780 {
    7781  VMA_ASSERT(allocator && pAllocation);
    7782 
    7783  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7784 
    7785  allocator->CreateLostAllocation(pAllocation);
    7786 }
    7787 
    7788 VkResult vmaMapMemory(
    7789  VmaAllocator allocator,
    7790  VmaAllocation allocation,
    7791  void** ppData)
    7792 {
    7793  VMA_ASSERT(allocator && allocation && ppData);
    7794 
    7795  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7796 
    7797  return (*allocator->GetVulkanFunctions().vkMapMemory)(
    7798  allocator->m_hDevice,
    7799  allocation->GetMemory(),
    7800  allocation->GetOffset(),
    7801  allocation->GetSize(),
    7802  0,
    7803  ppData);
    7804 }
    7805 
    7806 void vmaUnmapMemory(
    7807  VmaAllocator allocator,
    7808  VmaAllocation allocation)
    7809 {
    7810  VMA_ASSERT(allocator && allocation);
    7811 
    7812  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7813 
    7814  (*allocator->GetVulkanFunctions().vkUnmapMemory)(allocator->m_hDevice, allocation->GetMemory());
    7815 }
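// Illustrative sketch: short-lived mapping of a HOST_VISIBLE allocation to
// upload data (`srcData`/`srcDataSize` are assumed; the underlying block must
// not already be mapped, e.g. persistently):
/*
void* mappedData = VMA_NULL;
VkResult res = vmaMapMemory(allocator, allocation, &mappedData);
if(res == VK_SUCCESS)
{
    memcpy(mappedData, srcData, srcDataSize);
    vmaUnmapMemory(allocator, allocation);
}
*/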
    7816 
    7817 void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator)
    7818 {
    7819  VMA_ASSERT(allocator);
    7820 
    7821  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7822 
    7823  allocator->UnmapPersistentlyMappedMemory();
    7824 }
    7825 
    7826 VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator)
    7827 {
    7828  VMA_ASSERT(allocator);
    7829 
    7830  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7831 
    7832  return allocator->MapPersistentlyMappedMemory();
    7833 }
    7834 
    7835 VkResult vmaDefragment(
    7836  VmaAllocator allocator,
    7837  VmaAllocation* pAllocations,
    7838  size_t allocationCount,
    7839  VkBool32* pAllocationsChanged,
    7840  const VmaDefragmentationInfo *pDefragmentationInfo,
    7841  VmaDefragmentationStats* pDefragmentationStats)
    7842 {
    7843  VMA_ASSERT(allocator && pAllocations);
    7844 
    7845  VMA_DEBUG_LOG("vmaDefragment");
    7846 
    7847  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7848 
    7849  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
    7850 }
    7851 
    7852 VkResult vmaCreateBuffer(
    7853  VmaAllocator allocator,
    7854  const VkBufferCreateInfo* pBufferCreateInfo,
    7855  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    7856  VkBuffer* pBuffer,
    7857  VmaAllocation* pAllocation,
    7858  VmaAllocationInfo* pAllocationInfo)
    7859 {
    7860  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
    7861 
    7862  VMA_DEBUG_LOG("vmaCreateBuffer");
    7863 
    7864  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7865 
    7866  *pBuffer = VK_NULL_HANDLE;
    7867  *pAllocation = VK_NULL_HANDLE;
    7868 
    7869  // 1. Create VkBuffer.
    7870  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
    7871  allocator->m_hDevice,
    7872  pBufferCreateInfo,
    7873  allocator->GetAllocationCallbacks(),
    7874  pBuffer);
    7875  if(res >= 0)
    7876  {
    7877  // 2. vkGetBufferMemoryRequirements.
    7878  VkMemoryRequirements vkMemReq = {};
    7879  bool dedicatedAllocation = false;
    7880  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq, dedicatedAllocation);
    7881 
    7882  // 3. Allocate memory using allocator.
    7883  res = allocator->AllocateMemory(
    7884  vkMemReq,
    7885  dedicatedAllocation,
    7886  *pAllocationCreateInfo,
    7887  VMA_SUBALLOCATION_TYPE_BUFFER,
    7888  pAllocation);
    7889  if(res >= 0)
    7890  {
    7891  // 4. Bind buffer with memory.
    7892  res = (*allocator->GetVulkanFunctions().vkBindBufferMemory)(
    7893  allocator->m_hDevice,
    7894  *pBuffer,
    7895  (*pAllocation)->GetMemory(),
    7896  (*pAllocation)->GetOffset());
    7897  if(res >= 0)
    7898  {
    7899  // All steps succeeded.
    7900  if(pAllocationInfo != VMA_NULL)
    7901  {
    7902  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    7903  }
    7904  return VK_SUCCESS;
    7905  }
    7906  allocator->FreeMemory(*pAllocation);
    7907  *pAllocation = VK_NULL_HANDLE;
    7908  return res;
    7909  }
    7910  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    7911  *pBuffer = VK_NULL_HANDLE;
    7912  return res;
    7913  }
    7914  return res;
    7915 }
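// Illustrative sketch: the all-in-one path above, used here to create a
// persistently mapped staging buffer:
/*
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT;

VkBuffer buffer;
VmaAllocation allocation;
VmaAllocationInfo allocationInfo;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
    &buffer, &allocation, &allocationInfo);
// allocationInfo.pMappedData remains valid until vmaDestroyBuffer(), except
// between vmaUnmapPersistentlyMappedMemory()/vmaMapPersistentlyMappedMemory().

vmaDestroyBuffer(allocator, buffer, allocation);
*/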
    7916 
    7917 void vmaDestroyBuffer(
    7918  VmaAllocator allocator,
    7919  VkBuffer buffer,
    7920  VmaAllocation allocation)
    7921 {
    7922  if(buffer != VK_NULL_HANDLE)
    7923  {
    7924  VMA_ASSERT(allocator);
    7925 
    7926  VMA_DEBUG_LOG("vmaDestroyBuffer");
    7927 
    7928  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7929 
    7930  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    7931 
    7932  allocator->FreeMemory(allocation);
    7933  }
    7934 }
    7935 
    7936 VkResult vmaCreateImage(
    7937  VmaAllocator allocator,
    7938  const VkImageCreateInfo* pImageCreateInfo,
    7939  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    7940  VkImage* pImage,
    7941  VmaAllocation* pAllocation,
    7942  VmaAllocationInfo* pAllocationInfo)
    7943 {
    7944  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
    7945 
    7946  VMA_DEBUG_LOG("vmaCreateImage");
    7947 
    7948  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7949 
    7950  *pImage = VK_NULL_HANDLE;
    7951  *pAllocation = VK_NULL_HANDLE;
    7952 
    7953  // 1. Create VkImage.
    7954  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
    7955  allocator->m_hDevice,
    7956  pImageCreateInfo,
    7957  allocator->GetAllocationCallbacks(),
    7958  pImage);
    7959  if(res >= 0)
    7960  {
    7961  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
    7962  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
    7963  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
    7964 
    7965  // 2. Allocate memory using allocator.
    7966  res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
    7967  if(res >= 0)
    7968  {
    7969  // 3. Bind image with memory.
    7970  res = (*allocator->GetVulkanFunctions().vkBindImageMemory)(
    7971  allocator->m_hDevice,
    7972  *pImage,
    7973  (*pAllocation)->GetMemory(),
    7974  (*pAllocation)->GetOffset());
    7975  if(res >= 0)
    7976  {
    7977  // All steps succeeded.
    7978  if(pAllocationInfo != VMA_NULL)
    7979  {
    7980  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    7981  }
    7982  return VK_SUCCESS;
    7983  }
    7984  allocator->FreeMemory(*pAllocation);
    7985  *pAllocation = VK_NULL_HANDLE;
    7986  return res;
    7987  }
    7988  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    7989  *pImage = VK_NULL_HANDLE;
    7990  return res;
    7991  }
    7992  return res;
    7993 }
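// Illustrative sketch: the image analogue; tiling chooses the suballocation
// type above, which keeps linear and optimal resources correctly separated
// with respect to bufferImageGranularity:
/*
VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
imgCreateInfo.extent = { 1024, 1024, 1 };
imgCreateInfo.mipLevels = 1;
imgCreateInfo.arrayLayers = 1;
imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
imgCreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkImage image;
VmaAllocation allocation;
VkResult res = vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo,
    &image, &allocation, VMA_NULL);

vmaDestroyImage(allocator, image, allocation);
*/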
    7994 
    7995 void vmaDestroyImage(
    7996  VmaAllocator allocator,
    7997  VkImage image,
    7998  VmaAllocation allocation)
    7999 {
    8000  if(image != VK_NULL_HANDLE)
    8001  {
    8002  VMA_ASSERT(allocator);
    8003 
    8004  VMA_DEBUG_LOG("vmaDestroyImage");
    8005 
    8006  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8007 
    8008  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    8009 
    8010  allocator->FreeMemory(allocation);
    8011  }
    8012 }
    8013 
    8014 #endif // #ifdef VMA_IMPLEMENTATION
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
    Definition: vk_mem_alloc.h:474
    +
    VkPhysicalDevice physicalDevice
    Vulkan physical device.
    Definition: vk_mem_alloc.h:499
    +
    Definition: vk_mem_alloc.h:836
    void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
    Retrieves statistics of existing VmaPool object.
    -
    PFN_vkCreateBuffer vkCreateBuffer
    Definition: vk_mem_alloc.h:456
    -
    Memory will be used for frequent writing on device and readback on host (download).
    Definition: vk_mem_alloc.h:651
    +
    PFN_vkCreateBuffer vkCreateBuffer
    Definition: vk_mem_alloc.h:484
    +
    Memory will be used for frequent writing on device and readback on host (download).
    Definition: vk_mem_alloc.h:681
    VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
    -
    PFN_vkMapMemory vkMapMemory
    Definition: vk_mem_alloc.h:450
    -
    VkDeviceMemory deviceMemory
    Handle to Vulkan memory object.
    Definition: vk_mem_alloc.h:934
    -
    uint32_t maxAllocationsToMove
    Maximum number of allocations that can be moved to different place.
    Definition: vk_mem_alloc.h:1087
    +
    PFN_vkMapMemory vkMapMemory
    Definition: vk_mem_alloc.h:478
    +
    VkDeviceMemory deviceMemory
    Handle to Vulkan memory object.
    Definition: vk_mem_alloc.h:964
    +
    uint32_t maxAllocationsToMove
    Maximum number of allocations that can be moved to different place.
    Definition: vk_mem_alloc.h:1117
    VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    Function similar to vmaCreateBuffer().
    void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
    Returns current information about specified allocation.
    void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator)
    Unmaps persistently mapped memory of types that are HOST_COHERENT and DEVICE_LOCAL.
    void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
    Destroys Vulkan image and frees allocated memory.
    -
    VkDeviceSize size
    Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    Definition: vk_mem_alloc.h:858
    +
    VkDeviceSize size
    Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    Definition: vk_mem_alloc.h:888
    struct VmaDefragmentationInfo VmaDefragmentationInfo
    Optional configuration parameters to be passed to function vmaDefragment().
    -
    Definition: vk_mem_alloc.h:706
    -
    VkMemoryPropertyFlags preferredFlags
    Flags that preferably should be set in a Memory Type chosen for an allocation.
    Definition: vk_mem_alloc.h:739
    +
    Definition: vk_mem_alloc.h:736
    +
    VkMemoryPropertyFlags preferredFlags
    Flags that preferably should be set in a Memory Type chosen for an allocation.
    Definition: vk_mem_alloc.h:769
    void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called before vkFreeMemory.
    Definition: vk_mem_alloc.h:409
    void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
    Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
    -
    const VkAllocationCallbacks * pAllocationCallbacks
    Custom CPU memory allocation callbacks.
    Definition: vk_mem_alloc.h:481
    -
    VkFlags VmaPoolCreateFlags
    Definition: vk_mem_alloc.h:808
    -
    const VmaVulkanFunctions * pVulkanFunctions
    Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
    Definition: vk_mem_alloc.h:528
    -
    Description of a Allocator to be created.
    Definition: vk_mem_alloc.h:463
    -
    VkDeviceSize preferredSmallHeapBlockSize
    Preferred size of a single VkDeviceMemory block to be allocated from small heaps <= 512 MB...
    Definition: vk_mem_alloc.h:478
    -
    VkDeviceSize allocationSizeMax
    Definition: vk_mem_alloc.h:593
    -
    PFN_vkBindImageMemory vkBindImageMemory
    Definition: vk_mem_alloc.h:453
    -
    VkFlags VmaAllocatorFlags
    Definition: vk_mem_alloc.h:439
    -
    VkDeviceSize unusedBytes
    Total number of bytes occupied by unused ranges.
    Definition: vk_mem_alloc.h:592
    -
    Statistics returned by function vmaDefragment().
    Definition: vk_mem_alloc.h:1091
    -
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:498
    -
    VmaStatInfo total
    Definition: vk_mem_alloc.h:602
    -
    uint32_t deviceMemoryBlocksFreed
    Number of empty VkDeviceMemory objects that have been released to the system.
    Definition: vk_mem_alloc.h:1099
    -
    VmaAllocationCreateFlags flags
    Use VmaAllocationCreateFlagBits enum.
    Definition: vk_mem_alloc.h:722
    -
    VkDeviceSize maxBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:1082
    -
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
    Definition: vk_mem_alloc.h:454
    +
    const VkAllocationCallbacks * pAllocationCallbacks
    Custom CPU memory allocation callbacks.
    Definition: vk_mem_alloc.h:511
    +
    VkFlags VmaPoolCreateFlags
    Definition: vk_mem_alloc.h:838
    +
    const VmaVulkanFunctions * pVulkanFunctions
    Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
    Definition: vk_mem_alloc.h:558
    +
    Description of a Allocator to be created.
    Definition: vk_mem_alloc.h:493
    +
    VkDeviceSize preferredSmallHeapBlockSize
    Preferred size of a single VkDeviceMemory block to be allocated from small heaps <= 512 MB...
    Definition: vk_mem_alloc.h:508
    +
    VkDeviceSize allocationSizeMax
    Definition: vk_mem_alloc.h:623
    +
    PFN_vkBindImageMemory vkBindImageMemory
    Definition: vk_mem_alloc.h:481
    +
    VkFlags VmaAllocatorFlags
    Definition: vk_mem_alloc.h:467
    +
    VkDeviceSize unusedBytes
    Total number of bytes occupied by unused ranges.
    Definition: vk_mem_alloc.h:622
    +
    PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR
    Definition: vk_mem_alloc.h:489
    +
    Statistics returned by function vmaDefragment().
    Definition: vk_mem_alloc.h:1121
    +
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:528
    +
    VmaStatInfo total
    Definition: vk_mem_alloc.h:632
    +
    uint32_t deviceMemoryBlocksFreed
    Number of empty VkDeviceMemory objects that have been released to the system.
    Definition: vk_mem_alloc.h:1129
    +
    VmaAllocationCreateFlags flags
    Use VmaAllocationCreateFlagBits enum.
    Definition: vk_mem_alloc.h:752
    +
    VkDeviceSize maxBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:1112
    +
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
    Definition: vk_mem_alloc.h:482
    VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    -
    VkDevice device
    Vulkan device.
    Definition: vk_mem_alloc.h:472
    -
    Describes parameter of created VmaPool.
    Definition: vk_mem_alloc.h:812
    +
    VkDevice device
    Vulkan device.
    Definition: vk_mem_alloc.h:502
    +
    Describes parameter of created VmaPool.
    Definition: vk_mem_alloc.h:842
    struct VmaPoolStats VmaPoolStats
Describes parameters of an existing VmaPool.
    -
    VkDeviceSize size
    Size of this allocation, in bytes.
    Definition: vk_mem_alloc.h:944
    +
    VkDeviceSize size
    Size of this allocation, in bytes.
    Definition: vk_mem_alloc.h:974
    void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
    Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
    -
    PFN_vkUnmapMemory vkUnmapMemory
    Definition: vk_mem_alloc.h:451
    +
    PFN_vkUnmapMemory vkUnmapMemory
    Definition: vk_mem_alloc.h:479
    VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    General purpose memory allocation.
    -
    void * pUserData
    Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
    Definition: vk_mem_alloc.h:741
    -
    size_t minBlockCount
    Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    Definition: vk_mem_alloc.h:828
    -
    size_t allocationCount
    Number of VmaAllocation objects created from this pool that were not destroyed or lost...
    Definition: vk_mem_alloc.h:864
    -
    uint32_t memoryTypeIndex
    Vulkan memory type index to allocate this pool from.
    Definition: vk_mem_alloc.h:815
    +
    void * pUserData
    Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
    Definition: vk_mem_alloc.h:771
    +
    size_t minBlockCount
    Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    Definition: vk_mem_alloc.h:858
    +
    size_t allocationCount
    Number of VmaAllocation objects created from this pool that were not destroyed or lost...
    Definition: vk_mem_alloc.h:894
    +
    uint32_t memoryTypeIndex
    Vulkan memory type index to allocate this pool from.
    Definition: vk_mem_alloc.h:845
    void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
    Builds and returns statistics as string in JSON format.
    struct VmaVulkanFunctions VmaVulkanFunctions
    Pointers to some Vulkan functions - a subset used by the library.
    -
    Definition: vk_mem_alloc.h:715
    -
    Optional configuration parameters to be passed to function vmaDefragment().
    Definition: vk_mem_alloc.h:1077
    +
    Definition: vk_mem_alloc.h:745
    +
    Optional configuration parameters to be passed to function vmaDefragment().
    Definition: vk_mem_alloc.h:1107
    VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
    Allocates Vulkan device memory and creates VmaPool object.
    -
    Definition: vk_mem_alloc.h:786
    -
    VkDeviceSize bytesFreed
    Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
    Definition: vk_mem_alloc.h:1095
    -
    PFN_vkBindBufferMemory vkBindBufferMemory
    Definition: vk_mem_alloc.h:452
    +
    Definition: vk_mem_alloc.h:816
    +
    VkDeviceSize bytesFreed
    Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
    Definition: vk_mem_alloc.h:1125
    +
    PFN_vkBindBufferMemory vkBindBufferMemory
    Definition: vk_mem_alloc.h:480
    void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
    Sets index of the current frame.
    -
    General statistics from current state of Allocator.
    Definition: vk_mem_alloc.h:598
    +
    General statistics from current state of Allocator.
    Definition: vk_mem_alloc.h:628
    VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
    Creates Allocator object.
    VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    Function similar to vmaAllocateMemoryForBuffer().
    -
Set this flag to use memory that will be persistently mapped, and retrieve a pointer to it...
    Definition: vk_mem_alloc.h:695
    -
    uint32_t allocationsMoved
    Number of allocations that have been moved to different places.
    Definition: vk_mem_alloc.h:1097
    -
    VmaMemoryUsage
    Definition: vk_mem_alloc.h:637
    +
Set this flag to use memory that will be persistently mapped, and retrieve a pointer to it...
    Definition: vk_mem_alloc.h:725
    +
    uint32_t allocationsMoved
    Number of allocations that have been moved to different places.
    Definition: vk_mem_alloc.h:1127
    +
    VmaMemoryUsage
    Definition: vk_mem_alloc.h:667
    void vmaDestroyAllocator(VmaAllocator allocator)
    Destroys allocator object.
    -
    VkMemoryPropertyFlags requiredFlags
    Flags that must be set in a Memory Type chosen for an allocation.
    Definition: vk_mem_alloc.h:733
    +
    VkMemoryPropertyFlags requiredFlags
    Flags that must be set in a Memory Type chosen for an allocation.
    Definition: vk_mem_alloc.h:763
    Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
    Definition: vk_mem_alloc.h:435
    void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
    Retrieves statistics from current state of the Allocator.
    VmaAllocatorFlagBits
    Flags for created VmaAllocator.
    Definition: vk_mem_alloc.h:430
    void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
    Sets pUserData in given allocation to new value.
    -
    VkDeviceSize unusedRangeSizeMax
    Size of the largest continuous free memory region.
    Definition: vk_mem_alloc.h:874
    -
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
    Definition: vk_mem_alloc.h:447
    -
    Calculated statistics of memory usage in entire allocator.
    Definition: vk_mem_alloc.h:581
    -
    VkDeviceSize blockSize
    Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes.
    Definition: vk_mem_alloc.h:823
    +
    VkDeviceSize unusedRangeSizeMax
    Size of the largest continuous free memory region.
    Definition: vk_mem_alloc.h:904
    +
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
    Definition: vk_mem_alloc.h:475
    +
    Calculated statistics of memory usage in entire allocator.
    Definition: vk_mem_alloc.h:611
    +
    VkDeviceSize blockSize
    Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes.
    Definition: vk_mem_alloc.h:853
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
    Definition: vk_mem_alloc.h:422
    -
    VkDeviceSize unusedRangeSizeMin
    Definition: vk_mem_alloc.h:594
    +
    Set this flag if the allocation should have its own memory block.
    Definition: vk_mem_alloc.h:698
    +
    VkDeviceSize unusedRangeSizeMin
    Definition: vk_mem_alloc.h:624
    PFN_vmaFreeDeviceMemoryFunction pfnFree
    Optional, can be null.
    Definition: vk_mem_alloc.h:426
    VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator)
    Maps back persistently mapped memory of types that are HOST_COHERENT and DEVICE_LOCAL.
    -
    VmaPoolCreateFlags flags
    Use combination of VmaPoolCreateFlagBits.
    Definition: vk_mem_alloc.h:818
    +
    VmaPoolCreateFlags flags
    Use combination of VmaPoolCreateFlagBits.
    Definition: vk_mem_alloc.h:848
    struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of an Allocator to be created.
    void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called after successful vkAllocateMemory.
    Definition: vk_mem_alloc.h:403
    -
    VmaMemoryUsage usage
    Intended usage of memory.
    Definition: vk_mem_alloc.h:728
    -
    Definition: vk_mem_alloc.h:719
    -
    uint32_t blockCount
    Number of VkDeviceMemory Vulkan memory blocks allocated.
    Definition: vk_mem_alloc.h:584
    -
    PFN_vkFreeMemory vkFreeMemory
    Definition: vk_mem_alloc.h:449
    -
    size_t maxBlockCount
    Maximum number of blocks that can be allocated in this pool.
    Definition: vk_mem_alloc.h:836
    -
    const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
    Informative callbacks for vkAllocateMemory, vkFreeMemory.
    Definition: vk_mem_alloc.h:484
    -
    size_t unusedRangeCount
    Number of continuous memory ranges in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:867
    -
    VmaPool pool
    Pool that this allocation should be created in.
    Definition: vk_mem_alloc.h:746
    -
    const VkDeviceSize * pHeapSizeLimit
    Either NULL or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
    Definition: vk_mem_alloc.h:516
    -
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
    Definition: vk_mem_alloc.h:600
    -
    VkDeviceSize allocationSizeMin
    Definition: vk_mem_alloc.h:593
    +
    VmaMemoryUsage usage
    Intended usage of memory.
    Definition: vk_mem_alloc.h:758
    +
    Definition: vk_mem_alloc.h:749
    +
    uint32_t blockCount
    Number of VkDeviceMemory Vulkan memory blocks allocated.
    Definition: vk_mem_alloc.h:614
    +
    PFN_vkFreeMemory vkFreeMemory
    Definition: vk_mem_alloc.h:477
    +
    size_t maxBlockCount
    Maximum number of blocks that can be allocated in this pool.
    Definition: vk_mem_alloc.h:866
    +
    const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
    Informative callbacks for vkAllocateMemory, vkFreeMemory.
    Definition: vk_mem_alloc.h:514
    +
    size_t unusedRangeCount
    Number of continuous memory ranges in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:897
    +
    VmaPool pool
    Pool that this allocation should be created in.
    Definition: vk_mem_alloc.h:776
    +
    const VkDeviceSize * pHeapSizeLimit
    Either NULL or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
    Definition: vk_mem_alloc.h:546
    +
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
    Definition: vk_mem_alloc.h:630
    +
    VkDeviceSize allocationSizeMin
    Definition: vk_mem_alloc.h:623
    struct VmaAllocationCreateInfo VmaAllocationCreateInfo
    -
    PFN_vkCreateImage vkCreateImage
    Definition: vk_mem_alloc.h:458
    +
    PFN_vkCreateImage vkCreateImage
    Definition: vk_mem_alloc.h:486
    VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
    Optional, can be null.
    Definition: vk_mem_alloc.h:424
    -
    Definition: vk_mem_alloc.h:713
    -
    PFN_vkDestroyBuffer vkDestroyBuffer
    Definition: vk_mem_alloc.h:457
    -
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:850
    -
    VmaAllocatorFlags flags
    Flags for created allocator. Use VmaAllocatorFlagBits enum.
    Definition: vk_mem_alloc.h:466
    +
    Definition: vk_mem_alloc.h:743
    +
    PFN_vkDestroyBuffer vkDestroyBuffer
    Definition: vk_mem_alloc.h:485
    +
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:880
    +
    VmaAllocatorFlags flags
    Flags for created allocator. Use VmaAllocatorFlagBits enum.
    Definition: vk_mem_alloc.h:496
    void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    -
    void * pUserData
    Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
    Definition: vk_mem_alloc.h:955
    -
    Set this flag if the allocation should have its own memory block.
    Definition: vk_mem_alloc.h:668
    -
    VkDeviceSize preferredLargeHeapBlockSize
    Preferred size of a single VkDeviceMemory block to be allocated from large heaps. ...
    Definition: vk_mem_alloc.h:475
    -
    VkDeviceSize allocationSizeAvg
    Definition: vk_mem_alloc.h:593
    -
    VkDeviceSize usedBytes
    Total number of bytes occupied by all allocations.
    Definition: vk_mem_alloc.h:590
    -
Describes parameters of an existing VmaPool.
    Definition: vk_mem_alloc.h:855
    -
    Memory will be mapped on host. Could be used for transfer to/from device.
    Definition: vk_mem_alloc.h:645
    +
    void * pUserData
    Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
    Definition: vk_mem_alloc.h:985
    +
    VkDeviceSize preferredLargeHeapBlockSize
    Preferred size of a single VkDeviceMemory block to be allocated from large heaps. ...
    Definition: vk_mem_alloc.h:505
    +
    VkDeviceSize allocationSizeAvg
    Definition: vk_mem_alloc.h:623
    +
    VkDeviceSize usedBytes
    Total number of bytes occupied by all allocations.
    Definition: vk_mem_alloc.h:620
    +
Describes parameters of an existing VmaPool.
    Definition: vk_mem_alloc.h:885
    +
    Memory will be mapped on host. Could be used for transfer to/from device.
    Definition: vk_mem_alloc.h:675
    void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
    struct VmaStats VmaStats
    General statistics from current state of Allocator.
    -
    VkDeviceSize offset
    Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
    Definition: vk_mem_alloc.h:939
    -
    VkDeviceSize bytesMoved
    Total number of bytes that have been copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:1093
    +
    VkDeviceSize offset
    Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
    Definition: vk_mem_alloc.h:969
    +
    VkDeviceSize bytesMoved
    Total number of bytes that have been copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:1123
    VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
    Compacts memory by moving allocations.
    -
    Pointers to some Vulkan functions - a subset used by the library.
    Definition: vk_mem_alloc.h:445
    +
    Pointers to some Vulkan functions - a subset used by the library.
    Definition: vk_mem_alloc.h:473
    struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
    -
    uint32_t unusedRangeCount
    Number of free ranges of memory between allocations.
    Definition: vk_mem_alloc.h:588
    -
    VkFlags VmaAllocationCreateFlags
    Definition: vk_mem_alloc.h:717
    -
    uint32_t allocationCount
    Number of VmaAllocation allocation objects allocated.
    Definition: vk_mem_alloc.h:586
    -
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
    Definition: vk_mem_alloc.h:455
    -
    PFN_vkDestroyImage vkDestroyImage
    Definition: vk_mem_alloc.h:459
    -
    VmaPoolCreateFlagBits
    Flags to be passed as VmaPoolCreateInfo::flags.
    Definition: vk_mem_alloc.h:777
    -
    void * pMappedData
Pointer to the beginning of this allocation as mapped data. Null if this allocation is not persistent...
    Definition: vk_mem_alloc.h:950
    +
    PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR
    Definition: vk_mem_alloc.h:488
    +
    uint32_t unusedRangeCount
    Number of free ranges of memory between allocations.
    Definition: vk_mem_alloc.h:618
    +
    VkFlags VmaAllocationCreateFlags
    Definition: vk_mem_alloc.h:747
    +
    uint32_t allocationCount
    Number of VmaAllocation allocation objects allocated.
    Definition: vk_mem_alloc.h:616
    +
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
    Definition: vk_mem_alloc.h:483
    +
    PFN_vkDestroyImage vkDestroyImage
    Definition: vk_mem_alloc.h:487
    +
    VmaPoolCreateFlagBits
    Flags to be passed as VmaPoolCreateInfo::flags.
    Definition: vk_mem_alloc.h:807
    +
    void * pMappedData
Pointer to the beginning of this allocation as mapped data. Null if this allocation is not persistent...
    Definition: vk_mem_alloc.h:980
    void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
    -
    No intended memory usage specified.
    Definition: vk_mem_alloc.h:640
    -
    PFN_vkAllocateMemory vkAllocateMemory
    Definition: vk_mem_alloc.h:448
    +
    No intended memory usage specified.
    Definition: vk_mem_alloc.h:670
    +
    PFN_vkAllocateMemory vkAllocateMemory
    Definition: vk_mem_alloc.h:476
    void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
    Creates new allocation that is in lost state from the beginning.
    -
    Definition: vk_mem_alloc.h:652
    -
Parameters of VmaAllocation objects that can be retrieved using function vmaGetAllocationInfo().
    Definition: vk_mem_alloc.h:920
    -
    Memory will be used for frequent (dynamic) updates from host and reads on device (upload).
    Definition: vk_mem_alloc.h:648
    -
    VmaAllocationCreateFlagBits
    Flags to be passed as VmaAllocationCreateInfo::flags.
    Definition: vk_mem_alloc.h:656
    -
    VkDeviceSize unusedRangeSizeAvg
    Definition: vk_mem_alloc.h:594
    -
    Definition: vk_mem_alloc.h:437
    +
    Definition: vk_mem_alloc.h:682
    +
Parameters of VmaAllocation objects that can be retrieved using function vmaGetAllocationInfo().
    Definition: vk_mem_alloc.h:950
    +
    Enables usage of VK_KHR_dedicated_allocation extension.
    Definition: vk_mem_alloc.h:463
    +
    Memory will be used for frequent (dynamic) updates from host and reads on device (upload).
    Definition: vk_mem_alloc.h:678
    +
    VmaAllocationCreateFlagBits
    Flags to be passed as VmaAllocationCreateInfo::flags.
    Definition: vk_mem_alloc.h:686
    +
    VkDeviceSize unusedRangeSizeAvg
    Definition: vk_mem_alloc.h:624
    +
    Definition: vk_mem_alloc.h:465
    struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects that can be retrieved using function vmaGetAllocationInfo().
    void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
    Given Memory Type Index, returns Property Flags of this memory type.
    -
    Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
    Definition: vk_mem_alloc.h:679
    -
    Memory will be used on device only, so faster access from the device is preferred. No need to be mappable on host.
    Definition: vk_mem_alloc.h:642
    +
    Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
    Definition: vk_mem_alloc.h:709
    +
    Memory will be used on device only, so faster access from the device is preferred. No need to be mappable on host.
    Definition: vk_mem_alloc.h:672
    struct VmaStatInfo VmaStatInfo
    Calculated statistics of memory usage in entire allocator.
    void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
    -
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
    Definition: vk_mem_alloc.h:601
    +
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
    Definition: vk_mem_alloc.h:631
    struct VmaDefragmentationStats VmaDefragmentationStats
    Statistics returned by function vmaDefragment().
    void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
    Destroys VmaPool object and frees Vulkan device memory.
    -
    VkDeviceSize unusedSize
    Total number of bytes in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:861
    -
    VkDeviceSize unusedRangeSizeMax
    Definition: vk_mem_alloc.h:594
    -
    Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
    Definition: vk_mem_alloc.h:804
    +
    VkDeviceSize unusedSize
    Total number of bytes in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:891
    +
    VkDeviceSize unusedRangeSizeMax
    Definition: vk_mem_alloc.h:624
    +
    Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
    Definition: vk_mem_alloc.h:834
    void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
    Destroys Vulkan buffer and frees allocated memory.
    -
    uint32_t memoryType
    Memory type index that this allocation was allocated from.
    Definition: vk_mem_alloc.h:925
    +
    uint32_t memoryType
    Memory type index that this allocation was allocated from.
    Definition: vk_mem_alloc.h:955
    struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameters of a created VmaPool.
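Taken together, the new tooltip entries above describe the extension plumbing introduced in this release: the VMA_ALLOCATOR_KHR_DEDICATED_ALLOCATION_BIT allocator flag plus the vkGetBufferMemoryRequirements2KHR and vkGetImageMemoryRequirements2KHR members of VmaVulkanFunctions. A minimal setup sketch follows, not part of this patch; it assumes VMA_STATIC_VULKAN_FUNCTIONS is left at its default of 1, that both VK_KHR_get_memory_requirements2 and VK_KHR_dedicated_allocation were enabled when the VkDevice was created, and that the helper name is hypothetical.

```cpp
#include "vk_mem_alloc.h"

// Hypothetical helper; `physicalDevice` and `device` come from elsewhere.
VmaAllocator CreateAllocatorWithDedicatedAllocation(
    VkPhysicalDevice physicalDevice, VkDevice device)
{
    // The two *2KHR entry points are never imported statically, so they must
    // be fetched at runtime and passed in via pVulkanFunctions. The other
    // members may stay null here because VMA_STATIC_VULKAN_FUNCTIONS == 1
    // imports them statically and null members are not copied over.
    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkGetBufferMemoryRequirements2KHR =
        (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(
            device, "vkGetBufferMemoryRequirements2KHR");
    vulkanFunctions.vkGetImageMemoryRequirements2KHR =
        (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(
            device, "vkGetImageMemoryRequirements2KHR");

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.flags = VMA_ALLOCATOR_KHR_DEDICATED_ALLOCATION_BIT;
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.pVulkanFunctions = &vulkanFunctions;

    VmaAllocator allocator = VK_NULL_HANDLE;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    (void)res; // Error handling elided for brevity.
    return allocator;
}
```

With this flag set, the library transparently promotes buffers and images to dedicated allocations whenever the driver reports that it requires or prefers one, as the header diff below implements.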
diff --git a/src/vk_mem_alloc.h b/src/vk_mem_alloc.h
index 3bb5f7e..c52377f 100644
--- a/src/vk_mem_alloc.h
+++ b/src/vk_mem_alloc.h
@@ -31,7 +31,7 @@ extern "C" {
 
 \tableofcontents
 
-Version 2.0.0-alpha.3 (2017-09-12)
+Version 2.0.0-alpha.4 (2017-10-02)
 
 Source repository: [VulkanMemoryAllocator project on GitHub](https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator) \n
 Product page: [Vulkan Memory Allocator on GPUOpen](https://gpuopen.com/gaming-product/vulkan-memory-allocator/)
@@ -328,7 +328,7 @@ The library uses following algorithm for allocation, in order:
    specified, try to find space in existing blocks, possibly making some other
    allocations lost.
 -# If failed, try to allocate separate `VkDeviceMemory` for this allocation,
-   just like when you use `VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT`.
+   just like when you use `VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT`.
 -# If failed, choose other memory type that meets the requirements specified in
    VmaAllocationCreateInfo and go to point 1.
 -# If failed, return `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
@@ -433,6 +433,34 @@ typedef enum VmaAllocatorFlagBits {
     Using this flag may increase performance because internal mutexes are not used.
     */
     VMA_ALLOCATOR_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
+    /** \brief Enables usage of VK_KHR_dedicated_allocation extension.
+
+    Using this extension will automatically allocate dedicated blocks of memory for
+    some buffers and images instead of suballocating place for them out of bigger
+    memory blocks (as if you explicitly used VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
+    flag) when it is recommended by the driver. It may improve performance on some
+    GPUs.
+
+    You may set this flag only if you found out that following device extensions are
+    supported, you enabled them while creating Vulkan device passed as
+    VmaAllocatorCreateInfo::device, and you want them to be used internally by this
+    library:
+
+    - VK_KHR_get_memory_requirements2
+    - VK_KHR_dedicated_allocation
+
+    If this flag is enabled, you must also provide
+    VmaAllocatorCreateInfo::pVulkanFunctions and fill at least members:
+    VmaVulkanFunctions::vkGetBufferMemoryRequirements2KHR,
+    VmaVulkanFunctions::vkGetImageMemoryRequirements2KHR, because they are never
+    imported statically.
+
+    When this flag is set, you can experience following warnings reported by Vulkan
+    validation layer. You can ignore them.
+
+    > vkBindBufferMemory(): Binding memory to buffer 0x2d but vkGetBufferMemoryRequirements() has not been called on that buffer.
+    */
+    VMA_ALLOCATOR_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,
 
     VMA_ALLOCATOR_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
 } VmaAllocatorFlagBits;
@@ -457,6 +485,8 @@ typedef struct VmaVulkanFunctions {
     PFN_vkDestroyBuffer vkDestroyBuffer;
     PFN_vkCreateImage vkCreateImage;
     PFN_vkDestroyImage vkDestroyImage;
+    PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
+    PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
 } VmaVulkanFunctions;
 
 /// Description of an Allocator to be created.
@@ -665,14 +695,14 @@ typedef enum VmaAllocationCreateFlagBits {
 
     You should not use this flag if VmaAllocationCreateInfo::pool is not null.
     */
-    VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT = 0x00000001,
+    VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,
 
     /** \brief Set this flag to only try to allocate from existing `VkDeviceMemory` blocks and never create new such block.
 
     If new allocation cannot be placed in any of the existing blocks, allocation
    fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY` error.
-    You should not use `VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT` and
+    You should not use `VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT` and
     `VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT` at the same time. It makes no sense.
 
     If VmaAllocationCreateInfo::pool is not null, this flag is implied and ignored.
     */
@@ -1116,7 +1146,7 @@ allocations are considered nonmovable in this call. Basic rules:
 
 - Only allocations made in memory types that have
   `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` flag can be compacted. You may pass other
   allocations but it makes no sense - these will never be moved.
-- You may pass allocations made with `VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT` but
+- You may pass allocations made with `VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT` but
   it makes no sense - they will never be moved.
 - Both allocations made with or without `VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT`
   flag can be compacted. If not persistently mapped, memory will be mapped
@@ -1453,12 +1483,12 @@ If providing your own implementation, you need to implement a subset of std::ato
     #define VMA_BEST_FIT (1)
 #endif
 
-#ifndef VMA_DEBUG_ALWAYS_OWN_MEMORY
+#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
     /**
-    Every object will have its own allocation.
+    Every allocation will have its own memory block.
     Define to 1 for debugging purposes only.
     */
-    #define VMA_DEBUG_ALWAYS_OWN_MEMORY (0)
+    #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
 #endif
 
 #ifndef VMA_DEBUG_ALIGNMENT
@@ -2826,7 +2856,7 @@ public:
     {
         ALLOCATION_TYPE_NONE,
         ALLOCATION_TYPE_BLOCK,
-        ALLOCATION_TYPE_OWN,
+        ALLOCATION_TYPE_DEDICATED,
     };
 
     VmaAllocation_T(uint32_t currentFrameIndex) :
@@ -2883,7 +2913,7 @@ public:
         m_BlockAllocation.m_Offset = offset;
     }
 
-    void InitOwnAllocation(
+    void InitDedicatedAllocation(
         uint32_t memoryTypeIndex,
         VkDeviceMemory hMemory,
         VmaSuballocationType suballocationType,
@@ -2894,15 +2924,15 @@ public:
     {
         VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
         VMA_ASSERT(hMemory != VK_NULL_HANDLE);
-        m_Type = ALLOCATION_TYPE_OWN;
+        m_Type = ALLOCATION_TYPE_DEDICATED;
         m_Alignment = 0;
         m_Size = size;
         m_pUserData = pUserData;
         m_SuballocationType = suballocationType;
-        m_OwnAllocation.m_MemoryTypeIndex = memoryTypeIndex;
-        m_OwnAllocation.m_hMemory = hMemory;
-        m_OwnAllocation.m_PersistentMap = persistentMap;
-        m_OwnAllocation.m_pMappedData = pMappedData;
+        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
+        m_DedicatedAllocation.m_hMemory = hMemory;
+        m_DedicatedAllocation.m_PersistentMap = persistentMap;
+        m_DedicatedAllocation.m_pMappedData = pMappedData;
     }
 
     ALLOCATION_TYPE GetType() const { return m_Type; }
@@ -2925,8 +2955,8 @@ public:
     bool CanBecomeLost() const;
     VmaPool GetPool() const;
 
-    VkResult OwnAllocMapPersistentlyMappedMemory(VmaAllocator hAllocator);
-    void OwnAllocUnmapPersistentlyMappedMemory(VmaAllocator hAllocator);
+    VkResult DedicatedAllocMapPersistentlyMappedMemory(VmaAllocator hAllocator);
+    void DedicatedAllocUnmapPersistentlyMappedMemory(VmaAllocator hAllocator);
 
     uint32_t GetLastUseFrameIndex() const
     {
@@ -2946,9 +2976,9 @@ public:
     */
     bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
 
-    void OwnAllocCalcStatsInfo(VmaStatInfo& outInfo)
+    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
     {
-        VMA_ASSERT(m_Type == ALLOCATION_TYPE_OWN);
+        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
         outInfo.blockCount = 1;
         outInfo.allocationCount = 1;
         outInfo.unusedRangeCount = 0;
@@ -2977,7 +3007,7 @@ private:
     };
 
     // Allocation for an object that has its own private VkDeviceMemory.
-    struct OwnAllocation
+    struct DedicatedAllocation
     {
         uint32_t m_MemoryTypeIndex;
         VkDeviceMemory m_hMemory;
@@ -2990,7 +3020,7 @@ private:
         // Allocation out of VmaDeviceMemoryBlock.
         BlockAllocation m_BlockAllocation;
         // Allocation for an object that has its own private VkDeviceMemory.
-        OwnAllocation m_OwnAllocation;
+        DedicatedAllocation m_DedicatedAllocation;
     };
 };
@@ -3436,6 +3466,7 @@ public:
 struct VmaAllocator_T
 {
     bool m_UseMutex;
+    bool m_UseKhrDedicatedAllocation;
     VkDevice m_hDevice;
     bool m_AllocationCallbacksSpecified;
     VkAllocationCallbacks m_AllocationCallbacks;
@@ -3456,8 +3487,8 @@ struct VmaAllocator_T
     // Each vector is sorted by memory (handle value).
     typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
-    AllocationVectorType* m_pOwnAllocations[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];
-    VMA_MUTEX m_OwnAllocationsMutex[VK_MAX_MEMORY_TYPES];
+    AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];
+    VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
 
     VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
     ~VmaAllocator_T();
@@ -3487,9 +3518,19 @@ struct VmaAllocator_T
         return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
     }
 
+    void GetBufferMemoryRequirements(
+        VkBuffer hBuffer,
+        VkMemoryRequirements& memReq,
+        bool& dedicatedAllocation) const;
+    void GetImageMemoryRequirements(
+        VkImage hImage,
+        VkMemoryRequirements& memReq,
+        bool& dedicatedAllocation) const;
+
     // Main allocation function.
     VkResult AllocateMemory(
         const VkMemoryRequirements& vkMemReq,
+        bool dedicatedAllocation,
         const VmaAllocationCreateInfo& createInfo,
         VmaSuballocationType suballocType,
         VmaAllocation* pAllocation);
@@ -3549,13 +3590,14 @@ private:
     VkResult AllocateMemoryOfType(
         const VkMemoryRequirements& vkMemReq,
+        bool dedicatedAllocation,
         const VmaAllocationCreateInfo& createInfo,
         uint32_t memTypeIndex,
         VmaSuballocationType suballocType,
         VmaAllocation* pAllocation);
 
     // Allocates and registers new VkDeviceMemory specifically for single allocation.
-    VkResult AllocateOwnMemory(
+    VkResult AllocateDedicatedMemory(
         VkDeviceSize size,
         VmaSuballocationType suballocType,
         uint32_t memTypeIndex,
@@ -3563,8 +3605,8 @@ private:
         void* pUserData,
         VmaAllocation* pAllocation);
 
-    // Tries to free pMemory as Own Memory. Returns true if found and freed.
-    void FreeOwnMemory(VmaAllocation allocation);
+    // Tries to free pMemory as Dedicated Memory. Returns true if found and freed.
+    void FreeDedicatedMemory(VmaAllocation allocation);
 };
 
 ////////////////////////////////////////////////////////////////////////////////
@@ -3952,7 +3994,7 @@ VkDeviceSize VmaAllocation_T::GetOffset() const
     {
     case ALLOCATION_TYPE_BLOCK:
         return m_BlockAllocation.m_Offset;
-    case ALLOCATION_TYPE_OWN:
+    case ALLOCATION_TYPE_DEDICATED:
         return 0;
     default:
         VMA_ASSERT(0);
@@ -3966,8 +4008,8 @@ VkDeviceMemory VmaAllocation_T::GetMemory() const
     {
     case ALLOCATION_TYPE_BLOCK:
         return m_BlockAllocation.m_Block->m_hMemory;
-    case ALLOCATION_TYPE_OWN:
-        return m_OwnAllocation.m_hMemory;
+    case ALLOCATION_TYPE_DEDICATED:
+        return m_DedicatedAllocation.m_hMemory;
     default:
         VMA_ASSERT(0);
         return VK_NULL_HANDLE;
@@ -3980,8 +4022,8 @@ uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
     {
     case ALLOCATION_TYPE_BLOCK:
         return m_BlockAllocation.m_Block->m_MemoryTypeIndex;
-    case ALLOCATION_TYPE_OWN:
-        return m_OwnAllocation.m_MemoryTypeIndex;
+    case ALLOCATION_TYPE_DEDICATED:
+        return m_DedicatedAllocation.m_MemoryTypeIndex;
     default:
         VMA_ASSERT(0);
         return UINT32_MAX;
@@ -3994,8 +4036,8 @@ VMA_BLOCK_VECTOR_TYPE VmaAllocation_T::GetBlockVectorType() const
     {
     case ALLOCATION_TYPE_BLOCK:
         return m_BlockAllocation.m_Block->m_BlockVectorType;
-    case ALLOCATION_TYPE_OWN:
-        return (m_OwnAllocation.m_PersistentMap ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED);
+    case ALLOCATION_TYPE_DEDICATED:
+        return (m_DedicatedAllocation.m_PersistentMap ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED);
     default:
         VMA_ASSERT(0);
         return VMA_BLOCK_VECTOR_TYPE_COUNT;
@@ -4016,8 +4058,8 @@ void* VmaAllocation_T::GetMappedData() const
             return VMA_NULL;
         }
         break;
-    case ALLOCATION_TYPE_OWN:
-        return m_OwnAllocation.m_pMappedData;
+    case ALLOCATION_TYPE_DEDICATED:
+        return m_DedicatedAllocation.m_pMappedData;
     default:
         VMA_ASSERT(0);
         return VMA_NULL;
@@ -4030,7 +4072,7 @@ bool VmaAllocation_T::CanBecomeLost() const
     {
     case ALLOCATION_TYPE_BLOCK:
         return m_BlockAllocation.m_CanBecomeLost;
-    case ALLOCATION_TYPE_OWN:
+    case ALLOCATION_TYPE_DEDICATED:
         return false;
     default:
         VMA_ASSERT(0);
@@ -4044,29 +4086,29 @@ VmaPool VmaAllocation_T::GetPool() const
     return m_BlockAllocation.m_hPool;
 }
 
-VkResult VmaAllocation_T::OwnAllocMapPersistentlyMappedMemory(VmaAllocator hAllocator)
+VkResult VmaAllocation_T::DedicatedAllocMapPersistentlyMappedMemory(VmaAllocator hAllocator)
 {
-    VMA_ASSERT(m_Type == ALLOCATION_TYPE_OWN);
-    if(m_OwnAllocation.m_PersistentMap)
+    VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
+    if(m_DedicatedAllocation.m_PersistentMap)
     {
         return (*hAllocator->GetVulkanFunctions().vkMapMemory)(
             hAllocator->m_hDevice,
-            m_OwnAllocation.m_hMemory,
+            m_DedicatedAllocation.m_hMemory,
             0,
             VK_WHOLE_SIZE,
             0,
-            &m_OwnAllocation.m_pMappedData);
+            &m_DedicatedAllocation.m_pMappedData);
     }
     return VK_SUCCESS;
 }
-void VmaAllocation_T::OwnAllocUnmapPersistentlyMappedMemory(VmaAllocator hAllocator)
+void VmaAllocation_T::DedicatedAllocUnmapPersistentlyMappedMemory(VmaAllocator hAllocator)
 {
-    VMA_ASSERT(m_Type == ALLOCATION_TYPE_OWN);
-    if(m_OwnAllocation.m_pMappedData)
+    VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
+    if(m_DedicatedAllocation.m_pMappedData)
     {
-        VMA_ASSERT(m_OwnAllocation.m_PersistentMap);
-        (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_OwnAllocation.m_hMemory);
-        m_OwnAllocation.m_pMappedData = VMA_NULL;
+        VMA_ASSERT(m_DedicatedAllocation.m_PersistentMap);
+        (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_DedicatedAllocation.m_hMemory);
+        m_DedicatedAllocation.m_pMappedData = VMA_NULL;
     }
 }
 
@@ -6173,6 +6215,7 @@ bool VmaDefragmentator::MoveMakesSense(
 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
     m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
+    m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_KHR_DEDICATED_ALLOCATION_BIT) != 0),
     m_PhysicalDevice(pCreateInfo->physicalDevice),
     m_hDevice(pCreateInfo->device),
     m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
@@ -6191,7 +6234,7 @@ VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
     memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
 
     memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
-    memset(&m_pOwnAllocations, 0, sizeof(m_pOwnAllocations));
+    memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
 
     for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
     {
@@ -6248,7 +6291,7 @@ VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
                 false); // isCustomPool
             // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
             // because minBlockCount is 0.
-            m_pOwnAllocations[memTypeIndex][blockVectorTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
+            m_pDedicatedAllocations[memTypeIndex][blockVectorTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
         }
     }
 }
@@ -6261,7 +6304,7 @@ VmaAllocator_T::~VmaAllocator_T()
     {
         for(size_t j = VMA_BLOCK_VECTOR_TYPE_COUNT; j--; )
         {
-            vma_delete(this, m_pOwnAllocations[i][j]);
+            vma_delete(this, m_pDedicatedAllocations[i][j]);
             vma_delete(this, m_pBlockVectors[i][j]);
         }
     }
@@ -6284,13 +6327,35 @@ void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunc
     m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
     m_VulkanFunctions.vkCreateImage = &vkCreateImage;
     m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
+    // Ignoring vkGetBufferMemoryRequirements2KHR.
+    // Ignoring vkGetImageMemoryRequirements2KHR.
 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
 
+#define VMA_COPY_IF_NOT_NULL(funcName) \
+    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
+
     if(pVulkanFunctions != VMA_NULL)
     {
-        m_VulkanFunctions = *pVulkanFunctions;
+        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
+        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
+        VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
+        VMA_COPY_IF_NOT_NULL(vkFreeMemory);
+        VMA_COPY_IF_NOT_NULL(vkMapMemory);
+        VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
+        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
+        VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
+        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
+        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
+        VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
+        VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
+        VMA_COPY_IF_NOT_NULL(vkCreateImage);
+        VMA_COPY_IF_NOT_NULL(vkDestroyImage);
+        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
+        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
     }
 
+#undef VMA_COPY_IF_NOT_NULL
+
     // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
     // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
     VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
@@ -6307,6 +6372,11 @@ void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunc
     VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
     VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
     VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
+    if(m_UseKhrDedicatedAllocation)
+    {
+        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
+        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
+    }
 }
 
 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
@@ -6319,6 +6389,7 @@ VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
 
 VkResult VmaAllocator_T::AllocateMemoryOfType(
     const VkMemoryRequirements& vkMemReq,
+    bool dedicatedAllocation,
     const VmaAllocationCreateInfo& createInfo,
     uint32_t memTypeIndex,
     VmaSuballocationType suballocType,
@@ -6333,17 +6404,18 @@ VkResult VmaAllocator_T::AllocateMemoryOfType(
 
     VmaAllocationCreateInfo finalCreateInfo = createInfo;
 
-    if(VMA_DEBUG_ALWAYS_OWN_MEMORY)
-    {
-        finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT;
-    }
-
-    // Heuristics: Allocate own memory if requested size is greater than half of preferred block size.
     const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
-    if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
-        vkMemReq.size > preferredBlockSize / 2)
+    bool preferDedicatedMemory =
+        VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
+        dedicatedAllocation ||
+        // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
+        vkMemReq.size > preferredBlockSize / 2;
+
+    if(preferDedicatedMemory &&
+        (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
+        finalCreateInfo.pool == VK_NULL_HANDLE)
     {
-        finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT;
+        finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
     }
 
     // If memory type is not HOST_VISIBLE, disable PERSISTENT_MAP.
@@ -6353,7 +6425,7 @@ VkResult VmaAllocator_T::AllocateMemoryOfType(
         finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT;
     }
 
-    if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT) != 0)
+    if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    {
         if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
         {
@@ -6361,7 +6433,7 @@ VkResult VmaAllocator_T::AllocateMemoryOfType(
         }
         else
         {
-            return AllocateOwnMemory(
+            return AllocateDedicatedMemory(
                 vkMemReq.size,
                 suballocType,
                 memTypeIndex,
@@ -6384,14 +6456,14 @@ VkResult VmaAllocator_T::AllocateMemoryOfType(
             return res;
         }
 
-        // 5. Try own memory.
+        // 5. Try dedicated memory.
         if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
         {
             return VK_ERROR_OUT_OF_DEVICE_MEMORY;
         }
         else
        {
-            res = AllocateOwnMemory(
+            res = AllocateDedicatedMemory(
                 vkMemReq.size,
                 suballocType,
                 memTypeIndex,
@@ -6400,8 +6472,8 @@ VkResult VmaAllocator_T::AllocateMemoryOfType(
                 pAllocation);
             if(res == VK_SUCCESS)
             {
-                // Succeeded: AllocateOwnMemory function already filled pMemory, nothing more to do here.
-                VMA_DEBUG_LOG(" Allocated as OwnMemory");
+                // Succeeded: AllocateDedicatedMemory function already filled pMemory, nothing more to do here.
+                VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
                 return VK_SUCCESS;
             }
             else
@@ -6414,7 +6486,7 @@ VkResult VmaAllocator_T::AllocateMemoryOfType(
     }
 }
 
-VkResult VmaAllocator_T::AllocateOwnMemory(
+VkResult VmaAllocator_T::AllocateDedicatedMemory(
     VkDeviceSize size,
     VmaSuballocationType suballocType,
     uint32_t memTypeIndex,
@@ -6459,37 +6531,96 @@ VkResult VmaAllocator_T::AllocateOwnMemory(
     }
 
     *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load());
-    (*pAllocation)->InitOwnAllocation(memTypeIndex, hMemory, suballocType, map, pMappedData, size, pUserData);
+    (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, map, pMappedData, size, pUserData);
 
-    // Register it in m_pOwnAllocations.
+    // Register it in m_pDedicatedAllocations.
     {
-        VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
-        AllocationVectorType* pOwnAllocations = m_pOwnAllocations[memTypeIndex][map ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED];
-        VMA_ASSERT(pOwnAllocations);
-        VmaVectorInsertSorted(*pOwnAllocations, *pAllocation);
+        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
+        AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex][map ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED];
+        VMA_ASSERT(pDedicatedAllocations);
+        VmaVectorInsertSorted(*pDedicatedAllocations, *pAllocation);
     }
 
-    VMA_DEBUG_LOG(" Allocated OwnMemory MemoryTypeIndex=#%u", memTypeIndex);
+    VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
 
     return VK_SUCCESS;
 }
 
+void VmaAllocator_T::GetBufferMemoryRequirements(
+    VkBuffer hBuffer,
+    VkMemoryRequirements& memReq,
+    bool& dedicatedAllocation) const
+{
+    if(m_UseKhrDedicatedAllocation)
+    {
+        VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
+        memReqInfo.buffer = hBuffer;
+
+        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
+
+        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
+        memReq2.pNext = &memDedicatedReq;
+
+        (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
+
+        memReq = memReq2.memoryRequirements;
+        dedicatedAllocation =
+            (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE) ||
+            (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
+    }
+    else
+    {
+        (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
+        dedicatedAllocation = false;
+    }
+}
+
+void VmaAllocator_T::GetImageMemoryRequirements(
+    VkImage hImage,
+    VkMemoryRequirements& memReq,
+    bool& dedicatedAllocation) const
+{
+    if(m_UseKhrDedicatedAllocation)
+    {
+        VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
+        memReqInfo.image = hImage;
+
+        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
+
+        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
+        memReq2.pNext = &memDedicatedReq;
+
+        (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
+
+        memReq = memReq2.memoryRequirements;
+        dedicatedAllocation =
+            (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE) ||
+            (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
+    }
+    else
+    {
+        (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
+        dedicatedAllocation = false;
+    }
+}
+
 VkResult VmaAllocator_T::AllocateMemory(
     const VkMemoryRequirements& vkMemReq,
+    bool dedicatedAllocation,
     const VmaAllocationCreateInfo& createInfo,
     VmaSuballocationType suballocType,
     VmaAllocation* pAllocation)
 {
-    if((createInfo.flags & VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT) != 0 &&
+    if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
        (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
     {
-        VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
+        VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
         return VK_ERROR_OUT_OF_DEVICE_MEMORY;
     }
     if((createInfo.pool != VK_NULL_HANDLE) &&
-        ((createInfo.flags & (VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT)) != 0))
+        ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
     {
-        VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT when pool != null is invalid.");
+        VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
         return VK_ERROR_OUT_OF_DEVICE_MEMORY;
     }
@@ -6511,7 +6642,7 @@ VkResult VmaAllocator_T::AllocateMemory(
     VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
     if(res == VK_SUCCESS)
     {
-        res = AllocateMemoryOfType(vkMemReq, createInfo, memTypeIndex, suballocType, pAllocation);
+        res = AllocateMemoryOfType(vkMemReq, dedicatedAllocation, createInfo, memTypeIndex, suballocType, pAllocation);
         // Succeeded on first try.
         if(res == VK_SUCCESS)
         {
@@ -6528,7 +6659,7 @@ VkResult VmaAllocator_T::AllocateMemory(
             res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
             if(res == VK_SUCCESS)
             {
-                res = AllocateMemoryOfType(vkMemReq, createInfo, memTypeIndex, suballocType, pAllocation);
+                res = AllocateMemoryOfType(vkMemReq, dedicatedAllocation, createInfo, memTypeIndex, suballocType, pAllocation);
                 // Allocation from this alternative memory type succeeded.
                 if(res == VK_SUCCESS)
                 {
@@ -6577,8 +6708,8 @@ void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
             pBlockVector->Free(allocation);
         }
         break;
-    case VmaAllocation_T::ALLOCATION_TYPE_OWN:
-        FreeOwnMemory(allocation);
+    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+        FreeDedicatedMemory(allocation);
         break;
     default:
         VMA_ASSERT(0);
@@ -6618,19 +6749,19 @@ void VmaAllocator_T::CalculateStats(VmaStats* pStats)
         }
     }
 
-    // Process own allocations.
+    // Process dedicated allocations.
     for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
     {
         const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
-        VmaMutexLock ownAllocationsLock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
+        VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
         for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
         {
-            AllocationVectorType* const pOwnAllocVector = m_pOwnAllocations[memTypeIndex][blockVectorType];
-            VMA_ASSERT(pOwnAllocVector);
-            for(size_t allocIndex = 0, allocCount = pOwnAllocVector->size(); allocIndex < allocCount; ++allocIndex)
+            AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex][blockVectorType];
+            VMA_ASSERT(pDedicatedAllocVector);
+            for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
             {
                 VmaStatInfo allocationStatInfo;
-                (*pOwnAllocVector)[allocIndex]->OwnAllocCalcStatsInfo(allocationStatInfo);
+                (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
                 VmaAddStatInfo(pStats->total, allocationStatInfo);
                 VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
                 VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
@@ -6660,14 +6791,14 @@ void VmaAllocator_T::UnmapPersistentlyMappedMemory()
             if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0 &&
                 (memFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
             {
-                // Process OwnAllocations.
+                // Process DedicatedAllocations.
                 {
-                    VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
-                    AllocationVectorType* pOwnAllocationsVector = m_pOwnAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
-                    for(size_t ownAllocIndex = pOwnAllocationsVector->size(); ownAllocIndex--; )
+                    VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
+                    AllocationVectorType* pDedicatedAllocationsVector = m_pDedicatedAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
+                    for(size_t dedicatedAllocIndex = pDedicatedAllocationsVector->size(); dedicatedAllocIndex--; )
                     {
-                        VmaAllocation hAlloc = (*pOwnAllocationsVector)[ownAllocIndex];
-                        hAlloc->OwnAllocUnmapPersistentlyMappedMemory(this);
+                        VmaAllocation hAlloc = (*pDedicatedAllocationsVector)[dedicatedAllocIndex];
+                        hAlloc->DedicatedAllocUnmapPersistentlyMappedMemory(this);
                     }
                 }
 
@@ -6714,14 +6845,14 @@ VkResult VmaAllocator_T::MapPersistentlyMappedMemory()
             if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0 &&
                 (memFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
             {
-                // Process OwnAllocations.
+                // Process DedicatedAllocations.
                 {
-                    VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
-                    AllocationVectorType* pAllocationsVector = m_pOwnAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
-                    for(size_t ownAllocIndex = 0, ownAllocCount = pAllocationsVector->size(); ownAllocIndex < ownAllocCount; ++ownAllocIndex)
+                    VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
+                    AllocationVectorType* pAllocationsVector = m_pDedicatedAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
+                    for(size_t dedicatedAllocIndex = 0, dedicatedAllocCount = pAllocationsVector->size(); dedicatedAllocIndex < dedicatedAllocCount; ++dedicatedAllocIndex)
                     {
-                        VmaAllocation hAlloc = (*pAllocationsVector)[ownAllocIndex];
-                        hAlloc->OwnAllocMapPersistentlyMappedMemory(this);
+                        VmaAllocation hAlloc = (*pAllocationsVector)[dedicatedAllocIndex];
+                        hAlloc->DedicatedAllocMapPersistentlyMappedMemory(this);
                     }
                 }
 
@@ -6777,7 +6908,7 @@ VkResult VmaAllocator_T::Defragment(
         VmaAllocation hAlloc = pAllocations[allocIndex];
         VMA_ASSERT(hAlloc);
         const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
-        // OwnAlloc cannot be defragmented.
+        // DedicatedAlloc cannot be defragmented.
         if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
             // Only HOST_VISIBLE memory types can be defragmented.
             ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
@@ -7045,16 +7176,16 @@ void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, Vk
     }
 }
 
-void VmaAllocator_T::FreeOwnMemory(VmaAllocation allocation)
+void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
 {
-    VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_OWN);
+    VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
 
     const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
     {
-        VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
-        AllocationVectorType* const pOwnAllocations = m_pOwnAllocations[memTypeIndex][allocation->GetBlockVectorType()];
-        VMA_ASSERT(pOwnAllocations);
-        bool success = VmaVectorRemoveSorted(*pOwnAllocations, allocation);
+        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
+        AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex][allocation->GetBlockVectorType()];
+        VMA_ASSERT(pDedicatedAllocations);
+        bool success = VmaVectorRemoveSorted(*pDedicatedAllocations, allocation);
         VMA_ASSERT(success);
     }
 
@@ -7067,27 +7198,27 @@ void VmaAllocator_T::FreeOwnMemory(VmaAllocation allocation)
     FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
 
-    VMA_DEBUG_LOG(" Freed OwnMemory MemoryTypeIndex=%u", memTypeIndex);
+    VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
 }
 
 #if VMA_STATS_STRING_ENABLED
 
 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
 {
-    bool ownAllocationsStarted = false;
+    bool dedicatedAllocationsStarted = false;
     for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
     {
-        VmaMutexLock ownAllocationsLock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
+        VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
         for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
         {
-            AllocationVectorType* const pOwnAllocVector = m_pOwnAllocations[memTypeIndex][blockVectorType];
-            VMA_ASSERT(pOwnAllocVector);
-            if(pOwnAllocVector->empty() == false)
+            AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex][blockVectorType];
+            VMA_ASSERT(pDedicatedAllocVector);
+            if(pDedicatedAllocVector->empty() == false)
             {
-                if(ownAllocationsStarted == false)
+                if(dedicatedAllocationsStarted == false)
                 {
-                    ownAllocationsStarted = true;
-                    json.WriteString("OwnAllocations");
+                    dedicatedAllocationsStarted = true;
+                    json.WriteString("DedicatedAllocations");
                     json.BeginObject();
                 }
 
@@ -7101,9 +7232,9 @@ void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
 
                 json.BeginArray();
 
-                for(size_t i = 0; i < pOwnAllocVector->size(); ++i)
+                for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
                 {
-                    const VmaAllocation hAlloc = (*pOwnAllocVector)[i];
+                    const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
                     json.BeginObject(true);
 
                     json.WriteString("Size");
@@ -7119,7 +7250,7 @@ void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
             }
         }
     }
-    if(ownAllocationsStarted)
+    if(dedicatedAllocationsStarted)
     {
         json.EndObject();
     }
@@ -7185,10 +7316,12 @@ static VkResult AllocateMemoryForImage(
     VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
 
     VkMemoryRequirements vkMemReq = {};
-    (*allocator->GetVulkanFunctions().vkGetImageMemoryRequirements)(allocator->m_hDevice, image, &vkMemReq);
+    bool dedicatedAllocation = false;
+    allocator->GetImageMemoryRequirements(image, vkMemReq, dedicatedAllocation);
 
     return allocator->AllocateMemory(
         vkMemReq,
+        dedicatedAllocation,
         *pAllocationCreateInfo,
         suballocType,
         pAllocation);
@@ -7531,6 +7664,7 @@ VkResult vmaAllocateMemory(
 
     VkResult result = allocator->AllocateMemory(
         *pVkMemoryRequirements,
+        false, // dedicatedAllocation
         *pCreateInfo,
         VMA_SUBALLOCATION_TYPE_UNKNOWN,
         pAllocation);
@@ -7557,10 +7691,12 @@ VkResult vmaAllocateMemoryForBuffer(
     VMA_DEBUG_GLOBAL_MUTEX_LOCK
 
     VkMemoryRequirements vkMemReq = {};
-    (*allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements)(allocator->m_hDevice, buffer, &vkMemReq);
+    bool dedicatedAllocation = false;
+    allocator->GetBufferMemoryRequirements(buffer, vkMemReq, dedicatedAllocation);
 
     VkResult result = allocator->AllocateMemory(
         vkMemReq,
+        dedicatedAllocation,
         *pCreateInfo,
         VMA_SUBALLOCATION_TYPE_BUFFER,
         pAllocation);
@@ -7740,11 +7876,13 @@ VkResult vmaCreateBuffer(
     {
         // 2. vkGetBufferMemoryRequirements.
        VkMemoryRequirements vkMemReq = {};
-        (*allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements)(allocator->m_hDevice, *pBuffer, &vkMemReq);
+        bool dedicatedAllocation = false;
+        allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq, dedicatedAllocation);
 
         // 3. Allocate memory using allocator.
         res = allocator->AllocateMemory(
             vkMemReq,
+            dedicatedAllocation,
             *pAllocationCreateInfo,
             VMA_SUBALLOCATION_TYPE_BUFFER,
             pAllocation);
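The core of the change above is the pNext-chain query that the new GetBufferMemoryRequirements/GetImageMemoryRequirements helpers perform. The standalone sketch below mirrors that pattern outside the library; QueryDedicatedRequirements is a hypothetical name, and the extension function pointer is assumed to have been fetched already (e.g. via vkGetDeviceProcAddr).

```cpp
#include <vulkan/vulkan.h>

// Returns true if the driver requires or prefers a dedicated allocation for
// `buffer`; the ordinary requirements are written to *pMemReq either way.
bool QueryDedicatedRequirements(
    VkDevice device,
    VkBuffer buffer,
    PFN_vkGetBufferMemoryRequirements2KHR pfnGetBufferMemoryRequirements2KHR,
    VkMemoryRequirements* pMemReq)
{
    VkBufferMemoryRequirementsInfo2KHR memReqInfo =
        { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
    memReqInfo.buffer = buffer;

    // Chaining VkMemoryDedicatedRequirementsKHR through pNext lets the driver
    // report its dedicated-allocation recommendation alongside the usual
    // size/alignment/memoryTypeBits in one call.
    VkMemoryDedicatedRequirementsKHR dedicatedReq =
        { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };

    VkMemoryRequirements2KHR memReq2 =
        { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    memReq2.pNext = &dedicatedReq;

    (*pfnGetBufferMemoryRequirements2KHR)(device, &memReqInfo, &memReq2);

    *pMemReq = memReq2.memoryRequirements;
    return dedicatedReq.requiresDedicatedAllocation != VK_FALSE ||
           dedicatedReq.prefersDedicatedAllocation != VK_FALSE;
}
```

Inside the library, the boolean produced this way is passed down into AllocateMemory, where it joins the existing heuristic (requested size greater than half of the preferred block size) in deciding whether to call AllocateDedicatedMemory.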
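Independent of the driver's recommendation, the renamed VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT (formerly VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT) still lets callers force a dedicated block explicitly. A hedged usage sketch, assuming `allocator` is a valid VmaAllocator such as the one created earlier:

```cpp
// Force a dedicated VkDeviceMemory block for one large buffer.
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 128ull * 1024 * 1024; // 128 MiB, an arbitrary example size
bufCreateInfo.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
// Renamed from VMA_ALLOCATION_CREATE_OWN_MEMORY_BIT in this release.
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;

VkBuffer buffer = VK_NULL_HANDLE;
VmaAllocation allocation = VK_NULL_HANDLE;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
                               &buffer, &allocation, nullptr);
// ... use the buffer ...
vmaDestroyBuffer(allocator, buffer, allocation);
```

Note that, per the asserts added in AllocateMemory above, this flag cannot be combined with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT or with a non-null VmaAllocationCreateInfo::pool.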