From 776ae0d5f8c421dc0ebd7eb8e797cf2953ba96de Mon Sep 17 00:00:00 2001 From: Adam Sawicki Date: Fri, 8 Jun 2018 17:11:12 +0200 Subject: [PATCH] Improved support for non-coherent memory. Added functions: vmaFlushAllocation, vmaInvalidateAllocation. nonCoherentAtomSize is respected automatically. Issue #27 Thanks @egdaniel ! Also added VmaVulkanFunctions::vkFlushMappedMemoryRanges, vkInvalidateMappedMemoryRanges. Fixed bug with uninitialized VmaPool_T::m_Id. --- README.md | 1 + docs/html/functions.html | 6 + docs/html/functions_vars.html | 6 + docs/html/globals.html | 6 + docs/html/globals_func.html | 6 + docs/html/index.html | 2 +- docs/html/memory_mapping.html | 7 +- docs/html/search/all_f.js | 4 + docs/html/search/functions_0.js | 2 + docs/html/search/variables_b.js | 2 + .../struct_vma_vulkan_functions-members.html | 12 +- docs/html/struct_vma_vulkan_functions.html | 32 ++ docs/html/vk__mem__alloc_8h.html | 106 +++++++ docs/html/vk__mem__alloc_8h_source.html | 218 +++++++------- src/vk_mem_alloc.h | 282 ++++++++++++++---- 15 files changed, 521 insertions(+), 171 deletions(-) diff --git a/README.md b/README.md index 1191012..7b33ebb 100644 --- a/README.md +++ b/README.md @@ -41,6 +41,7 @@ Additional features: - Configuration: Fill optional members of CreateInfo structure to provide custom CPU memory allocator, pointers to Vulkan functions and other parameters. - Customization: Predefine appropriate macros to provide your own implementation of all external facilities used by the library, from assert, mutex, and atomic, to vector and linked list. - Support for memory mapping, reference-counted internally. Support for persistently mapped memory: Just allocate with appropriate flag and you get access to mapped pointer. +- Support for non-coherent memory. Functions that flush/invalidate memory. nonCoherentAtomSize is respected automatically. - Custom memory pools: Create a pool with desired parameters (e.g. fixed or limited maximum size) and allocate memory out of it. - Support for VK_KHR_dedicated_allocation extension: Just enable it and it will be used automatically by the library. - Defragmentation: Call one function and let the library move data around to free some memory blocks and make your allocations better compacted. diff --git a/docs/html/functions.html b/docs/html/functions.html index 24ff073..0751d69 100644 --- a/docs/html/functions.html +++ b/docs/html/functions.html @@ -273,6 +273,9 @@ $(function() {
  • vkDestroyImage : VmaVulkanFunctions
  • +
  • vkFlushMappedMemoryRanges +: VmaVulkanFunctions +
  • vkFreeMemory : VmaVulkanFunctions
  • @@ -288,6 +291,9 @@ $(function() {
  • vkGetPhysicalDeviceProperties : VmaVulkanFunctions
  • +
  • vkInvalidateMappedMemoryRanges +: VmaVulkanFunctions +
  • vkMapMemory : VmaVulkanFunctions
  • diff --git a/docs/html/functions_vars.html b/docs/html/functions_vars.html index c7b0790..6ee6243 100644 --- a/docs/html/functions_vars.html +++ b/docs/html/functions_vars.html @@ -273,6 +273,9 @@ $(function() {
  • vkDestroyImage : VmaVulkanFunctions
  • +
  • vkFlushMappedMemoryRanges +: VmaVulkanFunctions +
  • vkFreeMemory : VmaVulkanFunctions
  • @@ -288,6 +291,9 @@ $(function() {
  • vkGetPhysicalDeviceProperties : VmaVulkanFunctions
  • +
  • vkInvalidateMappedMemoryRanges +: VmaVulkanFunctions +
  • vkMapMemory : VmaVulkanFunctions
  • diff --git a/docs/html/globals.html b/docs/html/globals.html index d583338..f53f299 100644 --- a/docs/html/globals.html +++ b/docs/html/globals.html @@ -221,6 +221,9 @@ $(function() {
  • vmaFindMemoryTypeIndexForImageInfo() : vk_mem_alloc.h
  • +
  • vmaFlushAllocation() +: vk_mem_alloc.h +
  • vmaFreeMemory() : vk_mem_alloc.h
  • @@ -242,6 +245,9 @@ $(function() {
  • vmaGetPoolStats() : vk_mem_alloc.h
  • +
  • vmaInvalidateAllocation() +: vk_mem_alloc.h +
  • vmaMakePoolAllocationsLost() : vk_mem_alloc.h
  • diff --git a/docs/html/globals_func.html b/docs/html/globals_func.html index 55ac717..798f803 100644 --- a/docs/html/globals_func.html +++ b/docs/html/globals_func.html @@ -121,6 +121,9 @@ $(function() {
  • vmaFindMemoryTypeIndexForImageInfo() : vk_mem_alloc.h
  • +
  • vmaFlushAllocation() +: vk_mem_alloc.h +
  • vmaFreeMemory() : vk_mem_alloc.h
  • @@ -142,6 +145,9 @@ $(function() {
  • vmaGetPoolStats() : vk_mem_alloc.h
  • +
  • vmaInvalidateAllocation() +: vk_mem_alloc.h +
  • vmaMakePoolAllocationsLost() : vk_mem_alloc.h
  • diff --git a/docs/html/index.html b/docs/html/index.html index 95134bc..0df3adf 100644 --- a/docs/html/index.html +++ b/docs/html/index.html @@ -62,7 +62,7 @@ $(function() {
    Vulkan Memory Allocator
    -

    Version 2.0.0 (2018-03-19)

    +

    Version 2.1.0-alpha.1 (2018-06-04)

    Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
    License: MIT

    Documentation of all members: vk_mem_alloc.h

    diff --git a/docs/html/memory_mapping.html b/docs/html/memory_mapping.html index 9787ab5..fa97ad7 100644 --- a/docs/html/memory_mapping.html +++ b/docs/html/memory_mapping.html @@ -80,9 +80,10 @@ Persistently mapped memory

    Cache control

    -

    Memory in Vulkan doesn't need to be unmapped before using it on GPU, but unless a memory types has VK_MEMORY_PROPERTY_HOST_COHERENT_BIT flag set, you need to manually invalidate cache before reading of mapped pointer using function vkvkInvalidateMappedMemoryRanges() and flush cache after writing to mapped pointer using function vkFlushMappedMemoryRanges(). Example:

    -
    memcpy(allocInfo.pMappedData, &constantBufferData, sizeof(constantBufferData));
    VkMemoryPropertyFlags memFlags;
    vmaGetMemoryTypeProperties(allocator, allocInfo.memoryType, &memFlags);
    if((memFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0)
    {
    VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
    memRange.memory = allocInfo.deviceMemory;
    memRange.offset = allocInfo.offset;
    memRange.size = allocInfo.size;
    vkFlushMappedMemoryRanges(device, 1, &memRange);
    }

    Please note that memory allocated with VMA_MEMORY_USAGE_CPU_ONLY is guaranteed to be host coherent.

    -

    Also, Windows drivers from all 3 PC GPU vendors (AMD, Intel, NVIDIA) currently provide VK_MEMORY_PROPERTY_HOST_COHERENT_BIT flag on all memory types that are VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, so on this platform you may not need to bother.

    +

Memory in Vulkan doesn't need to be unmapped before using it on GPU, but unless a memory type has the VK_MEMORY_PROPERTY_HOST_COHERENT_BIT flag set, you need to manually invalidate the cache before reading a mapped pointer and flush the cache after writing to it. Vulkan provides the functions vkFlushMappedMemoryRanges() and vkInvalidateMappedMemoryRanges() for this purpose, but this library provides more convenient functions that operate on a given allocation object: vmaFlushAllocation() and vmaInvalidateAllocation().

    +

Regions of memory specified for flush/invalidate must be aligned to VkPhysicalDeviceLimits::nonCoherentAtomSize. This is automatically ensured by the library. In any memory type that is HOST_VISIBLE but not HOST_COHERENT, all allocations within blocks are aligned to this value, so their offsets are always a multiple of nonCoherentAtomSize and two different allocations never share the same "line" of this size.
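A minimal sketch of how this might look in practice with a persistently mapped allocation (allocator, alloc, allocInfo and the data variables are assumed to already exist; the library rounds the flushed/invalidated region to nonCoherentAtomSize internally):
// CPU writes data, then flushes it so it becomes visible to the GPU:
memcpy(allocInfo.pMappedData, &constantBufferData, sizeof(constantBufferData));
vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
// Later, before the CPU reads back data written by the GPU:
vmaInvalidateAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
memcpy(&readbackData, allocInfo.pMappedData, sizeof(readbackData));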

    +

    Please note that memory allocated with VMA_MEMORY_USAGE_CPU_ONLY is guaranteed to be HOST_COHERENT.

    +

Also, Windows drivers from all 3 PC GPU vendors (AMD, Intel, NVIDIA) currently provide the HOST_COHERENT flag on all memory types that are HOST_VISIBLE, so on this platform you may not need to bother.

    Finding out if memory is mappable

It may happen that your allocation ends up in memory that is HOST_VISIBLE (available for mapping) even though it wasn't explicitly requested. For example, the application may run on integrated graphics with unified memory (like Intel), or an allocation from video memory might have failed, so the library chose system memory as a fallback.
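A sketch of how this can be checked at runtime (assuming allocInfo has been filled for the allocation in question, e.g. by vmaGetAllocationInfo()):
VkMemoryPropertyFlags memFlags;
vmaGetMemoryTypeProperties(allocator, allocInfo.memoryType, &memFlags);
if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
{
    // Memory is mappable - vmaMapMemory() can be used on this allocation.
}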

    diff --git a/docs/html/search/all_f.js b/docs/html/search/all_f.js index 8053b68..c9b3721 100644 --- a/docs/html/search/all_f.js +++ b/docs/html/search/all_f.js @@ -10,11 +10,13 @@ var searchData= ['vkcreateimage',['vkCreateImage',['../struct_vma_vulkan_functions.html#a23ebe70be515b9b5010a1d691200e325',1,'VmaVulkanFunctions']]], ['vkdestroybuffer',['vkDestroyBuffer',['../struct_vma_vulkan_functions.html#a7e054606faddb07f0e8556f3ed317d45',1,'VmaVulkanFunctions']]], ['vkdestroyimage',['vkDestroyImage',['../struct_vma_vulkan_functions.html#a90b898227039b1dcb3520f6e91f09ffa',1,'VmaVulkanFunctions']]], + ['vkflushmappedmemoryranges',['vkFlushMappedMemoryRanges',['../struct_vma_vulkan_functions.html#a33c322f4c4ad2810f8a9c97a277572f9',1,'VmaVulkanFunctions']]], ['vkfreememory',['vkFreeMemory',['../struct_vma_vulkan_functions.html#a4c658701778564d62034255b5dda91b4',1,'VmaVulkanFunctions']]], ['vkgetbuffermemoryrequirements',['vkGetBufferMemoryRequirements',['../struct_vma_vulkan_functions.html#a5b92901df89a4194b0d12f6071d4d143',1,'VmaVulkanFunctions']]], ['vkgetimagememoryrequirements',['vkGetImageMemoryRequirements',['../struct_vma_vulkan_functions.html#a475f6f49f8debe4d10800592606d53f4',1,'VmaVulkanFunctions']]], ['vkgetphysicaldevicememoryproperties',['vkGetPhysicalDeviceMemoryProperties',['../struct_vma_vulkan_functions.html#a60d25c33bba06bb8592e6875cbaa9830',1,'VmaVulkanFunctions']]], ['vkgetphysicaldeviceproperties',['vkGetPhysicalDeviceProperties',['../struct_vma_vulkan_functions.html#a77b7a74082823e865dd6546623468f96',1,'VmaVulkanFunctions']]], + ['vkinvalidatemappedmemoryranges',['vkInvalidateMappedMemoryRanges',['../struct_vma_vulkan_functions.html#a5c1093bc32386a8060c37c9f282078a1',1,'VmaVulkanFunctions']]], ['vkmapmemory',['vkMapMemory',['../struct_vma_vulkan_functions.html#ab5c1f38dea3a2cf00dc9eb4f57218c49',1,'VmaVulkanFunctions']]], ['vkunmapmemory',['vkUnmapMemory',['../struct_vma_vulkan_functions.html#acc798589736f0becb317fc2196c1d8b9',1,'VmaVulkanFunctions']]], ['vma_5fallocation_5fcreate_5fcan_5fbecome_5flost_5fbit',['VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT',['../vk__mem__alloc_8h.html#ad9889c10c798b040d59c92f257cae597a5f436af6c8fe8540573a6d22627a6fd2',1,'vk_mem_alloc.h']]], @@ -69,6 +71,7 @@ var searchData= ['vmafindmemorytypeindex',['vmaFindMemoryTypeIndex',['../vk__mem__alloc_8h.html#aef15a94b58fbcb0fe706d5720e84a74a',1,'vk_mem_alloc.h']]], ['vmafindmemorytypeindexforbufferinfo',['vmaFindMemoryTypeIndexForBufferInfo',['../vk__mem__alloc_8h.html#ae790ab9ffaf7667fb8f62523e6897888',1,'vk_mem_alloc.h']]], ['vmafindmemorytypeindexforimageinfo',['vmaFindMemoryTypeIndexForImageInfo',['../vk__mem__alloc_8h.html#a088da83d8eaf3ce9056d9ea0b981d472',1,'vk_mem_alloc.h']]], + ['vmaflushallocation',['vmaFlushAllocation',['../vk__mem__alloc_8h.html#abc34ee6f021f459aff885f3758c435de',1,'vk_mem_alloc.h']]], ['vmafreememory',['vmaFreeMemory',['../vk__mem__alloc_8h.html#a11f0fbc034fa81a4efedd73d61ce7568',1,'vk_mem_alloc.h']]], ['vmafreestatsstring',['vmaFreeStatsString',['../vk__mem__alloc_8h.html#a3104eb30d8122c84dd8541063f145288',1,'vk_mem_alloc.h']]], ['vmagetallocationinfo',['vmaGetAllocationInfo',['../vk__mem__alloc_8h.html#a86dd08aba8633bfa4ad0df2e76481d8b',1,'vk_mem_alloc.h']]], @@ -76,6 +79,7 @@ var searchData= ['vmagetmemorytypeproperties',['vmaGetMemoryTypeProperties',['../vk__mem__alloc_8h.html#a8701444752eb5de4464adb5a2b514bca',1,'vk_mem_alloc.h']]], 
['vmagetphysicaldeviceproperties',['vmaGetPhysicalDeviceProperties',['../vk__mem__alloc_8h.html#aecabf7b6e91ea87d0316fa0a9e014fe0',1,'vk_mem_alloc.h']]], ['vmagetpoolstats',['vmaGetPoolStats',['../vk__mem__alloc_8h.html#ae8bf76997b234ef68aad922616df4153',1,'vk_mem_alloc.h']]], + ['vmainvalidateallocation',['vmaInvalidateAllocation',['../vk__mem__alloc_8h.html#a0d0eb0c1102268fa9a476d12ecbe4006',1,'vk_mem_alloc.h']]], ['vmamakepoolallocationslost',['vmaMakePoolAllocationsLost',['../vk__mem__alloc_8h.html#a736bd6cbda886f36c891727e73bd4024',1,'vk_mem_alloc.h']]], ['vmamapmemory',['vmaMapMemory',['../vk__mem__alloc_8h.html#ad5bd1243512d099706de88168992f069',1,'vk_mem_alloc.h']]], ['vmamemoryusage',['VmaMemoryUsage',['../vk__mem__alloc_8h.html#aa5846affa1e9da3800e3e78fae2305cc',1,'VmaMemoryUsage(): vk_mem_alloc.h'],['../vk__mem__alloc_8h.html#ad63b2113c0bfdbeade1cb498f5a8580d',1,'VmaMemoryUsage(): vk_mem_alloc.h']]], diff --git a/docs/html/search/functions_0.js b/docs/html/search/functions_0.js index efddc9f..2b8caad 100644 --- a/docs/html/search/functions_0.js +++ b/docs/html/search/functions_0.js @@ -20,6 +20,7 @@ var searchData= ['vmafindmemorytypeindex',['vmaFindMemoryTypeIndex',['../vk__mem__alloc_8h.html#aef15a94b58fbcb0fe706d5720e84a74a',1,'vk_mem_alloc.h']]], ['vmafindmemorytypeindexforbufferinfo',['vmaFindMemoryTypeIndexForBufferInfo',['../vk__mem__alloc_8h.html#ae790ab9ffaf7667fb8f62523e6897888',1,'vk_mem_alloc.h']]], ['vmafindmemorytypeindexforimageinfo',['vmaFindMemoryTypeIndexForImageInfo',['../vk__mem__alloc_8h.html#a088da83d8eaf3ce9056d9ea0b981d472',1,'vk_mem_alloc.h']]], + ['vmaflushallocation',['vmaFlushAllocation',['../vk__mem__alloc_8h.html#abc34ee6f021f459aff885f3758c435de',1,'vk_mem_alloc.h']]], ['vmafreememory',['vmaFreeMemory',['../vk__mem__alloc_8h.html#a11f0fbc034fa81a4efedd73d61ce7568',1,'vk_mem_alloc.h']]], ['vmafreestatsstring',['vmaFreeStatsString',['../vk__mem__alloc_8h.html#a3104eb30d8122c84dd8541063f145288',1,'vk_mem_alloc.h']]], ['vmagetallocationinfo',['vmaGetAllocationInfo',['../vk__mem__alloc_8h.html#a86dd08aba8633bfa4ad0df2e76481d8b',1,'vk_mem_alloc.h']]], @@ -27,6 +28,7 @@ var searchData= ['vmagetmemorytypeproperties',['vmaGetMemoryTypeProperties',['../vk__mem__alloc_8h.html#a8701444752eb5de4464adb5a2b514bca',1,'vk_mem_alloc.h']]], ['vmagetphysicaldeviceproperties',['vmaGetPhysicalDeviceProperties',['../vk__mem__alloc_8h.html#aecabf7b6e91ea87d0316fa0a9e014fe0',1,'vk_mem_alloc.h']]], ['vmagetpoolstats',['vmaGetPoolStats',['../vk__mem__alloc_8h.html#ae8bf76997b234ef68aad922616df4153',1,'vk_mem_alloc.h']]], + ['vmainvalidateallocation',['vmaInvalidateAllocation',['../vk__mem__alloc_8h.html#a0d0eb0c1102268fa9a476d12ecbe4006',1,'vk_mem_alloc.h']]], ['vmamakepoolallocationslost',['vmaMakePoolAllocationsLost',['../vk__mem__alloc_8h.html#a736bd6cbda886f36c891727e73bd4024',1,'vk_mem_alloc.h']]], ['vmamapmemory',['vmaMapMemory',['../vk__mem__alloc_8h.html#ad5bd1243512d099706de88168992f069',1,'vk_mem_alloc.h']]], ['vmasetallocationuserdata',['vmaSetAllocationUserData',['../vk__mem__alloc_8h.html#af9147d31ffc11d62fc187bde283ed14f',1,'vk_mem_alloc.h']]], diff --git a/docs/html/search/variables_b.js b/docs/html/search/variables_b.js index e40881e..c1444df 100644 --- a/docs/html/search/variables_b.js +++ b/docs/html/search/variables_b.js @@ -7,11 +7,13 @@ var searchData= ['vkcreateimage',['vkCreateImage',['../struct_vma_vulkan_functions.html#a23ebe70be515b9b5010a1d691200e325',1,'VmaVulkanFunctions']]], 
['vkdestroybuffer',['vkDestroyBuffer',['../struct_vma_vulkan_functions.html#a7e054606faddb07f0e8556f3ed317d45',1,'VmaVulkanFunctions']]], ['vkdestroyimage',['vkDestroyImage',['../struct_vma_vulkan_functions.html#a90b898227039b1dcb3520f6e91f09ffa',1,'VmaVulkanFunctions']]], + ['vkflushmappedmemoryranges',['vkFlushMappedMemoryRanges',['../struct_vma_vulkan_functions.html#a33c322f4c4ad2810f8a9c97a277572f9',1,'VmaVulkanFunctions']]], ['vkfreememory',['vkFreeMemory',['../struct_vma_vulkan_functions.html#a4c658701778564d62034255b5dda91b4',1,'VmaVulkanFunctions']]], ['vkgetbuffermemoryrequirements',['vkGetBufferMemoryRequirements',['../struct_vma_vulkan_functions.html#a5b92901df89a4194b0d12f6071d4d143',1,'VmaVulkanFunctions']]], ['vkgetimagememoryrequirements',['vkGetImageMemoryRequirements',['../struct_vma_vulkan_functions.html#a475f6f49f8debe4d10800592606d53f4',1,'VmaVulkanFunctions']]], ['vkgetphysicaldevicememoryproperties',['vkGetPhysicalDeviceMemoryProperties',['../struct_vma_vulkan_functions.html#a60d25c33bba06bb8592e6875cbaa9830',1,'VmaVulkanFunctions']]], ['vkgetphysicaldeviceproperties',['vkGetPhysicalDeviceProperties',['../struct_vma_vulkan_functions.html#a77b7a74082823e865dd6546623468f96',1,'VmaVulkanFunctions']]], + ['vkinvalidatemappedmemoryranges',['vkInvalidateMappedMemoryRanges',['../struct_vma_vulkan_functions.html#a5c1093bc32386a8060c37c9f282078a1',1,'VmaVulkanFunctions']]], ['vkmapmemory',['vkMapMemory',['../struct_vma_vulkan_functions.html#ab5c1f38dea3a2cf00dc9eb4f57218c49',1,'VmaVulkanFunctions']]], ['vkunmapmemory',['vkUnmapMemory',['../struct_vma_vulkan_functions.html#acc798589736f0becb317fc2196c1d8b9',1,'VmaVulkanFunctions']]] ]; diff --git a/docs/html/struct_vma_vulkan_functions-members.html b/docs/html/struct_vma_vulkan_functions-members.html index 05d3e41..22699b3 100644 --- a/docs/html/struct_vma_vulkan_functions-members.html +++ b/docs/html/struct_vma_vulkan_functions-members.html @@ -72,11 +72,13 @@ $(function() { vkCreateImageVmaVulkanFunctions vkDestroyBufferVmaVulkanFunctions vkDestroyImageVmaVulkanFunctions - vkFreeMemoryVmaVulkanFunctions - vkGetBufferMemoryRequirementsVmaVulkanFunctions - vkGetImageMemoryRequirementsVmaVulkanFunctions - vkGetPhysicalDeviceMemoryPropertiesVmaVulkanFunctions - vkGetPhysicalDevicePropertiesVmaVulkanFunctions + vkFlushMappedMemoryRangesVmaVulkanFunctions + vkFreeMemoryVmaVulkanFunctions + vkGetBufferMemoryRequirementsVmaVulkanFunctions + vkGetImageMemoryRequirementsVmaVulkanFunctions + vkGetPhysicalDeviceMemoryPropertiesVmaVulkanFunctions + vkGetPhysicalDevicePropertiesVmaVulkanFunctions + vkInvalidateMappedMemoryRangesVmaVulkanFunctions vkMapMemoryVmaVulkanFunctions vkUnmapMemoryVmaVulkanFunctions
    diff --git a/docs/html/struct_vma_vulkan_functions.html b/docs/html/struct_vma_vulkan_functions.html index 2df59f5..5e49e4d 100644 --- a/docs/html/struct_vma_vulkan_functions.html +++ b/docs/html/struct_vma_vulkan_functions.html @@ -85,6 +85,10 @@ Public Attributes   PFN_vkUnmapMemory vkUnmapMemory   +PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges +  +PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges +  PFN_vkBindBufferMemory vkBindBufferMemory   PFN_vkBindImageMemory vkBindImageMemory @@ -202,6 +206,20 @@ Public Attributes
    +
    +
    + +

    ◆ vkFlushMappedMemoryRanges

    + +
    +
    + + + + +
    PFN_vkFlushMappedMemoryRanges VmaVulkanFunctions::vkFlushMappedMemoryRanges
    +
    +
    @@ -272,6 +290,20 @@ Public Attributes
    +
    + + +

    ◆ vkInvalidateMappedMemoryRanges

    + +
    +
    + + + + +
    PFN_vkInvalidateMappedMemoryRanges VmaVulkanFunctions::vkInvalidateMappedMemoryRanges
    +
    +
    diff --git a/docs/html/vk__mem__alloc_8h.html b/docs/html/vk__mem__alloc_8h.html index 90dd3a3..7aa389a 100644 --- a/docs/html/vk__mem__alloc_8h.html +++ b/docs/html/vk__mem__alloc_8h.html @@ -286,6 +286,12 @@ Functions void vmaUnmapMemory (VmaAllocator allocator, VmaAllocation allocation)  Unmaps memory represented by given allocation, mapped previously using vmaMapMemory(). More...
      +void vmaFlushAllocation (VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size) + Flushes memory of given allocation. More...
    +  +void vmaInvalidateAllocation (VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size) + Invalidates memory of given allocation. More...
    +  VkResult vmaDefragment (VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)  Compacts memory by moving allocations. More...
      @@ -1681,6 +1687,56 @@ Functions
  • vkDestroyImage
  • + + + +

    ◆ vmaFlushAllocation()

    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    void vmaFlushAllocation (VmaAllocator allocator,
    VmaAllocation allocation,
    VkDeviceSize offset,
    VkDeviceSize size 
    )
    +
    + +

    Flushes memory of given allocation.

    +

Calls vkFlushMappedMemoryRanges() for memory associated with the given range of the given allocation.

    +
      +
• offset must be relative to the beginning of the allocation.
• +
• size can be VK_WHOLE_SIZE. It means all memory from offset to the end of the given allocation.
• +
• offset and size don't have to be aligned. They are internally rounded down/up to a multiple of nonCoherentAtomSize.
• +
• If size is 0, this call is ignored.
• +
• If the memory type that the allocation belongs to is not HOST_VISIBLE or it is HOST_COHERENT, this call is ignored.
• +
    +
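For example, after updating only a part of a mapped allocation, one might flush just that region (updateOffset, updateSize and pSrcData are hypothetical names; as noted above, they don't need to be aligned):
memcpy((char*)allocInfo.pMappedData + updateOffset, pSrcData, updateSize);
vmaFlushAllocation(allocator, allocation, updateOffset, updateSize);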
    @@ -1921,6 +1977,56 @@ Functions + + + +

    ◆ vmaInvalidateAllocation()

    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    void vmaInvalidateAllocation (VmaAllocator allocator,
    VmaAllocation allocation,
    VkDeviceSize offset,
    VkDeviceSize size 
    )
    +
    + +

    Invalidates memory of given allocation.

    +

Calls vkInvalidateMappedMemoryRanges() for memory associated with the given range of the given allocation.

    +
      +
• offset must be relative to the beginning of the allocation.
• +
• size can be VK_WHOLE_SIZE. It means all memory from offset to the end of the given allocation.
• +
• offset and size don't have to be aligned. They are internally rounded down/up to a multiple of nonCoherentAtomSize.
• +
• If size is 0, this call is ignored.
• +
• If the memory type that the allocation belongs to is not HOST_VISIBLE or it is HOST_COHERENT, this call is ignored.
• +
    +
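For example, before reading back results that the GPU wrote to a mapped, non-coherent allocation (hypothetical names):
vmaInvalidateAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
memcpy(&result, allocInfo.pMappedData, sizeof(result));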
    diff --git a/docs/html/vk__mem__alloc_8h_source.html b/docs/html/vk__mem__alloc_8h_source.html index b7125a1..5d35609 100644 --- a/docs/html/vk__mem__alloc_8h_source.html +++ b/docs/html/vk__mem__alloc_8h_source.html @@ -62,160 +62,164 @@ $(function() {
    vk_mem_alloc.h
    -Go to the documentation of this file.
    1 //
    2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    1092 #include <vulkan/vulkan.h>
    1093 
    1094 #if !defined(VMA_DEDICATED_ALLOCATION)
    1095  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
    1096  #define VMA_DEDICATED_ALLOCATION 1
    1097  #else
    1098  #define VMA_DEDICATED_ALLOCATION 0
    1099  #endif
    1100 #endif
    1101 
    1111 VK_DEFINE_HANDLE(VmaAllocator)
    1112 
    1113 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    1115  VmaAllocator allocator,
    1116  uint32_t memoryType,
    1117  VkDeviceMemory memory,
    1118  VkDeviceSize size);
    1120 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    1121  VmaAllocator allocator,
    1122  uint32_t memoryType,
    1123  VkDeviceMemory memory,
    1124  VkDeviceSize size);
    1125 
    1139 
    1169 
    1172 typedef VkFlags VmaAllocatorCreateFlags;
    1173 
    1178 typedef struct VmaVulkanFunctions {
    1179  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    1180  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    1181  PFN_vkAllocateMemory vkAllocateMemory;
    1182  PFN_vkFreeMemory vkFreeMemory;
    1183  PFN_vkMapMemory vkMapMemory;
    1184  PFN_vkUnmapMemory vkUnmapMemory;
    1185  PFN_vkBindBufferMemory vkBindBufferMemory;
    1186  PFN_vkBindImageMemory vkBindImageMemory;
    1187  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    1188  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    1189  PFN_vkCreateBuffer vkCreateBuffer;
    1190  PFN_vkDestroyBuffer vkDestroyBuffer;
    1191  PFN_vkCreateImage vkCreateImage;
    1192  PFN_vkDestroyImage vkDestroyImage;
    1193 #if VMA_DEDICATED_ALLOCATION
    1194  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    1195  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
    1196 #endif
    1198 
    1201 {
    1203  VmaAllocatorCreateFlags flags;
    1205 
    1206  VkPhysicalDevice physicalDevice;
    1208 
    1209  VkDevice device;
    1211 
    1214 
    1215  const VkAllocationCallbacks* pAllocationCallbacks;
    1217 
    1256  const VkDeviceSize* pHeapSizeLimit;
    1270 
    1272 VkResult vmaCreateAllocator(
    1273  const VmaAllocatorCreateInfo* pCreateInfo,
    1274  VmaAllocator* pAllocator);
    1275 
    1277 void vmaDestroyAllocator(
    1278  VmaAllocator allocator);
    1279 
    1285  VmaAllocator allocator,
    1286  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    1287 
    1293  VmaAllocator allocator,
    1294  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    1295 
    1303  VmaAllocator allocator,
    1304  uint32_t memoryTypeIndex,
    1305  VkMemoryPropertyFlags* pFlags);
    1306 
    1316  VmaAllocator allocator,
    1317  uint32_t frameIndex);
    1318 
    1321 typedef struct VmaStatInfo
    1322 {
    1324  uint32_t blockCount;
    1330  VkDeviceSize usedBytes;
    1332  VkDeviceSize unusedBytes;
    1333  VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    1334  VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
    1335 } VmaStatInfo;
    1336 
    1338 typedef struct VmaStats
    1339 {
    1340  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    1341  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    1343 } VmaStats;
    1344 
    1346 void vmaCalculateStats(
    1347  VmaAllocator allocator,
    1348  VmaStats* pStats);
    1349 
    1350 #define VMA_STATS_STRING_ENABLED 1
    1351 
    1352 #if VMA_STATS_STRING_ENABLED
    1353 
    1355 
    1357 void vmaBuildStatsString(
    1358  VmaAllocator allocator,
    1359  char** ppStatsString,
    1360  VkBool32 detailedMap);
    1361 
    1362 void vmaFreeStatsString(
    1363  VmaAllocator allocator,
    1364  char* pStatsString);
    1365 
    1366 #endif // #if VMA_STATS_STRING_ENABLED
    1367 
    1376 VK_DEFINE_HANDLE(VmaPool)
    1377 
    1378 typedef enum VmaMemoryUsage
    1379 {
    1428 } VmaMemoryUsage;
    1429 
    1444 
    1494 
    1498 
    1500 {
    1502  VmaAllocationCreateFlags flags;
    1513  VkMemoryPropertyFlags requiredFlags;
    1518  VkMemoryPropertyFlags preferredFlags;
    1526  uint32_t memoryTypeBits;
    1539  void* pUserData;
    1541 
    1558 VkResult vmaFindMemoryTypeIndex(
    1559  VmaAllocator allocator,
    1560  uint32_t memoryTypeBits,
    1561  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    1562  uint32_t* pMemoryTypeIndex);
    1563 
    1577  VmaAllocator allocator,
    1578  const VkBufferCreateInfo* pBufferCreateInfo,
    1579  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    1580  uint32_t* pMemoryTypeIndex);
    1581 
    1595  VmaAllocator allocator,
    1596  const VkImageCreateInfo* pImageCreateInfo,
    1597  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    1598  uint32_t* pMemoryTypeIndex);
    1599 
    1620 
    1623 typedef VkFlags VmaPoolCreateFlags;
    1624 
    1627 typedef struct VmaPoolCreateInfo {
    1633  VmaPoolCreateFlags flags;
    1638  VkDeviceSize blockSize;
    1667 
    1670 typedef struct VmaPoolStats {
    1673  VkDeviceSize size;
    1676  VkDeviceSize unusedSize;
    1689  VkDeviceSize unusedRangeSizeMax;
    1690 } VmaPoolStats;
    1691 
    1698 VkResult vmaCreatePool(
    1699  VmaAllocator allocator,
    1700  const VmaPoolCreateInfo* pCreateInfo,
    1701  VmaPool* pPool);
    1702 
    1705 void vmaDestroyPool(
    1706  VmaAllocator allocator,
    1707  VmaPool pool);
    1708 
    1715 void vmaGetPoolStats(
    1716  VmaAllocator allocator,
    1717  VmaPool pool,
    1718  VmaPoolStats* pPoolStats);
    1719 
    1727  VmaAllocator allocator,
    1728  VmaPool pool,
    1729  size_t* pLostAllocationCount);
    1730 
    1755 VK_DEFINE_HANDLE(VmaAllocation)
    1756 
    1757 
    1759 typedef struct VmaAllocationInfo {
    1764  uint32_t memoryType;
    1773  VkDeviceMemory deviceMemory;
    1778  VkDeviceSize offset;
    1783  VkDeviceSize size;
    1797  void* pUserData;
    1799 
    1810 VkResult vmaAllocateMemory(
    1811  VmaAllocator allocator,
    1812  const VkMemoryRequirements* pVkMemoryRequirements,
    1813  const VmaAllocationCreateInfo* pCreateInfo,
    1814  VmaAllocation* pAllocation,
    1815  VmaAllocationInfo* pAllocationInfo);
    1816 
    1824  VmaAllocator allocator,
    1825  VkBuffer buffer,
    1826  const VmaAllocationCreateInfo* pCreateInfo,
    1827  VmaAllocation* pAllocation,
    1828  VmaAllocationInfo* pAllocationInfo);
    1829 
    1831 VkResult vmaAllocateMemoryForImage(
    1832  VmaAllocator allocator,
    1833  VkImage image,
    1834  const VmaAllocationCreateInfo* pCreateInfo,
    1835  VmaAllocation* pAllocation,
    1836  VmaAllocationInfo* pAllocationInfo);
    1837 
    1839 void vmaFreeMemory(
    1840  VmaAllocator allocator,
    1841  VmaAllocation allocation);
    1842 
    1860  VmaAllocator allocator,
    1861  VmaAllocation allocation,
    1862  VmaAllocationInfo* pAllocationInfo);
    1863 
    1878 VkBool32 vmaTouchAllocation(
    1879  VmaAllocator allocator,
    1880  VmaAllocation allocation);
    1881 
    1896  VmaAllocator allocator,
    1897  VmaAllocation allocation,
    1898  void* pUserData);
    1899 
    1911  VmaAllocator allocator,
    1912  VmaAllocation* pAllocation);
    1913 
    1948 VkResult vmaMapMemory(
    1949  VmaAllocator allocator,
    1950  VmaAllocation allocation,
    1951  void** ppData);
    1952 
    1957 void vmaUnmapMemory(
    1958  VmaAllocator allocator,
    1959  VmaAllocation allocation);
    1960 
    1962 typedef struct VmaDefragmentationInfo {
    1967  VkDeviceSize maxBytesToMove;
    1974 
    1976 typedef struct VmaDefragmentationStats {
    1978  VkDeviceSize bytesMoved;
    1980  VkDeviceSize bytesFreed;
    1986 
    2069 VkResult vmaDefragment(
    2070  VmaAllocator allocator,
    2071  VmaAllocation* pAllocations,
    2072  size_t allocationCount,
    2073  VkBool32* pAllocationsChanged,
    2074  const VmaDefragmentationInfo *pDefragmentationInfo,
    2075  VmaDefragmentationStats* pDefragmentationStats);
    2076 
    2089 VkResult vmaBindBufferMemory(
    2090  VmaAllocator allocator,
    2091  VmaAllocation allocation,
    2092  VkBuffer buffer);
    2093 
    2106 VkResult vmaBindImageMemory(
    2107  VmaAllocator allocator,
    2108  VmaAllocation allocation,
    2109  VkImage image);
    2110 
    2137 VkResult vmaCreateBuffer(
    2138  VmaAllocator allocator,
    2139  const VkBufferCreateInfo* pBufferCreateInfo,
    2140  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2141  VkBuffer* pBuffer,
    2142  VmaAllocation* pAllocation,
    2143  VmaAllocationInfo* pAllocationInfo);
    2144 
    2156 void vmaDestroyBuffer(
    2157  VmaAllocator allocator,
    2158  VkBuffer buffer,
    2159  VmaAllocation allocation);
    2160 
    2162 VkResult vmaCreateImage(
    2163  VmaAllocator allocator,
    2164  const VkImageCreateInfo* pImageCreateInfo,
    2165  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2166  VkImage* pImage,
    2167  VmaAllocation* pAllocation,
    2168  VmaAllocationInfo* pAllocationInfo);
    2169 
    2181 void vmaDestroyImage(
    2182  VmaAllocator allocator,
    2183  VkImage image,
    2184  VmaAllocation allocation);
    2185 
    2186 #ifdef __cplusplus
    2187 }
    2188 #endif
    2189 
    2190 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    2191 
    2192 // For Visual Studio IntelliSense.
    2193 #if defined(__cplusplus) && defined(__INTELLISENSE__)
    2194 #define VMA_IMPLEMENTATION
    2195 #endif
    2196 
    2197 #ifdef VMA_IMPLEMENTATION
    2198 #undef VMA_IMPLEMENTATION
    2199 
    2200 #include <cstdint>
    2201 #include <cstdlib>
    2202 #include <cstring>
    2203 
    2204 /*******************************************************************************
    2205 CONFIGURATION SECTION
    2206 
    2207 Define some of these macros before each #include of this header or change them
2208 here if you need other than the default behavior depending on your environment.
    2209 */
    2210 
    2211 /*
    2212 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    2213 internally, like:
    2214 
    2215  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    2216 
2217 Define to 0 if you are going to provide your own pointers to Vulkan functions via
    2218 VmaAllocatorCreateInfo::pVulkanFunctions.
    2219 */
    2220 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    2221 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    2222 #endif
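// For illustration, a sketch of the opposite setup - providing your own
// pointers (the last two members shown below are the ones added for
// non-coherent memory support; variable names here are hypothetical):
//
//     VmaVulkanFunctions vulkanFunctions = {};
//     vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
//     // ... fill all remaining members the same way ...
//     vulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
//     vulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
//     VmaAllocatorCreateInfo allocatorInfo = {};
//     allocatorInfo.pVulkanFunctions = &vulkanFunctions;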
    2223 
    2224 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    2225 //#define VMA_USE_STL_CONTAINERS 1
    2226 
2227 /* Set this macro to 1 to make the library include and use STL containers:
    2228 std::pair, std::vector, std::list, std::unordered_map.
    2229 
2230 Set it to 0 or undefined to make the library use its own implementation of
    2231 the containers.
    2232 */
    2233 #if VMA_USE_STL_CONTAINERS
    2234  #define VMA_USE_STL_VECTOR 1
    2235  #define VMA_USE_STL_UNORDERED_MAP 1
    2236  #define VMA_USE_STL_LIST 1
    2237 #endif
    2238 
    2239 #if VMA_USE_STL_VECTOR
    2240  #include <vector>
    2241 #endif
    2242 
    2243 #if VMA_USE_STL_UNORDERED_MAP
    2244  #include <unordered_map>
    2245 #endif
    2246 
    2247 #if VMA_USE_STL_LIST
    2248  #include <list>
    2249 #endif
    2250 
    2251 /*
2252 The following headers are used in this CONFIGURATION section only, so feel free to
    2253 remove them if not needed.
    2254 */
    2255 #include <cassert> // for assert
    2256 #include <algorithm> // for min, max
    2257 #include <mutex> // for std::mutex
    2258 #include <atomic> // for std::atomic
    2259 
    2260 #if !defined(_WIN32) && !defined(__APPLE__)
    2261  #include <malloc.h> // for aligned_alloc()
    2262 #endif
    2263 
    2264 #ifndef VMA_NULL
    2265  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    2266  #define VMA_NULL nullptr
    2267 #endif
    2268 
    2269 #if defined(__APPLE__) || defined(__ANDROID__)
    2270 #include <cstdlib>
    2271 void *aligned_alloc(size_t alignment, size_t size)
    2272 {
    2273  // alignment must be >= sizeof(void*)
    2274  if(alignment < sizeof(void*))
    2275  {
    2276  alignment = sizeof(void*);
    2277  }
    2278 
    2279  void *pointer;
    2280  if(posix_memalign(&pointer, alignment, size) == 0)
    2281  return pointer;
    2282  return VMA_NULL;
    2283 }
    2284 #endif
    2285 
    2286 // Normal assert to check for programmer's errors, especially in Debug configuration.
    2287 #ifndef VMA_ASSERT
    2288  #ifdef _DEBUG
    2289  #define VMA_ASSERT(expr) assert(expr)
    2290  #else
    2291  #define VMA_ASSERT(expr)
    2292  #endif
    2293 #endif
    2294 
    2295 // Assert that will be called very often, like inside data structures e.g. operator[].
    2296 // Making it non-empty can make program slow.
    2297 #ifndef VMA_HEAVY_ASSERT
    2298  #ifdef _DEBUG
    2299  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    2300  #else
    2301  #define VMA_HEAVY_ASSERT(expr)
    2302  #endif
    2303 #endif
    2304 
    2305 #ifndef VMA_ALIGN_OF
    2306  #define VMA_ALIGN_OF(type) (__alignof(type))
    2307 #endif
    2308 
    2309 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    2310  #if defined(_WIN32)
    2311  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    2312  #else
    2313  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    2314  #endif
    2315 #endif
    2316 
    2317 #ifndef VMA_SYSTEM_FREE
    2318  #if defined(_WIN32)
    2319  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    2320  #else
    2321  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    2322  #endif
    2323 #endif
    2324 
    2325 #ifndef VMA_MIN
    2326  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    2327 #endif
    2328 
    2329 #ifndef VMA_MAX
    2330  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    2331 #endif
    2332 
    2333 #ifndef VMA_SWAP
    2334  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    2335 #endif
    2336 
    2337 #ifndef VMA_SORT
    2338  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    2339 #endif
    2340 
    2341 #ifndef VMA_DEBUG_LOG
    2342  #define VMA_DEBUG_LOG(format, ...)
    2343  /*
    2344  #define VMA_DEBUG_LOG(format, ...) do { \
    2345  printf(format, __VA_ARGS__); \
    2346  printf("\n"); \
    2347  } while(false)
    2348  */
    2349 #endif
    2350 
    2351 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    2352 #if VMA_STATS_STRING_ENABLED
    2353  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    2354  {
    2355  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    2356  }
    2357  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    2358  {
    2359  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    2360  }
    2361  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    2362  {
    2363  snprintf(outStr, strLen, "%p", ptr);
    2364  }
    2365 #endif
    2366 
    2367 #ifndef VMA_MUTEX
    2368  class VmaMutex
    2369  {
    2370  public:
    2371  VmaMutex() { }
    2372  ~VmaMutex() { }
    2373  void Lock() { m_Mutex.lock(); }
    2374  void Unlock() { m_Mutex.unlock(); }
    2375  private:
    2376  std::mutex m_Mutex;
    2377  };
    2378  #define VMA_MUTEX VmaMutex
    2379 #endif
    2380 
    2381 /*
    2382 If providing your own implementation, you need to implement a subset of std::atomic:
    2383 
    2384 - Constructor(uint32_t desired)
    2385 - uint32_t load() const
    2386 - void store(uint32_t desired)
    2387 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    2388 */
    2389 #ifndef VMA_ATOMIC_UINT32
    2390  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    2391 #endif
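// A minimal sketch of a drop-in replacement satisfying the subset listed
// above (hypothetical type; a real replacement must be genuinely atomic -
// this one simply wraps std::atomic to illustrate the required interface):
//
//     class MyAtomicUint32
//     {
//     public:
//         MyAtomicUint32(uint32_t desired) : m_Value(desired) { }
//         uint32_t load() const { return m_Value.load(); }
//         void store(uint32_t desired) { m_Value.store(desired); }
//         bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
//             { return m_Value.compare_exchange_weak(expected, desired); }
//     private:
//         std::atomic<uint32_t> m_Value;
//     };
//     #define VMA_ATOMIC_UINT32 MyAtomicUint32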
    2392 
    2393 #ifndef VMA_BEST_FIT
    2394 
    2406  #define VMA_BEST_FIT (1)
    2407 #endif
    2408 
    2409 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    2410 
    2414  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    2415 #endif
    2416 
    2417 #ifndef VMA_DEBUG_ALIGNMENT
    2418 
    2422  #define VMA_DEBUG_ALIGNMENT (1)
    2423 #endif
    2424 
    2425 #ifndef VMA_DEBUG_MARGIN
    2426 
    2430  #define VMA_DEBUG_MARGIN (0)
    2431 #endif
    2432 
    2433 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    2434 
    2438  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    2439 #endif
    2440 
    2441 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    2442 
    2446  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    2447 #endif
    2448 
    2449 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    2450  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
    2452 #endif
    2453 
    2454 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    2455  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
    2457 #endif
    2458 
    2459 #ifndef VMA_CLASS_NO_COPY
    2460  #define VMA_CLASS_NO_COPY(className) \
    2461  private: \
    2462  className(const className&) = delete; \
    2463  className& operator=(const className&) = delete;
    2464 #endif
    2465 
    2466 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    2467 
    2468 /*******************************************************************************
    2469 END OF CONFIGURATION
    2470 */
    2471 
    2472 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    2473  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    2474 
    2475 // Returns number of bits set to 1 in (v).
    2476 static inline uint32_t VmaCountBitsSet(uint32_t v)
    2477 {
    2478  uint32_t c = v - ((v >> 1) & 0x55555555);
    2479  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    2480  c = ((c >> 4) + c) & 0x0F0F0F0F;
    2481  c = ((c >> 8) + c) & 0x00FF00FF;
    2482  c = ((c >> 16) + c) & 0x0000FFFF;
    2483  return c;
    2484 }
    2485 
2486 // Aligns given value up to the nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
    2487 // Use types like uint32_t, uint64_t as T.
    2488 template <typename T>
    2489 static inline T VmaAlignUp(T val, T align)
    2490 {
    2491  return (val + align - 1) / align * align;
    2492 }
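// Rounding in the other direction is needed e.g. when clamping a flush or
// invalidate range down to a nonCoherentAtomSize boundary. A sketch of such
// a helper (assumed here, mirroring VmaAlignUp): VmaAlignDown(11, 8) = 8.
//
//     template <typename T>
//     static inline T VmaAlignDown(T val, T align)
//     {
//         return val / align * align;
//     }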
    2493 
    2494 // Division with mathematical rounding to nearest number.
    2495 template <typename T>
    2496 inline T VmaRoundDiv(T x, T y)
    2497 {
    2498  return (x + (y / (T)2)) / y;
    2499 }
    2500 
    2501 #ifndef VMA_SORT
    2502 
    2503 template<typename Iterator, typename Compare>
    2504 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
    2505 {
    2506  Iterator centerValue = end; --centerValue;
    2507  Iterator insertIndex = beg;
    2508  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    2509  {
    2510  if(cmp(*memTypeIndex, *centerValue))
    2511  {
    2512  if(insertIndex != memTypeIndex)
    2513  {
    2514  VMA_SWAP(*memTypeIndex, *insertIndex);
    2515  }
    2516  ++insertIndex;
    2517  }
    2518  }
    2519  if(insertIndex != centerValue)
    2520  {
    2521  VMA_SWAP(*insertIndex, *centerValue);
    2522  }
    2523  return insertIndex;
    2524 }
    2525 
    2526 template<typename Iterator, typename Compare>
    2527 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
    2528 {
    2529  if(beg < end)
    2530  {
    2531  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
    2532  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
    2533  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    2534  }
    2535 }
    2536 
    2537 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    2538 
    2539 #endif // #ifndef VMA_SORT
    2540 
    2541 /*
    2542 Returns true if two memory blocks occupy overlapping pages.
2543 ResourceA must be at a lower memory offset than ResourceB.
    2544 
    2545 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
    2546 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
    2547 */
    2548 static inline bool VmaBlocksOnSamePage(
    2549  VkDeviceSize resourceAOffset,
    2550  VkDeviceSize resourceASize,
    2551  VkDeviceSize resourceBOffset,
    2552  VkDeviceSize pageSize)
    2553 {
    2554  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    2555  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    2556  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    2557  VkDeviceSize resourceBStart = resourceBOffset;
    2558  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    2559  return resourceAEndPage == resourceBStartPage;
    2560 }
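// Worked example (as a sketch): with pageSize = 1024, resource A at offset 0
// and size 512 ends at byte 511, which lies on page 0.
//
//     VmaBlocksOnSamePage(0, 512, 512, 1024)  == true   // B also starts on page 0
//     VmaBlocksOnSamePage(0, 512, 1024, 1024) == false  // B starts on page 1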
    2561 
    2562 enum VmaSuballocationType
    2563 {
    2564  VMA_SUBALLOCATION_TYPE_FREE = 0,
    2565  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    2566  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    2567  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    2568  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    2569  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    2570  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
    2571 };
    2572 
    2573 /*
    2574 Returns true if given suballocation types could conflict and must respect
    2575 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
    2576 or linear image and another one is optimal image. If type is unknown, behave
    2577 conservatively.
    2578 */
    2579 static inline bool VmaIsBufferImageGranularityConflict(
    2580  VmaSuballocationType suballocType1,
    2581  VmaSuballocationType suballocType2)
    2582 {
    2583  if(suballocType1 > suballocType2)
    2584  {
    2585  VMA_SWAP(suballocType1, suballocType2);
    2586  }
    2587 
    2588  switch(suballocType1)
    2589  {
    2590  case VMA_SUBALLOCATION_TYPE_FREE:
    2591  return false;
    2592  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
    2593  return true;
    2594  case VMA_SUBALLOCATION_TYPE_BUFFER:
    2595  return
    2596  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    2597  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    2598  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
    2599  return
    2600  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    2601  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
    2602  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    2603  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
    2604  return
    2605  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    2606  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
    2607  return false;
    2608  default:
    2609  VMA_ASSERT(0);
    2610  return true;
    2611  }
    2612 }
    2613 
    2614 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
    2615 struct VmaMutexLock
    2616 {
    2617  VMA_CLASS_NO_COPY(VmaMutexLock)
    2618 public:
    2619  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
    2620  m_pMutex(useMutex ? &mutex : VMA_NULL)
    2621  {
    2622  if(m_pMutex)
    2623  {
    2624  m_pMutex->Lock();
    2625  }
    2626  }
    2627 
    2628  ~VmaMutexLock()
    2629  {
    2630  if(m_pMutex)
    2631  {
    2632  m_pMutex->Unlock();
    2633  }
    2634  }
    2635 
    2636 private:
    2637  VMA_MUTEX* m_pMutex;
    2638 };
    2639 
    2640 #if VMA_DEBUG_GLOBAL_MUTEX
    2641  static VMA_MUTEX gDebugGlobalMutex;
    2642  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    2643 #else
    2644  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    2645 #endif
    2646 
    2647 // Minimum size of a free suballocation to register it in the free suballocation collection.
    2648 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    2649 
    2650 /*
2651 Performs binary search and returns iterator to the first element that is greater than or
    2652 equal to (key), according to comparison (cmp).
    2653 
    2654 Cmp should return true if first argument is less than second argument.
    2655 
2656 The returned iterator points to the found element, if present in the collection, or to the
2657 place where a new element with value (key) should be inserted.
    2658 */
    2659 template <typename IterT, typename KeyT, typename CmpT>
    2660 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
    2661 {
    2662  size_t down = 0, up = (end - beg);
    2663  while(down < up)
    2664  {
    2665  const size_t mid = (down + up) / 2;
    2666  if(cmp(*(beg+mid), key))
    2667  {
    2668  down = mid + 1;
    2669  }
    2670  else
    2671  {
    2672  up = mid;
    2673  }
    2674  }
    2675  return beg + down;
    2676 }
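// Usage sketch (hypothetical values): a lower-bound search in a sorted array.
//
//     uint32_t arr[] = { 1, 3, 7, 9 };
//     uint32_t* it = VmaBinaryFindFirstNotLess(
//         arr, arr + 4, 7u,
//         [](uint32_t a, uint32_t b) { return a < b; });
//     // it points to 7; for key 8 it would point to 9, the insertion spot.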
    2677 
    2679 // Memory allocation
    2680 
    2681 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
    2682 {
    2683  if((pAllocationCallbacks != VMA_NULL) &&
    2684  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    2685  {
    2686  return (*pAllocationCallbacks->pfnAllocation)(
    2687  pAllocationCallbacks->pUserData,
    2688  size,
    2689  alignment,
    2690  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    2691  }
    2692  else
    2693  {
    2694  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    2695  }
    2696 }
    2697 
    2698 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
    2699 {
    2700  if((pAllocationCallbacks != VMA_NULL) &&
    2701  (pAllocationCallbacks->pfnFree != VMA_NULL))
    2702  {
    2703  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    2704  }
    2705  else
    2706  {
    2707  VMA_SYSTEM_FREE(ptr);
    2708  }
    2709 }
    2710 
    2711 template<typename T>
    2712 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
    2713 {
    2714  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
    2715 }
    2716 
    2717 template<typename T>
    2718 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
    2719 {
    2720  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
    2721 }
    2722 
    2723 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
    2724 
    2725 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    2726 
    2727 template<typename T>
    2728 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
    2729 {
    2730  ptr->~T();
    2731  VmaFree(pAllocationCallbacks, ptr);
    2732 }
    2733 
    2734 template<typename T>
    2735 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    2736 {
    2737  if(ptr != VMA_NULL)
    2738  {
    2739  for(size_t i = count; i--; )
    2740  {
    2741  ptr[i].~T();
    2742  }
    2743  VmaFree(pAllocationCallbacks, ptr);
    2744  }
    2745 }
    2746 
    2747 // STL-compatible allocator.
    2748 template<typename T>
    2749 class VmaStlAllocator
    2750 {
    2751 public:
    2752  const VkAllocationCallbacks* const m_pCallbacks;
    2753  typedef T value_type;
    2754 
    2755  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    2756  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
    2757 
    2758  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    2759  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
    2760 
    2761  template<typename U>
    2762  bool operator==(const VmaStlAllocator<U>& rhs) const
    2763  {
    2764  return m_pCallbacks == rhs.m_pCallbacks;
    2765  }
    2766  template<typename U>
    2767  bool operator!=(const VmaStlAllocator<U>& rhs) const
    2768  {
    2769  return m_pCallbacks != rhs.m_pCallbacks;
    2770  }
    2771 
    2772  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
    2773 };
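// Usage sketch (hypothetical): plugging the allocator into an STL container.
// With null callbacks, allocations fall back to VMA_SYSTEM_ALIGNED_MALLOC.
//
//     std::vector<int, VmaStlAllocator<int> > v(VmaStlAllocator<int>(VMA_NULL));
//     v.push_back(42); // element storage goes through VmaMalloc / VmaFree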
    2774 
    2775 #if VMA_USE_STL_VECTOR
    2776 
    2777 #define VmaVector std::vector
    2778 
    2779 template<typename T, typename allocatorT>
    2780 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
    2781 {
    2782  vec.insert(vec.begin() + index, item);
    2783 }
    2784 
    2785 template<typename T, typename allocatorT>
    2786 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
    2787 {
    2788  vec.erase(vec.begin() + index);
    2789 }
    2790 
    2791 #else // #if VMA_USE_STL_VECTOR
    2792 
    2793 /* Class with interface compatible with subset of std::vector.
    2794 T must be POD because constructors and destructors are not called and memcpy is
    2795 used for these objects. */
    2796 template<typename T, typename AllocatorT>
    2797 class VmaVector
    2798 {
    2799 public:
    2800  typedef T value_type;
    2801 
    2802  VmaVector(const AllocatorT& allocator) :
    2803  m_Allocator(allocator),
    2804  m_pArray(VMA_NULL),
    2805  m_Count(0),
    2806  m_Capacity(0)
    2807  {
    2808  }
    2809 
    2810  VmaVector(size_t count, const AllocatorT& allocator) :
    2811  m_Allocator(allocator),
    2812  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    2813  m_Count(count),
    2814  m_Capacity(count)
    2815  {
    2816  }
    2817 
    2818  VmaVector(const VmaVector<T, AllocatorT>& src) :
    2819  m_Allocator(src.m_Allocator),
    2820  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    2821  m_Count(src.m_Count),
    2822  m_Capacity(src.m_Count)
    2823  {
    2824  if(m_Count != 0)
    2825  {
    2826  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    2827  }
    2828  }
    2829 
    2830  ~VmaVector()
    2831  {
    2832  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    2833  }
    2834 
    2835  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    2836  {
    2837  if(&rhs != this)
    2838  {
    2839  resize(rhs.m_Count);
    2840  if(m_Count != 0)
    2841  {
    2842  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    2843  }
    2844  }
    2845  return *this;
    2846  }
    2847 
    2848  bool empty() const { return m_Count == 0; }
    2849  size_t size() const { return m_Count; }
    2850  T* data() { return m_pArray; }
    2851  const T* data() const { return m_pArray; }
    2852 
    2853  T& operator[](size_t index)
    2854  {
    2855  VMA_HEAVY_ASSERT(index < m_Count);
    2856  return m_pArray[index];
    2857  }
    2858  const T& operator[](size_t index) const
    2859  {
    2860  VMA_HEAVY_ASSERT(index < m_Count);
    2861  return m_pArray[index];
    2862  }
    2863 
    2864  T& front()
    2865  {
    2866  VMA_HEAVY_ASSERT(m_Count > 0);
    2867  return m_pArray[0];
    2868  }
    2869  const T& front() const
    2870  {
    2871  VMA_HEAVY_ASSERT(m_Count > 0);
    2872  return m_pArray[0];
    2873  }
    2874  T& back()
    2875  {
    2876  VMA_HEAVY_ASSERT(m_Count > 0);
    2877  return m_pArray[m_Count - 1];
    2878  }
    2879  const T& back() const
    2880  {
    2881  VMA_HEAVY_ASSERT(m_Count > 0);
    2882  return m_pArray[m_Count - 1];
    2883  }
    2884 
    2885  void reserve(size_t newCapacity, bool freeMemory = false)
    2886  {
    2887  newCapacity = VMA_MAX(newCapacity, m_Count);
    2888 
    2889  if((newCapacity < m_Capacity) && !freeMemory)
    2890  {
    2891  newCapacity = m_Capacity;
    2892  }
    2893 
    2894  if(newCapacity != m_Capacity)
    2895  {
    2896  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
    2897  if(m_Count != 0)
    2898  {
    2899  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    2900  }
    2901  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    2902  m_Capacity = newCapacity;
    2903  m_pArray = newArray;
    2904  }
    2905  }
    2906 
    2907  void resize(size_t newCount, bool freeMemory = false)
    2908  {
    2909  size_t newCapacity = m_Capacity;
    2910  if(newCount > m_Capacity)
    2911  {
    2912  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    2913  }
    2914  else if(freeMemory)
    2915  {
    2916  newCapacity = newCount;
    2917  }
    2918 
    2919  if(newCapacity != m_Capacity)
    2920  {
    2921  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    2922  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    2923  if(elementsToCopy != 0)
    2924  {
    2925  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    2926  }
    2927  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    2928  m_Capacity = newCapacity;
    2929  m_pArray = newArray;
    2930  }
    2931 
    2932  m_Count = newCount;
    2933  }
    2934 
    2935  void clear(bool freeMemory = false)
    2936  {
    2937  resize(0, freeMemory);
    2938  }
    2939 
    2940  void insert(size_t index, const T& src)
    2941  {
    2942  VMA_HEAVY_ASSERT(index <= m_Count);
    2943  const size_t oldCount = size();
    2944  resize(oldCount + 1);
    2945  if(index < oldCount)
    2946  {
    2947  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    2948  }
    2949  m_pArray[index] = src;
    2950  }
    2951 
    2952  void remove(size_t index)
    2953  {
    2954  VMA_HEAVY_ASSERT(index < m_Count);
    2955  const size_t oldCount = size();
    2956  if(index < oldCount - 1)
    2957  {
    2958  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    2959  }
    2960  resize(oldCount - 1);
    2961  }
    2962 
    2963  void push_back(const T& src)
    2964  {
    2965  const size_t newIndex = size();
    2966  resize(newIndex + 1);
    2967  m_pArray[newIndex] = src;
    2968  }
    2969 
    2970  void pop_back()
    2971  {
    2972  VMA_HEAVY_ASSERT(m_Count > 0);
    2973  resize(size() - 1);
    2974  }
    2975 
    2976  void push_front(const T& src)
    2977  {
    2978  insert(0, src);
    2979  }
    2980 
    2981  void pop_front()
    2982  {
    2983  VMA_HEAVY_ASSERT(m_Count > 0);
    2984  remove(0);
    2985  }
    2986 
    2987  typedef T* iterator;
    2988 
    2989  iterator begin() { return m_pArray; }
    2990  iterator end() { return m_pArray + m_Count; }
    2991 
    2992 private:
    2993  AllocatorT m_Allocator;
    2994  T* m_pArray;
    2995  size_t m_Count;
    2996  size_t m_Capacity;
    2997 };
    2998 
    2999 template<typename T, typename allocatorT>
    3000 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
    3001 {
    3002  vec.insert(index, item);
    3003 }
    3004 
    3005 template<typename T, typename allocatorT>
    3006 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
    3007 {
    3008  vec.remove(index);
    3009 }
    3010 
    3011 #endif // #if VMA_USE_STL_VECTOR
    3012 
    3013 template<typename CmpLess, typename VectorT>
    3014 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
    3015 {
    3016  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    3017  vector.data(),
    3018  vector.data() + vector.size(),
    3019  value,
    3020  CmpLess()) - vector.data();
    3021  VmaVectorInsert(vector, indexToInsert, value);
    3022  return indexToInsert;
    3023 }
    3024 
    3025 template<typename CmpLess, typename VectorT>
    3026 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
    3027 {
    3028  CmpLess comparator;
    3029  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
    3030  vector.begin(),
    3031  vector.end(),
    3032  value,
    3033  comparator);
    3034  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    3035  {
    3036  size_t indexToRemove = it - vector.begin();
    3037  VmaVectorRemove(vector, indexToRemove);
    3038  return true;
    3039  }
    3040  return false;
    3041 }
    3042 
    3043 template<typename CmpLess, typename VectorT>
    3044 size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
    3045 {
    3046  CmpLess comparator;
    3047  const typename VectorT::value_type* it = VmaBinaryFindFirstNotLess(
    3048  vector.data(),
    3049  vector.data() + vector.size(),
    3050  value,
    3051  comparator);
    3052  if(it != vector.data() + vector.size() && !comparator(*it, value) && !comparator(value, *it))
    3053  {
    3054  return it - vector.data();
    3055  }
    3056  else
    3057  {
    3058  return vector.size();
    3059  }
    3060 }
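// Illustrative sketch (not part of the library): keeping a VmaVector sorted
// with the three helpers above. IntLess and SortedVectorExample are
// hypothetical names used only for this example.
#if 0
struct IntLess
{
    bool operator()(int lhs, int rhs) const { return lhs < rhs; }
};

static void SortedVectorExample(const VkAllocationCallbacks* pCallbacks)
{
    VmaVector< int, VmaStlAllocator<int> > v(VmaStlAllocator<int>(pCallbacks));
    VmaVectorInsertSorted<IntLess>(v, 5);
    VmaVectorInsertSorted<IntLess>(v, 2);
    VmaVectorInsertSorted<IntLess>(v, 7);                  // v == {2, 5, 7}.
    const size_t pos = VmaVectorFindSorted<IntLess>(v, 5); // pos == 1.
    VmaVectorRemoveSorted<IntLess>(v, 2);                  // v == {5, 7}.
    (void)pos;
}
#endif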
    3061 
    3063 // class VmaPoolAllocator
    3064 
    3065 /*
    3066 Allocator for objects of type T, using a list of arrays (pools) to speed up
    3067 allocation. The number of elements that can be allocated is not bounded,
    3068 because the allocator can create multiple blocks.
    3069 */
    3070 template<typename T>
    3071 class VmaPoolAllocator
    3072 {
    3073  VMA_CLASS_NO_COPY(VmaPoolAllocator)
    3074 public:
    3075  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    3076  ~VmaPoolAllocator();
    3077  void Clear();
    3078  T* Alloc();
    3079  void Free(T* ptr);
    3080 
    3081 private:
    3082  union Item
    3083  {
    3084  uint32_t NextFreeIndex;
    3085  T Value;
    3086  };
    3087 
    3088  struct ItemBlock
    3089  {
    3090  Item* pItems;
    3091  uint32_t FirstFreeIndex;
    3092  };
    3093 
    3094  const VkAllocationCallbacks* m_pAllocationCallbacks;
    3095  size_t m_ItemsPerBlock;
    3096  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
    3097 
    3098  ItemBlock& CreateNewBlock();
    3099 };
    3100 
    3101 template<typename T>
    3102 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    3103  m_pAllocationCallbacks(pAllocationCallbacks),
    3104  m_ItemsPerBlock(itemsPerBlock),
    3105  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
    3106 {
    3107  VMA_ASSERT(itemsPerBlock > 0);
    3108 }
    3109 
    3110 template<typename T>
    3111 VmaPoolAllocator<T>::~VmaPoolAllocator()
    3112 {
    3113  Clear();
    3114 }
    3115 
    3116 template<typename T>
    3117 void VmaPoolAllocator<T>::Clear()
    3118 {
    3119  for(size_t i = m_ItemBlocks.size(); i--; )
    3120  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    3121  m_ItemBlocks.clear();
    3122 }
    3123 
    3124 template<typename T>
    3125 T* VmaPoolAllocator<T>::Alloc()
    3126 {
    3127  for(size_t i = m_ItemBlocks.size(); i--; )
    3128  {
    3129  ItemBlock& block = m_ItemBlocks[i];
    3130  // This block has some free items: use the first one.
    3131  if(block.FirstFreeIndex != UINT32_MAX)
    3132  {
    3133  Item* const pItem = &block.pItems[block.FirstFreeIndex];
    3134  block.FirstFreeIndex = pItem->NextFreeIndex;
    3135  return &pItem->Value;
    3136  }
    3137  }
    3138 
    3139  // No block has a free item: create a new one and use it.
    3140  ItemBlock& newBlock = CreateNewBlock();
    3141  Item* const pItem = &newBlock.pItems[0];
    3142  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    3143  return &pItem->Value;
    3144 }
    3145 
    3146 template<typename T>
    3147 void VmaPoolAllocator<T>::Free(T* ptr)
    3148 {
    3149  // Search all memory blocks to find ptr.
    3150  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    3151  {
    3152  ItemBlock& block = m_ItemBlocks[i];
    3153 
    3154  // Cast ptr to Item* (pointer into the union) by copying the pointer value through memcpy.
    3155  Item* pItemPtr;
    3156  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
    3157 
    3158  // Check if pItemPtr is in address range of this block.
    3159  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
    3160  {
    3161  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
    3162  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
    3163  block.FirstFreeIndex = index;
    3164  return;
    3165  }
    3166  }
    3167  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
    3168 }
    3169 
    3170 template<typename T>
    3171 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
    3172 {
    3173  ItemBlock newBlock = {
    3174  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
    3175 
    3176  m_ItemBlocks.push_back(newBlock);
    3177 
    3178  // Setup singly-linked list of all free items in this block.
    3179  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
    3180  newBlock.pItems[i].NextFreeIndex = i + 1;
    3181  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    3182  return m_ItemBlocks.back();
    3183 }
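// Illustrative sketch (not part of the library): allocating and freeing items
// from a VmaPoolAllocator. Because Item is a union of T and a free-list index,
// Alloc() and Free() do not run constructors or destructors, so T should be
// trivially constructible and destructible, or constructed in place by the caller.
#if 0
static void PoolAllocatorExample(const VkAllocationCallbacks* pCallbacks)
{
    VmaPoolAllocator<uint64_t> pool(pCallbacks, 32); // 32 items per block.
    uint64_t* a = pool.Alloc();
    uint64_t* b = pool.Alloc();
    *a = 1;
    *b = 2;
    pool.Free(a);               // a's slot becomes the head of the block's free list.
    uint64_t* c = pool.Alloc(); // Reuses a's slot.
    pool.Free(b);
    pool.Free(c);
}   // pool's destructor calls Clear(), releasing all blocks at once.
#endif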
    3184 
    3186 // class VmaRawList, VmaList
    3187 
    3188 #if VMA_USE_STL_LIST
    3189 
    3190 #define VmaList std::list
    3191 
    3192 #else // #if VMA_USE_STL_LIST
    3193 
    3194 template<typename T>
    3195 struct VmaListItem
    3196 {
    3197  VmaListItem* pPrev;
    3198  VmaListItem* pNext;
    3199  T Value;
    3200 };
    3201 
    3202 // Doubly linked list.
    3203 template<typename T>
    3204 class VmaRawList
    3205 {
    3206  VMA_CLASS_NO_COPY(VmaRawList)
    3207 public:
    3208  typedef VmaListItem<T> ItemType;
    3209 
    3210  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    3211  ~VmaRawList();
    3212  void Clear();
    3213 
    3214  size_t GetCount() const { return m_Count; }
    3215  bool IsEmpty() const { return m_Count == 0; }
    3216 
    3217  ItemType* Front() { return m_pFront; }
    3218  const ItemType* Front() const { return m_pFront; }
    3219  ItemType* Back() { return m_pBack; }
    3220  const ItemType* Back() const { return m_pBack; }
    3221 
    3222  ItemType* PushBack();
    3223  ItemType* PushFront();
    3224  ItemType* PushBack(const T& value);
    3225  ItemType* PushFront(const T& value);
    3226  void PopBack();
    3227  void PopFront();
    3228 
    3229  // Item can be null - it means PushBack.
    3230  ItemType* InsertBefore(ItemType* pItem);
    3231  // Item can be null - it means PushFront.
    3232  ItemType* InsertAfter(ItemType* pItem);
    3233 
    3234  ItemType* InsertBefore(ItemType* pItem, const T& value);
    3235  ItemType* InsertAfter(ItemType* pItem, const T& value);
    3236 
    3237  void Remove(ItemType* pItem);
    3238 
    3239 private:
    3240  const VkAllocationCallbacks* const m_pAllocationCallbacks;
    3241  VmaPoolAllocator<ItemType> m_ItemAllocator;
    3242  ItemType* m_pFront;
    3243  ItemType* m_pBack;
    3244  size_t m_Count;
    3245 };
    3246 
    3247 template<typename T>
    3248 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    3249  m_pAllocationCallbacks(pAllocationCallbacks),
    3250  m_ItemAllocator(pAllocationCallbacks, 128),
    3251  m_pFront(VMA_NULL),
    3252  m_pBack(VMA_NULL),
    3253  m_Count(0)
    3254 {
    3255 }
    3256 
    3257 template<typename T>
    3258 VmaRawList<T>::~VmaRawList()
    3259 {
    3260  // Intentionally not calling Clear, because that would only waste time
    3261  // returning all items to m_ItemAllocator as free.
    3262 }
    3263 
    3264 template<typename T>
    3265 void VmaRawList<T>::Clear()
    3266 {
    3267  if(IsEmpty() == false)
    3268  {
    3269  ItemType* pItem = m_pBack;
    3270  while(pItem != VMA_NULL)
    3271  {
    3272  ItemType* const pPrevItem = pItem->pPrev;
    3273  m_ItemAllocator.Free(pItem);
    3274  pItem = pPrevItem;
    3275  }
    3276  m_pFront = VMA_NULL;
    3277  m_pBack = VMA_NULL;
    3278  m_Count = 0;
    3279  }
    3280 }
    3281 
    3282 template<typename T>
    3283 VmaListItem<T>* VmaRawList<T>::PushBack()
    3284 {
    3285  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    3286  pNewItem->pNext = VMA_NULL;
    3287  if(IsEmpty())
    3288  {
    3289  pNewItem->pPrev = VMA_NULL;
    3290  m_pFront = pNewItem;
    3291  m_pBack = pNewItem;
    3292  m_Count = 1;
    3293  }
    3294  else
    3295  {
    3296  pNewItem->pPrev = m_pBack;
    3297  m_pBack->pNext = pNewItem;
    3298  m_pBack = pNewItem;
    3299  ++m_Count;
    3300  }
    3301  return pNewItem;
    3302 }
    3303 
    3304 template<typename T>
    3305 VmaListItem<T>* VmaRawList<T>::PushFront()
    3306 {
    3307  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    3308  pNewItem->pPrev = VMA_NULL;
    3309  if(IsEmpty())
    3310  {
    3311  pNewItem->pNext = VMA_NULL;
    3312  m_pFront = pNewItem;
    3313  m_pBack = pNewItem;
    3314  m_Count = 1;
    3315  }
    3316  else
    3317  {
    3318  pNewItem->pNext = m_pFront;
    3319  m_pFront->pPrev = pNewItem;
    3320  m_pFront = pNewItem;
    3321  ++m_Count;
    3322  }
    3323  return pNewItem;
    3324 }
    3325 
    3326 template<typename T>
    3327 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
    3328 {
    3329  ItemType* const pNewItem = PushBack();
    3330  pNewItem->Value = value;
    3331  return pNewItem;
    3332 }
    3333 
    3334 template<typename T>
    3335 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
    3336 {
    3337  ItemType* const pNewItem = PushFront();
    3338  pNewItem->Value = value;
    3339  return pNewItem;
    3340 }
    3341 
    3342 template<typename T>
    3343 void VmaRawList<T>::PopBack()
    3344 {
    3345  VMA_HEAVY_ASSERT(m_Count > 0);
    3346  ItemType* const pBackItem = m_pBack;
    3347  ItemType* const pPrevItem = pBackItem->pPrev;
    3348  if(pPrevItem != VMA_NULL)
    3349  {
    3350  pPrevItem->pNext = VMA_NULL;
    3351  }
    3352  m_pBack = pPrevItem;
    3353  m_ItemAllocator.Free(pBackItem);
    3354  --m_Count;
    3355 }
    3356 
    3357 template<typename T>
    3358 void VmaRawList<T>::PopFront()
    3359 {
    3360  VMA_HEAVY_ASSERT(m_Count > 0);
    3361  ItemType* const pFrontItem = m_pFront;
    3362  ItemType* const pNextItem = pFrontItem->pNext;
    3363  if(pNextItem != VMA_NULL)
    3364  {
    3365  pNextItem->pPrev = VMA_NULL;
    3366  }
    3367  m_pFront = pNextItem;
    3368  m_ItemAllocator.Free(pFrontItem);
    3369  --m_Count;
    3370 }
    3371 
    3372 template<typename T>
    3373 void VmaRawList<T>::Remove(ItemType* pItem)
    3374 {
    3375  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    3376  VMA_HEAVY_ASSERT(m_Count > 0);
    3377 
    3378  if(pItem->pPrev != VMA_NULL)
    3379  {
    3380  pItem->pPrev->pNext = pItem->pNext;
    3381  }
    3382  else
    3383  {
    3384  VMA_HEAVY_ASSERT(m_pFront == pItem);
    3385  m_pFront = pItem->pNext;
    3386  }
    3387 
    3388  if(pItem->pNext != VMA_NULL)
    3389  {
    3390  pItem->pNext->pPrev = pItem->pPrev;
    3391  }
    3392  else
    3393  {
    3394  VMA_HEAVY_ASSERT(m_pBack == pItem);
    3395  m_pBack = pItem->pPrev;
    3396  }
    3397 
    3398  m_ItemAllocator.Free(pItem);
    3399  --m_Count;
    3400 }
    3401 
    3402 template<typename T>
    3403 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
    3404 {
    3405  if(pItem != VMA_NULL)
    3406  {
    3407  ItemType* const prevItem = pItem->pPrev;
    3408  ItemType* const newItem = m_ItemAllocator.Alloc();
    3409  newItem->pPrev = prevItem;
    3410  newItem->pNext = pItem;
    3411  pItem->pPrev = newItem;
    3412  if(prevItem != VMA_NULL)
    3413  {
    3414  prevItem->pNext = newItem;
    3415  }
    3416  else
    3417  {
    3418  VMA_HEAVY_ASSERT(m_pFront == pItem);
    3419  m_pFront = newItem;
    3420  }
    3421  ++m_Count;
    3422  return newItem;
    3423  }
    3424  else
    3425  return PushBack();
    3426 }
    3427 
    3428 template<typename T>
    3429 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
    3430 {
    3431  if(pItem != VMA_NULL)
    3432  {
    3433  ItemType* const nextItem = pItem->pNext;
    3434  ItemType* const newItem = m_ItemAllocator.Alloc();
    3435  newItem->pNext = nextItem;
    3436  newItem->pPrev = pItem;
    3437  pItem->pNext = newItem;
    3438  if(nextItem != VMA_NULL)
    3439  {
    3440  nextItem->pPrev = newItem;
    3441  }
    3442  else
    3443  {
    3444  VMA_HEAVY_ASSERT(m_pBack == pItem);
    3445  m_pBack = newItem;
    3446  }
    3447  ++m_Count;
    3448  return newItem;
    3449  }
    3450  else
    3451  return PushFront();
    3452 }
    3453 
    3454 template<typename T>
    3455 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
    3456 {
    3457  ItemType* const newItem = InsertBefore(pItem);
    3458  newItem->Value = value;
    3459  return newItem;
    3460 }
    3461 
    3462 template<typename T>
    3463 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
    3464 {
    3465  ItemType* const newItem = InsertAfter(pItem);
    3466  newItem->Value = value;
    3467  return newItem;
    3468 }
    3469 
    3470 template<typename T, typename AllocatorT>
    3471 class VmaList
    3472 {
    3473  VMA_CLASS_NO_COPY(VmaList)
    3474 public:
    3475  class iterator
    3476  {
    3477  public:
    3478  iterator() :
    3479  m_pList(VMA_NULL),
    3480  m_pItem(VMA_NULL)
    3481  {
    3482  }
    3483 
    3484  T& operator*() const
    3485  {
    3486  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    3487  return m_pItem->Value;
    3488  }
    3489  T* operator->() const
    3490  {
    3491  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    3492  return &m_pItem->Value;
    3493  }
    3494 
    3495  iterator& operator++()
    3496  {
    3497  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    3498  m_pItem = m_pItem->pNext;
    3499  return *this;
    3500  }
    3501  iterator& operator--()
    3502  {
    3503  if(m_pItem != VMA_NULL)
    3504  {
    3505  m_pItem = m_pItem->pPrev;
    3506  }
    3507  else
    3508  {
    3509  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
    3510  m_pItem = m_pList->Back();
    3511  }
    3512  return *this;
    3513  }
    3514 
    3515  iterator operator++(int)
    3516  {
    3517  iterator result = *this;
    3518  ++*this;
    3519  return result;
    3520  }
    3521  iterator operator--(int)
    3522  {
    3523  iterator result = *this;
    3524  --*this;
    3525  return result;
    3526  }
    3527 
    3528  bool operator==(const iterator& rhs) const
    3529  {
    3530  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    3531  return m_pItem == rhs.m_pItem;
    3532  }
    3533  bool operator!=(const iterator& rhs) const
    3534  {
    3535  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    3536  return m_pItem != rhs.m_pItem;
    3537  }
    3538 
    3539  private:
    3540  VmaRawList<T>* m_pList;
    3541  VmaListItem<T>* m_pItem;
    3542 
    3543  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
    3544  m_pList(pList),
    3545  m_pItem(pItem)
    3546  {
    3547  }
    3548 
    3549  friend class VmaList<T, AllocatorT>;
    3550  };
    3551 
    3552  class const_iterator
    3553  {
    3554  public:
    3555  const_iterator() :
    3556  m_pList(VMA_NULL),
    3557  m_pItem(VMA_NULL)
    3558  {
    3559  }
    3560 
    3561  const_iterator(const iterator& src) :
    3562  m_pList(src.m_pList),
    3563  m_pItem(src.m_pItem)
    3564  {
    3565  }
    3566 
    3567  const T& operator*() const
    3568  {
    3569  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    3570  return m_pItem->Value;
    3571  }
    3572  const T* operator->() const
    3573  {
    3574  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    3575  return &m_pItem->Value;
    3576  }
    3577 
    3578  const_iterator& operator++()
    3579  {
    3580  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    3581  m_pItem = m_pItem->pNext;
    3582  return *this;
    3583  }
    3584  const_iterator& operator--()
    3585  {
    3586  if(m_pItem != VMA_NULL)
    3587  {
    3588  m_pItem = m_pItem->pPrev;
    3589  }
    3590  else
    3591  {
    3592  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
    3593  m_pItem = m_pList->Back();
    3594  }
    3595  return *this;
    3596  }
    3597 
    3598  const_iterator operator++(int)
    3599  {
    3600  const_iterator result = *this;
    3601  ++*this;
    3602  return result;
    3603  }
    3604  const_iterator operator--(int)
    3605  {
    3606  const_iterator result = *this;
    3607  --*this;
    3608  return result;
    3609  }
    3610 
    3611  bool operator==(const const_iterator& rhs) const
    3612  {
    3613  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    3614  return m_pItem == rhs.m_pItem;
    3615  }
    3616  bool operator!=(const const_iterator& rhs) const
    3617  {
    3618  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    3619  return m_pItem != rhs.m_pItem;
    3620  }
    3621 
    3622  private:
    3623  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
    3624  m_pList(pList),
    3625  m_pItem(pItem)
    3626  {
    3627  }
    3628 
    3629  const VmaRawList<T>* m_pList;
    3630  const VmaListItem<T>* m_pItem;
    3631 
    3632  friend class VmaList<T, AllocatorT>;
    3633  };
    3634 
    3635  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
    3636 
    3637  bool empty() const { return m_RawList.IsEmpty(); }
    3638  size_t size() const { return m_RawList.GetCount(); }
    3639 
    3640  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    3641  iterator end() { return iterator(&m_RawList, VMA_NULL); }
    3642 
    3643  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    3644  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
    3645 
    3646  void clear() { m_RawList.Clear(); }
    3647  void push_back(const T& value) { m_RawList.PushBack(value); }
    3648  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    3649  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
    3650 
    3651 private:
    3652  VmaRawList<T> m_RawList;
    3653 };
    3654 
    3655 #endif // #if VMA_USE_STL_LIST
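// Illustrative sketch (not part of the library): basic VmaList usage. The list
// draws its nodes from the embedded VmaPoolAllocator, and erase() invalidates
// only the erased iterator, so an element can be removed during iteration by
// advancing first. ListExample is a hypothetical name.
#if 0
static void ListExample(const VkAllocationCallbacks* pCallbacks)
{
    typedef VmaList< uint32_t, VmaStlAllocator<uint32_t> > ListType;
    ListType list(VmaStlAllocator<uint32_t>(pCallbacks));
    list.push_back(1);
    list.push_back(2);
    list.push_back(3);
    for(ListType::iterator it = list.begin(); it != list.end(); )
    {
        if(*it == 2)
        {
            ListType::iterator itToErase = it++;
            list.erase(itToErase);
        }
        else
        {
            ++it;
        }
    }
    // list now contains {1, 3}.
}
#endif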
    3656 
    3658 // class VmaMap
    3659 
    3660 // Unused in this version.
    3661 #if 0
    3662 
    3663 #if VMA_USE_STL_UNORDERED_MAP
    3664 
    3665 #define VmaPair std::pair
    3666 
    3667 #define VMA_MAP_TYPE(KeyT, ValueT) \
    3668  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    3669 
    3670 #else // #if VMA_USE_STL_UNORDERED_MAP
    3671 
    3672 template<typename T1, typename T2>
    3673 struct VmaPair
    3674 {
    3675  T1 first;
    3676  T2 second;
    3677 
    3678  VmaPair() : first(), second() { }
    3679  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
    3680 };
    3681 
    3682 /* Class compatible with a subset of the interface of std::unordered_map.
    3683 KeyT and ValueT must be POD because they will be stored in a VmaVector.
    3684 */
    3685 template<typename KeyT, typename ValueT>
    3686 class VmaMap
    3687 {
    3688 public:
    3689  typedef VmaPair<KeyT, ValueT> PairType;
    3690  typedef PairType* iterator;
    3691 
    3692  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
    3693 
    3694  iterator begin() { return m_Vector.begin(); }
    3695  iterator end() { return m_Vector.end(); }
    3696 
    3697  void insert(const PairType& pair);
    3698  iterator find(const KeyT& key);
    3699  void erase(iterator it);
    3700 
    3701 private:
    3702  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
    3703 };
    3704 
    3705 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    3706 
    3707 template<typename FirstT, typename SecondT>
    3708 struct VmaPairFirstLess
    3709 {
    3710  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    3711  {
    3712  return lhs.first < rhs.first;
    3713  }
    3714  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    3715  {
    3716  return lhs.first < rhsFirst;
    3717  }
    3718 };
    3719 
    3720 template<typename KeyT, typename ValueT>
    3721 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
    3722 {
    3723  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    3724  m_Vector.data(),
    3725  m_Vector.data() + m_Vector.size(),
    3726  pair,
    3727  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    3728  VmaVectorInsert(m_Vector, indexToInsert, pair);
    3729 }
    3730 
    3731 template<typename KeyT, typename ValueT>
    3732 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
    3733 {
    3734  PairType* it = VmaBinaryFindFirstNotLess(
    3735  m_Vector.data(),
    3736  m_Vector.data() + m_Vector.size(),
    3737  key,
    3738  VmaPairFirstLess<KeyT, ValueT>());
    3739  if((it != m_Vector.end()) && (it->first == key))
    3740  {
    3741  return it;
    3742  }
    3743  else
    3744  {
    3745  return m_Vector.end();
    3746  }
    3747 }
    3748 
    3749 template<typename KeyT, typename ValueT>
    3750 void VmaMap<KeyT, ValueT>::erase(iterator it)
    3751 {
    3752  VmaVectorRemove(m_Vector, it - m_Vector.begin());
    3753 }
    3754 
    3755 #endif // #if VMA_USE_STL_UNORDERED_MAP
    3756 
    3757 #endif // #if 0
    3758 
    3760 
    3761 class VmaDeviceMemoryBlock;
    3762 
    3763 struct VmaAllocation_T
    3764 {
    3765  VMA_CLASS_NO_COPY(VmaAllocation_T)
    3766 private:
    3767  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
    3768 
    3769  enum FLAGS
    3770  {
    3771  FLAG_USER_DATA_STRING = 0x01,
    3772  };
    3773 
    3774 public:
    3775  enum ALLOCATION_TYPE
    3776  {
    3777  ALLOCATION_TYPE_NONE,
    3778  ALLOCATION_TYPE_BLOCK,
    3779  ALLOCATION_TYPE_DEDICATED,
    3780  };
    3781 
    3782  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
    3783  m_Alignment(1),
    3784  m_Size(0),
    3785  m_pUserData(VMA_NULL),
    3786  m_LastUseFrameIndex(currentFrameIndex),
    3787  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
    3788  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
    3789  m_MapCount(0),
    3790  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    3791  {
    3792 #if VMA_STATS_STRING_ENABLED
    3793  m_CreationFrameIndex = currentFrameIndex;
    3794  m_BufferImageUsage = 0;
    3795 #endif
    3796  }
    3797 
    3798  ~VmaAllocation_T()
    3799  {
    3800  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
    3801 
    3802  // Check if owned string was freed.
    3803  VMA_ASSERT(m_pUserData == VMA_NULL);
    3804  }
    3805 
    3806  void InitBlockAllocation(
    3807  VmaPool hPool,
    3808  VmaDeviceMemoryBlock* block,
    3809  VkDeviceSize offset,
    3810  VkDeviceSize alignment,
    3811  VkDeviceSize size,
    3812  VmaSuballocationType suballocationType,
    3813  bool mapped,
    3814  bool canBecomeLost)
    3815  {
    3816  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    3817  VMA_ASSERT(block != VMA_NULL);
    3818  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
    3819  m_Alignment = alignment;
    3820  m_Size = size;
    3821  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
    3822  m_SuballocationType = (uint8_t)suballocationType;
    3823  m_BlockAllocation.m_hPool = hPool;
    3824  m_BlockAllocation.m_Block = block;
    3825  m_BlockAllocation.m_Offset = offset;
    3826  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    3827  }
    3828 
    3829  void InitLost()
    3830  {
    3831  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    3832  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
    3833  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
    3834  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
    3835  m_BlockAllocation.m_Block = VMA_NULL;
    3836  m_BlockAllocation.m_Offset = 0;
    3837  m_BlockAllocation.m_CanBecomeLost = true;
    3838  }
    3839 
    3840  void ChangeBlockAllocation(
    3841  VmaAllocator hAllocator,
    3842  VmaDeviceMemoryBlock* block,
    3843  VkDeviceSize offset);
    3844 
    3845  // pMappedData not null means allocation is created with MAPPED flag.
    3846  void InitDedicatedAllocation(
    3847  uint32_t memoryTypeIndex,
    3848  VkDeviceMemory hMemory,
    3849  VmaSuballocationType suballocationType,
    3850  void* pMappedData,
    3851  VkDeviceSize size)
    3852  {
    3853  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    3854  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
    3855  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
    3856  m_Alignment = 0;
    3857  m_Size = size;
    3858  m_SuballocationType = (uint8_t)suballocationType;
    3859  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
    3860  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
    3861  m_DedicatedAllocation.m_hMemory = hMemory;
    3862  m_DedicatedAllocation.m_pMappedData = pMappedData;
    3863  }
    3864 
    3865  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    3866  VkDeviceSize GetAlignment() const { return m_Alignment; }
    3867  VkDeviceSize GetSize() const { return m_Size; }
    3868  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    3869  void* GetUserData() const { return m_pUserData; }
    3870  void SetUserData(VmaAllocator hAllocator, void* pUserData);
    3871  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
    3872 
    3873  VmaDeviceMemoryBlock* GetBlock() const
    3874  {
    3875  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    3876  return m_BlockAllocation.m_Block;
    3877  }
    3878  VkDeviceSize GetOffset() const;
    3879  VkDeviceMemory GetMemory() const;
    3880  uint32_t GetMemoryTypeIndex() const;
    3881  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    3882  void* GetMappedData() const;
    3883  bool CanBecomeLost() const;
    3884  VmaPool GetPool() const;
    3885 
    3886  uint32_t GetLastUseFrameIndex() const
    3887  {
    3888  return m_LastUseFrameIndex.load();
    3889  }
    3890  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    3891  {
    3892  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    3893  }
    3894  /*
    3895  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
    3896  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    3897  - Else, returns false.
    3898 
    3899  If hAllocation is already lost, assert - you should not call it then.
    3900  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    3901  */
    3902  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    3903 
    3904  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    3905  {
    3906  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
    3907  outInfo.blockCount = 1;
    3908  outInfo.allocationCount = 1;
    3909  outInfo.unusedRangeCount = 0;
    3910  outInfo.usedBytes = m_Size;
    3911  outInfo.unusedBytes = 0;
    3912  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
    3913  outInfo.unusedRangeSizeMin = UINT64_MAX;
    3914  outInfo.unusedRangeSizeMax = 0;
    3915  }
    3916 
    3917  void BlockAllocMap();
    3918  void BlockAllocUnmap();
    3919  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    3920  void DedicatedAllocUnmap(VmaAllocator hAllocator);
    3921 
    3922 #if VMA_STATS_STRING_ENABLED
    3923  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    3924  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
    3925 
    3926  void InitBufferImageUsage(uint32_t bufferImageUsage)
    3927  {
    3928  VMA_ASSERT(m_BufferImageUsage == 0);
    3929  m_BufferImageUsage = bufferImageUsage;
    3930  }
    3931 
    3932  void PrintParameters(class VmaJsonWriter& json) const;
    3933 #endif
    3934 
    3935 private:
    3936  VkDeviceSize m_Alignment;
    3937  VkDeviceSize m_Size;
    3938  void* m_pUserData;
    3939  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    3940  uint8_t m_Type; // ALLOCATION_TYPE
    3941  uint8_t m_SuballocationType; // VmaSuballocationType
    3942  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    3943  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    3944  uint8_t m_MapCount;
    3945  uint8_t m_Flags; // enum FLAGS
    3946 
    3947  // Allocation out of VmaDeviceMemoryBlock.
    3948  struct BlockAllocation
    3949  {
    3950  VmaPool m_hPool; // Null if belongs to general memory.
    3951  VmaDeviceMemoryBlock* m_Block;
    3952  VkDeviceSize m_Offset;
    3953  bool m_CanBecomeLost;
    3954  };
    3955 
    3956  // Allocation for an object that has its own private VkDeviceMemory.
    3957  struct DedicatedAllocation
    3958  {
    3959  uint32_t m_MemoryTypeIndex;
    3960  VkDeviceMemory m_hMemory;
    3961  void* m_pMappedData; // Not null means memory is mapped.
    3962  };
    3963 
    3964  union
    3965  {
    3966  // Allocation out of VmaDeviceMemoryBlock.
    3967  BlockAllocation m_BlockAllocation;
    3968  // Allocation for an object that has its own private VkDeviceMemory.
    3969  DedicatedAllocation m_DedicatedAllocation;
    3970  };
    3971 
    3972 #if VMA_STATS_STRING_ENABLED
    3973  uint32_t m_CreationFrameIndex;
    3974  uint32_t m_BufferImageUsage; // 0 if unknown.
    3975 #endif
    3976 
    3977  void FreeUserDataString(VmaAllocator hAllocator);
    3978 };
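// Illustrative sketch (not part of the library): how the single m_MapCount
// byte above packs the persistent-map flag together with the
// vmaMapMemory()/vmaUnmapMemory() reference counter. The constant mirrors
// MAP_COUNT_FLAG_PERSISTENT_MAP; MapCountExample is a hypothetical name.
#if 0
static void MapCountExample()
{
    const uint8_t PERSISTENT = 0x80;
    uint8_t mapCount = PERSISTENT; // Created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    ++mapCount;                    // vmaMapMemory(): reference counter 0 -> 1.
    VMA_ASSERT((mapCount & 0x7F) == 1);
    --mapCount;                    // vmaUnmapMemory(): reference counter 1 -> 0.
    // The persistent bit is still set, so the allocation remains mapped and
    // GetMappedData() stays valid even though the user's map count is zero.
    VMA_ASSERT((mapCount & PERSISTENT) != 0 && (mapCount & 0x7F) == 0);
}
#endif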
    3979 
    3980 /*
    3981 Represents a region of VmaDeviceMemoryBlock that is either assigned and
    3982 returned as an allocated memory block, or is free.
    3983 */
    3984 struct VmaSuballocation
    3985 {
    3986  VkDeviceSize offset;
    3987  VkDeviceSize size;
    3988  VmaAllocation hAllocation;
    3989  VmaSuballocationType type;
    3990 };
    3991 
    3992 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
    3993 
    3994 // Cost of making one additional allocation lost, expressed as an equivalent number of bytes.
    3995 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    3996 
    3997 /*
    3998 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
    3999 
    4000 If canMakeOtherLost was false:
    4001 - item points to a FREE suballocation.
    4002 - itemsToMakeLostCount is 0.
    4003 
    4004 If canMakeOtherLost was true:
    4005 - item points to the first of a sequence of suballocations, which are either FREE
    4006  or point to VmaAllocations that can become lost.
    4007 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost
    4008  for the requested allocation to succeed.
    4009 */
    4010 struct VmaAllocationRequest
    4011 {
    4012  VkDeviceSize offset;
    4013  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    4014  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    4015  VmaSuballocationList::iterator item;
    4016  size_t itemsToMakeLostCount;
    4017 
    4018  VkDeviceSize CalcCost() const
    4019  {
    4020  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    4021  }
    4022 };
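// Worked example: for a request that would overwrite 256 KiB of existing
// allocations spread over 2 allocations that can become lost, the cost is
// 262144 + 2 * 1048576 = 2359296 "equivalent bytes", so a placement in purely
// FREE space (sumItemSize == 0, itemsToMakeLostCount == 0, cost 0) always wins
// when one exists. AllocationRequestCostExample is a hypothetical name.
#if 0
static VkDeviceSize AllocationRequestCostExample()
{
    VmaAllocationRequest request;
    request.offset = 0;
    request.sumFreeSize = 1024 * 1024;
    request.sumItemSize = 256 * 1024; // Bytes of allocations to make lost.
    request.itemsToMakeLostCount = 2; // Number of allocations to make lost.
    return request.CalcCost();        // 2359296.
}
#endif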
    4023 
    4024 /*
    4025 Data structure used for bookkeeping of allocations and unused ranges of memory
    4026 in a single VkDeviceMemory block.
    4027 */
    4028 class VmaBlockMetadata
    4029 {
    4030  VMA_CLASS_NO_COPY(VmaBlockMetadata)
    4031 public:
    4032  VmaBlockMetadata(VmaAllocator hAllocator);
    4033  ~VmaBlockMetadata();
    4034  void Init(VkDeviceSize size);
    4035 
    4036  // Validates all data structures inside this object. If not valid, returns false.
    4037  bool Validate() const;
    4038  VkDeviceSize GetSize() const { return m_Size; }
    4039  size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    4040  VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    4041  VkDeviceSize GetUnusedRangeSizeMax() const;
    4042  // Returns true if this block is empty - contains only a single free suballocation.
    4043  bool IsEmpty() const;
    4044 
    4045  void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    4046  void AddPoolStats(VmaPoolStats& inoutStats) const;
    4047 
    4048 #if VMA_STATS_STRING_ENABLED
    4049  void PrintDetailedMap(class VmaJsonWriter& json) const;
    4050 #endif
    4051 
    4052  // Creates a trivial request for the case when the block is empty.
    4053  void CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest);
    4054 
    4055  // Tries to find a place for suballocation with given parameters inside this block.
    4056  // If succeeded, fills pAllocationRequest and returns true.
    4057  // If failed, returns false.
    4058  bool CreateAllocationRequest(
    4059  uint32_t currentFrameIndex,
    4060  uint32_t frameInUseCount,
    4061  VkDeviceSize bufferImageGranularity,
    4062  VkDeviceSize allocSize,
    4063  VkDeviceSize allocAlignment,
    4064  VmaSuballocationType allocType,
    4065  bool canMakeOtherLost,
    4066  VmaAllocationRequest* pAllocationRequest);
    4067 
    4068  bool MakeRequestedAllocationsLost(
    4069  uint32_t currentFrameIndex,
    4070  uint32_t frameInUseCount,
    4071  VmaAllocationRequest* pAllocationRequest);
    4072 
    4073  uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    4074 
    4075  // Makes actual allocation based on request. Request must already be checked and valid.
    4076  void Alloc(
    4077  const VmaAllocationRequest& request,
    4078  VmaSuballocationType type,
    4079  VkDeviceSize allocSize,
    4080  VmaAllocation hAllocation);
    4081 
    4082  // Frees suballocation assigned to given memory region.
    4083  void Free(const VmaAllocation allocation);
    4084  void FreeAtOffset(VkDeviceSize offset);
    4085 
    4086 private:
    4087  VkDeviceSize m_Size;
    4088  uint32_t m_FreeCount;
    4089  VkDeviceSize m_SumFreeSize;
    4090  VmaSuballocationList m_Suballocations;
    4091  // Suballocations that are free and have size greater than certain threshold.
    4092  // Sorted by size, ascending.
    4093  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
    4094 
    4095  bool ValidateFreeSuballocationList() const;
    4096 
    4097  // Checks if a requested suballocation with given parameters can be placed at given suballocItem.
    4098  // If yes, fills pOffset and returns true. If no, returns false.
    4099  bool CheckAllocation(
    4100  uint32_t currentFrameIndex,
    4101  uint32_t frameInUseCount,
    4102  VkDeviceSize bufferImageGranularity,
    4103  VkDeviceSize allocSize,
    4104  VkDeviceSize allocAlignment,
    4105  VmaSuballocationType allocType,
    4106  VmaSuballocationList::const_iterator suballocItem,
    4107  bool canMakeOtherLost,
    4108  VkDeviceSize* pOffset,
    4109  size_t* itemsToMakeLostCount,
    4110  VkDeviceSize* pSumFreeSize,
    4111  VkDeviceSize* pSumItemSize) const;
    4112  // Given a free suballocation, merges it with the following one, which must also be free.
    4113  void MergeFreeWithNext(VmaSuballocationList::iterator item);
    4114  // Releases given suballocation, making it free.
    4115  // Merges it with adjacent free suballocations if applicable.
    4116  // Returns iterator to the new free suballocation at this place.
    4117  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    4118  // Given a free suballocation, inserts it into the sorted list
    4119  // m_FreeSuballocationsBySize if it is suitable.
    4120  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    4121  // Given a free suballocation, removes it from the sorted list
    4122  // m_FreeSuballocationsBySize if it is suitable.
    4123  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
    4124 };
    4125 
    4126 /*
    4127 Represents a single block of device memory (`VkDeviceMemory`) with all the
    4128 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
    4129 
    4130 Thread-safety: This class must be externally synchronized.
    4131 */
    4132 class VmaDeviceMemoryBlock
    4133 {
    4134  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
    4135 public:
    4136  VmaBlockMetadata m_Metadata;
    4137 
    4138  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
    4139 
    4140  ~VmaDeviceMemoryBlock()
    4141  {
    4142  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
    4143  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    4144  }
    4145 
    4146  // Always call after construction.
    4147  void Init(
    4148  uint32_t newMemoryTypeIndex,
    4149  VkDeviceMemory newMemory,
    4150  VkDeviceSize newSize);
    4151  // Always call before destruction.
    4152  void Destroy(VmaAllocator allocator);
    4153 
    4154  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    4155  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    4156  void* GetMappedData() const { return m_pMappedData; }
    4157 
    4158  // Validates all data structures inside this object. If not valid, returns false.
    4159  bool Validate() const;
    4160 
    4161  // ppData can be null.
    4162  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    4163  void Unmap(VmaAllocator hAllocator, uint32_t count);
    4164 
    4165  VkResult BindBufferMemory(
    4166  const VmaAllocator hAllocator,
    4167  const VmaAllocation hAllocation,
    4168  VkBuffer hBuffer);
    4169  VkResult BindImageMemory(
    4170  const VmaAllocator hAllocator,
    4171  const VmaAllocation hAllocation,
    4172  VkImage hImage);
    4173 
    4174 private:
    4175  uint32_t m_MemoryTypeIndex;
    4176  VkDeviceMemory m_hMemory;
    4177 
    4178  // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    4179  // Also protects m_MapCount, m_pMappedData.
    4180  VMA_MUTEX m_Mutex;
    4181  uint32_t m_MapCount;
    4182  void* m_pMappedData;
    4183 };
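// Illustrative sketch (not part of the library): Map()/Unmap() on a block are
// reference-counted under m_Mutex, so nested mappings are expected to be cheap -
// only the outermost pair should reach vkMapMemory/vkUnmapMemory.
// BlockMapExample is a hypothetical name.
#if 0
static VkResult BlockMapExample(VmaAllocator hAllocator, VmaDeviceMemoryBlock& block)
{
    void* pData1 = VMA_NULL;
    VkResult res = block.Map(hAllocator, 1, &pData1); // First map of the block.
    if(res != VK_SUCCESS)
    {
        return res;
    }
    void* pData2 = VMA_NULL;
    res = block.Map(hAllocator, 1, &pData2); // Nested map: counter bumped.
    if(res == VK_SUCCESS)
    {
        block.Unmap(hAllocator, 1);
    }
    block.Unmap(hAllocator, 1); // Counter returns to 0; memory is unmapped.
    return res;
}
#endif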
    4184 
    4185 struct VmaPointerLess
    4186 {
    4187  bool operator()(const void* lhs, const void* rhs) const
    4188  {
    4189  return lhs < rhs;
    4190  }
    4191 };
    4192 
    4193 class VmaDefragmentator;
    4194 
    4195 /*
    4196 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
    4197 Vulkan memory type.
    4198 
    4199 Synchronized internally with a mutex.
    4200 */
    4201 struct VmaBlockVector
    4202 {
    4203  VMA_CLASS_NO_COPY(VmaBlockVector)
    4204 public:
    4205  VmaBlockVector(
    4206  VmaAllocator hAllocator,
    4207  uint32_t memoryTypeIndex,
    4208  VkDeviceSize preferredBlockSize,
    4209  size_t minBlockCount,
    4210  size_t maxBlockCount,
    4211  VkDeviceSize bufferImageGranularity,
    4212  uint32_t frameInUseCount,
    4213  bool isCustomPool);
    4214  ~VmaBlockVector();
    4215 
    4216  VkResult CreateMinBlocks();
    4217 
    4218  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    4219  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    4220  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    4221  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    4222 
    4223  void GetPoolStats(VmaPoolStats* pStats);
    4224 
    4225  bool IsEmpty() const { return m_Blocks.empty(); }
    4226 
    4227  VkResult Allocate(
    4228  VmaPool hCurrentPool,
    4229  uint32_t currentFrameIndex,
    4230  const VkMemoryRequirements& vkMemReq,
    4231  const VmaAllocationCreateInfo& createInfo,
    4232  VmaSuballocationType suballocType,
    4233  VmaAllocation* pAllocation);
    4234 
    4235  void Free(
    4236  VmaAllocation hAllocation);
    4237 
    4238  // Adds statistics of this BlockVector to pStats.
    4239  void AddStats(VmaStats* pStats);
    4240 
    4241 #if VMA_STATS_STRING_ENABLED
    4242  void PrintDetailedMap(class VmaJsonWriter& json);
    4243 #endif
    4244 
    4245  void MakePoolAllocationsLost(
    4246  uint32_t currentFrameIndex,
    4247  size_t* pLostAllocationCount);
    4248 
    4249  VmaDefragmentator* EnsureDefragmentator(
    4250  VmaAllocator hAllocator,
    4251  uint32_t currentFrameIndex);
    4252 
    4253  VkResult Defragment(
    4254  VmaDefragmentationStats* pDefragmentationStats,
    4255  VkDeviceSize& maxBytesToMove,
    4256  uint32_t& maxAllocationsToMove);
    4257 
    4258  void DestroyDefragmentator();
    4259 
    4260 private:
    4261  friend class VmaDefragmentator;
    4262 
    4263  const VmaAllocator m_hAllocator;
    4264  const uint32_t m_MemoryTypeIndex;
    4265  const VkDeviceSize m_PreferredBlockSize;
    4266  const size_t m_MinBlockCount;
    4267  const size_t m_MaxBlockCount;
    4268  const VkDeviceSize m_BufferImageGranularity;
    4269  const uint32_t m_FrameInUseCount;
    4270  const bool m_IsCustomPool;
    4271  VMA_MUTEX m_Mutex;
    4272  // Incrementally sorted by sumFreeSize, ascending.
    4273  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    4274  /* There can be at most one block that is completely empty - a
    4275  hysteresis to avoid the pessimistic case of alternating creation and
    4276  destruction of a VkDeviceMemory. */
    4277  bool m_HasEmptyBlock;
    4278  VmaDefragmentator* m_pDefragmentator;
    4279 
    4280  VkDeviceSize CalcMaxBlockSize() const;
    4281 
    4282  // Finds and removes given block from vector.
    4283  void Remove(VmaDeviceMemoryBlock* pBlock);
    4284 
    4285  // Performs a single step in sorting m_Blocks. They may not be fully sorted
    4286  // after this call.
    4287  void IncrementallySortBlocks();
    4288 
    4289  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
    4290 };
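// Illustrative sketch (assumed shape, not the actual implementation):
// IncrementallySortBlocks() can be realized as one bubble-sort step that swaps
// the first adjacent pair ordered wrongly by sumFreeSize, so m_Blocks converges
// toward ascending order over repeated calls without a full sort per allocation.
#if 0
static void IncrementalSortStepSketch(
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> >& blocks)
{
    for(size_t i = 1; i < blocks.size(); ++i)
    {
        if(blocks[i - 1]->m_Metadata.GetSumFreeSize() > blocks[i]->m_Metadata.GetSumFreeSize())
        {
            VmaDeviceMemoryBlock* const tmp = blocks[i - 1];
            blocks[i - 1] = blocks[i];
            blocks[i] = tmp;
            return; // At most one swap per call.
        }
    }
}
#endif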
    4291 
    4292 struct VmaPool_T
    4293 {
    4294  VMA_CLASS_NO_COPY(VmaPool_T)
    4295 public:
    4296  VmaBlockVector m_BlockVector;
    4297 
    4298  // Takes ownership.
    4299  VmaPool_T(
    4300  VmaAllocator hAllocator,
    4301  const VmaPoolCreateInfo& createInfo);
    4302  ~VmaPool_T();
    4303 
    4304  VmaBlockVector& GetBlockVector() { return m_BlockVector; }
    4305 
    4306 #if VMA_STATS_STRING_ENABLED
    4307  //void PrintDetailedMap(class VmaStringBuilder& sb);
    4308 #endif
    4309 };
    4310 
    4311 class VmaDefragmentator
    4312 {
    4313  VMA_CLASS_NO_COPY(VmaDefragmentator)
    4314 private:
    4315  const VmaAllocator m_hAllocator;
    4316  VmaBlockVector* const m_pBlockVector;
    4317  uint32_t m_CurrentFrameIndex;
    4318  VkDeviceSize m_BytesMoved;
    4319  uint32_t m_AllocationsMoved;
    4320 
    4321  struct AllocationInfo
    4322  {
    4323  VmaAllocation m_hAllocation;
    4324  VkBool32* m_pChanged;
    4325 
    4326  AllocationInfo() :
    4327  m_hAllocation(VK_NULL_HANDLE),
    4328  m_pChanged(VMA_NULL)
    4329  {
    4330  }
    4331  };
    4332 
    4333  struct AllocationInfoSizeGreater
    4334  {
    4335  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
    4336  {
    4337  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
    4338  }
    4339  };
    4340 
    4341  // Used between AddAllocation and Defragment.
    4342  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
    4343 
    4344  struct BlockInfo
    4345  {
    4346  VmaDeviceMemoryBlock* m_pBlock;
    4347  bool m_HasNonMovableAllocations;
    4348  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
    4349 
    4350  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
    4351  m_pBlock(VMA_NULL),
    4352  m_HasNonMovableAllocations(true),
    4353  m_Allocations(pAllocationCallbacks),
    4354  m_pMappedDataForDefragmentation(VMA_NULL)
    4355  {
    4356  }
    4357 
    4358  void CalcHasNonMovableAllocations()
    4359  {
    4360  const size_t blockAllocCount = m_pBlock->m_Metadata.GetAllocationCount();
    4361  const size_t defragmentAllocCount = m_Allocations.size();
    4362  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
    4363  }
    4364 
    4365  void SortAllocationsBySizeDescecnding()
    4366  {
    4367  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
    4368  }
    4369 
    4370  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
    4371  void Unmap(VmaAllocator hAllocator);
    4372 
    4373  private:
    4374  // Not null if mapped for defragmentation only, not originally mapped.
    4375  void* m_pMappedDataForDefragmentation;
    4376  };
    4377 
    4378  struct BlockPointerLess
    4379  {
    4380  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
    4381  {
    4382  return pLhsBlockInfo->m_pBlock < pRhsBlock;
    4383  }
    4384  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
    4385  {
    4386  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
    4387  }
    4388  };
    4389 
    4390  // 1. Blocks with some non-movable allocations go first.
    4391  // 2. Blocks with smaller sumFreeSize go first.
    4392  struct BlockInfoCompareMoveDestination
    4393  {
    4394  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
    4395  {
    4396  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
    4397  {
    4398  return true;
    4399  }
    4400  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
    4401  {
    4402  return false;
    4403  }
    4404  if(pLhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize())
    4405  {
    4406  return true;
    4407  }
    4408  return false;
    4409  }
    4410  };
    4411 
    4412  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    4413  BlockInfoVector m_Blocks;
    4414 
    4415  VkResult DefragmentRound(
    4416  VkDeviceSize maxBytesToMove,
    4417  uint32_t maxAllocationsToMove);
    4418 
    4419  static bool MoveMakesSense(
    4420  size_t dstBlockIndex, VkDeviceSize dstOffset,
    4421  size_t srcBlockIndex, VkDeviceSize srcOffset);
    4422 
    4423 public:
    4424  VmaDefragmentator(
    4425  VmaAllocator hAllocator,
    4426  VmaBlockVector* pBlockVector,
    4427  uint32_t currentFrameIndex);
    4428 
    4429  ~VmaDefragmentator();
    4430 
    4431  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    4432  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
    4433 
    4434  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    4435 
    4436  VkResult Defragment(
    4437  VkDeviceSize maxBytesToMove,
    4438  uint32_t maxAllocationsToMove);
    4439 };
    4440 
    4441 // Main allocator object.
    4442 struct VmaAllocator_T
    4443 {
    4444  VMA_CLASS_NO_COPY(VmaAllocator_T)
    4445 public:
    4446  bool m_UseMutex;
    4447  bool m_UseKhrDedicatedAllocation;
    4448  VkDevice m_hDevice;
    4449  bool m_AllocationCallbacksSpecified;
    4450  VkAllocationCallbacks m_AllocationCallbacks;
    4451  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
    4452 
    4453  // Number of bytes still available out of the heap size limit, or VK_WHOLE_SIZE if there is no limit for that heap.
    4454  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    4455  VMA_MUTEX m_HeapSizeLimitMutex;
    4456 
    4457  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    4458  VkPhysicalDeviceMemoryProperties m_MemProps;
    4459 
    4460  // Default pools.
    4461  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
    4462 
    4463  // Each vector is sorted by memory (handle value).
    4464  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    4465  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    4466  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
    4467 
    4468  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    4469  ~VmaAllocator_T();
    4470 
    4471  const VkAllocationCallbacks* GetAllocationCallbacks() const
    4472  {
    4473  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    4474  }
    4475  const VmaVulkanFunctions& GetVulkanFunctions() const
    4476  {
    4477  return m_VulkanFunctions;
    4478  }
    4479 
    4480  VkDeviceSize GetBufferImageGranularity() const
    4481  {
    4482  return VMA_MAX(
    4483  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
    4484  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    4485  }
    4486 
    4487  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    4488  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
    4489 
    4490  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    4491  {
    4492  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
    4493  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    4494  }
    4495 
    4496  bool IsIntegratedGpu() const
    4497  {
    4498  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    4499  }
    4500 
    4501  void GetBufferMemoryRequirements(
    4502  VkBuffer hBuffer,
    4503  VkMemoryRequirements& memReq,
    4504  bool& requiresDedicatedAllocation,
    4505  bool& prefersDedicatedAllocation) const;
    4506  void GetImageMemoryRequirements(
    4507  VkImage hImage,
    4508  VkMemoryRequirements& memReq,
    4509  bool& requiresDedicatedAllocation,
    4510  bool& prefersDedicatedAllocation) const;
    4511 
    4512  // Main allocation function.
    4513  VkResult AllocateMemory(
    4514  const VkMemoryRequirements& vkMemReq,
    4515  bool requiresDedicatedAllocation,
    4516  bool prefersDedicatedAllocation,
    4517  VkBuffer dedicatedBuffer,
    4518  VkImage dedicatedImage,
    4519  const VmaAllocationCreateInfo& createInfo,
    4520  VmaSuballocationType suballocType,
    4521  VmaAllocation* pAllocation);
    4522 
    4523  // Main deallocation function.
    4524  void FreeMemory(const VmaAllocation allocation);
    4525 
    4526  void CalculateStats(VmaStats* pStats);
    4527 
    4528 #if VMA_STATS_STRING_ENABLED
    4529  void PrintDetailedMap(class VmaJsonWriter& json);
    4530 #endif
    4531 
    4532  VkResult Defragment(
    4533  VmaAllocation* pAllocations,
    4534  size_t allocationCount,
    4535  VkBool32* pAllocationsChanged,
    4536  const VmaDefragmentationInfo* pDefragmentationInfo,
    4537  VmaDefragmentationStats* pDefragmentationStats);
    4538 
    4539  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    4540  bool TouchAllocation(VmaAllocation hAllocation);
    4541 
    4542  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    4543  void DestroyPool(VmaPool pool);
    4544  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
    4545 
    4546  void SetCurrentFrameIndex(uint32_t frameIndex);
    4547 
    4548  void MakePoolAllocationsLost(
    4549  VmaPool hPool,
    4550  size_t* pLostAllocationCount);
    4551 
    4552  void CreateLostAllocation(VmaAllocation* pAllocation);
    4553 
    4554  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    4555  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
    4556 
    4557  VkResult Map(VmaAllocation hAllocation, void** ppData);
    4558  void Unmap(VmaAllocation hAllocation);
    4559 
    4560  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
    4561  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
    4562 
    4563 private:
    4564  VkDeviceSize m_PreferredLargeHeapBlockSize;
    4565 
    4566  VkPhysicalDevice m_PhysicalDevice;
    4567  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
    4568 
    4569  VMA_MUTEX m_PoolsMutex;
    4570  // Protected by m_PoolsMutex. Sorted by pointer value.
    4571  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    4572 
    4573  VmaVulkanFunctions m_VulkanFunctions;
    4574 
    4575  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
    4576 
    4577  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
    4578 
    4579  VkResult AllocateMemoryOfType(
    4580  const VkMemoryRequirements& vkMemReq,
    4581  bool dedicatedAllocation,
    4582  VkBuffer dedicatedBuffer,
    4583  VkImage dedicatedImage,
    4584  const VmaAllocationCreateInfo& createInfo,
    4585  uint32_t memTypeIndex,
    4586  VmaSuballocationType suballocType,
    4587  VmaAllocation* pAllocation);
    4588 
    4589  // Allocates and registers new VkDeviceMemory specifically for a single allocation.
    4590  VkResult AllocateDedicatedMemory(
    4591  VkDeviceSize size,
    4592  VmaSuballocationType suballocType,
    4593  uint32_t memTypeIndex,
    4594  bool map,
    4595  bool isUserDataString,
    4596  void* pUserData,
    4597  VkBuffer dedicatedBuffer,
    4598  VkImage dedicatedImage,
    4599  VmaAllocation* pAllocation);
    4600 
     4601  // Frees the given allocation, which must have been created as Dedicated Memory.
    4602  void FreeDedicatedMemory(VmaAllocation allocation);
    4603 };
    4604 
    4606 // Memory allocation #2 after VmaAllocator_T definition
    4607 
    4608 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
    4609 {
    4610  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
    4611 }
    4612 
    4613 static void VmaFree(VmaAllocator hAllocator, void* ptr)
    4614 {
    4615  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
    4616 }
    4617 
    4618 template<typename T>
    4619 static T* VmaAllocate(VmaAllocator hAllocator)
    4620 {
    4621  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
    4622 }
    4623 
    4624 template<typename T>
    4625 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
    4626 {
    4627  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
    4628 }
    4629 
    4630 template<typename T>
    4631 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    4632 {
    4633  if(ptr != VMA_NULL)
    4634  {
    4635  ptr->~T();
    4636  VmaFree(hAllocator, ptr);
    4637  }
    4638 }
    4639 
    4640 template<typename T>
    4641 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    4642 {
    4643  if(ptr != VMA_NULL)
    4644  {
    4645  for(size_t i = count; i--; )
    4646  ptr[i].~T();
    4647  VmaFree(hAllocator, ptr);
    4648  }
    4649 }
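// Editor's sketch (not part of the library): VmaAllocate() returns raw,
// correctly aligned storage, so object lifetime is managed explicitly with
// placement new and vma_delete. MyType is hypothetical:
//
//   MyType* p = new(VmaAllocate<MyType>(hAllocator)) MyType();
//   ...
//   vma_delete(hAllocator, p); // runs ~MyType() and frees the storage
//
// vma_delete_array destroys elements in reverse index order, mirroring how
// built-in C++ arrays are destroyed. The vma_new/vma_new_array macros used
// elsewhere in the library wrap exactly this pattern.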
    4650 
    4652 // VmaStringBuilder
    4653 
    4654 #if VMA_STATS_STRING_ENABLED
    4655 
    4656 class VmaStringBuilder
    4657 {
    4658 public:
    4659  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    4660  size_t GetLength() const { return m_Data.size(); }
    4661  const char* GetData() const { return m_Data.data(); }
    4662 
    4663  void Add(char ch) { m_Data.push_back(ch); }
    4664  void Add(const char* pStr);
    4665  void AddNewLine() { Add('\n'); }
    4666  void AddNumber(uint32_t num);
    4667  void AddNumber(uint64_t num);
    4668  void AddPointer(const void* ptr);
    4669 
    4670 private:
    4671  VmaVector< char, VmaStlAllocator<char> > m_Data;
    4672 };
    4673 
    4674 void VmaStringBuilder::Add(const char* pStr)
    4675 {
    4676  const size_t strLen = strlen(pStr);
    4677  if(strLen > 0)
    4678  {
    4679  const size_t oldCount = m_Data.size();
    4680  m_Data.resize(oldCount + strLen);
    4681  memcpy(m_Data.data() + oldCount, pStr, strLen);
    4682  }
    4683 }
    4684 
    4685 void VmaStringBuilder::AddNumber(uint32_t num)
    4686 {
    4687  char buf[11];
    4688  VmaUint32ToStr(buf, sizeof(buf), num);
    4689  Add(buf);
    4690 }
    4691 
    4692 void VmaStringBuilder::AddNumber(uint64_t num)
    4693 {
    4694  char buf[21];
    4695  VmaUint64ToStr(buf, sizeof(buf), num);
    4696  Add(buf);
    4697 }
    4698 
    4699 void VmaStringBuilder::AddPointer(const void* ptr)
    4700 {
    4701  char buf[21];
    4702  VmaPtrToStr(buf, sizeof(buf), ptr);
    4703  Add(buf);
    4704 }
    4705 
    4706 #endif // #if VMA_STATS_STRING_ENABLED
    4707 
    4709 // VmaJsonWriter
    4710 
    4711 #if VMA_STATS_STRING_ENABLED
    4712 
    4713 class VmaJsonWriter
    4714 {
    4715  VMA_CLASS_NO_COPY(VmaJsonWriter)
    4716 public:
    4717  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    4718  ~VmaJsonWriter();
    4719 
    4720  void BeginObject(bool singleLine = false);
    4721  void EndObject();
    4722 
    4723  void BeginArray(bool singleLine = false);
    4724  void EndArray();
    4725 
    4726  void WriteString(const char* pStr);
    4727  void BeginString(const char* pStr = VMA_NULL);
    4728  void ContinueString(const char* pStr);
    4729  void ContinueString(uint32_t n);
    4730  void ContinueString(uint64_t n);
    4731  void ContinueString_Pointer(const void* ptr);
    4732  void EndString(const char* pStr = VMA_NULL);
    4733 
    4734  void WriteNumber(uint32_t n);
    4735  void WriteNumber(uint64_t n);
    4736  void WriteBool(bool b);
    4737  void WriteNull();
    4738 
    4739 private:
    4740  static const char* const INDENT;
    4741 
    4742  enum COLLECTION_TYPE
    4743  {
    4744  COLLECTION_TYPE_OBJECT,
    4745  COLLECTION_TYPE_ARRAY,
    4746  };
    4747  struct StackItem
    4748  {
    4749  COLLECTION_TYPE type;
    4750  uint32_t valueCount;
    4751  bool singleLineMode;
    4752  };
    4753 
    4754  VmaStringBuilder& m_SB;
    4755  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    4756  bool m_InsideString;
    4757 
    4758  void BeginValue(bool isString);
    4759  void WriteIndent(bool oneLess = false);
    4760 };
    4761 
    4762 const char* const VmaJsonWriter::INDENT = " ";
    4763 
    4764 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    4765  m_SB(sb),
    4766  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    4767  m_InsideString(false)
    4768 {
    4769 }
    4770 
    4771 VmaJsonWriter::~VmaJsonWriter()
    4772 {
    4773  VMA_ASSERT(!m_InsideString);
    4774  VMA_ASSERT(m_Stack.empty());
    4775 }
    4776 
    4777 void VmaJsonWriter::BeginObject(bool singleLine)
    4778 {
    4779  VMA_ASSERT(!m_InsideString);
    4780 
    4781  BeginValue(false);
    4782  m_SB.Add('{');
    4783 
    4784  StackItem item;
    4785  item.type = COLLECTION_TYPE_OBJECT;
    4786  item.valueCount = 0;
    4787  item.singleLineMode = singleLine;
    4788  m_Stack.push_back(item);
    4789 }
    4790 
    4791 void VmaJsonWriter::EndObject()
    4792 {
    4793  VMA_ASSERT(!m_InsideString);
    4794 
    4795  WriteIndent(true);
    4796  m_SB.Add('}');
    4797 
    4798  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    4799  m_Stack.pop_back();
    4800 }
    4801 
    4802 void VmaJsonWriter::BeginArray(bool singleLine)
    4803 {
    4804  VMA_ASSERT(!m_InsideString);
    4805 
    4806  BeginValue(false);
    4807  m_SB.Add('[');
    4808 
    4809  StackItem item;
    4810  item.type = COLLECTION_TYPE_ARRAY;
    4811  item.valueCount = 0;
    4812  item.singleLineMode = singleLine;
    4813  m_Stack.push_back(item);
    4814 }
    4815 
    4816 void VmaJsonWriter::EndArray()
    4817 {
    4818  VMA_ASSERT(!m_InsideString);
    4819 
    4820  WriteIndent(true);
    4821  m_SB.Add(']');
    4822 
    4823  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    4824  m_Stack.pop_back();
    4825 }
    4826 
    4827 void VmaJsonWriter::WriteString(const char* pStr)
    4828 {
    4829  BeginString(pStr);
    4830  EndString();
    4831 }
    4832 
    4833 void VmaJsonWriter::BeginString(const char* pStr)
    4834 {
    4835  VMA_ASSERT(!m_InsideString);
    4836 
    4837  BeginValue(true);
    4838  m_SB.Add('"');
    4839  m_InsideString = true;
    4840  if(pStr != VMA_NULL && pStr[0] != '\0')
    4841  {
    4842  ContinueString(pStr);
    4843  }
    4844 }
    4845 
    4846 void VmaJsonWriter::ContinueString(const char* pStr)
    4847 {
    4848  VMA_ASSERT(m_InsideString);
    4849 
    4850  const size_t strLen = strlen(pStr);
    4851  for(size_t i = 0; i < strLen; ++i)
    4852  {
    4853  char ch = pStr[i];
     4854  if(ch == '\\') // escape backslash
    4855  {
    4856  m_SB.Add("\\\\");
    4857  }
    4858  else if(ch == '"')
    4859  {
    4860  m_SB.Add("\\\"");
    4861  }
    4862  else if(ch >= 32)
    4863  {
    4864  m_SB.Add(ch);
    4865  }
    4866  else switch(ch)
    4867  {
    4868  case '\b':
    4869  m_SB.Add("\\b");
    4870  break;
    4871  case '\f':
    4872  m_SB.Add("\\f");
    4873  break;
    4874  case '\n':
    4875  m_SB.Add("\\n");
    4876  break;
    4877  case '\r':
    4878  m_SB.Add("\\r");
    4879  break;
    4880  case '\t':
    4881  m_SB.Add("\\t");
    4882  break;
    4883  default:
    4884  VMA_ASSERT(0 && "Character not currently supported.");
    4885  break;
    4886  }
    4887  }
    4888 }
    4889 
    4890 void VmaJsonWriter::ContinueString(uint32_t n)
    4891 {
    4892  VMA_ASSERT(m_InsideString);
    4893  m_SB.AddNumber(n);
    4894 }
    4895 
    4896 void VmaJsonWriter::ContinueString(uint64_t n)
    4897 {
    4898  VMA_ASSERT(m_InsideString);
    4899  m_SB.AddNumber(n);
    4900 }
    4901 
    4902 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
    4903 {
    4904  VMA_ASSERT(m_InsideString);
    4905  m_SB.AddPointer(ptr);
    4906 }
    4907 
    4908 void VmaJsonWriter::EndString(const char* pStr)
    4909 {
    4910  VMA_ASSERT(m_InsideString);
    4911  if(pStr != VMA_NULL && pStr[0] != '\0')
    4912  {
    4913  ContinueString(pStr);
    4914  }
    4915  m_SB.Add('"');
    4916  m_InsideString = false;
    4917 }
    4918 
    4919 void VmaJsonWriter::WriteNumber(uint32_t n)
    4920 {
    4921  VMA_ASSERT(!m_InsideString);
    4922  BeginValue(false);
    4923  m_SB.AddNumber(n);
    4924 }
    4925 
    4926 void VmaJsonWriter::WriteNumber(uint64_t n)
    4927 {
    4928  VMA_ASSERT(!m_InsideString);
    4929  BeginValue(false);
    4930  m_SB.AddNumber(n);
    4931 }
    4932 
    4933 void VmaJsonWriter::WriteBool(bool b)
    4934 {
    4935  VMA_ASSERT(!m_InsideString);
    4936  BeginValue(false);
    4937  m_SB.Add(b ? "true" : "false");
    4938 }
    4939 
    4940 void VmaJsonWriter::WriteNull()
    4941 {
    4942  VMA_ASSERT(!m_InsideString);
    4943  BeginValue(false);
    4944  m_SB.Add("null");
    4945 }
    4946 
    4947 void VmaJsonWriter::BeginValue(bool isString)
    4948 {
    4949  if(!m_Stack.empty())
    4950  {
    4951  StackItem& currItem = m_Stack.back();
    4952  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    4953  currItem.valueCount % 2 == 0)
    4954  {
    4955  VMA_ASSERT(isString);
    4956  }
    4957 
    4958  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    4959  currItem.valueCount % 2 != 0)
    4960  {
    4961  m_SB.Add(": ");
    4962  }
    4963  else if(currItem.valueCount > 0)
    4964  {
    4965  m_SB.Add(", ");
    4966  WriteIndent();
    4967  }
    4968  else
    4969  {
    4970  WriteIndent();
    4971  }
    4972  ++currItem.valueCount;
    4973  }
    4974 }
    4975 
    4976 void VmaJsonWriter::WriteIndent(bool oneLess)
    4977 {
    4978  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    4979  {
    4980  m_SB.AddNewLine();
    4981 
    4982  size_t count = m_Stack.size();
    4983  if(count > 0 && oneLess)
    4984  {
    4985  --count;
    4986  }
    4987  for(size_t i = 0; i < count; ++i)
    4988  {
    4989  m_SB.Add(INDENT);
    4990  }
    4991  }
    4992 }
    4993 
    4994 #endif // #if VMA_STATS_STRING_ENABLED
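// Usage sketch for VmaJsonWriter (editor's addition, variables hypothetical):
// inside an object, BeginValue() asserts that every even-positioned value is
// a string, so keys must always be written before values:
//
//   VmaStringBuilder sb(hAllocator);
//   VmaJsonWriter json(hAllocator->GetAllocationCallbacks(), sb);
//   json.BeginObject();
//   json.WriteString("Count"); // key - must be a string
//   json.WriteNumber(42u);     // value
//   json.EndObject();
//
// This yields { "Count": 42 } spread over indented lines, since BeginObject
// defaulted to multi-line mode.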
    4995 
    4997 
    4998 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
    4999 {
    5000  if(IsUserDataString())
    5001  {
    5002  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
    5003 
    5004  FreeUserDataString(hAllocator);
    5005 
    5006  if(pUserData != VMA_NULL)
    5007  {
    5008  const char* const newStrSrc = (char*)pUserData;
    5009  const size_t newStrLen = strlen(newStrSrc);
    5010  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
    5011  memcpy(newStrDst, newStrSrc, newStrLen + 1);
    5012  m_pUserData = newStrDst;
    5013  }
    5014  }
    5015  else
    5016  {
    5017  m_pUserData = pUserData;
    5018  }
    5019 }
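// Editor's note: in the string mode above the user data is deep-copied, so
// the caller may safely pass a temporary buffer. Hypothetical example:
//
//   char name[32];
//   snprintf(name, sizeof(name), "texture_%u", index);
//   hAllocation->SetUserData(hAllocator, name); // copied; name may go away
//
// In the non-string mode the pointer itself is stored, so it must stay valid
// for as long as the caller intends to read it back.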
    5020 
    5021 void VmaAllocation_T::ChangeBlockAllocation(
    5022  VmaAllocator hAllocator,
    5023  VmaDeviceMemoryBlock* block,
    5024  VkDeviceSize offset)
    5025 {
    5026  VMA_ASSERT(block != VMA_NULL);
    5027  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    5028 
    5029  // Move mapping reference counter from old block to new block.
    5030  if(block != m_BlockAllocation.m_Block)
    5031  {
    5032  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
    5033  if(IsPersistentMap())
    5034  ++mapRefCount;
    5035  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
    5036  block->Map(hAllocator, mapRefCount, VMA_NULL);
    5037  }
    5038 
    5039  m_BlockAllocation.m_Block = block;
    5040  m_BlockAllocation.m_Offset = offset;
    5041 }
    5042 
    5043 VkDeviceSize VmaAllocation_T::GetOffset() const
    5044 {
    5045  switch(m_Type)
    5046  {
    5047  case ALLOCATION_TYPE_BLOCK:
    5048  return m_BlockAllocation.m_Offset;
    5049  case ALLOCATION_TYPE_DEDICATED:
    5050  return 0;
    5051  default:
    5052  VMA_ASSERT(0);
    5053  return 0;
    5054  }
    5055 }
    5056 
    5057 VkDeviceMemory VmaAllocation_T::GetMemory() const
    5058 {
    5059  switch(m_Type)
    5060  {
    5061  case ALLOCATION_TYPE_BLOCK:
    5062  return m_BlockAllocation.m_Block->GetDeviceMemory();
    5063  case ALLOCATION_TYPE_DEDICATED:
    5064  return m_DedicatedAllocation.m_hMemory;
    5065  default:
    5066  VMA_ASSERT(0);
    5067  return VK_NULL_HANDLE;
    5068  }
    5069 }
    5070 
    5071 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    5072 {
    5073  switch(m_Type)
    5074  {
    5075  case ALLOCATION_TYPE_BLOCK:
    5076  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
    5077  case ALLOCATION_TYPE_DEDICATED:
    5078  return m_DedicatedAllocation.m_MemoryTypeIndex;
    5079  default:
    5080  VMA_ASSERT(0);
    5081  return UINT32_MAX;
    5082  }
    5083 }
    5084 
    5085 void* VmaAllocation_T::GetMappedData() const
    5086 {
    5087  switch(m_Type)
    5088  {
    5089  case ALLOCATION_TYPE_BLOCK:
    5090  if(m_MapCount != 0)
    5091  {
    5092  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
    5093  VMA_ASSERT(pBlockData != VMA_NULL);
    5094  return (char*)pBlockData + m_BlockAllocation.m_Offset;
    5095  }
    5096  else
    5097  {
    5098  return VMA_NULL;
    5099  }
    5100  break;
    5101  case ALLOCATION_TYPE_DEDICATED:
    5102  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
    5103  return m_DedicatedAllocation.m_pMappedData;
    5104  default:
    5105  VMA_ASSERT(0);
    5106  return VMA_NULL;
    5107  }
    5108 }
    5109 
    5110 bool VmaAllocation_T::CanBecomeLost() const
    5111 {
    5112  switch(m_Type)
    5113  {
    5114  case ALLOCATION_TYPE_BLOCK:
    5115  return m_BlockAllocation.m_CanBecomeLost;
    5116  case ALLOCATION_TYPE_DEDICATED:
    5117  return false;
    5118  default:
    5119  VMA_ASSERT(0);
    5120  return false;
    5121  }
    5122 }
    5123 
    5124 VmaPool VmaAllocation_T::GetPool() const
    5125 {
    5126  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    5127  return m_BlockAllocation.m_hPool;
    5128 }
    5129 
    5130 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    5131 {
    5132  VMA_ASSERT(CanBecomeLost());
    5133 
    5134  /*
    5135  Warning: This is a carefully designed algorithm.
    5136  Do not modify unless you really know what you're doing :)
    5137  */
    5138  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    5139  for(;;)
    5140  {
    5141  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    5142  {
    5143  VMA_ASSERT(0);
    5144  return false;
    5145  }
    5146  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
    5147  {
    5148  return false;
    5149  }
    5150  else // Last use time earlier than current time.
    5151  {
    5152  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
    5153  {
     5154  // Setting the hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
     5155  // Calling code just needs to unregister this allocation from the owning VmaDeviceMemoryBlock.
    5156  return true;
    5157  }
    5158  }
    5159  }
    5160 }
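// Worked example (editor's addition): with frameInUseCount = 2 and
// currentFrameIndex = 10, an allocation last used in frame 7 satisfies
// 7 + 2 < 10 and can be made lost, while one last used in frame 8 cannot
// (8 + 2 >= 10). The compare-exchange loop retries only when another thread
// has updated LastUseFrameIndex between the load and the CAS.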
    5161 
    5162 #if VMA_STATS_STRING_ENABLED
    5163 
    5164 // Correspond to values of enum VmaSuballocationType.
    5165 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    5166  "FREE",
    5167  "UNKNOWN",
    5168  "BUFFER",
    5169  "IMAGE_UNKNOWN",
    5170  "IMAGE_LINEAR",
    5171  "IMAGE_OPTIMAL",
    5172 };
    5173 
    5174 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
    5175 {
    5176  json.WriteString("Type");
    5177  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
    5178 
    5179  json.WriteString("Size");
    5180  json.WriteNumber(m_Size);
    5181 
    5182  if(m_pUserData != VMA_NULL)
    5183  {
    5184  json.WriteString("UserData");
    5185  if(IsUserDataString())
    5186  {
    5187  json.WriteString((const char*)m_pUserData);
    5188  }
    5189  else
    5190  {
    5191  json.BeginString();
    5192  json.ContinueString_Pointer(m_pUserData);
    5193  json.EndString();
    5194  }
    5195  }
    5196 
    5197  json.WriteString("CreationFrameIndex");
    5198  json.WriteNumber(m_CreationFrameIndex);
    5199 
    5200  json.WriteString("LastUseFrameIndex");
    5201  json.WriteNumber(GetLastUseFrameIndex());
    5202 
    5203  if(m_BufferImageUsage != 0)
    5204  {
    5205  json.WriteString("Usage");
    5206  json.WriteNumber(m_BufferImageUsage);
    5207  }
    5208 }
    5209 
    5210 #endif
    5211 
    5212 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
    5213 {
    5214  VMA_ASSERT(IsUserDataString());
    5215  if(m_pUserData != VMA_NULL)
    5216  {
    5217  char* const oldStr = (char*)m_pUserData;
    5218  const size_t oldStrLen = strlen(oldStr);
    5219  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
    5220  m_pUserData = VMA_NULL;
    5221  }
    5222 }
    5223 
    5224 void VmaAllocation_T::BlockAllocMap()
    5225 {
    5226  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    5227 
    5228  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    5229  {
    5230  ++m_MapCount;
    5231  }
    5232  else
    5233  {
    5234  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    5235  }
    5236 }
    5237 
    5238 void VmaAllocation_T::BlockAllocUnmap()
    5239 {
    5240  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    5241 
    5242  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    5243  {
    5244  --m_MapCount;
    5245  }
    5246  else
    5247  {
    5248  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    5249  }
    5250 }
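// Editor's note, inferred from the masks used above: m_MapCount keeps the
// reference count in its low bits and reserves MAP_COUNT_FLAG_PERSISTENT_MAP
// as a flag bit, which caps simultaneous mappings at 0x7F = 127:
//
//   uint32_t refCount   = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
//   bool     persistent = (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0;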
    5251 
    5252 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
    5253 {
    5254  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    5255 
    5256  if(m_MapCount != 0)
    5257  {
    5258  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    5259  {
    5260  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
    5261  *ppData = m_DedicatedAllocation.m_pMappedData;
    5262  ++m_MapCount;
    5263  return VK_SUCCESS;
    5264  }
    5265  else
    5266  {
    5267  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
    5268  return VK_ERROR_MEMORY_MAP_FAILED;
    5269  }
    5270  }
    5271  else
    5272  {
    5273  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    5274  hAllocator->m_hDevice,
    5275  m_DedicatedAllocation.m_hMemory,
    5276  0, // offset
    5277  VK_WHOLE_SIZE,
    5278  0, // flags
    5279  ppData);
    5280  if(result == VK_SUCCESS)
    5281  {
    5282  m_DedicatedAllocation.m_pMappedData = *ppData;
    5283  m_MapCount = 1;
    5284  }
    5285  return result;
    5286  }
    5287 }
    5288 
    5289 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
    5290 {
    5291  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    5292 
    5293  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    5294  {
    5295  --m_MapCount;
    5296  if(m_MapCount == 0)
    5297  {
    5298  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
    5299  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
    5300  hAllocator->m_hDevice,
    5301  m_DedicatedAllocation.m_hMemory);
    5302  }
    5303  }
    5304  else
    5305  {
    5306  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    5307  }
    5308 }
    5309 
    5310 #if VMA_STATS_STRING_ENABLED
    5311 
    5312 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
    5313 {
    5314  json.BeginObject();
    5315 
    5316  json.WriteString("Blocks");
    5317  json.WriteNumber(stat.blockCount);
    5318 
    5319  json.WriteString("Allocations");
    5320  json.WriteNumber(stat.allocationCount);
    5321 
    5322  json.WriteString("UnusedRanges");
    5323  json.WriteNumber(stat.unusedRangeCount);
    5324 
    5325  json.WriteString("UsedBytes");
    5326  json.WriteNumber(stat.usedBytes);
    5327 
    5328  json.WriteString("UnusedBytes");
    5329  json.WriteNumber(stat.unusedBytes);
    5330 
    5331  if(stat.allocationCount > 1)
    5332  {
    5333  json.WriteString("AllocationSize");
    5334  json.BeginObject(true);
    5335  json.WriteString("Min");
    5336  json.WriteNumber(stat.allocationSizeMin);
    5337  json.WriteString("Avg");
    5338  json.WriteNumber(stat.allocationSizeAvg);
    5339  json.WriteString("Max");
    5340  json.WriteNumber(stat.allocationSizeMax);
    5341  json.EndObject();
    5342  }
    5343 
    5344  if(stat.unusedRangeCount > 1)
    5345  {
    5346  json.WriteString("UnusedRangeSize");
    5347  json.BeginObject(true);
    5348  json.WriteString("Min");
    5349  json.WriteNumber(stat.unusedRangeSizeMin);
    5350  json.WriteString("Avg");
    5351  json.WriteNumber(stat.unusedRangeSizeAvg);
    5352  json.WriteString("Max");
    5353  json.WriteNumber(stat.unusedRangeSizeMax);
    5354  json.EndObject();
    5355  }
    5356 
    5357  json.EndObject();
    5358 }
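// Sample of the resulting JSON shape (editor's sketch, values hypothetical):
//
//   {
//    "Blocks": 1, "Allocations": 2, "UnusedRanges": 1,
//    "UsedBytes": 1024, "UnusedBytes": 512,
//    "AllocationSize": { "Min": 256, "Avg": 512, "Max": 768 }
//   }
//
// The Min/Avg/Max sub-objects appear only when allocationCount or
// unusedRangeCount is greater than 1, as the checks above show.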
    5359 
    5360 #endif // #if VMA_STATS_STRING_ENABLED
    5361 
    5362 struct VmaSuballocationItemSizeLess
    5363 {
    5364  bool operator()(
    5365  const VmaSuballocationList::iterator lhs,
    5366  const VmaSuballocationList::iterator rhs) const
    5367  {
    5368  return lhs->size < rhs->size;
    5369  }
    5370  bool operator()(
    5371  const VmaSuballocationList::iterator lhs,
    5372  VkDeviceSize rhsSize) const
    5373  {
    5374  return lhs->size < rhsSize;
    5375  }
    5376 };
    5377 
    5379 // class VmaBlockMetadata
    5380 
    5381 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    5382  m_Size(0),
    5383  m_FreeCount(0),
    5384  m_SumFreeSize(0),
    5385  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    5386  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
    5387 {
    5388 }
    5389 
    5390 VmaBlockMetadata::~VmaBlockMetadata()
    5391 {
    5392 }
    5393 
    5394 void VmaBlockMetadata::Init(VkDeviceSize size)
    5395 {
    5396  m_Size = size;
    5397  m_FreeCount = 1;
    5398  m_SumFreeSize = size;
    5399 
    5400  VmaSuballocation suballoc = {};
    5401  suballoc.offset = 0;
    5402  suballoc.size = size;
    5403  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    5404  suballoc.hAllocation = VK_NULL_HANDLE;
    5405 
    5406  m_Suballocations.push_back(suballoc);
    5407  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    5408  --suballocItem;
    5409  m_FreeSuballocationsBySize.push_back(suballocItem);
    5410 }
    5411 
    5412 bool VmaBlockMetadata::Validate() const
    5413 {
    5414  if(m_Suballocations.empty())
    5415  {
    5416  return false;
    5417  }
    5418 
     5419  // Expected offset of new suballocation as calculated from previous ones.
    5420  VkDeviceSize calculatedOffset = 0;
    5421  // Expected number of free suballocations as calculated from traversing their list.
    5422  uint32_t calculatedFreeCount = 0;
    5423  // Expected sum size of free suballocations as calculated from traversing their list.
    5424  VkDeviceSize calculatedSumFreeSize = 0;
    5425  // Expected number of free suballocations that should be registered in
    5426  // m_FreeSuballocationsBySize calculated from traversing their list.
    5427  size_t freeSuballocationsToRegister = 0;
     5428  // True if previous visited suballocation was free.
    5429  bool prevFree = false;
    5430 
    5431  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    5432  suballocItem != m_Suballocations.cend();
    5433  ++suballocItem)
    5434  {
    5435  const VmaSuballocation& subAlloc = *suballocItem;
    5436 
    5437  // Actual offset of this suballocation doesn't match expected one.
    5438  if(subAlloc.offset != calculatedOffset)
    5439  {
    5440  return false;
    5441  }
    5442 
    5443  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
    5444  // Two adjacent free suballocations are invalid. They should be merged.
    5445  if(prevFree && currFree)
    5446  {
    5447  return false;
    5448  }
    5449 
    5450  if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
    5451  {
    5452  return false;
    5453  }
    5454 
    5455  if(currFree)
    5456  {
    5457  calculatedSumFreeSize += subAlloc.size;
    5458  ++calculatedFreeCount;
    5459  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    5460  {
    5461  ++freeSuballocationsToRegister;
    5462  }
    5463  }
    5464  else
    5465  {
    5466  if(subAlloc.hAllocation->GetOffset() != subAlloc.offset)
    5467  {
    5468  return false;
    5469  }
    5470  if(subAlloc.hAllocation->GetSize() != subAlloc.size)
    5471  {
    5472  return false;
    5473  }
    5474  }
    5475 
    5476  calculatedOffset += subAlloc.size;
    5477  prevFree = currFree;
    5478  }
    5479 
    5480  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    5481  // match expected one.
    5482  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
    5483  {
    5484  return false;
    5485  }
    5486 
    5487  VkDeviceSize lastSize = 0;
    5488  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    5489  {
    5490  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
    5491 
    5492  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
    5493  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
    5494  {
    5495  return false;
    5496  }
    5497  // They must be sorted by size ascending.
    5498  if(suballocItem->size < lastSize)
    5499  {
    5500  return false;
    5501  }
    5502 
    5503  lastSize = suballocItem->size;
    5504  }
    5505 
     5506  // Check if totals match calculated values.
    5507  if(!ValidateFreeSuballocationList() ||
    5508  (calculatedOffset != m_Size) ||
    5509  (calculatedSumFreeSize != m_SumFreeSize) ||
    5510  (calculatedFreeCount != m_FreeCount))
    5511  {
    5512  return false;
    5513  }
    5514 
    5515  return true;
    5516 }
    5517 
    5518 VkDeviceSize VmaBlockMetadata::GetUnusedRangeSizeMax() const
    5519 {
    5520  if(!m_FreeSuballocationsBySize.empty())
    5521  {
    5522  return m_FreeSuballocationsBySize.back()->size;
    5523  }
    5524  else
    5525  {
    5526  return 0;
    5527  }
    5528 }
    5529 
    5530 bool VmaBlockMetadata::IsEmpty() const
    5531 {
    5532  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
    5533 }
    5534 
    5535 void VmaBlockMetadata::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    5536 {
    5537  outInfo.blockCount = 1;
    5538 
    5539  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    5540  outInfo.allocationCount = rangeCount - m_FreeCount;
    5541  outInfo.unusedRangeCount = m_FreeCount;
    5542 
    5543  outInfo.unusedBytes = m_SumFreeSize;
    5544  outInfo.usedBytes = m_Size - outInfo.unusedBytes;
    5545 
    5546  outInfo.allocationSizeMin = UINT64_MAX;
    5547  outInfo.allocationSizeMax = 0;
    5548  outInfo.unusedRangeSizeMin = UINT64_MAX;
    5549  outInfo.unusedRangeSizeMax = 0;
    5550 
    5551  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    5552  suballocItem != m_Suballocations.cend();
    5553  ++suballocItem)
    5554  {
    5555  const VmaSuballocation& suballoc = *suballocItem;
    5556  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    5557  {
    5558  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    5559  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    5560  }
    5561  else
    5562  {
    5563  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
    5564  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
    5565  }
    5566  }
    5567 }
    5568 
    5569 void VmaBlockMetadata::AddPoolStats(VmaPoolStats& inoutStats) const
    5570 {
    5571  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    5572 
    5573  inoutStats.size += m_Size;
    5574  inoutStats.unusedSize += m_SumFreeSize;
    5575  inoutStats.allocationCount += rangeCount - m_FreeCount;
    5576  inoutStats.unusedRangeCount += m_FreeCount;
    5577  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    5578 }
    5579 
    5580 #if VMA_STATS_STRING_ENABLED
    5581 
    5582 void VmaBlockMetadata::PrintDetailedMap(class VmaJsonWriter& json) const
    5583 {
    5584  json.BeginObject();
    5585 
    5586  json.WriteString("TotalBytes");
    5587  json.WriteNumber(m_Size);
    5588 
    5589  json.WriteString("UnusedBytes");
    5590  json.WriteNumber(m_SumFreeSize);
    5591 
    5592  json.WriteString("Allocations");
    5593  json.WriteNumber((uint64_t)m_Suballocations.size() - m_FreeCount);
    5594 
    5595  json.WriteString("UnusedRanges");
    5596  json.WriteNumber(m_FreeCount);
    5597 
    5598  json.WriteString("Suballocations");
    5599  json.BeginArray();
    5600  size_t i = 0;
    5601  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    5602  suballocItem != m_Suballocations.cend();
    5603  ++suballocItem, ++i)
    5604  {
    5605  json.BeginObject(true);
    5606 
    5607  json.WriteString("Offset");
    5608  json.WriteNumber(suballocItem->offset);
    5609 
    5610  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    5611  {
    5612  json.WriteString("Type");
    5613  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
    5614 
    5615  json.WriteString("Size");
    5616  json.WriteNumber(suballocItem->size);
    5617  }
    5618  else
    5619  {
    5620  suballocItem->hAllocation->PrintParameters(json);
    5621  }
    5622 
    5623  json.EndObject();
    5624  }
    5625  json.EndArray();
    5626 
    5627  json.EndObject();
    5628 }
    5629 
    5630 #endif // #if VMA_STATS_STRING_ENABLED
    5631 
     5632 /*
     5633 How many suitable free suballocations to analyze before choosing the best one.
     5634 - Set to 1 to use First-Fit algorithm - the first suitable free suballocation
     5635  will be chosen.
     5636 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
     5637  suballocations will be analyzed and the best one will be chosen.
     5638 - Any other value is also acceptable.
     5639 */
    5640 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
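// Illustration (editor's addition): with VMA_BEST_FIT, the lookup below is a
// lower-bound binary search over the size-sorted vector. For registered free
// sizes [16, 64, 256, 1024] and allocSize = 100 it lands on 256 - the
// smallest free range large enough - and only falls through to larger ones
// if alignment or granularity checks reject that candidate.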
    5641 
    5642 void VmaBlockMetadata::CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest)
    5643 {
    5644  VMA_ASSERT(IsEmpty());
    5645  pAllocationRequest->offset = 0;
    5646  pAllocationRequest->sumFreeSize = m_SumFreeSize;
    5647  pAllocationRequest->sumItemSize = 0;
    5648  pAllocationRequest->item = m_Suballocations.begin();
    5649  pAllocationRequest->itemsToMakeLostCount = 0;
    5650 }
    5651 
    5652 bool VmaBlockMetadata::CreateAllocationRequest(
    5653  uint32_t currentFrameIndex,
    5654  uint32_t frameInUseCount,
    5655  VkDeviceSize bufferImageGranularity,
    5656  VkDeviceSize allocSize,
    5657  VkDeviceSize allocAlignment,
    5658  VmaSuballocationType allocType,
    5659  bool canMakeOtherLost,
    5660  VmaAllocationRequest* pAllocationRequest)
    5661 {
    5662  VMA_ASSERT(allocSize > 0);
    5663  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    5664  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    5665  VMA_HEAVY_ASSERT(Validate());
    5666 
     5667  // There is not enough total free space in this block to fulfill the request: Early return.
    5668  if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
    5669  {
    5670  return false;
    5671  }
    5672 
    5673  // New algorithm, efficiently searching freeSuballocationsBySize.
    5674  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    5675  if(freeSuballocCount > 0)
    5676  {
    5677  if(VMA_BEST_FIT)
    5678  {
    5679  // Find first free suballocation with size not less than allocSize.
    5680  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    5681  m_FreeSuballocationsBySize.data(),
    5682  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    5683  allocSize,
    5684  VmaSuballocationItemSizeLess());
    5685  size_t index = it - m_FreeSuballocationsBySize.data();
    5686  for(; index < freeSuballocCount; ++index)
    5687  {
    5688  if(CheckAllocation(
    5689  currentFrameIndex,
    5690  frameInUseCount,
    5691  bufferImageGranularity,
    5692  allocSize,
    5693  allocAlignment,
    5694  allocType,
    5695  m_FreeSuballocationsBySize[index],
    5696  false, // canMakeOtherLost
    5697  &pAllocationRequest->offset,
    5698  &pAllocationRequest->itemsToMakeLostCount,
    5699  &pAllocationRequest->sumFreeSize,
    5700  &pAllocationRequest->sumItemSize))
    5701  {
    5702  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    5703  return true;
    5704  }
    5705  }
    5706  }
    5707  else
    5708  {
     5709  // Search starting from the biggest suballocations.
    5710  for(size_t index = freeSuballocCount; index--; )
    5711  {
    5712  if(CheckAllocation(
    5713  currentFrameIndex,
    5714  frameInUseCount,
    5715  bufferImageGranularity,
    5716  allocSize,
    5717  allocAlignment,
    5718  allocType,
    5719  m_FreeSuballocationsBySize[index],
    5720  false, // canMakeOtherLost
    5721  &pAllocationRequest->offset,
    5722  &pAllocationRequest->itemsToMakeLostCount,
    5723  &pAllocationRequest->sumFreeSize,
    5724  &pAllocationRequest->sumItemSize))
    5725  {
    5726  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    5727  return true;
    5728  }
    5729  }
    5730  }
    5731  }
    5732 
    5733  if(canMakeOtherLost)
    5734  {
    5735  // Brute-force algorithm. TODO: Come up with something better.
    5736 
    5737  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
    5738  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
    5739 
    5740  VmaAllocationRequest tmpAllocRequest = {};
    5741  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    5742  suballocIt != m_Suballocations.end();
    5743  ++suballocIt)
    5744  {
    5745  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    5746  suballocIt->hAllocation->CanBecomeLost())
    5747  {
    5748  if(CheckAllocation(
    5749  currentFrameIndex,
    5750  frameInUseCount,
    5751  bufferImageGranularity,
    5752  allocSize,
    5753  allocAlignment,
    5754  allocType,
    5755  suballocIt,
    5756  canMakeOtherLost,
    5757  &tmpAllocRequest.offset,
    5758  &tmpAllocRequest.itemsToMakeLostCount,
    5759  &tmpAllocRequest.sumFreeSize,
    5760  &tmpAllocRequest.sumItemSize))
    5761  {
    5762  tmpAllocRequest.item = suballocIt;
    5763 
    5764  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
    5765  {
    5766  *pAllocationRequest = tmpAllocRequest;
    5767  }
    5768  }
    5769  }
    5770  }
    5771 
    5772  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
    5773  {
    5774  return true;
    5775  }
    5776  }
    5777 
    5778  return false;
    5779 }
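// Editor's note: the brute-force path tries every suballocation as a starting
// point and keeps the candidate with the smallest CalcCost() - intuitively,
// the request that sacrifices the least still-used memory to become
// satisfiable. Initializing sumItemSize to VK_WHOLE_SIZE ensures any
// successful candidate beats the initial state, and the final check detects
// whether one was found at all.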
    5780 
    5781 bool VmaBlockMetadata::MakeRequestedAllocationsLost(
    5782  uint32_t currentFrameIndex,
    5783  uint32_t frameInUseCount,
    5784  VmaAllocationRequest* pAllocationRequest)
    5785 {
    5786  while(pAllocationRequest->itemsToMakeLostCount > 0)
    5787  {
    5788  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
    5789  {
    5790  ++pAllocationRequest->item;
    5791  }
    5792  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    5793  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
    5794  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
    5795  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    5796  {
    5797  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
    5798  --pAllocationRequest->itemsToMakeLostCount;
    5799  }
    5800  else
    5801  {
    5802  return false;
    5803  }
    5804  }
    5805 
    5806  VMA_HEAVY_ASSERT(Validate());
    5807  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    5808  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
    5809 
    5810  return true;
    5811 }
    5812 
    5813 uint32_t VmaBlockMetadata::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    5814 {
    5815  uint32_t lostAllocationCount = 0;
    5816  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    5817  it != m_Suballocations.end();
    5818  ++it)
    5819  {
    5820  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
    5821  it->hAllocation->CanBecomeLost() &&
    5822  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    5823  {
    5824  it = FreeSuballocation(it);
    5825  ++lostAllocationCount;
    5826  }
    5827  }
    5828  return lostAllocationCount;
    5829 }
    5830 
    5831 void VmaBlockMetadata::Alloc(
    5832  const VmaAllocationRequest& request,
    5833  VmaSuballocationType type,
    5834  VkDeviceSize allocSize,
    5835  VmaAllocation hAllocation)
    5836 {
    5837  VMA_ASSERT(request.item != m_Suballocations.end());
    5838  VmaSuballocation& suballoc = *request.item;
    5839  // Given suballocation is a free block.
    5840  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    5841  // Given offset is inside this suballocation.
    5842  VMA_ASSERT(request.offset >= suballoc.offset);
    5843  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    5844  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    5845  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
    5846 
    5847  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    5848  // it to become used.
    5849  UnregisterFreeSuballocation(request.item);
    5850 
    5851  suballoc.offset = request.offset;
    5852  suballoc.size = allocSize;
    5853  suballoc.type = type;
    5854  suballoc.hAllocation = hAllocation;
    5855 
    5856  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    5857  if(paddingEnd)
    5858  {
    5859  VmaSuballocation paddingSuballoc = {};
    5860  paddingSuballoc.offset = request.offset + allocSize;
    5861  paddingSuballoc.size = paddingEnd;
    5862  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    5863  VmaSuballocationList::iterator next = request.item;
    5864  ++next;
    5865  const VmaSuballocationList::iterator paddingEndItem =
    5866  m_Suballocations.insert(next, paddingSuballoc);
    5867  RegisterFreeSuballocation(paddingEndItem);
    5868  }
    5869 
    5870  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    5871  if(paddingBegin)
    5872  {
    5873  VmaSuballocation paddingSuballoc = {};
    5874  paddingSuballoc.offset = request.offset - paddingBegin;
    5875  paddingSuballoc.size = paddingBegin;
    5876  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    5877  const VmaSuballocationList::iterator paddingBeginItem =
    5878  m_Suballocations.insert(request.item, paddingSuballoc);
    5879  RegisterFreeSuballocation(paddingBeginItem);
    5880  }
    5881 
    5882  // Update totals.
    5883  m_FreeCount = m_FreeCount - 1;
    5884  if(paddingBegin > 0)
    5885  {
    5886  ++m_FreeCount;
    5887  }
    5888  if(paddingEnd > 0)
    5889  {
    5890  ++m_FreeCount;
    5891  }
    5892  m_SumFreeSize -= allocSize;
    5893 }
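// Worked example (editor's addition): placing a 100-byte allocation at
// offset 16 inside a free suballocation [0, 1000) gives paddingBegin = 16
// and paddingEnd = 1000 - 16 - 100 = 884. The original item becomes the used
// range [16, 116), and new free suballocations [0, 16) and [116, 1000) are
// inserted around it, so m_FreeCount goes from 1 to 2 and m_SumFreeSize
// drops by exactly allocSize.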
    5894 
    5895 void VmaBlockMetadata::Free(const VmaAllocation allocation)
    5896 {
    5897  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    5898  suballocItem != m_Suballocations.end();
    5899  ++suballocItem)
    5900  {
    5901  VmaSuballocation& suballoc = *suballocItem;
    5902  if(suballoc.hAllocation == allocation)
    5903  {
    5904  FreeSuballocation(suballocItem);
    5905  VMA_HEAVY_ASSERT(Validate());
    5906  return;
    5907  }
    5908  }
    5909  VMA_ASSERT(0 && "Not found!");
    5910 }
    5911 
    5912 void VmaBlockMetadata::FreeAtOffset(VkDeviceSize offset)
    5913 {
    5914  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    5915  suballocItem != m_Suballocations.end();
    5916  ++suballocItem)
    5917  {
    5918  VmaSuballocation& suballoc = *suballocItem;
    5919  if(suballoc.offset == offset)
    5920  {
    5921  FreeSuballocation(suballocItem);
    5922  return;
    5923  }
    5924  }
    5925  VMA_ASSERT(0 && "Not found!");
    5926 }
    5927 
    5928 bool VmaBlockMetadata::ValidateFreeSuballocationList() const
    5929 {
    5930  VkDeviceSize lastSize = 0;
    5931  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    5932  {
    5933  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    5934 
    5935  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    5936  {
    5937  VMA_ASSERT(0);
    5938  return false;
    5939  }
    5940  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    5941  {
    5942  VMA_ASSERT(0);
    5943  return false;
    5944  }
    5945  if(it->size < lastSize)
    5946  {
    5947  VMA_ASSERT(0);
    5948  return false;
    5949  }
    5950 
    5951  lastSize = it->size;
    5952  }
    5953  return true;
    5954 }
    5955 
    5956 bool VmaBlockMetadata::CheckAllocation(
    5957  uint32_t currentFrameIndex,
    5958  uint32_t frameInUseCount,
    5959  VkDeviceSize bufferImageGranularity,
    5960  VkDeviceSize allocSize,
    5961  VkDeviceSize allocAlignment,
    5962  VmaSuballocationType allocType,
    5963  VmaSuballocationList::const_iterator suballocItem,
    5964  bool canMakeOtherLost,
    5965  VkDeviceSize* pOffset,
    5966  size_t* itemsToMakeLostCount,
    5967  VkDeviceSize* pSumFreeSize,
    5968  VkDeviceSize* pSumItemSize) const
    5969 {
    5970  VMA_ASSERT(allocSize > 0);
    5971  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    5972  VMA_ASSERT(suballocItem != m_Suballocations.cend());
    5973  VMA_ASSERT(pOffset != VMA_NULL);
    5974 
    5975  *itemsToMakeLostCount = 0;
    5976  *pSumFreeSize = 0;
    5977  *pSumItemSize = 0;
    5978 
    5979  if(canMakeOtherLost)
    5980  {
    5981  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    5982  {
    5983  *pSumFreeSize = suballocItem->size;
    5984  }
    5985  else
    5986  {
    5987  if(suballocItem->hAllocation->CanBecomeLost() &&
    5988  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    5989  {
    5990  ++*itemsToMakeLostCount;
    5991  *pSumItemSize = suballocItem->size;
    5992  }
    5993  else
    5994  {
    5995  return false;
    5996  }
    5997  }
    5998 
    5999  // Remaining size is too small for this request: Early return.
    6000  if(m_Size - suballocItem->offset < allocSize)
    6001  {
    6002  return false;
    6003  }
    6004 
    6005  // Start from offset equal to beginning of this suballocation.
    6006  *pOffset = suballocItem->offset;
    6007 
    6008  // Apply VMA_DEBUG_MARGIN at the beginning.
    6009  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
    6010  {
    6011  *pOffset += VMA_DEBUG_MARGIN;
    6012  }
    6013 
    6014  // Apply alignment.
    6015  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
    6016  *pOffset = VmaAlignUp(*pOffset, alignment);
    6017 
    6018  // Check previous suballocations for BufferImageGranularity conflicts.
    6019  // Make bigger alignment if necessary.
    6020  if(bufferImageGranularity > 1)
    6021  {
    6022  bool bufferImageGranularityConflict = false;
    6023  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
    6024  while(prevSuballocItem != m_Suballocations.cbegin())
    6025  {
    6026  --prevSuballocItem;
    6027  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
    6028  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
    6029  {
    6030  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    6031  {
    6032  bufferImageGranularityConflict = true;
    6033  break;
    6034  }
    6035  }
    6036  else
    6037  // Already on previous page.
    6038  break;
    6039  }
    6040  if(bufferImageGranularityConflict)
    6041  {
    6042  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
    6043  }
    6044  }
    6045 
    6046  // Now that we have final *pOffset, check if we are past suballocItem.
    6047  // If yes, return false - this function should be called for another suballocItem as starting point.
    6048  if(*pOffset >= suballocItem->offset + suballocItem->size)
    6049  {
    6050  return false;
    6051  }
    6052 
    6053  // Calculate padding at the beginning based on current offset.
    6054  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
    6055 
    6056  // Calculate required margin at the end if this is not last suballocation.
    6057  VmaSuballocationList::const_iterator next = suballocItem;
    6058  ++next;
    6059  const VkDeviceSize requiredEndMargin =
    6060  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
    6061 
    6062  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
    6063  // Another early return check.
    6064  if(suballocItem->offset + totalSize > m_Size)
    6065  {
    6066  return false;
    6067  }
    6068 
    6069  // Advance lastSuballocItem until desired size is reached.
    6070  // Update itemsToMakeLostCount.
    6071  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
    6072  if(totalSize > suballocItem->size)
    6073  {
    6074  VkDeviceSize remainingSize = totalSize - suballocItem->size;
    6075  while(remainingSize > 0)
    6076  {
    6077  ++lastSuballocItem;
    6078  if(lastSuballocItem == m_Suballocations.cend())
    6079  {
    6080  return false;
    6081  }
    6082  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    6083  {
    6084  *pSumFreeSize += lastSuballocItem->size;
    6085  }
    6086  else
    6087  {
    6088  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
    6089  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
    6090  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    6091  {
    6092  ++*itemsToMakeLostCount;
    6093  *pSumItemSize += lastSuballocItem->size;
    6094  }
    6095  else
    6096  {
    6097  return false;
    6098  }
    6099  }
    6100  remainingSize = (lastSuballocItem->size < remainingSize) ?
    6101  remainingSize - lastSuballocItem->size : 0;
    6102  }
    6103  }
    6104 
    6105  // Check next suballocations for BufferImageGranularity conflicts.
    6106  // If conflict exists, we must mark more allocations lost or fail.
    6107  if(bufferImageGranularity > 1)
    6108  {
    6109  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
    6110  ++nextSuballocItem;
    6111  while(nextSuballocItem != m_Suballocations.cend())
    6112  {
    6113  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
    6114  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    6115  {
    6116  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    6117  {
    6118  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
    6119  if(nextSuballoc.hAllocation->CanBecomeLost() &&
    6120  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    6121  {
    6122  ++*itemsToMakeLostCount;
    6123  }
    6124  else
    6125  {
    6126  return false;
    6127  }
    6128  }
    6129  }
    6130  else
    6131  {
    6132  // Already on next page.
    6133  break;
    6134  }
    6135  ++nextSuballocItem;
    6136  }
    6137  }
    6138  }
    6139  else
    6140  {
    6141  const VmaSuballocation& suballoc = *suballocItem;
    6142  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    6143 
    6144  *pSumFreeSize = suballoc.size;
    6145 
    6146  // Size of this suballocation is too small for this request: Early return.
    6147  if(suballoc.size < allocSize)
    6148  {
    6149  return false;
    6150  }
    6151 
    6152  // Start from offset equal to beginning of this suballocation.
    6153  *pOffset = suballoc.offset;
    6154 
    6155  // Apply VMA_DEBUG_MARGIN at the beginning.
    6156  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
    6157  {
    6158  *pOffset += VMA_DEBUG_MARGIN;
    6159  }
    6160 
    6161  // Apply alignment.
    6162  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
    6163  *pOffset = VmaAlignUp(*pOffset, alignment);
    6164 
    6165  // Check previous suballocations for BufferImageGranularity conflicts.
    6166  // Make bigger alignment if necessary.
    6167  if(bufferImageGranularity > 1)
    6168  {
    6169  bool bufferImageGranularityConflict = false;
    6170  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
    6171  while(prevSuballocItem != m_Suballocations.cbegin())
    6172  {
    6173  --prevSuballocItem;
    6174  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
    6175  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
    6176  {
    6177  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    6178  {
    6179  bufferImageGranularityConflict = true;
    6180  break;
    6181  }
    6182  }
    6183  else
    6184  // Already on previous page.
    6185  break;
    6186  }
    6187  if(bufferImageGranularityConflict)
    6188  {
    6189  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
    6190  }
    6191  }
    6192 
    6193  // Calculate padding at the beginning based on current offset.
    6194  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
    6195 
    6196  // Calculate required margin at the end if this is not last suballocation.
    6197  VmaSuballocationList::const_iterator next = suballocItem;
    6198  ++next;
    6199  const VkDeviceSize requiredEndMargin =
    6200  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
    6201 
    6202  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
    6203  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
    6204  {
    6205  return false;
    6206  }
    6207 
    6208  // Check next suballocations for BufferImageGranularity conflicts.
    6209  // If conflict exists, allocation cannot be made here.
    6210  if(bufferImageGranularity > 1)
    6211  {
    6212  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
    6213  ++nextSuballocItem;
    6214  while(nextSuballocItem != m_Suballocations.cend())
    6215  {
    6216  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
    6217  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    6218  {
    6219  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    6220  {
    6221  return false;
    6222  }
    6223  }
    6224  else
    6225  {
    6226  // Already on next page.
    6227  break;
    6228  }
    6229  ++nextSuballocItem;
    6230  }
    6231  }
    6232  }
    6233 
    6234  // All tests passed: Success. pOffset is already filled.
    6235  return true;
    6236 }
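// Editor's note with an example: bufferImageGranularity comes from
// VkPhysicalDeviceLimits and forbids linear and optimal resources from
// sharing a "page" of that size. With granularity 4096, a linear buffer
// ending at offset 4999 occupies page [4096, 8192), so an optimal-tiling
// image tentatively placed at offset 6000 conflicts, and the code above
// aligns its offset up to 8192 instead.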
    6237 
    6238 void VmaBlockMetadata::MergeFreeWithNext(VmaSuballocationList::iterator item)
    6239 {
    6240  VMA_ASSERT(item != m_Suballocations.end());
    6241  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    6242 
    6243  VmaSuballocationList::iterator nextItem = item;
    6244  ++nextItem;
    6245  VMA_ASSERT(nextItem != m_Suballocations.end());
    6246  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    6247 
    6248  item->size += nextItem->size;
    6249  --m_FreeCount;
    6250  m_Suballocations.erase(nextItem);
    6251 }
    6252 
    6253 VmaSuballocationList::iterator VmaBlockMetadata::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
    6254 {
    6255  // Change this suballocation to be marked as free.
    6256  VmaSuballocation& suballoc = *suballocItem;
    6257  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    6258  suballoc.hAllocation = VK_NULL_HANDLE;
    6259 
    6260  // Update totals.
    6261  ++m_FreeCount;
    6262  m_SumFreeSize += suballoc.size;
    6263 
    6264  // Merge with previous and/or next suballocation if it's also free.
    6265  bool mergeWithNext = false;
    6266  bool mergeWithPrev = false;
    6267 
    6268  VmaSuballocationList::iterator nextItem = suballocItem;
    6269  ++nextItem;
    6270  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    6271  {
    6272  mergeWithNext = true;
    6273  }
    6274 
    6275  VmaSuballocationList::iterator prevItem = suballocItem;
    6276  if(suballocItem != m_Suballocations.begin())
    6277  {
    6278  --prevItem;
    6279  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    6280  {
    6281  mergeWithPrev = true;
    6282  }
    6283  }
    6284 
    6285  if(mergeWithNext)
    6286  {
    6287  UnregisterFreeSuballocation(nextItem);
    6288  MergeFreeWithNext(suballocItem);
    6289  }
    6290 
    6291  if(mergeWithPrev)
    6292  {
    6293  UnregisterFreeSuballocation(prevItem);
    6294  MergeFreeWithNext(prevItem);
    6295  RegisterFreeSuballocation(prevItem);
    6296  return prevItem;
    6297  }
    6298  else
    6299  {
    6300  RegisterFreeSuballocation(suballocItem);
    6301  return suballocItem;
    6302  }
    6303 }
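// Example (editor's addition): freeing the used range in a [free][used][free]
// layout merges forward first, then backward, leaving one free suballocation
// that is registered exactly once in m_FreeSuballocationsBySize. This
// preserves the invariant checked by Validate() that no two adjacent
// suballocations are both free.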
    6304 
    6305 void VmaBlockMetadata::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    6306 {
    6307  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    6308  VMA_ASSERT(item->size > 0);
    6309 
     6310  // You may want to enable this validation at the beginning or at the end of
     6311  // this function, depending on what you want to check.
    6312  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    6313 
    6314  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    6315  {
    6316  if(m_FreeSuballocationsBySize.empty())
    6317  {
    6318  m_FreeSuballocationsBySize.push_back(item);
    6319  }
    6320  else
    6321  {
    6322  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    6323  }
    6324  }
    6325 
    6326  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    6327 }
    6328 
    6329 
    6330 void VmaBlockMetadata::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
    6331 {
    6332  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    6333  VMA_ASSERT(item->size > 0);
    6334 
    6335  // You may want to enable this validation at the beginning or at the end of
    6336  // this function, depending on what you want to check.
    6337  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    6338 
    6339  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    6340  {
    6341  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    6342  m_FreeSuballocationsBySize.data(),
    6343  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
    6344  item,
    6345  VmaSuballocationItemSizeLess());
    6346  for(size_t index = it - m_FreeSuballocationsBySize.data();
    6347  index < m_FreeSuballocationsBySize.size();
    6348  ++index)
    6349  {
    6350  if(m_FreeSuballocationsBySize[index] == item)
    6351  {
    6352  VmaVectorRemove(m_FreeSuballocationsBySize, index);
    6353  return;
    6354  }
    6355  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
    6356  }
    6357  VMA_ASSERT(0 && "Not found.");
    6358  }
    6359 
    6360  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    6361 }
    6362 
    6363 ////////////////////////////////////////////////////////////////////////////////
    6364 // class VmaDeviceMemoryBlock
    6365 
    6366 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    6367  m_Metadata(hAllocator),
    6368  m_MemoryTypeIndex(UINT32_MAX),
    6369  m_hMemory(VK_NULL_HANDLE),
    6370  m_MapCount(0),
    6371  m_pMappedData(VMA_NULL)
    6372 {
    6373 }
    6374 
    6375 void VmaDeviceMemoryBlock::Init(
    6376  uint32_t newMemoryTypeIndex,
    6377  VkDeviceMemory newMemory,
    6378  VkDeviceSize newSize)
    6379 {
    6380  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    6381 
    6382  m_MemoryTypeIndex = newMemoryTypeIndex;
    6383  m_hMemory = newMemory;
    6384 
    6385  m_Metadata.Init(newSize);
    6386 }
    6387 
    6388 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
    6389 {
    6390  // This is the most important assert in the entire library.
    6391  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    6392  VMA_ASSERT(m_Metadata.IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
    6393 
    6394  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    6395  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Metadata.GetSize(), m_hMemory);
    6396  m_hMemory = VK_NULL_HANDLE;
    6397 }
    6398 
    6399 bool VmaDeviceMemoryBlock::Validate() const
    6400 {
    6401  if((m_hMemory == VK_NULL_HANDLE) ||
    6402  (m_Metadata.GetSize() == 0))
    6403  {
    6404  return false;
    6405  }
    6406 
    6407  return m_Metadata.Validate();
    6408 }
    6409 
    6410 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
    6411 {
    6412  if(count == 0)
    6413  {
    6414  return VK_SUCCESS;
    6415  }
    6416 
    6417  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    6418  if(m_MapCount != 0)
    6419  {
    6420  m_MapCount += count;
    6421  VMA_ASSERT(m_pMappedData != VMA_NULL);
    6422  if(ppData != VMA_NULL)
    6423  {
    6424  *ppData = m_pMappedData;
    6425  }
    6426  return VK_SUCCESS;
    6427  }
    6428  else
    6429  {
    6430  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    6431  hAllocator->m_hDevice,
    6432  m_hMemory,
    6433  0, // offset
    6434  VK_WHOLE_SIZE,
    6435  0, // flags
    6436  &m_pMappedData);
    6437  if(result == VK_SUCCESS)
    6438  {
    6439  if(ppData != VMA_NULL)
    6440  {
    6441  *ppData = m_pMappedData;
    6442  }
    6443  m_MapCount = count;
    6444  }
    6445  return result;
    6446  }
    6447 }
    6448 
    6449 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
    6450 {
    6451  if(count == 0)
    6452  {
    6453  return;
    6454  }
    6455 
    6456  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    6457  if(m_MapCount >= count)
    6458  {
    6459  m_MapCount -= count;
    6460  if(m_MapCount == 0)
    6461  {
    6462  m_pMappedData = VMA_NULL;
    6463  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
    6464  }
    6465  }
    6466  else
    6467  {
    6468  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    6469  }
    6470 }
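/*
Illustrative sketch (not part of the library source): Map/Unmap are
reference-counted, so nested map/unmap pairs on the same block are cheap. At the
public API level, for memory types without VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
written ranges should be flushed with vmaFlushAllocation (added in this change),
which rounds the range to nonCoherentAtomSize automatically. Error handling
omitted for brevity.

\code
void* pData = VMA_NULL;
VkResult res = vmaMapMemory(allocator, allocation, &pData);
// Check res in real code.
memcpy(pData, srcData, (size_t)srcDataSize);
// Needed only for non-HOST_COHERENT memory types; harmless otherwise.
vmaFlushAllocation(allocator, allocation, 0, srcDataSize);
vmaUnmapMemory(allocator, allocation);
\endcode
*/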
    6471 
    6472 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    6473  const VmaAllocator hAllocator,
    6474  const VmaAllocation hAllocation,
    6475  VkBuffer hBuffer)
    6476 {
    6477  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    6478  hAllocation->GetBlock() == this);
    6479  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    6480  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    6481  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
    6482  hAllocator->m_hDevice,
    6483  hBuffer,
    6484  m_hMemory,
    6485  hAllocation->GetOffset());
    6486 }
    6487 
    6488 VkResult VmaDeviceMemoryBlock::BindImageMemory(
    6489  const VmaAllocator hAllocator,
    6490  const VmaAllocation hAllocation,
    6491  VkImage hImage)
    6492 {
    6493  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    6494  hAllocation->GetBlock() == this);
    6495  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    6496  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    6497  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
    6498  hAllocator->m_hDevice,
    6499  hImage,
    6500  m_hMemory,
    6501  hAllocation->GetOffset());
    6502 }
    6503 
    6504 static void InitStatInfo(VmaStatInfo& outInfo)
    6505 {
    6506  memset(&outInfo, 0, sizeof(outInfo));
    6507  outInfo.allocationSizeMin = UINT64_MAX;
    6508  outInfo.unusedRangeSizeMin = UINT64_MAX;
    6509 }
    6510 
    6511 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
    6512 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
    6513 {
    6514  inoutInfo.blockCount += srcInfo.blockCount;
    6515  inoutInfo.allocationCount += srcInfo.allocationCount;
    6516  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    6517  inoutInfo.usedBytes += srcInfo.usedBytes;
    6518  inoutInfo.unusedBytes += srcInfo.unusedBytes;
    6519  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    6520  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    6521  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    6522  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
    6523 }
    6524 
    6525 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    6526 {
    6527  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    6528  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    6529  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    6530  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    6531 }
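/*
Illustrative sketch (not part of the library source): these helpers back the
public vmaCalculateStats function, which fills per-type, per-heap, and total
statistics:

\code
VmaStats stats;
vmaCalculateStats(allocator, &stats);
printf("Used bytes: %llu, average allocation size: %llu\n",
    (unsigned long long)stats.total.usedBytes,
    (unsigned long long)stats.total.allocationSizeAvg);
\endcode
*/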
    6532 
    6533 VmaPool_T::VmaPool_T(
    6534  VmaAllocator hAllocator,
    6535  const VmaPoolCreateInfo& createInfo) :
    6536  m_BlockVector(
    6537  hAllocator,
    6538  createInfo.memoryTypeIndex,
    6539  createInfo.blockSize,
    6540  createInfo.minBlockCount,
    6541  createInfo.maxBlockCount,
    6542  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
    6543  createInfo.frameInUseCount,
    6544  true) // isCustomPool
    6545 {
    6546 }
    6547 
    6548 VmaPool_T::~VmaPool_T()
    6549 {
    6550 }
    6551 
    6552 #if VMA_STATS_STRING_ENABLED
    6553 
    6554 #endif // #if VMA_STATS_STRING_ENABLED
    6555 
    6556 VmaBlockVector::VmaBlockVector(
    6557  VmaAllocator hAllocator,
    6558  uint32_t memoryTypeIndex,
    6559  VkDeviceSize preferredBlockSize,
    6560  size_t minBlockCount,
    6561  size_t maxBlockCount,
    6562  VkDeviceSize bufferImageGranularity,
    6563  uint32_t frameInUseCount,
    6564  bool isCustomPool) :
    6565  m_hAllocator(hAllocator),
    6566  m_MemoryTypeIndex(memoryTypeIndex),
    6567  m_PreferredBlockSize(preferredBlockSize),
    6568  m_MinBlockCount(minBlockCount),
    6569  m_MaxBlockCount(maxBlockCount),
    6570  m_BufferImageGranularity(bufferImageGranularity),
    6571  m_FrameInUseCount(frameInUseCount),
    6572  m_IsCustomPool(isCustomPool),
    6573  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    6574  m_HasEmptyBlock(false),
    6575  m_pDefragmentator(VMA_NULL)
    6576 {
    6577 }
    6578 
    6579 VmaBlockVector::~VmaBlockVector()
    6580 {
    6581  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
    6582 
    6583  for(size_t i = m_Blocks.size(); i--; )
    6584  {
    6585  m_Blocks[i]->Destroy(m_hAllocator);
    6586  vma_delete(m_hAllocator, m_Blocks[i]);
    6587  }
    6588 }
    6589 
    6590 VkResult VmaBlockVector::CreateMinBlocks()
    6591 {
    6592  for(size_t i = 0; i < m_MinBlockCount; ++i)
    6593  {
    6594  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    6595  if(res != VK_SUCCESS)
    6596  {
    6597  return res;
    6598  }
    6599  }
    6600  return VK_SUCCESS;
    6601 }
    6602 
    6603 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    6604 {
    6605  pStats->size = 0;
    6606  pStats->unusedSize = 0;
    6607  pStats->allocationCount = 0;
    6608  pStats->unusedRangeCount = 0;
    6609  pStats->unusedRangeSizeMax = 0;
    6610 
    6611  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    6612 
    6613  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    6614  {
    6615  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    6616  VMA_ASSERT(pBlock);
    6617  VMA_HEAVY_ASSERT(pBlock->Validate());
    6618  pBlock->m_Metadata.AddPoolStats(*pStats);
    6619  }
    6620 }
    6621 
    6622 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    6623 
    6624 VkResult VmaBlockVector::Allocate(
    6625  VmaPool hCurrentPool,
    6626  uint32_t currentFrameIndex,
    6627  const VkMemoryRequirements& vkMemReq,
    6628  const VmaAllocationCreateInfo& createInfo,
    6629  VmaSuballocationType suballocType,
    6630  VmaAllocation* pAllocation)
    6631 {
    6632  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    6633  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    6634 
    6635  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    6636 
    6637  // 1. Search existing allocations. Try to allocate without making other allocations lost.
    6638  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    6639  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    6640  {
    6641  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    6642  VMA_ASSERT(pCurrBlock);
    6643  VmaAllocationRequest currRequest = {};
    6644  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
    6645  currentFrameIndex,
    6646  m_FrameInUseCount,
    6647  m_BufferImageGranularity,
    6648  vkMemReq.size,
    6649  vkMemReq.alignment,
    6650  suballocType,
    6651  false, // canMakeOtherLost
    6652  &currRequest))
    6653  {
    6654  // Allocate from pCurrBlock.
    6655  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
    6656 
    6657  if(mapped)
    6658  {
    6659  VkResult res = pCurrBlock->Map(m_hAllocator, 1, VMA_NULL);
    6660  if(res != VK_SUCCESS)
    6661  {
    6662  return res;
    6663  }
    6664  }
    6665 
    6666  // We no longer have an empty block.
    6667  if(pCurrBlock->m_Metadata.IsEmpty())
    6668  {
    6669  m_HasEmptyBlock = false;
    6670  }
    6671 
    6672  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
    6673  pCurrBlock->m_Metadata.Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
    6674  (*pAllocation)->InitBlockAllocation(
    6675  hCurrentPool,
    6676  pCurrBlock,
    6677  currRequest.offset,
    6678  vkMemReq.alignment,
    6679  vkMemReq.size,
    6680  suballocType,
    6681  mapped,
    6682  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    6683  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
    6684  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
    6685  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
    6686  return VK_SUCCESS;
    6687  }
    6688  }
    6689 
    6690  const bool canCreateNewBlock =
    6691  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
    6692  (m_Blocks.size() < m_MaxBlockCount);
    6693 
    6694  // 2. Try to create new block.
    6695  if(canCreateNewBlock)
    6696  {
    6697  // Calculate optimal size for new block.
    6698  VkDeviceSize newBlockSize = m_PreferredBlockSize;
    6699  uint32_t newBlockSizeShift = 0;
    6700  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
    6701 
    6702  // Allocating blocks of other sizes is allowed only in default pools.
    6703  // In custom pools block size is fixed.
    6704  if(m_IsCustomPool == false)
    6705  {
    6706  // Allocate 1/8, 1/4, 1/2 as first blocks.
    6707  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
    6708  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
    6709  {
    6710  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    6711  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= vkMemReq.size * 2)
    6712  {
    6713  newBlockSize = smallerNewBlockSize;
    6714  ++newBlockSizeShift;
    6715  }
    6716  else
    6717  {
    6718  break;
    6719  }
    6720  }
    6721  }
    6722 
    6723  size_t newBlockIndex = 0;
    6724  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
    6725  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
    6726  if(m_IsCustomPool == false)
    6727  {
    6728  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
    6729  {
    6730  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    6731  if(smallerNewBlockSize >= vkMemReq.size)
    6732  {
    6733  newBlockSize = smallerNewBlockSize;
    6734  ++newBlockSizeShift;
    6735  res = CreateBlock(newBlockSize, &newBlockIndex);
    6736  }
    6737  else
    6738  {
    6739  break;
    6740  }
    6741  }
    6742  }
    6743 
    6744  if(res == VK_SUCCESS)
    6745  {
    6746  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
    6747  VMA_ASSERT(pBlock->m_Metadata.GetSize() >= vkMemReq.size);
    6748 
    6749  if(mapped)
    6750  {
    6751  res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
    6752  if(res != VK_SUCCESS)
    6753  {
    6754  return res;
    6755  }
    6756  }
    6757 
    6758  // Allocate from pBlock. Because it is empty, allocRequest can be trivially filled.
    6759  VmaAllocationRequest allocRequest;
    6760  pBlock->m_Metadata.CreateFirstAllocationRequest(&allocRequest);
    6761  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
    6762  pBlock->m_Metadata.Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
    6763  (*pAllocation)->InitBlockAllocation(
    6764  hCurrentPool,
    6765  pBlock,
    6766  allocRequest.offset,
    6767  vkMemReq.alignment,
    6768  vkMemReq.size,
    6769  suballocType,
    6770  mapped,
    6771  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    6772  VMA_HEAVY_ASSERT(pBlock->Validate());
    6773  VMA_DEBUG_LOG(" Created new allocation Size=%llu", newBlockSize);
    6774  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
    6775  return VK_SUCCESS;
    6776  }
    6777  }
    6778 
    6779  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    6780 
    6781  // 3. Try to allocate from existing blocks, making other allocations lost if necessary.
    6782  if(canMakeOtherLost)
    6783  {
    6784  uint32_t tryIndex = 0;
    6785  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
    6786  {
    6787  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
    6788  VmaAllocationRequest bestRequest = {};
    6789  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
    6790 
    6791  // 1. Search existing allocations.
    6792  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    6793  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    6794  {
    6795  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    6796  VMA_ASSERT(pCurrBlock);
    6797  VmaAllocationRequest currRequest = {};
    6798  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
    6799  currentFrameIndex,
    6800  m_FrameInUseCount,
    6801  m_BufferImageGranularity,
    6802  vkMemReq.size,
    6803  vkMemReq.alignment,
    6804  suballocType,
    6805  canMakeOtherLost,
    6806  &currRequest))
    6807  {
    6808  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    6809  if(pBestRequestBlock == VMA_NULL ||
    6810  currRequestCost < bestRequestCost)
    6811  {
    6812  pBestRequestBlock = pCurrBlock;
    6813  bestRequest = currRequest;
    6814  bestRequestCost = currRequestCost;
    6815 
    6816  if(bestRequestCost == 0)
    6817  {
    6818  break;
    6819  }
    6820  }
    6821  }
    6822  }
    6823 
    6824  if(pBestRequestBlock != VMA_NULL)
    6825  {
    6826  if(mapped)
    6827  {
    6828  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
    6829  if(res != VK_SUCCESS)
    6830  {
    6831  return res;
    6832  }
    6833  }
    6834 
    6835  if(pBestRequestBlock->m_Metadata.MakeRequestedAllocationsLost(
    6836  currentFrameIndex,
    6837  m_FrameInUseCount,
    6838  &bestRequest))
    6839  {
    6840  // We no longer have an empty block.
    6841  if(pBestRequestBlock->m_Metadata.IsEmpty())
    6842  {
    6843  m_HasEmptyBlock = false;
    6844  }
    6845  // Allocate from pBestRequestBlock.
    6846  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
    6847  pBestRequestBlock->m_Metadata.Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
    6848  (*pAllocation)->InitBlockAllocation(
    6849  hCurrentPool,
    6850  pBestRequestBlock,
    6851  bestRequest.offset,
    6852  vkMemReq.alignment,
    6853  vkMemReq.size,
    6854  suballocType,
    6855  mapped,
    6856  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    6857  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
    6858  VMA_DEBUG_LOG(" Returned from existing block");
    6859  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
    6860  return VK_SUCCESS;
    6861  }
    6862  // else: Some allocations must have been touched while we were here. Next try.
    6863  }
    6864  else
    6865  {
    6866  // Could not find place in any of the blocks - break outer loop.
    6867  break;
    6868  }
    6869  }
    6870  /* Maximum number of tries exceeded - a very unlikely event when many other
    6871  threads are simultaneously touching allocations, making it impossible to make
    6872  them lost at the same time as we try to allocate. */
    6873  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
    6874  {
    6875  return VK_ERROR_TOO_MANY_OBJECTS;
    6876  }
    6877  }
    6878 
    6879  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6880 }
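/*
Illustrative sketch (not part of the library source): step 3 above runs only for
allocations created with VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT, and it
can evict only allocations that were themselves created with
VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT. A caller can detect a lost allocation
like this:

\code
vmaSetCurrentFrameIndex(allocator, frameIndex);
VmaAllocationInfo allocInfo;
vmaGetAllocationInfo(allocator, allocation, &allocInfo);
if(allocInfo.deviceMemory == VK_NULL_HANDLE)
{
    // Allocation was lost - destroy it and recreate the resource.
}
\endcode
*/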
    6881 
    6882 void VmaBlockVector::Free(
    6883  VmaAllocation hAllocation)
    6884 {
    6885  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
    6886 
    6887  // Scope for lock.
    6888  {
    6889  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    6890 
    6891  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    6892 
    6893  if(hAllocation->IsPersistentMap())
    6894  {
    6895  pBlock->Unmap(m_hAllocator, 1);
    6896  }
    6897 
    6898  pBlock->m_Metadata.Free(hAllocation);
    6899  VMA_HEAVY_ASSERT(pBlock->Validate());
    6900 
    6901  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
    6902 
    6903  // pBlock became empty after this deallocation.
    6904  if(pBlock->m_Metadata.IsEmpty())
    6905  {
    6906  // We already have an empty block - we don't want two, so delete this one.
    6907  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
    6908  {
    6909  pBlockToDelete = pBlock;
    6910  Remove(pBlock);
    6911  }
    6912  // We now have our first empty block.
    6913  else
    6914  {
    6915  m_HasEmptyBlock = true;
    6916  }
    6917  }
    6918  // pBlock didn't become empty, but we have another empty block - find and free that one.
    6919  // (This is optional, heuristics.)
    6920  else if(m_HasEmptyBlock)
    6921  {
    6922  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
    6923  if(pLastBlock->m_Metadata.IsEmpty() && m_Blocks.size() > m_MinBlockCount)
    6924  {
    6925  pBlockToDelete = pLastBlock;
    6926  m_Blocks.pop_back();
    6927  m_HasEmptyBlock = false;
    6928  }
    6929  }
    6930 
    6931  IncrementallySortBlocks();
    6932  }
    6933 
    6934  // Destruction of the empty block. Deferred until this point, outside of the
    6935  // mutex lock, for performance reasons.
    6936  if(pBlockToDelete != VMA_NULL)
    6937  {
    6938  VMA_DEBUG_LOG(" Deleted empty block");
    6939  pBlockToDelete->Destroy(m_hAllocator);
    6940  vma_delete(m_hAllocator, pBlockToDelete);
    6941  }
    6942 }
    6943 
    6944 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
    6945 {
    6946  VkDeviceSize result = 0;
    6947  for(size_t i = m_Blocks.size(); i--; )
    6948  {
    6949  result = VMA_MAX(result, m_Blocks[i]->m_Metadata.GetSize());
    6950  if(result >= m_PreferredBlockSize)
    6951  {
    6952  break;
    6953  }
    6954  }
    6955  return result;
    6956 }
    6957 
    6958 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    6959 {
    6960  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    6961  {
    6962  if(m_Blocks[blockIndex] == pBlock)
    6963  {
    6964  VmaVectorRemove(m_Blocks, blockIndex);
    6965  return;
    6966  }
    6967  }
    6968  VMA_ASSERT(0);
    6969 }
    6970 
    6971 void VmaBlockVector::IncrementallySortBlocks()
    6972 {
    6973  // One pass of bubble sort, stopping at the first swap - cheap incremental sorting by sum of free size.
    6974  for(size_t i = 1; i < m_Blocks.size(); ++i)
    6975  {
    6976  if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() > m_Blocks[i]->m_Metadata.GetSumFreeSize())
    6977  {
    6978  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    6979  return;
    6980  }
    6981  }
    6982 }
    6983 
    6984 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    6985 {
    6986  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    6987  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    6988  allocInfo.allocationSize = blockSize;
    6989  VkDeviceMemory mem = VK_NULL_HANDLE;
    6990  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    6991  if(res < 0)
    6992  {
    6993  return res;
    6994  }
    6995 
    6996  // New VkDeviceMemory successfully created.
    6997 
    6998  // Create new block object for it.
    6999  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    7000  pBlock->Init(
    7001  m_MemoryTypeIndex,
    7002  mem,
    7003  allocInfo.allocationSize);
    7004 
    7005  m_Blocks.push_back(pBlock);
    7006  if(pNewBlockIndex != VMA_NULL)
    7007  {
    7008  *pNewBlockIndex = m_Blocks.size() - 1;
    7009  }
    7010 
    7011  return VK_SUCCESS;
    7012 }
    7013 
    7014 #if VMA_STATS_STRING_ENABLED
    7015 
    7016 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
    7017 {
    7018  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    7019 
    7020  json.BeginObject();
    7021 
    7022  if(m_IsCustomPool)
    7023  {
    7024  json.WriteString("MemoryTypeIndex");
    7025  json.WriteNumber(m_MemoryTypeIndex);
    7026 
    7027  json.WriteString("BlockSize");
    7028  json.WriteNumber(m_PreferredBlockSize);
    7029 
    7030  json.WriteString("BlockCount");
    7031  json.BeginObject(true);
    7032  if(m_MinBlockCount > 0)
    7033  {
    7034  json.WriteString("Min");
    7035  json.WriteNumber((uint64_t)m_MinBlockCount);
    7036  }
    7037  if(m_MaxBlockCount < SIZE_MAX)
    7038  {
    7039  json.WriteString("Max");
    7040  json.WriteNumber((uint64_t)m_MaxBlockCount);
    7041  }
    7042  json.WriteString("Cur");
    7043  json.WriteNumber((uint64_t)m_Blocks.size());
    7044  json.EndObject();
    7045 
    7046  if(m_FrameInUseCount > 0)
    7047  {
    7048  json.WriteString("FrameInUseCount");
    7049  json.WriteNumber(m_FrameInUseCount);
    7050  }
    7051  }
    7052  else
    7053  {
    7054  json.WriteString("PreferredBlockSize");
    7055  json.WriteNumber(m_PreferredBlockSize);
    7056  }
    7057 
    7058  json.WriteString("Blocks");
    7059  json.BeginArray();
    7060  for(size_t i = 0; i < m_Blocks.size(); ++i)
    7061  {
    7062  m_Blocks[i]->m_Metadata.PrintDetailedMap(json);
    7063  }
    7064  json.EndArray();
    7065 
    7066  json.EndObject();
    7067 }
    7068 
    7069 #endif // #if VMA_STATS_STRING_ENABLED
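/*
Illustrative sketch (not part of the library source): PrintDetailedMap feeds the
JSON produced by the public vmaBuildStatsString function:

\code
char* statsString = VMA_NULL;
vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = detailed map
// Inspect or save statsString as JSON...
vmaFreeStatsString(allocator, statsString);
\endcode
*/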
    7070 
    7071 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
    7072  VmaAllocator hAllocator,
    7073  uint32_t currentFrameIndex)
    7074 {
    7075  if(m_pDefragmentator == VMA_NULL)
    7076  {
    7077  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
    7078  hAllocator,
    7079  this,
    7080  currentFrameIndex);
    7081  }
    7082 
    7083  return m_pDefragmentator;
    7084 }
    7085 
    7086 VkResult VmaBlockVector::Defragment(
    7087  VmaDefragmentationStats* pDefragmentationStats,
    7088  VkDeviceSize& maxBytesToMove,
    7089  uint32_t& maxAllocationsToMove)
    7090 {
    7091  if(m_pDefragmentator == VMA_NULL)
    7092  {
    7093  return VK_SUCCESS;
    7094  }
    7095 
    7096  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    7097 
    7098  // Defragment.
    7099  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
    7100 
    7101  // Accumulate statistics.
    7102  if(pDefragmentationStats != VMA_NULL)
    7103  {
    7104  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
    7105  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
    7106  pDefragmentationStats->bytesMoved += bytesMoved;
    7107  pDefragmentationStats->allocationsMoved += allocationsMoved;
    7108  VMA_ASSERT(bytesMoved <= maxBytesToMove);
    7109  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
    7110  maxBytesToMove -= bytesMoved;
    7111  maxAllocationsToMove -= allocationsMoved;
    7112  }
    7113 
    7114  // Free empty blocks.
    7115  m_HasEmptyBlock = false;
    7116  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    7117  {
    7118  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    7119  if(pBlock->m_Metadata.IsEmpty())
    7120  {
    7121  if(m_Blocks.size() > m_MinBlockCount)
    7122  {
    7123  if(pDefragmentationStats != VMA_NULL)
    7124  {
    7125  ++pDefragmentationStats->deviceMemoryBlocksFreed;
    7126  pDefragmentationStats->bytesFreed += pBlock->m_Metadata.GetSize();
    7127  }
    7128 
    7129  VmaVectorRemove(m_Blocks, blockIndex);
    7130  pBlock->Destroy(m_hAllocator);
    7131  vma_delete(m_hAllocator, pBlock);
    7132  }
    7133  else
    7134  {
    7135  m_HasEmptyBlock = true;
    7136  }
    7137  }
    7138  }
    7139 
    7140  return result;
    7141 }
    7142 
    7143 void VmaBlockVector::DestroyDefragmentator()
    7144 {
    7145  if(m_pDefragmentator != VMA_NULL)
    7146  {
    7147  vma_delete(m_hAllocator, m_pDefragmentator);
    7148  m_pDefragmentator = VMA_NULL;
    7149  }
    7150 }
    7151 
    7152 void VmaBlockVector::MakePoolAllocationsLost(
    7153  uint32_t currentFrameIndex,
    7154  size_t* pLostAllocationCount)
    7155 {
    7156  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    7157  size_t lostAllocationCount = 0;
    7158  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    7159  {
    7160  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    7161  VMA_ASSERT(pBlock);
    7162  lostAllocationCount += pBlock->m_Metadata.MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    7163  }
    7164  if(pLostAllocationCount != VMA_NULL)
    7165  {
    7166  *pLostAllocationCount = lostAllocationCount;
    7167  }
    7168 }
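/*
Illustrative sketch (not part of the library source): this is the backend of the
public vmaMakePoolAllocationsLost function:

\code
size_t lostCount = 0;
vmaMakePoolAllocationsLost(allocator, pool, &lostCount);
\endcode
*/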
    7169 
    7170 void VmaBlockVector::AddStats(VmaStats* pStats)
    7171 {
    7172  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    7173  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    7174 
    7175  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    7176 
    7177  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    7178  {
    7179  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    7180  VMA_ASSERT(pBlock);
    7181  VMA_HEAVY_ASSERT(pBlock->Validate());
    7182  VmaStatInfo allocationStatInfo;
    7183  pBlock->m_Metadata.CalcAllocationStatInfo(allocationStatInfo);
    7184  VmaAddStatInfo(pStats->total, allocationStatInfo);
    7185  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    7186  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    7187  }
    7188 }
    7189 
    7190 ////////////////////////////////////////////////////////////////////////////////
    7191 // VmaDefragmentator members definition
    7192 
    7193 VmaDefragmentator::VmaDefragmentator(
    7194  VmaAllocator hAllocator,
    7195  VmaBlockVector* pBlockVector,
    7196  uint32_t currentFrameIndex) :
    7197  m_hAllocator(hAllocator),
    7198  m_pBlockVector(pBlockVector),
    7199  m_CurrentFrameIndex(currentFrameIndex),
    7200  m_BytesMoved(0),
    7201  m_AllocationsMoved(0),
    7202  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    7203  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
    7204 {
    7205 }
    7206 
    7207 VmaDefragmentator::~VmaDefragmentator()
    7208 {
    7209  for(size_t i = m_Blocks.size(); i--; )
    7210  {
    7211  vma_delete(m_hAllocator, m_Blocks[i]);
    7212  }
    7213 }
    7214 
    7215 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    7216 {
    7217  AllocationInfo allocInfo;
    7218  allocInfo.m_hAllocation = hAlloc;
    7219  allocInfo.m_pChanged = pChanged;
    7220  m_Allocations.push_back(allocInfo);
    7221 }
    7222 
    7223 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
    7224 {
    7225  // It has already been mapped for defragmentation.
    7226  if(m_pMappedDataForDefragmentation)
    7227  {
    7228  *ppMappedData = m_pMappedDataForDefragmentation;
    7229  return VK_SUCCESS;
    7230  }
    7231 
    7232  // The block is already mapped, e.g. persistently.
    7233  if(m_pBlock->GetMappedData())
    7234  {
    7235  *ppMappedData = m_pBlock->GetMappedData();
    7236  return VK_SUCCESS;
    7237  }
    7238 
    7239  // Map on first usage.
    7240  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
    7241  *ppMappedData = m_pMappedDataForDefragmentation;
    7242  return res;
    7243 }
    7244 
    7245 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
    7246 {
    7247  if(m_pMappedDataForDefragmentation != VMA_NULL)
    7248  {
    7249  m_pBlock->Unmap(hAllocator, 1);
    7250  }
    7251 }
    7252 
    7253 VkResult VmaDefragmentator::DefragmentRound(
    7254  VkDeviceSize maxBytesToMove,
    7255  uint32_t maxAllocationsToMove)
    7256 {
    7257  if(m_Blocks.empty())
    7258  {
    7259  return VK_SUCCESS;
    7260  }
    7261 
    7262  size_t srcBlockIndex = m_Blocks.size() - 1;
    7263  size_t srcAllocIndex = SIZE_MAX;
    7264  for(;;)
    7265  {
    7266  // 1. Find next allocation to move.
    7267  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
    7268  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
    7269  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
    7270  {
    7271  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
    7272  {
    7273  // Finished: no more allocations to process.
    7274  if(srcBlockIndex == 0)
    7275  {
    7276  return VK_SUCCESS;
    7277  }
    7278  else
    7279  {
    7280  --srcBlockIndex;
    7281  srcAllocIndex = SIZE_MAX;
    7282  }
    7283  }
    7284  else
    7285  {
    7286  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
    7287  }
    7288  }
    7289 
    7290  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
    7291  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
    7292 
    7293  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
    7294  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
    7295  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
    7296  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
    7297 
    7298  // 2. Try to find new place for this allocation in preceding or current block.
    7299  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
    7300  {
    7301  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
    7302  VmaAllocationRequest dstAllocRequest;
    7303  if(pDstBlockInfo->m_pBlock->m_Metadata.CreateAllocationRequest(
    7304  m_CurrentFrameIndex,
    7305  m_pBlockVector->GetFrameInUseCount(),
    7306  m_pBlockVector->GetBufferImageGranularity(),
    7307  size,
    7308  alignment,
    7309  suballocType,
    7310  false, // canMakeOtherLost
    7311  &dstAllocRequest) &&
    7312  MoveMakesSense(
    7313  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
    7314  {
    7315  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
    7316 
    7317  // Reached limit on number of allocations or bytes to move.
    7318  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
    7319  (m_BytesMoved + size > maxBytesToMove))
    7320  {
    7321  return VK_INCOMPLETE;
    7322  }
    7323 
    7324  void* pDstMappedData = VMA_NULL;
    7325  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
    7326  if(res != VK_SUCCESS)
    7327  {
    7328  return res;
    7329  }
    7330 
    7331  void* pSrcMappedData = VMA_NULL;
    7332  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
    7333  if(res != VK_SUCCESS)
    7334  {
    7335  return res;
    7336  }
    7337 
    7338  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
    7339  memcpy(
    7340  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
    7341  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
    7342  static_cast<size_t>(size));
    7343 
    7344  pDstBlockInfo->m_pBlock->m_Metadata.Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
    7345  pSrcBlockInfo->m_pBlock->m_Metadata.FreeAtOffset(srcOffset);
    7346 
    7347  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
    7348 
    7349  if(allocInfo.m_pChanged != VMA_NULL)
    7350  {
    7351  *allocInfo.m_pChanged = VK_TRUE;
    7352  }
    7353 
    7354  ++m_AllocationsMoved;
    7355  m_BytesMoved += size;
    7356 
    7357  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
    7358 
    7359  break;
    7360  }
    7361  }
    7362 
    7363  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for next round.
    7364 
    7365  if(srcAllocIndex > 0)
    7366  {
    7367  --srcAllocIndex;
    7368  }
    7369  else
    7370  {
    7371  if(srcBlockIndex > 0)
    7372  {
    7373  --srcBlockIndex;
    7374  srcAllocIndex = SIZE_MAX;
    7375  }
    7376  else
    7377  {
    7378  return VK_SUCCESS;
    7379  }
    7380  }
    7381  }
    7382 }
    7383 
    7384 VkResult VmaDefragmentator::Defragment(
    7385  VkDeviceSize maxBytesToMove,
    7386  uint32_t maxAllocationsToMove)
    7387 {
    7388  if(m_Allocations.empty())
    7389  {
    7390  return VK_SUCCESS;
    7391  }
    7392 
    7393  // Create block info for each block.
    7394  const size_t blockCount = m_pBlockVector->m_Blocks.size();
    7395  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    7396  {
    7397  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
    7398  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
    7399  m_Blocks.push_back(pBlockInfo);
    7400  }
    7401 
    7402  // Sort them by m_pBlock pointer value.
    7403  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
    7404 
    7405  // Move allocation infos from m_Allocations to appropriate m_Blocks[blockIndex].m_Allocations.
    7406  for(size_t allocIndex = 0, allocCount = m_Allocations.size(); allocIndex < allocCount; ++allocIndex)
    7407  {
    7408  AllocationInfo& allocInfo = m_Allocations[allocIndex];
    7409  // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
    7410  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
    7411  {
    7412  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
    7413  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
    7414  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
    7415  {
    7416  (*it)->m_Allocations.push_back(allocInfo);
    7417  }
    7418  else
    7419  {
    7420  VMA_ASSERT(0);
    7421  }
    7422  }
    7423  }
    7424  m_Allocations.clear();
    7425 
    7426  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    7427  {
    7428  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
    7429  pBlockInfo->CalcHasNonMovableAllocations();
    7430  pBlockInfo->SortAllocationsBySizeDescecnding();
    7431  }
    7432 
    7433  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
    7434  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
    7435 
    7436  // Execute defragmentation rounds (the main part).
    7437  VkResult result = VK_SUCCESS;
    7438  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
    7439  {
    7440  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
    7441  }
    7442 
    7443  // Unmap blocks that were mapped for defragmentation.
    7444  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    7445  {
    7446  m_Blocks[blockIndex]->Unmap(m_hAllocator);
    7447  }
    7448 
    7449  return result;
    7450 }
    7451 
    7452 bool VmaDefragmentator::MoveMakesSense(
    7453  size_t dstBlockIndex, VkDeviceSize dstOffset,
    7454  size_t srcBlockIndex, VkDeviceSize srcOffset)
    7455 {
    7456  if(dstBlockIndex < srcBlockIndex)
    7457  {
    7458  return true;
    7459  }
    7460  if(dstBlockIndex > srcBlockIndex)
    7461  {
    7462  return false;
    7463  }
    7464  if(dstOffset < srcOffset)
    7465  {
    7466  return true;
    7467  }
    7468  return false;
    7469 }
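/*
Illustrative sketch (not part of the library source): the defragmentator above
is driven by the public vmaDefragment function. A minimal call, assuming
`allocations` is a std::vector of host-visible allocations currently not in use
by the GPU:

\code
std::vector<VkBool32> allocationsChanged(allocations.size());
VmaDefragmentationStats defragStats = {};
VkResult res = vmaDefragment(
    allocator,
    allocations.data(), allocations.size(),
    allocationsChanged.data(),
    VMA_NULL, // pDefragmentationInfo - use default limits
    &defragStats);
// For every allocationsChanged[i] == VK_TRUE, the buffer or image must be
// recreated and rebound at its new offset.
\endcode
*/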
    7470 
    7471 ////////////////////////////////////////////////////////////////////////////////
    7472 // VmaAllocator_T
    7473 
    7474 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    7475  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    7476  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    7477  m_hDevice(pCreateInfo->device),
    7478  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    7479  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
    7480  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    7481  m_PreferredLargeHeapBlockSize(0),
    7482  m_PhysicalDevice(pCreateInfo->physicalDevice),
    7483  m_CurrentFrameIndex(0),
    7484  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
    7485 {
    7486  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
    7487 
    7488 #if !(VMA_DEDICATED_ALLOCATION)
    7489  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
    7490  {
    7491  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
    7492  }
    7493 #endif
    7494 
    7495  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
    7496  memset(&m_MemProps, 0, sizeof(m_MemProps));
    7497  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    7498 
    7499  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    7500  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
    7501 
    7502  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    7503  {
    7504  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    7505  }
    7506 
    7507  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    7508  {
    7509  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
    7510  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    7511  }
    7512 
    7513  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
    7514 
    7515  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    7516  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
    7517 
    7518  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
    7519  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    7520 
    7521  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    7522  {
    7523  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
    7524  {
    7525  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
    7526  if(limit != VK_WHOLE_SIZE)
    7527  {
    7528  m_HeapSizeLimit[heapIndex] = limit;
    7529  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
    7530  {
    7531  m_MemProps.memoryHeaps[heapIndex].size = limit;
    7532  }
    7533  }
    7534  }
    7535  }
    7536 
    7537  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    7538  {
    7539  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
    7540 
    7541  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
    7542  this,
    7543  memTypeIndex,
    7544  preferredBlockSize,
    7545  0,
    7546  SIZE_MAX,
    7547  GetBufferImageGranularity(),
    7548  pCreateInfo->frameInUseCount,
    7549  false); // isCustomPool
    7550  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
    7551  // because minBlockCount is 0.
    7552  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    7553  }
    7554 }
    7555 
    7556 VmaAllocator_T::~VmaAllocator_T()
    7557 {
    7558  VMA_ASSERT(m_Pools.empty());
    7559 
    7560  for(size_t i = GetMemoryTypeCount(); i--; )
    7561  {
    7562  vma_delete(this, m_pDedicatedAllocations[i]);
    7563  vma_delete(this, m_pBlockVectors[i]);
    7564  }
    7565 }
    7566 
    7567 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
    7568 {
    7569 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
    7570  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    7571  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    7572  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    7573  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    7574  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    7575  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    7576  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    7577  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    7578  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    7579  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    7580  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    7581  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    7582  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    7583  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
    7584 #if VMA_DEDICATED_ALLOCATION
    7585  if(m_UseKhrDedicatedAllocation)
    7586  {
    7587  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
    7588  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
    7589  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
    7590  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    7591  }
    7592 #endif // #if VMA_DEDICATED_ALLOCATION
    7593 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
    7594 
    7595 #define VMA_COPY_IF_NOT_NULL(funcName) \
    7596  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
    7597 
    7598  if(pVulkanFunctions != VMA_NULL)
    7599  {
    7600  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
    7601  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
    7602  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
    7603  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
    7604  VMA_COPY_IF_NOT_NULL(vkMapMemory);
    7605  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
    7606  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
    7607  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
    7608  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
    7609  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
    7610  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
    7611  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
    7612  VMA_COPY_IF_NOT_NULL(vkCreateImage);
    7613  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
    7614 #if VMA_DEDICATED_ALLOCATION
    7615  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
    7616  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
    7617 #endif
    7618  }
    7619 
    7620 #undef VMA_COPY_IF_NOT_NULL
    7621 
    7622  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    7623  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    7624  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    7625  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    7626  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    7627  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    7628  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    7629  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    7630  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    7631  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    7632  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    7633  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    7634  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    7635  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    7636  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    7637  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
    7638 #if VMA_DEDICATED_ALLOCATION
    7639  if(m_UseKhrDedicatedAllocation)
    7640  {
    7641  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
    7642  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    7643  }
    7644 #endif
    7645 }
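/*
Illustrative sketch (not part of the library source): with
VMA_STATIC_VULKAN_FUNCTIONS defined to 0, all pointers must be supplied through
VmaAllocatorCreateInfo::pVulkanFunctions, including the
vkFlushMappedMemoryRanges / vkInvalidateMappedMemoryRanges members added in this
change:

\code
VmaVulkanFunctions vulkanFunctions = {};
vulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
vulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
vulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
// ... fill the remaining members the same way ...

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;
allocatorInfo.pVulkanFunctions = &vulkanFunctions;
\endcode
*/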
    7646 
    7647 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    7648 {
    7649  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    7650  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    7651  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
    7652  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
    7653 }
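/*
Worked example (illustrative): assuming the default macro values of
VMA_SMALL_HEAP_MAX_SIZE = 512 MB and VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE = 256 MB,
a 256 MB heap counts as "small" and gets 256 / 8 = 32 MB blocks, while an 8 GB
heap uses the preferred 256 MB block size.
*/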
    7654 
    7655 VkResult VmaAllocator_T::AllocateMemoryOfType(
    7656  const VkMemoryRequirements& vkMemReq,
    7657  bool dedicatedAllocation,
    7658  VkBuffer dedicatedBuffer,
    7659  VkImage dedicatedImage,
    7660  const VmaAllocationCreateInfo& createInfo,
    7661  uint32_t memTypeIndex,
    7662  VmaSuballocationType suballocType,
    7663  VmaAllocation* pAllocation)
    7664 {
    7665  VMA_ASSERT(pAllocation != VMA_NULL);
    7666  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
    7667 
    7668  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    7669 
    7670  // If memory type is not HOST_VISIBLE, disable MAPPED.
    7671  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    7672  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    7673  {
    7674  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    7675  }
    7676 
    7677  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    7678  VMA_ASSERT(blockVector);
    7679 
    7680  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    7681  bool preferDedicatedMemory =
    7682  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    7683  dedicatedAllocation ||
    7684  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
    7685  vkMemReq.size > preferredBlockSize / 2;
    7686 
    7687  if(preferDedicatedMemory &&
    7688  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    7689  finalCreateInfo.pool == VK_NULL_HANDLE)
    7690  {
    7691  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
    7692  }
    7693 
    7694  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    7695  {
    7696  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    7697  {
    7698  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    7699  }
    7700  else
    7701  {
    7702  return AllocateDedicatedMemory(
    7703  vkMemReq.size,
    7704  suballocType,
    7705  memTypeIndex,
    7706  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    7707  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    7708  finalCreateInfo.pUserData,
    7709  dedicatedBuffer,
    7710  dedicatedImage,
    7711  pAllocation);
    7712  }
    7713  }
    7714  else
    7715  {
    7716  VkResult res = blockVector->Allocate(
    7717  VK_NULL_HANDLE, // hCurrentPool
    7718  m_CurrentFrameIndex.load(),
    7719  vkMemReq,
    7720  finalCreateInfo,
    7721  suballocType,
    7722  pAllocation);
    7723  if(res == VK_SUCCESS)
    7724  {
    7725  return res;
    7726  }
    7727 
    7728  // Try dedicated memory.
    7729  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    7730  {
    7731  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    7732  }
    7733  else
    7734  {
    7735  res = AllocateDedicatedMemory(
    7736  vkMemReq.size,
    7737  suballocType,
    7738  memTypeIndex,
    7739  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    7740  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    7741  finalCreateInfo.pUserData,
    7742  dedicatedBuffer,
    7743  dedicatedImage,
    7744  pAllocation);
    7745  if(res == VK_SUCCESS)
    7746  {
    7747  // Succeeded: AllocateDedicatedMemory function already filled pAllocation, nothing more to do here.
    7748  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    7749  return VK_SUCCESS;
    7750  }
    7751  else
    7752  {
    7753  // Everything failed: Return error code.
    7754  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    7755  return res;
    7756  }
    7757  }
    7758  }
    7759 }
    7760 
    7761 VkResult VmaAllocator_T::AllocateDedicatedMemory(
    7762  VkDeviceSize size,
    7763  VmaSuballocationType suballocType,
    7764  uint32_t memTypeIndex,
    7765  bool map,
    7766  bool isUserDataString,
    7767  void* pUserData,
    7768  VkBuffer dedicatedBuffer,
    7769  VkImage dedicatedImage,
    7770  VmaAllocation* pAllocation)
    7771 {
    7772  VMA_ASSERT(pAllocation);
    7773 
    7774  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    7775  allocInfo.memoryTypeIndex = memTypeIndex;
    7776  allocInfo.allocationSize = size;
    7777 
    7778 #if VMA_DEDICATED_ALLOCATION
    7779  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    7780  if(m_UseKhrDedicatedAllocation)
    7781  {
    7782  if(dedicatedBuffer != VK_NULL_HANDLE)
    7783  {
    7784  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
    7785  dedicatedAllocInfo.buffer = dedicatedBuffer;
    7786  allocInfo.pNext = &dedicatedAllocInfo;
    7787  }
    7788  else if(dedicatedImage != VK_NULL_HANDLE)
    7789  {
    7790  dedicatedAllocInfo.image = dedicatedImage;
    7791  allocInfo.pNext = &dedicatedAllocInfo;
    7792  }
    7793  }
    7794 #endif // #if VMA_DEDICATED_ALLOCATION
    7795 
    7796  // Allocate VkDeviceMemory.
    7797  VkDeviceMemory hMemory = VK_NULL_HANDLE;
    7798  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    7799  if(res < 0)
    7800  {
    7801  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    7802  return res;
    7803  }
    7804 
    7805  void* pMappedData = VMA_NULL;
    7806  if(map)
    7807  {
    7808  res = (*m_VulkanFunctions.vkMapMemory)(
    7809  m_hDevice,
    7810  hMemory,
    7811  0,
    7812  VK_WHOLE_SIZE,
    7813  0,
    7814  &pMappedData);
    7815  if(res < 0)
    7816  {
    7817  VMA_DEBUG_LOG(" vkMapMemory FAILED");
    7818  FreeVulkanMemory(memTypeIndex, size, hMemory);
    7819  return res;
    7820  }
    7821  }
    7822 
    7823  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
    7824  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    7825  (*pAllocation)->SetUserData(this, pUserData);
    7826 
    7827  // Register it in m_pDedicatedAllocations.
    7828  {
    7829  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    7830  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    7831  VMA_ASSERT(pDedicatedAllocations);
    7832  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    7833  }
    7834 
    7835  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
    7836 
    7837  return VK_SUCCESS;
    7838 }
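/*
Illustrative sketch (not part of the library source): dedicated allocations can
be requested explicitly through the public API, e.g. for a large render target
(imageCreateInfo is assumed to be a filled VkImageCreateInfo):

\code
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;

VkImage image;
VmaAllocation allocation;
VkResult res = vmaCreateImage(allocator, &imageCreateInfo, &allocCreateInfo,
    &image, &allocation, VMA_NULL);
\endcode
*/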
    7839 
    7840 void VmaAllocator_T::GetBufferMemoryRequirements(
    7841  VkBuffer hBuffer,
    7842  VkMemoryRequirements& memReq,
    7843  bool& requiresDedicatedAllocation,
    7844  bool& prefersDedicatedAllocation) const
    7845 {
    7846 #if VMA_DEDICATED_ALLOCATION
    7847  if(m_UseKhrDedicatedAllocation)
    7848  {
    7849  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
    7850  memReqInfo.buffer = hBuffer;
    7851 
    7852  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    7853 
    7854  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    7855  memReq2.pNext = &memDedicatedReq;
    7856 
    7857  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    7858 
    7859  memReq = memReq2.memoryRequirements;
    7860  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    7861  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    7862  }
    7863  else
    7864 #endif // #if VMA_DEDICATED_ALLOCATION
    7865  {
    7866  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
    7867  requiresDedicatedAllocation = false;
    7868  prefersDedicatedAllocation = false;
    7869  }
    7870 }
    7871 
    7872 void VmaAllocator_T::GetImageMemoryRequirements(
    7873  VkImage hImage,
    7874  VkMemoryRequirements& memReq,
    7875  bool& requiresDedicatedAllocation,
    7876  bool& prefersDedicatedAllocation) const
    7877 {
    7878 #if VMA_DEDICATED_ALLOCATION
    7879  if(m_UseKhrDedicatedAllocation)
    7880  {
    7881  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
    7882  memReqInfo.image = hImage;
    7883 
    7884  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    7885 
    7886  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    7887  memReq2.pNext = &memDedicatedReq;
    7888 
    7889  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    7890 
    7891  memReq = memReq2.memoryRequirements;
    7892  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    7893  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    7894  }
    7895  else
    7896 #endif // #if VMA_DEDICATED_ALLOCATION
    7897  {
    7898  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
    7899  requiresDedicatedAllocation = false;
    7900  prefersDedicatedAllocation = false;
    7901  }
    7902 }
    7903 
    7904 VkResult VmaAllocator_T::AllocateMemory(
    7905  const VkMemoryRequirements& vkMemReq,
    7906  bool requiresDedicatedAllocation,
    7907  bool prefersDedicatedAllocation,
    7908  VkBuffer dedicatedBuffer,
    7909  VkImage dedicatedImage,
    7910  const VmaAllocationCreateInfo& createInfo,
    7911  VmaSuballocationType suballocType,
    7912  VmaAllocation* pAllocation)
    7913 {
    7914  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
    7915  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    7916  {
    7917  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    7918  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    7919  }
    7920  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    7921  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
    7922  {
    7923  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
    7924  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    7925  }
    7926  if(requiresDedicatedAllocation)
    7927  {
    7928  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    7929  {
    7930  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
    7931  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    7932  }
    7933  if(createInfo.pool != VK_NULL_HANDLE)
    7934  {
    7935  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
    7936  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    7937  }
    7938  }
    7939  if((createInfo.pool != VK_NULL_HANDLE) &&
    7940  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    7941  {
    7942  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
    7943  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    7944  }
    7945 
    7946  if(createInfo.pool != VK_NULL_HANDLE)
    7947  {
    7948  return createInfo.pool->m_BlockVector.Allocate(
    7949  createInfo.pool,
    7950  m_CurrentFrameIndex.load(),
    7951  vkMemReq,
    7952  createInfo,
    7953  suballocType,
    7954  pAllocation);
    7955  }
    7956  else
    7957  {
    7958  // Bit mask of Vulkan memory types acceptable for this allocation.
    7959  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    7960  uint32_t memTypeIndex = UINT32_MAX;
    7961  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    7962  if(res == VK_SUCCESS)
    7963  {
    7964  res = AllocateMemoryOfType(
    7965  vkMemReq,
    7966  requiresDedicatedAllocation || prefersDedicatedAllocation,
    7967  dedicatedBuffer,
    7968  dedicatedImage,
    7969  createInfo,
    7970  memTypeIndex,
    7971  suballocType,
    7972  pAllocation);
    7973  // Succeeded on first try.
    7974  if(res == VK_SUCCESS)
    7975  {
    7976  return res;
    7977  }
    7978  // Allocation from this memory type failed. Try other compatible memory types.
    7979  else
    7980  {
    7981  for(;;)
    7982  {
    7983  // Remove old memTypeIndex from list of possibilities.
    7984  memoryTypeBits &= ~(1u << memTypeIndex);
    7985  // Find alternative memTypeIndex.
    7986  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    7987  if(res == VK_SUCCESS)
    7988  {
    7989  res = AllocateMemoryOfType(
    7990  vkMemReq,
    7991  requiresDedicatedAllocation || prefersDedicatedAllocation,
    7992  dedicatedBuffer,
    7993  dedicatedImage,
    7994  createInfo,
    7995  memTypeIndex,
    7996  suballocType,
    7997  pAllocation);
    7998  // Allocation from this alternative memory type succeeded.
    7999  if(res == VK_SUCCESS)
    8000  {
    8001  return res;
    8002  }
    8003  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    8004  }
    8005  // No other matching memory type index could be found.
    8006  else
    8007  {
    8008  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    8009  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    8010  }
    8011  }
    8012  }
    8013  }
    8014  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    8015  else
    8016  return res;
    8017  }
    8018 }
    8019 
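// The fallback loop in AllocateMemory() above can be mirrored from user code:
// mask a failing type out of memoryTypeBits and query again. Minimal sketch;
// `allocator`, `memReq`, and `allocCreateInfo` are assumed to exist, and
// TryAllocateFromType() is a hypothetical caller-side helper:
/*
uint32_t memoryTypeBits = memReq.memoryTypeBits;
uint32_t memTypeIndex = UINT32_MAX;
while(vmaFindMemoryTypeIndex(allocator, memoryTypeBits, &allocCreateInfo, &memTypeIndex) == VK_SUCCESS)
{
    if(TryAllocateFromType(memTypeIndex)) // hypothetical helper
    {
        break; // success
    }
    memoryTypeBits &= ~(1u << memTypeIndex); // exclude the failing type, retry
}
*/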
    8020 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
    8021 {
    8022  VMA_ASSERT(allocation);
    8023 
    8024  if(allocation->CanBecomeLost() == false ||
    8025  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
    8026  {
    8027  switch(allocation->GetType())
    8028  {
    8029  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    8030  {
    8031  VmaBlockVector* pBlockVector = VMA_NULL;
    8032  VmaPool hPool = allocation->GetPool();
    8033  if(hPool != VK_NULL_HANDLE)
    8034  {
    8035  pBlockVector = &hPool->m_BlockVector;
    8036  }
    8037  else
    8038  {
    8039  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    8040  pBlockVector = m_pBlockVectors[memTypeIndex];
    8041  }
    8042  pBlockVector->Free(allocation);
    8043  }
    8044  break;
    8045  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    8046  FreeDedicatedMemory(allocation);
    8047  break;
    8048  default:
    8049  VMA_ASSERT(0);
    8050  }
    8051  }
    8052 
    8053  allocation->SetUserData(this, VMA_NULL);
    8054  vma_delete(this, allocation);
    8055 }
    8056 
    8057 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
    8058 {
    8059  // Initialize.
    8060  InitStatInfo(pStats->total);
    8061  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
    8062  InitStatInfo(pStats->memoryType[i]);
    8063  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    8064  InitStatInfo(pStats->memoryHeap[i]);
    8065 
    8066  // Process default pools.
    8067  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    8068  {
    8069  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    8070  VMA_ASSERT(pBlockVector);
    8071  pBlockVector->AddStats(pStats);
    8072  }
    8073 
    8074  // Process custom pools.
    8075  {
    8076  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    8077  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    8078  {
    8079  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
    8080  }
    8081  }
    8082 
    8083  // Process dedicated allocations.
    8084  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    8085  {
    8086  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    8087  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    8088  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    8089  VMA_ASSERT(pDedicatedAllocVector);
    8090  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
    8091  {
    8092  VmaStatInfo allocationStatInfo;
    8093  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
    8094  VmaAddStatInfo(pStats->total, allocationStatInfo);
    8095  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    8096  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    8097  }
    8098  }
    8099 
    8100  // Postprocess.
    8101  VmaPostprocessCalcStatInfo(pStats->total);
    8102  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    8103  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    8104  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
    8105  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
    8106 }
    8107 
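// Reading the statistics computed above through the public API. Sketch,
// assuming a valid `allocator`:
/*
VmaStats stats;
vmaCalculateStats(allocator, &stats);
// stats.total aggregates all heaps; per-type and per-heap entries are also filled.
const VkDeviceSize usedBytes = stats.total.usedBytes;
const VkDeviceSize unusedBytes = stats.total.unusedBytes;
*/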
    8108 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    8109 
    8110 VkResult VmaAllocator_T::Defragment(
    8111  VmaAllocation* pAllocations,
    8112  size_t allocationCount,
    8113  VkBool32* pAllocationsChanged,
    8114  const VmaDefragmentationInfo* pDefragmentationInfo,
    8115  VmaDefragmentationStats* pDefragmentationStats)
    8116 {
    8117  if(pAllocationsChanged != VMA_NULL)
    8118  {
    8119  memset(pAllocationsChanged, 0, allocationCount * sizeof(VkBool32));
    8120  }
    8121  if(pDefragmentationStats != VMA_NULL)
    8122  {
    8123  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
    8124  }
    8125 
    8126  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
    8127 
    8128  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
    8129 
    8130  const size_t poolCount = m_Pools.size();
    8131 
    8132  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
    8133  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    8134  {
    8135  VmaAllocation hAlloc = pAllocations[allocIndex];
    8136  VMA_ASSERT(hAlloc);
    8137  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
    8138  // DedicatedAlloc cannot be defragmented.
    8139  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
    8140  // Only HOST_VISIBLE memory types can be defragmented.
    8141  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
    8142  // Lost allocation cannot be defragmented.
    8143  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
    8144  {
    8145  VmaBlockVector* pAllocBlockVector = VMA_NULL;
    8146 
    8147  const VmaPool hAllocPool = hAlloc->GetPool();
    8148  // This allocation belongs to custom pool.
    8149  if(hAllocPool != VK_NULL_HANDLE)
    8150  {
    8151  pAllocBlockVector = &hAllocPool->GetBlockVector();
    8152  }
    8153  // This allocation belongs to general pool.
    8154  else
    8155  {
    8156  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
    8157  }
    8158 
    8159  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
    8160 
    8161  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
    8162  &pAllocationsChanged[allocIndex] : VMA_NULL;
    8163  pDefragmentator->AddAllocation(hAlloc, pChanged);
    8164  }
    8165  }
    8166 
    8167  VkResult result = VK_SUCCESS;
    8168 
    8169  // ======== Main processing.
    8170 
    8171  VkDeviceSize maxBytesToMove = SIZE_MAX;
    8172  uint32_t maxAllocationsToMove = UINT32_MAX;
    8173  if(pDefragmentationInfo != VMA_NULL)
    8174  {
    8175  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
    8176  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    8177  }
    8178 
    8179  // Process standard memory.
    8180  for(uint32_t memTypeIndex = 0;
    8181  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
    8182  ++memTypeIndex)
    8183  {
    8184  // Only HOST_VISIBLE memory types can be defragmented.
    8185  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    8186  {
    8187  result = m_pBlockVectors[memTypeIndex]->Defragment(
    8188  pDefragmentationStats,
    8189  maxBytesToMove,
    8190  maxAllocationsToMove);
    8191  }
    8192  }
    8193 
    8194  // Process custom pools.
    8195  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
    8196  {
    8197  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
    8198  pDefragmentationStats,
    8199  maxBytesToMove,
    8200  maxAllocationsToMove);
    8201  }
    8202 
    8203  // ======== Destroy defragmentators.
    8204 
    8205  // Process custom pools.
    8206  for(size_t poolIndex = poolCount; poolIndex--; )
    8207  {
    8208  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
    8209  }
    8210 
    8211  // Process standard memory.
    8212  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
    8213  {
    8214  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    8215  {
    8216  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
    8217  }
    8218  }
    8219 
    8220  return result;
    8221 }
    8222 
    8223 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
    8224 {
    8225  if(hAllocation->CanBecomeLost())
    8226  {
    8227  /*
    8228  Warning: This is a carefully designed algorithm.
    8229  Do not modify unless you really know what you're doing :)
    8230  */
    8231  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    8232  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    8233  for(;;)
    8234  {
    8235  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    8236  {
    8237  pAllocationInfo->memoryType = UINT32_MAX;
    8238  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
    8239  pAllocationInfo->offset = 0;
    8240  pAllocationInfo->size = hAllocation->GetSize();
    8241  pAllocationInfo->pMappedData = VMA_NULL;
    8242  pAllocationInfo->pUserData = hAllocation->GetUserData();
    8243  return;
    8244  }
    8245  else if(localLastUseFrameIndex == localCurrFrameIndex)
    8246  {
    8247  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    8248  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    8249  pAllocationInfo->offset = hAllocation->GetOffset();
    8250  pAllocationInfo->size = hAllocation->GetSize();
    8251  pAllocationInfo->pMappedData = VMA_NULL;
    8252  pAllocationInfo->pUserData = hAllocation->GetUserData();
    8253  return;
    8254  }
    8255  else // Last use time earlier than current time.
    8256  {
    8257  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    8258  {
    8259  localLastUseFrameIndex = localCurrFrameIndex;
    8260  }
    8261  }
    8262  }
    8263  }
    8264  else
    8265  {
    8266 #if VMA_STATS_STRING_ENABLED
    8267  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    8268  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    8269  for(;;)
    8270  {
    8271  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
    8272  if(localLastUseFrameIndex == localCurrFrameIndex)
    8273  {
    8274  break;
    8275  }
    8276  else // Last use time earlier than current time.
    8277  {
    8278  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    8279  {
    8280  localLastUseFrameIndex = localCurrFrameIndex;
    8281  }
    8282  }
    8283  }
    8284 #endif
    8285 
    8286  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    8287  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    8288  pAllocationInfo->offset = hAllocation->GetOffset();
    8289  pAllocationInfo->size = hAllocation->GetSize();
    8290  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
    8291  pAllocationInfo->pUserData = hAllocation->GetUserData();
    8292  }
    8293 }
    8294 
    8295 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
    8296 {
    8297  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    8298  if(hAllocation->CanBecomeLost())
    8299  {
    8300  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    8301  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    8302  for(;;)
    8303  {
    8304  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    8305  {
    8306  return false;
    8307  }
    8308  else if(localLastUseFrameIndex == localCurrFrameIndex)
    8309  {
    8310  return true;
    8311  }
    8312  else // Last use time earlier than current time.
    8313  {
    8314  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    8315  {
    8316  localLastUseFrameIndex = localCurrFrameIndex;
    8317  }
    8318  }
    8319  }
    8320  }
    8321  else
    8322  {
    8323 #if VMA_STATS_STRING_ENABLED
    8324  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    8325  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    8326  for(;;)
    8327  {
    8328  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
    8329  if(localLastUseFrameIndex == localCurrFrameIndex)
    8330  {
    8331  break;
    8332  }
    8333  else // Last use time earlier than current time.
    8334  {
    8335  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    8336  {
    8337  localLastUseFrameIndex = localCurrFrameIndex;
    8338  }
    8339  }
    8340  }
    8341 #endif
    8342 
    8343  return true;
    8344  }
    8345 }
    8346 
    8347 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    8348 {
    8349  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
    8350 
    8351  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    8352 
    8353  if(newCreateInfo.maxBlockCount == 0)
    8354  {
    8355  newCreateInfo.maxBlockCount = SIZE_MAX;
    8356  }
    8357  if(newCreateInfo.blockSize == 0)
    8358  {
    8359  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    8360  }
    8361 
    8362  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
    8363 
    8364  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    8365  if(res != VK_SUCCESS)
    8366  {
    8367  vma_delete(this, *pPool);
    8368  *pPool = VMA_NULL;
    8369  return res;
    8370  }
    8371 
    8372  // Add to m_Pools.
    8373  {
    8374  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    8375  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    8376  }
    8377 
    8378  return VK_SUCCESS;
    8379 }
    8380 
    8381 void VmaAllocator_T::DestroyPool(VmaPool pool)
    8382 {
    8383  // Remove from m_Pools.
    8384  {
    8385  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    8386  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    8387  VMA_ASSERT(success && "Pool not found in Allocator.");
    8388  }
    8389 
    8390  vma_delete(this, pool);
    8391 }
    8392 
    8393 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
    8394 {
    8395  pool->m_BlockVector.GetPoolStats(pPoolStats);
    8396 }
    8397 
    8398 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
    8399 {
    8400  m_CurrentFrameIndex.store(frameIndex);
    8401 }
    8402 
    8403 void VmaAllocator_T::MakePoolAllocationsLost(
    8404  VmaPool hPool,
    8405  size_t* pLostAllocationCount)
    8406 {
    8407  hPool->m_BlockVector.MakePoolAllocationsLost(
    8408  m_CurrentFrameIndex.load(),
    8409  pLostAllocationCount);
    8410 }
    8411 
    8412 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
    8413 {
    8414  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
    8415  (*pAllocation)->InitLost();
    8416 }
    8417 
    8418 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
    8419 {
    8420  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
    8421 
    8422  VkResult res;
    8423  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    8424  {
    8425  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    8426  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
    8427  {
    8428  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    8429  if(res == VK_SUCCESS)
    8430  {
    8431  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
    8432  }
    8433  }
    8434  else
    8435  {
    8436  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
    8437  }
    8438  }
    8439  else
    8440  {
    8441  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    8442  }
    8443 
    8444  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    8445  {
    8446  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    8447  }
    8448 
    8449  return res;
    8450 }
    8451 
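// The m_HeapSizeLimit accounting above is configured through
// VmaAllocatorCreateInfo::pHeapSizeLimit. Sketch with illustrative values
// (assumes `physicalDevice` and `device` exist), capping heap 0 at 256 MiB:
/*
VkDeviceSize heapSizeLimit[VK_MAX_MEMORY_HEAPS];
for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
{
    heapSizeLimit[i] = VK_WHOLE_SIZE; // VK_WHOLE_SIZE means "no limit"
}
heapSizeLimit[0] = 256ull * 1024 * 1024;

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;
allocatorInfo.pHeapSizeLimit = heapSizeLimit;
*/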
    8452 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
    8453 {
    8454  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    8455  {
    8456  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    8457  }
    8458 
    8459  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
    8460 
    8461  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    8462  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    8463  {
    8464  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    8465  m_HeapSizeLimit[heapIndex] += size;
    8466  }
    8467 }
    8468 
    8469 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
    8470 {
    8471  if(hAllocation->CanBecomeLost())
    8472  {
    8473  return VK_ERROR_MEMORY_MAP_FAILED;
    8474  }
    8475 
    8476  switch(hAllocation->GetType())
    8477  {
    8478  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    8479  {
    8480  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    8481  char *pBytes = VMA_NULL;
    8482  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
    8483  if(res == VK_SUCCESS)
    8484  {
    8485  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
    8486  hAllocation->BlockAllocMap();
    8487  }
    8488  return res;
    8489  }
    8490  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    8491  return hAllocation->DedicatedAllocMap(this, ppData);
    8492  default:
    8493  VMA_ASSERT(0);
    8494  return VK_ERROR_MEMORY_MAP_FAILED;
    8495  }
    8496 }
    8497 
    8498 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
    8499 {
    8500  switch(hAllocation->GetType())
    8501  {
    8502  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    8503  {
    8504  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    8505  hAllocation->BlockAllocUnmap();
    8506  pBlock->Unmap(this, 1);
    8507  }
    8508  break;
    8509  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    8510  hAllocation->DedicatedAllocUnmap(this);
    8511  break;
    8512  default:
    8513  VMA_ASSERT(0);
    8514  }
    8515 }
    8516 
    8517 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
    8518 {
    8519  VkResult res = VK_SUCCESS;
    8520  switch(hAllocation->GetType())
    8521  {
    8522  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    8523  res = GetVulkanFunctions().vkBindBufferMemory(
    8524  m_hDevice,
    8525  hBuffer,
    8526  hAllocation->GetMemory(),
    8527  0); //memoryOffset
    8528  break;
    8529  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    8530  {
    8531  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    8532  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
    8533  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
    8534  break;
    8535  }
    8536  default:
    8537  VMA_ASSERT(0);
    8538  }
    8539  return res;
    8540 }
    8541 
    8542 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
    8543 {
    8544  VkResult res = VK_SUCCESS;
    8545  switch(hAllocation->GetType())
    8546  {
    8547  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    8548  res = GetVulkanFunctions().vkBindImageMemory(
    8549  m_hDevice,
    8550  hImage,
    8551  hAllocation->GetMemory(),
    8552  0); //memoryOffset
    8553  break;
    8554  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    8555  {
    8556  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    8557  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
    8558  res = pBlock->BindImageMemory(this, hAllocation, hImage);
    8559  break;
    8560  }
    8561  default:
    8562  VMA_ASSERT(0);
    8563  }
    8564  return res;
    8565 }
    8566 
    8567 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
    8568 {
    8569  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
    8570 
    8571  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    8572  {
    8573  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    8574  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    8575  VMA_ASSERT(pDedicatedAllocations);
    8576  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
    8577  VMA_ASSERT(success);
    8578  }
    8579 
    8580  VkDeviceMemory hMemory = allocation->GetMemory();
    8581 
    8582  if(allocation->GetMappedData() != VMA_NULL)
    8583  {
    8584  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    8585  }
    8586 
    8587  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
    8588 
    8589  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
    8590 }
    8591 
    8592 #if VMA_STATS_STRING_ENABLED
    8593 
    8594 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
    8595 {
    8596  bool dedicatedAllocationsStarted = false;
    8597  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    8598  {
    8599  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    8600  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    8601  VMA_ASSERT(pDedicatedAllocVector);
    8602  if(pDedicatedAllocVector->empty() == false)
    8603  {
    8604  if(dedicatedAllocationsStarted == false)
    8605  {
    8606  dedicatedAllocationsStarted = true;
    8607  json.WriteString("DedicatedAllocations");
    8608  json.BeginObject();
    8609  }
    8610 
    8611  json.BeginString("Type ");
    8612  json.ContinueString(memTypeIndex);
    8613  json.EndString();
    8614 
    8615  json.BeginArray();
    8616 
    8617  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
    8618  {
    8619  json.BeginObject(true);
    8620  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
    8621  hAlloc->PrintParameters(json);
    8622  json.EndObject();
    8623  }
    8624 
    8625  json.EndArray();
    8626  }
    8627  }
    8628  if(dedicatedAllocationsStarted)
    8629  {
    8630  json.EndObject();
    8631  }
    8632 
    8633  {
    8634  bool allocationsStarted = false;
    8635  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    8636  {
    8637  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
    8638  {
    8639  if(allocationsStarted == false)
    8640  {
    8641  allocationsStarted = true;
    8642  json.WriteString("DefaultPools");
    8643  json.BeginObject();
    8644  }
    8645 
    8646  json.BeginString("Type ");
    8647  json.ContinueString(memTypeIndex);
    8648  json.EndString();
    8649 
    8650  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
    8651  }
    8652  }
    8653  if(allocationsStarted)
    8654  {
    8655  json.EndObject();
    8656  }
    8657  }
    8658 
    8659  {
    8660  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    8661  const size_t poolCount = m_Pools.size();
    8662  if(poolCount > 0)
    8663  {
    8664  json.WriteString("Pools");
    8665  json.BeginArray();
    8666  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
    8667  {
    8668  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
    8669  }
    8670  json.EndArray();
    8671  }
    8672  }
    8673 }
    8674 
    8675 #endif // #if VMA_STATS_STRING_ENABLED
    8676 
    8677 static VkResult AllocateMemoryForImage(
    8678  VmaAllocator allocator,
    8679  VkImage image,
    8680  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    8681  VmaSuballocationType suballocType,
    8682  VmaAllocation* pAllocation)
    8683 {
    8684  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
    8685 
    8686  VkMemoryRequirements vkMemReq = {};
    8687  bool requiresDedicatedAllocation = false;
    8688  bool prefersDedicatedAllocation = false;
    8689  allocator->GetImageMemoryRequirements(image, vkMemReq,
    8690  requiresDedicatedAllocation, prefersDedicatedAllocation);
    8691 
    8692  return allocator->AllocateMemory(
    8693  vkMemReq,
    8694  requiresDedicatedAllocation,
    8695  prefersDedicatedAllocation,
    8696  VK_NULL_HANDLE, // dedicatedBuffer
    8697  image, // dedicatedImage
    8698  *pAllocationCreateInfo,
    8699  suballocType,
    8700  pAllocation);
    8701 }
    8702 
    8703 ////////////////////////////////////////////////////////////////////////////////
    8704 // Public interface
    8705 
    8706 VkResult vmaCreateAllocator(
    8707  const VmaAllocatorCreateInfo* pCreateInfo,
    8708  VmaAllocator* pAllocator)
    8709 {
    8710  VMA_ASSERT(pCreateInfo && pAllocator);
    8711  VMA_DEBUG_LOG("vmaCreateAllocator");
    8712  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    8713  return VK_SUCCESS;
    8714 }
    8715 
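// Typical creation sequence for the function above. Sketch assuming the
// application already created `physicalDevice` and `device`:
/*
VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;

VmaAllocator allocator = VK_NULL_HANDLE;
VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
// ... use the allocator ...
vmaDestroyAllocator(allocator); // safe even for VK_NULL_HANDLE, see above
*/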
    8716 void vmaDestroyAllocator(
    8717  VmaAllocator allocator)
    8718 {
    8719  if(allocator != VK_NULL_HANDLE)
    8720  {
    8721  VMA_DEBUG_LOG("vmaDestroyAllocator");
    8722  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
    8723  vma_delete(&allocationCallbacks, allocator);
    8724  }
    8725 }
    8726 
    8727 void vmaGetPhysicalDeviceProperties(
    8728  VmaAllocator allocator,
    8729  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    8730 {
    8731  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    8732  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    8733 }
    8734 
    8735 void vmaGetMemoryProperties(
    8736  VmaAllocator allocator,
    8737  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    8738 {
    8739  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    8740  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    8741 }
    8742 
    8743 void vmaGetMemoryTypeProperties(
    8744  VmaAllocator allocator,
    8745  uint32_t memoryTypeIndex,
    8746  VkMemoryPropertyFlags* pFlags)
    8747 {
    8748  VMA_ASSERT(allocator && pFlags);
    8749  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    8750  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    8751 }
    8752 
    8753 void vmaSetCurrentFrameIndex(
    8754  VmaAllocator allocator,
    8755  uint32_t frameIndex)
    8756 {
    8757  VMA_ASSERT(allocator);
    8758  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    8759 
    8760  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8761 
    8762  allocator->SetCurrentFrameIndex(frameIndex);
    8763 }
    8764 
    8765 void vmaCalculateStats(
    8766  VmaAllocator allocator,
    8767  VmaStats* pStats)
    8768 {
    8769  VMA_ASSERT(allocator && pStats);
    8770  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8771  allocator->CalculateStats(pStats);
    8772 }
    8773 
    8774 #if VMA_STATS_STRING_ENABLED
    8775 
    8776 void vmaBuildStatsString(
    8777  VmaAllocator allocator,
    8778  char** ppStatsString,
    8779  VkBool32 detailedMap)
    8780 {
    8781  VMA_ASSERT(allocator && ppStatsString);
    8782  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8783 
    8784  VmaStringBuilder sb(allocator);
    8785  {
    8786  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
    8787  json.BeginObject();
    8788 
    8789  VmaStats stats;
    8790  allocator->CalculateStats(&stats);
    8791 
    8792  json.WriteString("Total");
    8793  VmaPrintStatInfo(json, stats.total);
    8794 
    8795  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
    8796  {
    8797  json.BeginString("Heap ");
    8798  json.ContinueString(heapIndex);
    8799  json.EndString();
    8800  json.BeginObject();
    8801 
    8802  json.WriteString("Size");
    8803  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
    8804 
    8805  json.WriteString("Flags");
    8806  json.BeginArray(true);
    8807  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
    8808  {
    8809  json.WriteString("DEVICE_LOCAL");
    8810  }
    8811  json.EndArray();
    8812 
    8813  if(stats.memoryHeap[heapIndex].blockCount > 0)
    8814  {
    8815  json.WriteString("Stats");
    8816  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
    8817  }
    8818 
    8819  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
    8820  {
    8821  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
    8822  {
    8823  json.BeginString("Type ");
    8824  json.ContinueString(typeIndex);
    8825  json.EndString();
    8826 
    8827  json.BeginObject();
    8828 
    8829  json.WriteString("Flags");
    8830  json.BeginArray(true);
    8831  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
    8832  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
    8833  {
    8834  json.WriteString("DEVICE_LOCAL");
    8835  }
    8836  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    8837  {
    8838  json.WriteString("HOST_VISIBLE");
    8839  }
    8840  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
    8841  {
    8842  json.WriteString("HOST_COHERENT");
    8843  }
    8844  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
    8845  {
    8846  json.WriteString("HOST_CACHED");
    8847  }
    8848  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
    8849  {
    8850  json.WriteString("LAZILY_ALLOCATED");
    8851  }
    8852  json.EndArray();
    8853 
    8854  if(stats.memoryType[typeIndex].blockCount > 0)
    8855  {
    8856  json.WriteString("Stats");
    8857  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
    8858  }
    8859 
    8860  json.EndObject();
    8861  }
    8862  }
    8863 
    8864  json.EndObject();
    8865  }
    8866  if(detailedMap == VK_TRUE)
    8867  {
    8868  allocator->PrintDetailedMap(json);
    8869  }
    8870 
    8871  json.EndObject();
    8872  }
    8873 
    8874  const size_t len = sb.GetLength();
    8875  char* const pChars = vma_new_array(allocator, char, len + 1);
    8876  if(len > 0)
    8877  {
    8878  memcpy(pChars, sb.GetData(), len);
    8879  }
    8880  pChars[len] = '\0';
    8881  *ppStatsString = pChars;
    8882 }
    8883 
    8884 void vmaFreeStatsString(
    8885  VmaAllocator allocator,
    8886  char* pStatsString)
    8887 {
    8888  if(pStatsString != VMA_NULL)
    8889  {
    8890  VMA_ASSERT(allocator);
    8891  size_t len = strlen(pStatsString);
    8892  vma_delete_array(allocator, pStatsString, len + 1);
    8893  }
    8894 }
    8895 
    8896 #endif // #if VMA_STATS_STRING_ENABLED
    8897 
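// Usage sketch for the two stats-string functions above (only available when
// VMA_STATS_STRING_ENABLED is nonzero; assumes a valid `allocator` and <cstdio>):
/*
char* statsString = nullptr;
vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map
printf("%s\n", statsString);
vmaFreeStatsString(allocator, statsString);
*/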
    8898 /*
    8899 This function is not protected by any mutex because it just reads immutable data.
    8900 */
    8901 VkResult vmaFindMemoryTypeIndex(
    8902  VmaAllocator allocator,
    8903  uint32_t memoryTypeBits,
    8904  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    8905  uint32_t* pMemoryTypeIndex)
    8906 {
    8907  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    8908  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    8909  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    8910 
    8911  if(pAllocationCreateInfo->memoryTypeBits != 0)
    8912  {
    8913  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
    8914  }
    8915 
    8916  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    8917  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    8918 
    8919  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    8920  if(mapped)
    8921  {
    8922  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    8923  }
    8924 
    8925  // Convert usage to requiredFlags and preferredFlags.
    8926  switch(pAllocationCreateInfo->usage)
    8927  {
    8928  case VMA_MEMORY_USAGE_UNKNOWN:
    8929  break;
    8930  case VMA_MEMORY_USAGE_GPU_ONLY:
    8931  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    8932  {
    8933  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    8934  }
    8935  break;
    8936  case VMA_MEMORY_USAGE_CPU_ONLY:
    8937  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    8938  break;
    8939  case VMA_MEMORY_USAGE_CPU_TO_GPU:
    8940  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    8941  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    8942  {
    8943  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    8944  }
    8945  break;
    8946  case VMA_MEMORY_USAGE_GPU_TO_CPU:
    8947  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    8948  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    8949  break;
    8950  default:
    8951  break;
    8952  }
    8953 
    8954  *pMemoryTypeIndex = UINT32_MAX;
    8955  uint32_t minCost = UINT32_MAX;
    8956  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    8957  memTypeIndex < allocator->GetMemoryTypeCount();
    8958  ++memTypeIndex, memTypeBit <<= 1)
    8959  {
    8960  // This memory type is acceptable according to memoryTypeBits bitmask.
    8961  if((memTypeBit & memoryTypeBits) != 0)
    8962  {
    8963  const VkMemoryPropertyFlags currFlags =
    8964  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    8965  // This memory type contains requiredFlags.
    8966  if((requiredFlags & ~currFlags) == 0)
    8967  {
    8968  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    8969  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
    8970  // Remember memory type with lowest cost.
    8971  if(currCost < minCost)
    8972  {
    8973  *pMemoryTypeIndex = memTypeIndex;
    8974  if(currCost == 0)
    8975  {
    8976  return VK_SUCCESS;
    8977  }
    8978  minCost = currCost;
    8979  }
    8980  }
    8981  }
    8982  }
    8983  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    8984 }
    8985 
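// Usage sketch for vmaFindMemoryTypeIndex() above: pick a host-visible,
// preferably cached type for requirements obtained from
// vkGetBufferMemoryRequirements (assumes `allocator` and `memReq`):
/*
VmaAllocationCreateInfo createInfo = {};
createInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
createInfo.preferredFlags = VK_MEMORY_PROPERTY_HOST_CACHED_BIT;

uint32_t memTypeIndex = UINT32_MAX;
VkResult res = vmaFindMemoryTypeIndex(
    allocator, memReq.memoryTypeBits, &createInfo, &memTypeIndex);
// Returns VK_ERROR_FEATURE_NOT_PRESENT if no type satisfies requiredFlags.
*/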
    8986 VkResult vmaFindMemoryTypeIndexForBufferInfo(
    8987  VmaAllocator allocator,
    8988  const VkBufferCreateInfo* pBufferCreateInfo,
    8989  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    8990  uint32_t* pMemoryTypeIndex)
    8991 {
    8992  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    8993  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    8994  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    8995  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    8996 
    8997  const VkDevice hDev = allocator->m_hDevice;
    8998  VkBuffer hBuffer = VK_NULL_HANDLE;
    8999  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
    9000  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
    9001  if(res == VK_SUCCESS)
    9002  {
    9003  VkMemoryRequirements memReq = {};
    9004  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
    9005  hDev, hBuffer, &memReq);
    9006 
    9007  res = vmaFindMemoryTypeIndex(
    9008  allocator,
    9009  memReq.memoryTypeBits,
    9010  pAllocationCreateInfo,
    9011  pMemoryTypeIndex);
    9012 
    9013  allocator->GetVulkanFunctions().vkDestroyBuffer(
    9014  hDev, hBuffer, allocator->GetAllocationCallbacks());
    9015  }
    9016  return res;
    9017 }
    9018 
    9019 VkResult vmaFindMemoryTypeIndexForImageInfo(
    9020  VmaAllocator allocator,
    9021  const VkImageCreateInfo* pImageCreateInfo,
    9022  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    9023  uint32_t* pMemoryTypeIndex)
    9024 {
    9025  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    9026  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    9027  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    9028  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    9029 
    9030  const VkDevice hDev = allocator->m_hDevice;
    9031  VkImage hImage = VK_NULL_HANDLE;
    9032  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
    9033  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
    9034  if(res == VK_SUCCESS)
    9035  {
    9036  VkMemoryRequirements memReq = {};
    9037  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
    9038  hDev, hImage, &memReq);
    9039 
    9040  res = vmaFindMemoryTypeIndex(
    9041  allocator,
    9042  memReq.memoryTypeBits,
    9043  pAllocationCreateInfo,
    9044  pMemoryTypeIndex);
    9045 
    9046  allocator->GetVulkanFunctions().vkDestroyImage(
    9047  hDev, hImage, allocator->GetAllocationCallbacks());
    9048  }
    9049  return res;
    9050 }
    9051 
    9052 VkResult vmaCreatePool(
    9053  VmaAllocator allocator,
    9054  const VmaPoolCreateInfo* pCreateInfo,
    9055  VmaPool* pPool)
    9056 {
    9057  VMA_ASSERT(allocator && pCreateInfo && pPool);
    9058 
    9059  VMA_DEBUG_LOG("vmaCreatePool");
    9060 
    9061  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    9062 
    9063  return allocator->CreatePool(pCreateInfo, pPool);
    9064 }
    9065 
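// Usage sketch for vmaCreatePool() above: a fixed-size custom pool. The
// memoryTypeIndex would normally come from one of the vmaFindMemoryTypeIndex*
// functions; sizes here are illustrative:
/*
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.blockSize = 128ull * 1024 * 1024; // one 128 MiB block
poolCreateInfo.minBlockCount = 1;
poolCreateInfo.maxBlockCount = 1; // equal min/max: pool never grows or shrinks

VmaPool pool = VK_NULL_HANDLE;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
// ... allocate with VmaAllocationCreateInfo::pool = pool ...
// vmaDestroyPool(allocator, pool);
*/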
    9066 void vmaDestroyPool(
    9067  VmaAllocator allocator,
    9068  VmaPool pool)
    9069 {
    9070  VMA_ASSERT(allocator);
    9071 
    9072  if(pool == VK_NULL_HANDLE)
    9073  {
    9074  return;
    9075  }
    9076 
    9077  VMA_DEBUG_LOG("vmaDestroyPool");
    9078 
    9079  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    9080 
    9081  allocator->DestroyPool(pool);
    9082 }
    9083 
    9084 void vmaGetPoolStats(
    9085  VmaAllocator allocator,
    9086  VmaPool pool,
    9087  VmaPoolStats* pPoolStats)
    9088 {
    9089  VMA_ASSERT(allocator && pool && pPoolStats);
    9090 
    9091  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    9092 
    9093  allocator->GetPoolStats(pool, pPoolStats);
    9094 }
    9095 
    9096 void vmaMakePoolAllocationsLost(
    9097  VmaAllocator allocator,
    9098  VmaPool pool,
    9099  size_t* pLostAllocationCount)
    9100 {
    9101  VMA_ASSERT(allocator && pool);
    9102 
    9103  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    9104 
    9105  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    9106 }
    9107 
    9108 VkResult vmaAllocateMemory(
    9109  VmaAllocator allocator,
    9110  const VkMemoryRequirements* pVkMemoryRequirements,
    9111  const VmaAllocationCreateInfo* pCreateInfo,
    9112  VmaAllocation* pAllocation,
    9113  VmaAllocationInfo* pAllocationInfo)
    9114 {
    9115  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
    9116 
    9117  VMA_DEBUG_LOG("vmaAllocateMemory");
    9118 
    9119  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    9120 
    9121  VkResult result = allocator->AllocateMemory(
    9122  *pVkMemoryRequirements,
    9123  false, // requiresDedicatedAllocation
    9124  false, // prefersDedicatedAllocation
    9125  VK_NULL_HANDLE, // dedicatedBuffer
    9126  VK_NULL_HANDLE, // dedicatedImage
    9127  *pCreateInfo,
    9128  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    9129  pAllocation);
    9130 
    9131  if(pAllocationInfo && result == VK_SUCCESS)
    9132  {
    9133  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    9134  }
    9135 
    9136  return result;
    9137 }
    9138 
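// Usage sketch for vmaAllocateMemory() above: raw allocation for requirements
// obtained elsewhere (assumes `allocator` and a filled VkMemoryRequirements `memReq`):
/*
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation alloc = VK_NULL_HANDLE;
VmaAllocationInfo allocInfo = {};
VkResult res = vmaAllocateMemory(allocator, &memReq, &allocCreateInfo, &alloc, &allocInfo);
// On success, allocInfo.deviceMemory and allocInfo.offset identify the range
// to bind, e.g. via vmaBindBufferMemory() or vmaBindImageMemory() below.
*/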
    9139 VkResult vmaAllocateMemoryForBuffer(
    9140  VmaAllocator allocator,
    9141  VkBuffer buffer,
    9142  const VmaAllocationCreateInfo* pCreateInfo,
    9143  VmaAllocation* pAllocation,
    9144  VmaAllocationInfo* pAllocationInfo)
    9145 {
    9146  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    9147 
    9148  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    9149 
    9150  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    9151 
    9152  VkMemoryRequirements vkMemReq = {};
    9153  bool requiresDedicatedAllocation = false;
    9154  bool prefersDedicatedAllocation = false;
    9155  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
    9156  requiresDedicatedAllocation,
    9157  prefersDedicatedAllocation);
    9158 
    9159  VkResult result = allocator->AllocateMemory(
    9160  vkMemReq,
    9161  requiresDedicatedAllocation,
    9162  prefersDedicatedAllocation,
    9163  buffer, // dedicatedBuffer
    9164  VK_NULL_HANDLE, // dedicatedImage
    9165  *pCreateInfo,
    9166  VMA_SUBALLOCATION_TYPE_BUFFER,
    9167  pAllocation);
    9168 
    9169  if(pAllocationInfo && result == VK_SUCCESS)
    9170  {
    9171  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    9172  }
    9173 
    9174  return result;
    9175 }
    9176 
    9177 VkResult vmaAllocateMemoryForImage(
    9178  VmaAllocator allocator,
    9179  VkImage image,
    9180  const VmaAllocationCreateInfo* pCreateInfo,
    9181  VmaAllocation* pAllocation,
    9182  VmaAllocationInfo* pAllocationInfo)
    9183 {
    9184  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    9185 
    9186  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
    9187 
    9188  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    9189 
    9190  VkResult result = AllocateMemoryForImage(
    9191  allocator,
    9192  image,
    9193  pCreateInfo,
    9194  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
    9195  pAllocation);
    9196 
    9197  if(pAllocationInfo && result == VK_SUCCESS)
    9198  {
    9199  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    9200  }
    9201 
    9202  return result;
    9203 }
    9204 
    9205 void vmaFreeMemory(
    9206  VmaAllocator allocator,
    9207  VmaAllocation allocation)
    9208 {
    9209  VMA_ASSERT(allocator);
    9210  VMA_DEBUG_LOG("vmaFreeMemory");
    9211  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    9212  if(allocation != VK_NULL_HANDLE)
    9213  {
    9214  allocator->FreeMemory(allocation);
    9215  }
    9216 }
    9217 
    9218 void vmaGetAllocationInfo(
    9219  VmaAllocator allocator,
    9220  VmaAllocation allocation,
    9221  VmaAllocationInfo* pAllocationInfo)
    9222 {
    9223  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    9224 
    9225  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    9226 
    9227  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    9228 }
    9229 
    9230 VkBool32 vmaTouchAllocation(
    9231  VmaAllocator allocator,
    9232  VmaAllocation allocation)
    9233 {
    9234  VMA_ASSERT(allocator && allocation);
    9235 
    9236  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    9237 
    9238  return allocator->TouchAllocation(allocation);
    9239 }
    9240 
    9241 void vmaSetAllocationUserData(
    9242  VmaAllocator allocator,
    9243  VmaAllocation allocation,
    9244  void* pUserData)
    9245 {
    9246  VMA_ASSERT(allocator && allocation);
    9247 
    9248  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    9249 
    9250  allocation->SetUserData(allocator, pUserData);
    9251 }
    9252 
    9253 void vmaCreateLostAllocation(
    9254  VmaAllocator allocator,
    9255  VmaAllocation* pAllocation)
    9256 {
    9257  VMA_ASSERT(allocator && pAllocation);
    9258 
    9259  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    9260 
    9261  allocator->CreateLostAllocation(pAllocation);
    9262 }
    9263 
    9264 VkResult vmaMapMemory(
    9265  VmaAllocator allocator,
    9266  VmaAllocation allocation,
    9267  void** ppData)
    9268 {
    9269  VMA_ASSERT(allocator && allocation && ppData);
    9270 
    9271  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    9272 
    9273  return allocator->Map(allocation, ppData);
    9274 }
    9275 
    9276 void vmaUnmapMemory(
    9277  VmaAllocator allocator,
    9278  VmaAllocation allocation)
    9279 {
    9280  VMA_ASSERT(allocator && allocation);
    9281 
    9282  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    9283 
    9284  allocator->Unmap(allocation);
    9285 }
    9286 
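// Map/flush/unmap sketch tying vmaMapMemory()/vmaUnmapMemory() above to the
// vmaFlushAllocation() added by this patch, for memory that is HOST_VISIBLE
// but not HOST_COHERENT. Assumes `allocator`, `alloc`, and caller-provided
// `srcData`/`dataSize`; per the patch notes, nonCoherentAtomSize alignment is
// handled inside vmaFlushAllocation():
/*
void* mapped = nullptr;
if(vmaMapMemory(allocator, alloc, &mapped) == VK_SUCCESS)
{
    memcpy(mapped, srcData, dataSize); // requires <cstring>
    vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
    vmaUnmapMemory(allocator, alloc);
}
*/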
    9287 VkResult vmaDefragment(
    9288  VmaAllocator allocator,
    9289  VmaAllocation* pAllocations,
    9290  size_t allocationCount,
    9291  VkBool32* pAllocationsChanged,
    9292  const VmaDefragmentationInfo *pDefragmentationInfo,
    9293  VmaDefragmentationStats* pDefragmentationStats)
    9294 {
    9295  VMA_ASSERT(allocator && pAllocations);
    9296 
    9297  VMA_DEBUG_LOG("vmaDefragment");
    9298 
    9299  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    9300 
    9301  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
    9302 }
    9303 
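// Usage sketch for vmaDefragment() above (assumes `allocator`, <vector>, and a
// std::vector<VmaAllocation> `allocations` of host-visible block allocations):
/*
std::vector<VkBool32> changed(allocations.size());
VmaDefragmentationStats defragStats = {};
VkResult res = vmaDefragment(
    allocator,
    allocations.data(), allocations.size(),
    changed.data(),
    nullptr, // default limits for bytes/allocations to move
    &defragStats);
// changed[i] == VK_TRUE: allocation i was moved; any buffer or image bound to
// it must be recreated and rebound by the caller.
*/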
    9304 VkResult vmaBindBufferMemory(
    9305  VmaAllocator allocator,
    9306  VmaAllocation allocation,
    9307  VkBuffer buffer)
    9308 {
    9309  VMA_ASSERT(allocator && allocation && buffer);
    9310 
    9311  VMA_DEBUG_LOG("vmaBindBufferMemory");
    9312 
    9313  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    9314 
    9315  return allocator->BindBufferMemory(allocation, buffer);
    9316 }
    9317 
    9318 VkResult vmaBindImageMemory(
    9319  VmaAllocator allocator,
    9320  VmaAllocation allocation,
    9321  VkImage image)
    9322 {
    9323  VMA_ASSERT(allocator && allocation && image);
    9324 
    9325  VMA_DEBUG_LOG("vmaBindImageMemory");
    9326 
    9327  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    9328 
    9329  return allocator->BindImageMemory(allocation, image);
    9330 }
    9331 
    9332 VkResult vmaCreateBuffer(
    9333  VmaAllocator allocator,
    9334  const VkBufferCreateInfo* pBufferCreateInfo,
    9335  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    9336  VkBuffer* pBuffer,
    9337  VmaAllocation* pAllocation,
    9338  VmaAllocationInfo* pAllocationInfo)
    9339 {
    9340  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
    9341 
    9342  VMA_DEBUG_LOG("vmaCreateBuffer");
    9343 
    9344  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    9345 
    9346  *pBuffer = VK_NULL_HANDLE;
    9347  *pAllocation = VK_NULL_HANDLE;
    9348 
    9349  // 1. Create VkBuffer.
    9350  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
    9351  allocator->m_hDevice,
    9352  pBufferCreateInfo,
    9353  allocator->GetAllocationCallbacks(),
    9354  pBuffer);
    9355  if(res >= 0)
    9356  {
    9357  // 2. vkGetBufferMemoryRequirements.
    9358  VkMemoryRequirements vkMemReq = {};
    9359  bool requiresDedicatedAllocation = false;
    9360  bool prefersDedicatedAllocation = false;
    9361  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
    9362  requiresDedicatedAllocation, prefersDedicatedAllocation);
    9363 
    9364  // Make sure alignment requirements for specific buffer usages reported
    9365  // in Physical Device Properties are included in alignment reported by memory requirements.
    9366  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
    9367  {
    9368  VMA_ASSERT(vkMemReq.alignment %
    9369  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
    9370  }
    9371  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
    9372  {
    9373  VMA_ASSERT(vkMemReq.alignment %
    9374  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
    9375  }
    9376  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
    9377  {
    9378  VMA_ASSERT(vkMemReq.alignment %
    9379  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
    9380  }
    9381 
    9382  // 3. Allocate memory using allocator.
    9383  res = allocator->AllocateMemory(
    9384  vkMemReq,
    9385  requiresDedicatedAllocation,
    9386  prefersDedicatedAllocation,
    9387  *pBuffer, // dedicatedBuffer
    9388  VK_NULL_HANDLE, // dedicatedImage
    9389  *pAllocationCreateInfo,
    9390  VMA_SUBALLOCATION_TYPE_BUFFER,
    9391  pAllocation);
    9392  if(res >= 0)
    9393  {
    9394  // 4. Bind buffer with memory.
    9395  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
    9396  if(res >= 0)
    9397  {
    9398  // All steps succeeded.
    9399  #if VMA_STATS_STRING_ENABLED
    9400  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
    9401  #endif
    9402  if(pAllocationInfo != VMA_NULL)
    9403  {
    9404  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    9405  }
    9406  return VK_SUCCESS;
    9407  }
    9408  allocator->FreeMemory(*pAllocation);
    9409  *pAllocation = VK_NULL_HANDLE;
    9410  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    9411  *pBuffer = VK_NULL_HANDLE;
    9412  return res;
    9413  }
    9414  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    9415  *pBuffer = VK_NULL_HANDLE;
    9416  return res;
    9417  }
    9418  return res;
    9419 }
    9420 
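// Usage sketch for vmaCreateBuffer() above (assumes a valid `allocator`):
/*
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

VkBuffer buf = VK_NULL_HANDLE;
VmaAllocation alloc = VK_NULL_HANDLE;
VkResult res = vmaCreateBuffer(
    allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
// ... later: vmaDestroyBuffer(allocator, buf, alloc);
*/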
    9421 void vmaDestroyBuffer(
    9422  VmaAllocator allocator,
    9423  VkBuffer buffer,
    9424  VmaAllocation allocation)
    9425 {
    9426  VMA_ASSERT(allocator);
    9427  VMA_DEBUG_LOG("vmaDestroyBuffer");
    9428  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    9429  if(buffer != VK_NULL_HANDLE)
    9430  {
    9431  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    9432  }
    9433  if(allocation != VK_NULL_HANDLE)
    9434  {
    9435  allocator->FreeMemory(allocation);
    9436  }
    9437 }
    9438 
    9439 VkResult vmaCreateImage(
    9440  VmaAllocator allocator,
    9441  const VkImageCreateInfo* pImageCreateInfo,
    9442  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    9443  VkImage* pImage,
    9444  VmaAllocation* pAllocation,
    9445  VmaAllocationInfo* pAllocationInfo)
    9446 {
    9447  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
    9448 
    9449  VMA_DEBUG_LOG("vmaCreateImage");
    9450 
    9451  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    9452 
    9453  *pImage = VK_NULL_HANDLE;
    9454  *pAllocation = VK_NULL_HANDLE;
    9455 
    9456  // 1. Create VkImage.
    9457  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
    9458  allocator->m_hDevice,
    9459  pImageCreateInfo,
    9460  allocator->GetAllocationCallbacks(),
    9461  pImage);
    9462  if(res >= 0)
    9463  {
    9464  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
    9465  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
    9466  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
    9467 
    9468  // 2. Allocate memory using allocator.
    9469  res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
    9470  if(res >= 0)
    9471  {
    9472  // 3. Bind image with memory.
    9473  res = allocator->BindImageMemory(*pAllocation, *pImage);
    9474  if(res >= 0)
    9475  {
    9476  // All steps succeeded.
    9477  #if VMA_STATS_STRING_ENABLED
    9478  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
    9479  #endif
    9480  if(pAllocationInfo != VMA_NULL)
    9481  {
    9482  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    9483  }
    9484  return VK_SUCCESS;
    9485  }
    9486  allocator->FreeMemory(*pAllocation);
    9487  *pAllocation = VK_NULL_HANDLE;
    9488  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    9489  *pImage = VK_NULL_HANDLE;
    9490  return res;
    9491  }
    9492  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    9493  *pImage = VK_NULL_HANDLE;
    9494  return res;
    9495  }
    9496  return res;
    9497 }
    9498 
    9499 void vmaDestroyImage(
    9500  VmaAllocator allocator,
    9501  VkImage image,
    9502  VmaAllocation allocation)
    9503 {
    9504  VMA_ASSERT(allocator);
    9505  VMA_DEBUG_LOG("vmaDestroyImage");
    9506  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    9507  if(image != VK_NULL_HANDLE)
    9508  {
    9509  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    9510  }
    9511  if(allocation != VK_NULL_HANDLE)
    9512  {
    9513  allocator->FreeMemory(allocation);
    9514  }
    9515 }
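// Illustrative sketch (again assuming `allocator` exists): creating a sampled
// image. Because tiling is OPTIMAL, the allocation is internally typed as
// VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL, which matters for bufferImageGranularity.
/*
VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
imgCreateInfo.extent = { 1024, 1024, 1 };
imgCreateInfo.mipLevels = 1;
imgCreateInfo.arrayLayers = 1;
imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkImage img = VK_NULL_HANDLE;
VmaAllocation alloc = VK_NULL_HANDLE;
if(vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr) == VK_SUCCESS)
{
    // ... use the image ...
    vmaDestroyImage(allocator, img, alloc);
}
*/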
    9516 
    9517 #endif // #ifdef VMA_IMPLEMENTATION
    1 //
    2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    1084 #include <vulkan/vulkan.h>
    1085 
    1086 #if !defined(VMA_DEDICATED_ALLOCATION)
    1087  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
    1088  #define VMA_DEDICATED_ALLOCATION 1
    1089  #else
    1090  #define VMA_DEDICATED_ALLOCATION 0
    1091  #endif
    1092 #endif
    1093 
    1103 VK_DEFINE_HANDLE(VmaAllocator)
    1104 
    1105 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    1107  VmaAllocator allocator,
    1108  uint32_t memoryType,
    1109  VkDeviceMemory memory,
    1110  VkDeviceSize size);
    1112 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    1113  VmaAllocator allocator,
    1114  uint32_t memoryType,
    1115  VkDeviceMemory memory,
    1116  VkDeviceSize size);
    1117 
    1131 
    1161 
    1164 typedef VkFlags VmaAllocatorCreateFlags;
    1165 
    1170 typedef struct VmaVulkanFunctions {
    1171  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    1172  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    1173  PFN_vkAllocateMemory vkAllocateMemory;
    1174  PFN_vkFreeMemory vkFreeMemory;
    1175  PFN_vkMapMemory vkMapMemory;
    1176  PFN_vkUnmapMemory vkUnmapMemory;
    1177  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    1178  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    1179  PFN_vkBindBufferMemory vkBindBufferMemory;
    1180  PFN_vkBindImageMemory vkBindImageMemory;
    1181  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    1182  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    1183  PFN_vkCreateBuffer vkCreateBuffer;
    1184  PFN_vkDestroyBuffer vkDestroyBuffer;
    1185  PFN_vkCreateImage vkCreateImage;
    1186  PFN_vkDestroyImage vkDestroyImage;
    1187 #if VMA_DEDICATED_ALLOCATION
    1188  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    1189  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
    1190 #endif
    1192 
    1195 {
    1197  VmaAllocatorCreateFlags flags;
    1199 
    1200  VkPhysicalDevice physicalDevice;
    1202 
    1203  VkDevice device;
    1205 
    1208 
    1209  const VkAllocationCallbacks* pAllocationCallbacks;
    1211 
    1250  const VkDeviceSize* pHeapSizeLimit;
    1264 
    1266 VkResult vmaCreateAllocator(
    1267  const VmaAllocatorCreateInfo* pCreateInfo,
    1268  VmaAllocator* pAllocator);
    1269 
    1271 void vmaDestroyAllocator(
    1272  VmaAllocator allocator);
    1273 
    1279  VmaAllocator allocator,
    1280  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    1281 
    1287  VmaAllocator allocator,
    1288  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    1289 
    1297  VmaAllocator allocator,
    1298  uint32_t memoryTypeIndex,
    1299  VkMemoryPropertyFlags* pFlags);
    1300 
    1310  VmaAllocator allocator,
    1311  uint32_t frameIndex);
    1312 
    1315 typedef struct VmaStatInfo
    1316 {
    1318  uint32_t blockCount;
    1324  VkDeviceSize usedBytes;
    1326  VkDeviceSize unusedBytes;
    1327  VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    1328  VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
    1329 } VmaStatInfo;
    1330 
    1332 typedef struct VmaStats
    1333 {
    1334  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    1335  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    1337 } VmaStats;
    1338 
    1340 void vmaCalculateStats(
    1341  VmaAllocator allocator,
    1342  VmaStats* pStats);
    1343 
    1344 #define VMA_STATS_STRING_ENABLED 1
    1345 
    1346 #if VMA_STATS_STRING_ENABLED
    1347 
    1349 
    1351 void vmaBuildStatsString(
    1352  VmaAllocator allocator,
    1353  char** ppStatsString,
    1354  VkBool32 detailedMap);
    1355 
    1356 void vmaFreeStatsString(
    1357  VmaAllocator allocator,
    1358  char* pStatsString);
    1359 
    1360 #endif // #if VMA_STATS_STRING_ENABLED
    1361 
    1370 VK_DEFINE_HANDLE(VmaPool)
    1371 
    1372 typedef enum VmaMemoryUsage
    1373 {
    1422 } VmaMemoryUsage;
    1423 
    1438 
    1488 
    1492 
    1494 {
    1496  VmaAllocationCreateFlags flags;
    1507  VkMemoryPropertyFlags requiredFlags;
    1512  VkMemoryPropertyFlags preferredFlags;
    1520  uint32_t memoryTypeBits;
    1533  void* pUserData;
    1535 
    1552 VkResult vmaFindMemoryTypeIndex(
    1553  VmaAllocator allocator,
    1554  uint32_t memoryTypeBits,
    1555  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    1556  uint32_t* pMemoryTypeIndex);
    1557 
    1571  VmaAllocator allocator,
    1572  const VkBufferCreateInfo* pBufferCreateInfo,
    1573  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    1574  uint32_t* pMemoryTypeIndex);
    1575 
    1589  VmaAllocator allocator,
    1590  const VkImageCreateInfo* pImageCreateInfo,
    1591  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    1592  uint32_t* pMemoryTypeIndex);
    1593 
    1614 
    1617 typedef VkFlags VmaPoolCreateFlags;
    1618 
    1621 typedef struct VmaPoolCreateInfo {
    1627  VmaPoolCreateFlags flags;
    1632  VkDeviceSize blockSize;
    1661 
    1664 typedef struct VmaPoolStats {
    1667  VkDeviceSize size;
    1670  VkDeviceSize unusedSize;
    1683  VkDeviceSize unusedRangeSizeMax;
    1684 } VmaPoolStats;
    1685 
    1692 VkResult vmaCreatePool(
    1693  VmaAllocator allocator,
    1694  const VmaPoolCreateInfo* pCreateInfo,
    1695  VmaPool* pPool);
    1696 
    1699 void vmaDestroyPool(
    1700  VmaAllocator allocator,
    1701  VmaPool pool);
    1702 
    1709 void vmaGetPoolStats(
    1710  VmaAllocator allocator,
    1711  VmaPool pool,
    1712  VmaPoolStats* pPoolStats);
    1713 
    1721  VmaAllocator allocator,
    1722  VmaPool pool,
    1723  size_t* pLostAllocationCount);
    1724 
    1749 VK_DEFINE_HANDLE(VmaAllocation)
    1750 
    1751 
    1753 typedef struct VmaAllocationInfo {
    1758  uint32_t memoryType;
    1767  VkDeviceMemory deviceMemory;
    1772  VkDeviceSize offset;
    1777  VkDeviceSize size;
    1791  void* pUserData;
    1793 
    1804 VkResult vmaAllocateMemory(
    1805  VmaAllocator allocator,
    1806  const VkMemoryRequirements* pVkMemoryRequirements,
    1807  const VmaAllocationCreateInfo* pCreateInfo,
    1808  VmaAllocation* pAllocation,
    1809  VmaAllocationInfo* pAllocationInfo);
    1810 
    1818  VmaAllocator allocator,
    1819  VkBuffer buffer,
    1820  const VmaAllocationCreateInfo* pCreateInfo,
    1821  VmaAllocation* pAllocation,
    1822  VmaAllocationInfo* pAllocationInfo);
    1823 
    1825 VkResult vmaAllocateMemoryForImage(
    1826  VmaAllocator allocator,
    1827  VkImage image,
    1828  const VmaAllocationCreateInfo* pCreateInfo,
    1829  VmaAllocation* pAllocation,
    1830  VmaAllocationInfo* pAllocationInfo);
    1831 
    1833 void vmaFreeMemory(
    1834  VmaAllocator allocator,
    1835  VmaAllocation allocation);
    1836 
    1854  VmaAllocator allocator,
    1855  VmaAllocation allocation,
    1856  VmaAllocationInfo* pAllocationInfo);
    1857 
    1872 VkBool32 vmaTouchAllocation(
    1873  VmaAllocator allocator,
    1874  VmaAllocation allocation);
    1875 
    1890  VmaAllocator allocator,
    1891  VmaAllocation allocation,
    1892  void* pUserData);
    1893 
    1905  VmaAllocator allocator,
    1906  VmaAllocation* pAllocation);
    1907 
    1942 VkResult vmaMapMemory(
    1943  VmaAllocator allocator,
    1944  VmaAllocation allocation,
    1945  void** ppData);
    1946 
    1951 void vmaUnmapMemory(
    1952  VmaAllocator allocator,
    1953  VmaAllocation allocation);
    1954 
    1967 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    1968 
    1981 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    1982 
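// Illustrative usage sketch (not part of the library): writing to a mapped,
// HOST_VISIBLE but non-HOST_COHERENT allocation and flushing it so the writes
// become visible to the device. `allocator`, `alloc`, `mySrcData` and
// `mySrcDataSize` are assumed to exist; passing VK_WHOLE_SIZE as size is
// assumed here to mean "to the end of the allocation". Alignment of the flushed
// range to nonCoherentAtomSize is handled by the library.
/*
void* pData = nullptr;
if(vmaMapMemory(allocator, alloc, &pData) == VK_SUCCESS)
{
    memcpy(pData, mySrcData, (size_t)mySrcDataSize);
    vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
    vmaUnmapMemory(allocator, alloc);
}
*/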
    1984 typedef struct VmaDefragmentationInfo {
    1989  VkDeviceSize maxBytesToMove;
    1996 
    1998 typedef struct VmaDefragmentationStats {
    2000  VkDeviceSize bytesMoved;
    2002  VkDeviceSize bytesFreed;
    2008 
    2091 VkResult vmaDefragment(
    2092  VmaAllocator allocator,
    2093  VmaAllocation* pAllocations,
    2094  size_t allocationCount,
    2095  VkBool32* pAllocationsChanged,
    2096  const VmaDefragmentationInfo *pDefragmentationInfo,
    2097  VmaDefragmentationStats* pDefragmentationStats);
    2098 
    2111 VkResult vmaBindBufferMemory(
    2112  VmaAllocator allocator,
    2113  VmaAllocation allocation,
    2114  VkBuffer buffer);
    2115 
    2128 VkResult vmaBindImageMemory(
    2129  VmaAllocator allocator,
    2130  VmaAllocation allocation,
    2131  VkImage image);
    2132 
    2159 VkResult vmaCreateBuffer(
    2160  VmaAllocator allocator,
    2161  const VkBufferCreateInfo* pBufferCreateInfo,
    2162  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2163  VkBuffer* pBuffer,
    2164  VmaAllocation* pAllocation,
    2165  VmaAllocationInfo* pAllocationInfo);
    2166 
    2178 void vmaDestroyBuffer(
    2179  VmaAllocator allocator,
    2180  VkBuffer buffer,
    2181  VmaAllocation allocation);
    2182 
    2184 VkResult vmaCreateImage(
    2185  VmaAllocator allocator,
    2186  const VkImageCreateInfo* pImageCreateInfo,
    2187  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2188  VkImage* pImage,
    2189  VmaAllocation* pAllocation,
    2190  VmaAllocationInfo* pAllocationInfo);
    2191 
    2203 void vmaDestroyImage(
    2204  VmaAllocator allocator,
    2205  VkImage image,
    2206  VmaAllocation allocation);
    2207 
    2208 #ifdef __cplusplus
    2209 }
    2210 #endif
    2211 
    2212 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    2213 
    2214 // For Visual Studio IntelliSense.
    2215 #if defined(__cplusplus) && defined(__INTELLISENSE__)
    2216 #define VMA_IMPLEMENTATION
    2217 #endif
    2218 
    2219 #ifdef VMA_IMPLEMENTATION
    2220 #undef VMA_IMPLEMENTATION
    2221 
    2222 #include <cstdint>
    2223 #include <cstdlib>
    2224 #include <cstring>
    2225 
    2226 /*******************************************************************************
    2227 CONFIGURATION SECTION
    2228 
    2229 Define some of these macros before each #include of this header or change them
2230 here if you need behavior other than the default, depending on your environment.
    2231 */
    2232 
    2233 /*
    2234 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    2235 internally, like:
    2236 
    2237  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    2238 
2239 Define to 0 if you are going to provide your own pointers to Vulkan functions via
    2240 VmaAllocatorCreateInfo::pVulkanFunctions.
    2241 */
    2242 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    2243 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    2244 #endif
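// Illustrative sketch of the second option. The myLoader_* names and my* handles
// below are hypothetical placeholders for function pointers and handles you
// obtained yourself, e.g. via vkGetInstanceProcAddr/vkGetDeviceProcAddr:
/*
#define VMA_STATIC_VULKAN_FUNCTIONS 0
#include "vk_mem_alloc.h"

VmaVulkanFunctions vulkanFunctions = {};
vulkanFunctions.vkGetPhysicalDeviceProperties = myLoader_vkGetPhysicalDeviceProperties;
vulkanFunctions.vkAllocateMemory = myLoader_vkAllocateMemory;
vulkanFunctions.vkFlushMappedMemoryRanges = myLoader_vkFlushMappedMemoryRanges;
vulkanFunctions.vkInvalidateMappedMemoryRanges = myLoader_vkInvalidateMappedMemoryRanges;
// ... fill all remaining members of VmaVulkanFunctions the same way ...

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = myPhysicalDevice;
allocatorInfo.device = myDevice;
allocatorInfo.pVulkanFunctions = &vulkanFunctions;
*/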
    2245 
    2246 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    2247 //#define VMA_USE_STL_CONTAINERS 1
    2248 
2249 /* Set this macro to 1 to make the library include and use STL containers:
2250 std::pair, std::vector, std::list, std::unordered_map.
2251 
2252 Set it to 0 or leave it undefined to make the library use its own implementation of
2253 the containers.
    2254 */
    2255 #if VMA_USE_STL_CONTAINERS
    2256  #define VMA_USE_STL_VECTOR 1
    2257  #define VMA_USE_STL_UNORDERED_MAP 1
    2258  #define VMA_USE_STL_LIST 1
    2259 #endif
    2260 
    2261 #if VMA_USE_STL_VECTOR
    2262  #include <vector>
    2263 #endif
    2264 
    2265 #if VMA_USE_STL_UNORDERED_MAP
    2266  #include <unordered_map>
    2267 #endif
    2268 
    2269 #if VMA_USE_STL_LIST
    2270  #include <list>
    2271 #endif
    2272 
    2273 /*
2274 The following headers are used in this CONFIGURATION section only, so feel free to
    2275 remove them if not needed.
    2276 */
    2277 #include <cassert> // for assert
    2278 #include <algorithm> // for min, max
    2279 #include <mutex> // for std::mutex
    2280 #include <atomic> // for std::atomic
    2281 
    2282 #ifndef VMA_NULL
    2283  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    2284  #define VMA_NULL nullptr
    2285 #endif
    2286 
    2287 #if defined(__APPLE__) || defined(__ANDROID__)
    2288 #include <cstdlib>
    2289 void *aligned_alloc(size_t alignment, size_t size)
    2290 {
    2291  // alignment must be >= sizeof(void*)
    2292  if(alignment < sizeof(void*))
    2293  {
    2294  alignment = sizeof(void*);
    2295  }
    2296 
    2297  void *pointer;
    2298  if(posix_memalign(&pointer, alignment, size) == 0)
    2299  return pointer;
    2300  return VMA_NULL;
    2301 }
    2302 #endif
    2303 
2304 // If your compiler is not compatible with C++11 and the definition of the
2305 // aligned_alloc() function is missing, uncommenting the following line may help:
    2306 
    2307 //#include <malloc.h>
    2308 
    2309 // Normal assert to check for programmer's errors, especially in Debug configuration.
    2310 #ifndef VMA_ASSERT
    2311  #ifdef _DEBUG
    2312  #define VMA_ASSERT(expr) assert(expr)
    2313  #else
    2314  #define VMA_ASSERT(expr)
    2315  #endif
    2316 #endif
    2317 
    2318 // Assert that will be called very often, like inside data structures e.g. operator[].
    2319 // Making it non-empty can make program slow.
    2320 #ifndef VMA_HEAVY_ASSERT
    2321  #ifdef _DEBUG
    2322  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    2323  #else
    2324  #define VMA_HEAVY_ASSERT(expr)
    2325  #endif
    2326 #endif
    2327 
    2328 #ifndef VMA_ALIGN_OF
    2329  #define VMA_ALIGN_OF(type) (__alignof(type))
    2330 #endif
    2331 
    2332 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    2333  #if defined(_WIN32)
    2334  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    2335  #else
    2336  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    2337  #endif
    2338 #endif
    2339 
    2340 #ifndef VMA_SYSTEM_FREE
    2341  #if defined(_WIN32)
    2342  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    2343  #else
    2344  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    2345  #endif
    2346 #endif
    2347 
    2348 #ifndef VMA_MIN
    2349  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    2350 #endif
    2351 
    2352 #ifndef VMA_MAX
    2353  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    2354 #endif
    2355 
    2356 #ifndef VMA_SWAP
    2357  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    2358 #endif
    2359 
    2360 #ifndef VMA_SORT
    2361  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    2362 #endif
    2363 
    2364 #ifndef VMA_DEBUG_LOG
    2365  #define VMA_DEBUG_LOG(format, ...)
    2366  /*
    2367  #define VMA_DEBUG_LOG(format, ...) do { \
    2368  printf(format, __VA_ARGS__); \
    2369  printf("\n"); \
    2370  } while(false)
    2371  */
    2372 #endif
    2373 
    2374 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    2375 #if VMA_STATS_STRING_ENABLED
    2376  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    2377  {
    2378  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    2379  }
    2380  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    2381  {
    2382  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    2383  }
    2384  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    2385  {
    2386  snprintf(outStr, strLen, "%p", ptr);
    2387  }
    2388 #endif
    2389 
    2390 #ifndef VMA_MUTEX
    2391  class VmaMutex
    2392  {
    2393  public:
    2394  VmaMutex() { }
    2395  ~VmaMutex() { }
    2396  void Lock() { m_Mutex.lock(); }
    2397  void Unlock() { m_Mutex.unlock(); }
    2398  private:
    2399  std::mutex m_Mutex;
    2400  };
    2401  #define VMA_MUTEX VmaMutex
    2402 #endif
    2403 
    2404 /*
    2405 If providing your own implementation, you need to implement a subset of std::atomic:
    2406 
    2407 - Constructor(uint32_t desired)
    2408 - uint32_t load() const
    2409 - void store(uint32_t desired)
    2410 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    2411 */
    2412 #ifndef VMA_ATOMIC_UINT32
    2413  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    2414 #endif
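// Illustrative sketch of a conforming replacement. This one merely delegates to
// std::atomic<uint32_t>, to show the exact interface the library expects:
/*
class MyAtomicUint32
{
public:
    MyAtomicUint32(uint32_t desired) : m_Value(desired) { }
    uint32_t load() const { return m_Value.load(); }
    void store(uint32_t desired) { m_Value.store(desired); }
    bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    {
        return m_Value.compare_exchange_weak(expected, desired);
    }
private:
    std::atomic<uint32_t> m_Value;
};
#define VMA_ATOMIC_UINT32 MyAtomicUint32
*/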
    2415 
    2416 #ifndef VMA_BEST_FIT
    2417 
    2429  #define VMA_BEST_FIT (1)
    2430 #endif
    2431 
    2432 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    2433 
    2437  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    2438 #endif
    2439 
    2440 #ifndef VMA_DEBUG_ALIGNMENT
    2441 
    2445  #define VMA_DEBUG_ALIGNMENT (1)
    2446 #endif
    2447 
    2448 #ifndef VMA_DEBUG_MARGIN
    2449 
    2453  #define VMA_DEBUG_MARGIN (0)
    2454 #endif
    2455 
    2456 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    2457 
    2461  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    2462 #endif
    2463 
    2464 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    2465 
    2469  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    2470 #endif
    2471 
    2472 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    2473  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
    2475 #endif
    2476 
    2477 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    2478  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
    2480 #endif
    2481 
    2482 #ifndef VMA_CLASS_NO_COPY
    2483  #define VMA_CLASS_NO_COPY(className) \
    2484  private: \
    2485  className(const className&) = delete; \
    2486  className& operator=(const className&) = delete;
    2487 #endif
    2488 
    2489 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    2490 
    2491 /*******************************************************************************
    2492 END OF CONFIGURATION
    2493 */
    2494 
    2495 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    2496  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    2497 
    2498 // Returns number of bits set to 1 in (v).
    2499 static inline uint32_t VmaCountBitsSet(uint32_t v)
    2500 {
    2501  uint32_t c = v - ((v >> 1) & 0x55555555);
    2502  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    2503  c = ((c >> 4) + c) & 0x0F0F0F0F;
    2504  c = ((c >> 8) + c) & 0x00FF00FF;
    2505  c = ((c >> 16) + c) & 0x0000FFFF;
    2506  return c;
    2507 }
    2508 
2509 // Aligns given value up to the nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
    2510 // Use types like uint32_t, uint64_t as T.
    2511 template <typename T>
    2512 static inline T VmaAlignUp(T val, T align)
    2513 {
    2514  return (val + align - 1) / align * align;
    2515 }
2516 // Aligns given value down to the nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
    2517 // Use types like uint32_t, uint64_t as T.
    2518 template <typename T>
    2519 static inline T VmaAlignDown(T val, T align)
    2520 {
    2521  return val / align * align;
    2522 }
    2523 
    2524 // Division with mathematical rounding to nearest number.
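// For example: VmaRoundDiv(10, 4) = 3 and VmaRoundDiv(9, 4) = 2, because (x + y/2) / y rounds halves up.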
    2525 template <typename T>
    2526 inline T VmaRoundDiv(T x, T y)
    2527 {
    2528  return (x + (y / (T)2)) / y;
    2529 }
    2530 
    2531 #ifndef VMA_SORT
    2532 
    2533 template<typename Iterator, typename Compare>
    2534 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
    2535 {
    2536  Iterator centerValue = end; --centerValue;
    2537  Iterator insertIndex = beg;
    2538  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    2539  {
    2540  if(cmp(*memTypeIndex, *centerValue))
    2541  {
    2542  if(insertIndex != memTypeIndex)
    2543  {
    2544  VMA_SWAP(*memTypeIndex, *insertIndex);
    2545  }
    2546  ++insertIndex;
    2547  }
    2548  }
    2549  if(insertIndex != centerValue)
    2550  {
    2551  VMA_SWAP(*insertIndex, *centerValue);
    2552  }
    2553  return insertIndex;
    2554 }
    2555 
    2556 template<typename Iterator, typename Compare>
    2557 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
    2558 {
    2559  if(beg < end)
    2560  {
    2561  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
    2562  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
    2563  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    2564  }
    2565 }
    2566 
    2567 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    2568 
    2569 #endif // #ifndef VMA_SORT
    2570 
    2571 /*
    2572 Returns true if two memory blocks occupy overlapping pages.
2573 ResourceA must be at a lower memory offset than ResourceB.
    2574 
    2575 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
    2576 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
    2577 */
    2578 static inline bool VmaBlocksOnSamePage(
    2579  VkDeviceSize resourceAOffset,
    2580  VkDeviceSize resourceASize,
    2581  VkDeviceSize resourceBOffset,
    2582  VkDeviceSize pageSize)
    2583 {
    2584  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    2585  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    2586  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    2587  VkDeviceSize resourceBStart = resourceBOffset;
    2588  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    2589  return resourceAEndPage == resourceBStartPage;
    2590 }
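// Worked example (pageSize = 4096): VmaBlocksOnSamePage(0, 4096, 4096, 4096) is
// false, because A's last byte (offset 4095) lies on page 0 while B starts on
// page 1; but VmaBlocksOnSamePage(0, 4000, 4000, 4096) is true, since A's last
// byte and B's first byte both fall on page 0.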
    2591 
    2592 enum VmaSuballocationType
    2593 {
    2594  VMA_SUBALLOCATION_TYPE_FREE = 0,
    2595  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    2596  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    2597  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    2598  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    2599  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    2600  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
    2601 };
    2602 
    2603 /*
    2604 Returns true if given suballocation types could conflict and must respect
2605 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one of them is a
2606 buffer or linear image and the other is an optimal image. If a type is unknown,
2607 behave conservatively.
    2608 */
    2609 static inline bool VmaIsBufferImageGranularityConflict(
    2610  VmaSuballocationType suballocType1,
    2611  VmaSuballocationType suballocType2)
    2612 {
    2613  if(suballocType1 > suballocType2)
    2614  {
    2615  VMA_SWAP(suballocType1, suballocType2);
    2616  }
    2617 
    2618  switch(suballocType1)
    2619  {
    2620  case VMA_SUBALLOCATION_TYPE_FREE:
    2621  return false;
    2622  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
    2623  return true;
    2624  case VMA_SUBALLOCATION_TYPE_BUFFER:
    2625  return
    2626  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    2627  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    2628  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
    2629  return
    2630  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    2631  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
    2632  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    2633  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
    2634  return
    2635  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    2636  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
    2637  return false;
    2638  default:
    2639  VMA_ASSERT(0);
    2640  return true;
    2641  }
    2642 }
    2643 
    2644 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
    2645 struct VmaMutexLock
    2646 {
    2647  VMA_CLASS_NO_COPY(VmaMutexLock)
    2648 public:
    2649  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
    2650  m_pMutex(useMutex ? &mutex : VMA_NULL)
    2651  {
    2652  if(m_pMutex)
    2653  {
    2654  m_pMutex->Lock();
    2655  }
    2656  }
    2657 
    2658  ~VmaMutexLock()
    2659  {
    2660  if(m_pMutex)
    2661  {
    2662  m_pMutex->Unlock();
    2663  }
    2664  }
    2665 
    2666 private:
    2667  VMA_MUTEX* m_pMutex;
    2668 };
    2669 
    2670 #if VMA_DEBUG_GLOBAL_MUTEX
    2671  static VMA_MUTEX gDebugGlobalMutex;
    2672  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    2673 #else
    2674  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    2675 #endif
    2676 
    2677 // Minimum size of a free suballocation to register it in the free suballocation collection.
    2678 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    2679 
    2680 /*
2681 Performs binary search and returns an iterator to the first element that is
2682 greater than or equal to (key), according to comparison (cmp).
2683 
2684 Cmp should return true if its first argument is less than its second argument.
2685 
2686 The returned value is the found element, if present in the collection, or the
2687 place where a new element with value (key) should be inserted.
    2688 */
    2689 template <typename IterT, typename KeyT, typename CmpT>
    2690 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
    2691 {
    2692  size_t down = 0, up = (end - beg);
    2693  while(down < up)
    2694  {
    2695  const size_t mid = (down + up) / 2;
    2696  if(cmp(*(beg+mid), key))
    2697  {
    2698  down = mid + 1;
    2699  }
    2700  else
    2701  {
    2702  up = mid;
    2703  }
    2704  }
    2705  return beg + down;
    2706 }
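// Illustrative usage sketch (hypothetical data): finding the position of 5 in a
// sorted array.
/*
uint32_t arr[] = { 1, 3, 5, 8 };
uint32_t* it = VmaBinaryFindFirstNotLess(
    arr, arr + 4, 5u, [](uint32_t lhs, uint32_t rhs) { return lhs < rhs; });
// it now points at arr[2], the first element not less than 5.
*/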
    2707 
    2709 // Memory allocation
    2710 
    2711 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
    2712 {
    2713  if((pAllocationCallbacks != VMA_NULL) &&
    2714  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    2715  {
    2716  return (*pAllocationCallbacks->pfnAllocation)(
    2717  pAllocationCallbacks->pUserData,
    2718  size,
    2719  alignment,
    2720  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    2721  }
    2722  else
    2723  {
    2724  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    2725  }
    2726 }
    2727 
    2728 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
    2729 {
    2730  if((pAllocationCallbacks != VMA_NULL) &&
    2731  (pAllocationCallbacks->pfnFree != VMA_NULL))
    2732  {
    2733  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    2734  }
    2735  else
    2736  {
    2737  VMA_SYSTEM_FREE(ptr);
    2738  }
    2739 }
    2740 
    2741 template<typename T>
    2742 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
    2743 {
    2744  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
    2745 }
    2746 
    2747 template<typename T>
    2748 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
    2749 {
    2750  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
    2751 }
    2752 
    2753 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
    2754 
    2755 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    2756 
    2757 template<typename T>
    2758 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
    2759 {
    2760  ptr->~T();
    2761  VmaFree(pAllocationCallbacks, ptr);
    2762 }
    2763 
    2764 template<typename T>
    2765 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    2766 {
    2767  if(ptr != VMA_NULL)
    2768  {
    2769  for(size_t i = count; i--; )
    2770  {
    2771  ptr[i].~T();
    2772  }
    2773  VmaFree(pAllocationCallbacks, ptr);
    2774  }
    2775 }
    2776 
    2777 // STL-compatible allocator.
    2778 template<typename T>
    2779 class VmaStlAllocator
    2780 {
    2781 public:
    2782  const VkAllocationCallbacks* const m_pCallbacks;
    2783  typedef T value_type;
    2784 
    2785  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    2786  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
    2787 
    2788  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    2789  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
    2790 
    2791  template<typename U>
    2792  bool operator==(const VmaStlAllocator<U>& rhs) const
    2793  {
    2794  return m_pCallbacks == rhs.m_pCallbacks;
    2795  }
    2796  template<typename U>
    2797  bool operator!=(const VmaStlAllocator<U>& rhs) const
    2798  {
    2799  return m_pCallbacks != rhs.m_pCallbacks;
    2800  }
    2801 
    2802  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
    2803 };
    2804 
    2805 #if VMA_USE_STL_VECTOR
    2806 
    2807 #define VmaVector std::vector
    2808 
    2809 template<typename T, typename allocatorT>
    2810 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
    2811 {
    2812  vec.insert(vec.begin() + index, item);
    2813 }
    2814 
    2815 template<typename T, typename allocatorT>
    2816 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
    2817 {
    2818  vec.erase(vec.begin() + index);
    2819 }
    2820 
    2821 #else // #if VMA_USE_STL_VECTOR
    2822 
2823 /* Class with an interface compatible with a subset of std::vector.
    2824 T must be POD because constructors and destructors are not called and memcpy is
    2825 used for these objects. */
    2826 template<typename T, typename AllocatorT>
    2827 class VmaVector
    2828 {
    2829 public:
    2830  typedef T value_type;
    2831 
    2832  VmaVector(const AllocatorT& allocator) :
    2833  m_Allocator(allocator),
    2834  m_pArray(VMA_NULL),
    2835  m_Count(0),
    2836  m_Capacity(0)
    2837  {
    2838  }
    2839 
    2840  VmaVector(size_t count, const AllocatorT& allocator) :
    2841  m_Allocator(allocator),
    2842  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    2843  m_Count(count),
    2844  m_Capacity(count)
    2845  {
    2846  }
    2847 
    2848  VmaVector(const VmaVector<T, AllocatorT>& src) :
    2849  m_Allocator(src.m_Allocator),
    2850  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    2851  m_Count(src.m_Count),
    2852  m_Capacity(src.m_Count)
    2853  {
    2854  if(m_Count != 0)
    2855  {
    2856  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    2857  }
    2858  }
    2859 
    2860  ~VmaVector()
    2861  {
    2862  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    2863  }
    2864 
    2865  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    2866  {
    2867  if(&rhs != this)
    2868  {
    2869  resize(rhs.m_Count);
    2870  if(m_Count != 0)
    2871  {
    2872  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    2873  }
    2874  }
    2875  return *this;
    2876  }
    2877 
    2878  bool empty() const { return m_Count == 0; }
    2879  size_t size() const { return m_Count; }
    2880  T* data() { return m_pArray; }
    2881  const T* data() const { return m_pArray; }
    2882 
    2883  T& operator[](size_t index)
    2884  {
    2885  VMA_HEAVY_ASSERT(index < m_Count);
    2886  return m_pArray[index];
    2887  }
    2888  const T& operator[](size_t index) const
    2889  {
    2890  VMA_HEAVY_ASSERT(index < m_Count);
    2891  return m_pArray[index];
    2892  }
    2893 
    2894  T& front()
    2895  {
    2896  VMA_HEAVY_ASSERT(m_Count > 0);
    2897  return m_pArray[0];
    2898  }
    2899  const T& front() const
    2900  {
    2901  VMA_HEAVY_ASSERT(m_Count > 0);
    2902  return m_pArray[0];
    2903  }
    2904  T& back()
    2905  {
    2906  VMA_HEAVY_ASSERT(m_Count > 0);
    2907  return m_pArray[m_Count - 1];
    2908  }
    2909  const T& back() const
    2910  {
    2911  VMA_HEAVY_ASSERT(m_Count > 0);
    2912  return m_pArray[m_Count - 1];
    2913  }
    2914 
    2915  void reserve(size_t newCapacity, bool freeMemory = false)
    2916  {
    2917  newCapacity = VMA_MAX(newCapacity, m_Count);
    2918 
    2919  if((newCapacity < m_Capacity) && !freeMemory)
    2920  {
    2921  newCapacity = m_Capacity;
    2922  }
    2923 
    2924  if(newCapacity != m_Capacity)
    2925  {
2926  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    2927  if(m_Count != 0)
    2928  {
    2929  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    2930  }
    2931  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    2932  m_Capacity = newCapacity;
    2933  m_pArray = newArray;
    2934  }
    2935  }
    2936 
    2937  void resize(size_t newCount, bool freeMemory = false)
    2938  {
    2939  size_t newCapacity = m_Capacity;
    2940  if(newCount > m_Capacity)
    2941  {
    2942  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    2943  }
    2944  else if(freeMemory)
    2945  {
    2946  newCapacity = newCount;
    2947  }
    2948 
    2949  if(newCapacity != m_Capacity)
    2950  {
    2951  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    2952  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    2953  if(elementsToCopy != 0)
    2954  {
    2955  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    2956  }
    2957  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    2958  m_Capacity = newCapacity;
    2959  m_pArray = newArray;
    2960  }
    2961 
    2962  m_Count = newCount;
    2963  }
    2964 
    2965  void clear(bool freeMemory = false)
    2966  {
    2967  resize(0, freeMemory);
    2968  }
    2969 
    2970  void insert(size_t index, const T& src)
    2971  {
    2972  VMA_HEAVY_ASSERT(index <= m_Count);
    2973  const size_t oldCount = size();
    2974  resize(oldCount + 1);
    2975  if(index < oldCount)
    2976  {
    2977  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    2978  }
    2979  m_pArray[index] = src;
    2980  }
    2981 
    2982  void remove(size_t index)
    2983  {
    2984  VMA_HEAVY_ASSERT(index < m_Count);
    2985  const size_t oldCount = size();
    2986  if(index < oldCount - 1)
    2987  {
    2988  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    2989  }
    2990  resize(oldCount - 1);
    2991  }
    2992 
    2993  void push_back(const T& src)
    2994  {
    2995  const size_t newIndex = size();
    2996  resize(newIndex + 1);
    2997  m_pArray[newIndex] = src;
    2998  }
    2999 
    3000  void pop_back()
    3001  {
    3002  VMA_HEAVY_ASSERT(m_Count > 0);
    3003  resize(size() - 1);
    3004  }
    3005 
    3006  void push_front(const T& src)
    3007  {
    3008  insert(0, src);
    3009  }
    3010 
    3011  void pop_front()
    3012  {
    3013  VMA_HEAVY_ASSERT(m_Count > 0);
    3014  remove(0);
    3015  }
    3016 
    3017  typedef T* iterator;
    3018 
    3019  iterator begin() { return m_pArray; }
    3020  iterator end() { return m_pArray + m_Count; }
    3021 
    3022 private:
    3023  AllocatorT m_Allocator;
    3024  T* m_pArray;
    3025  size_t m_Count;
    3026  size_t m_Capacity;
    3027 };
    3028 
    3029 template<typename T, typename allocatorT>
    3030 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
    3031 {
    3032  vec.insert(index, item);
    3033 }
    3034 
    3035 template<typename T, typename allocatorT>
    3036 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
    3037 {
    3038  vec.remove(index);
    3039 }
    3040 
    3041 #endif // #if VMA_USE_STL_VECTOR
    3042 
    3043 template<typename CmpLess, typename VectorT>
    3044 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
    3045 {
    3046  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    3047  vector.data(),
    3048  vector.data() + vector.size(),
    3049  value,
    3050  CmpLess()) - vector.data();
    3051  VmaVectorInsert(vector, indexToInsert, value);
    3052  return indexToInsert;
    3053 }
    3054 
    3055 template<typename CmpLess, typename VectorT>
    3056 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
    3057 {
    3058  CmpLess comparator;
    3059  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
    3060  vector.begin(),
    3061  vector.end(),
    3062  value,
    3063  comparator);
    3064  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    3065  {
    3066  size_t indexToRemove = it - vector.begin();
    3067  VmaVectorRemove(vector, indexToRemove);
    3068  return true;
    3069  }
    3070  return false;
    3071 }
    3072 
    3073 template<typename CmpLess, typename VectorT>
    3074 size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
    3075 {
    3076  CmpLess comparator;
3077  const typename VectorT::value_type* it = VmaBinaryFindFirstNotLess(
3078  vector.data(),
3079  vector.data() + vector.size(),
3080  value,
3081  comparator);
3082  if(it != vector.data() + vector.size() && !comparator(*it, value) && !comparator(value, *it))
3083  {
3084  return it - vector.data();
    3085  }
    3086  else
    3087  {
    3088  return vector.size();
    3089  }
    3090 }
    3091 
    3093 // class VmaPoolAllocator
    3094 
    3095 /*
    3096 Allocator for objects of type T using a list of arrays (pools) to speed up
3097 allocation. The number of elements that can be allocated is not bounded, because
3098 the allocator can create multiple blocks.
    3099 */
    3100 template<typename T>
    3101 class VmaPoolAllocator
    3102 {
    3103  VMA_CLASS_NO_COPY(VmaPoolAllocator)
    3104 public:
    3105  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    3106  ~VmaPoolAllocator();
    3107  void Clear();
    3108  T* Alloc();
    3109  void Free(T* ptr);
    3110 
    3111 private:
    3112  union Item
    3113  {
    3114  uint32_t NextFreeIndex;
    3115  T Value;
    3116  };
    3117 
    3118  struct ItemBlock
    3119  {
    3120  Item* pItems;
    3121  uint32_t FirstFreeIndex;
    3122  };
    3123 
    3124  const VkAllocationCallbacks* m_pAllocationCallbacks;
    3125  size_t m_ItemsPerBlock;
    3126  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
    3127 
    3128  ItemBlock& CreateNewBlock();
    3129 };
    3130 
    3131 template<typename T>
    3132 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    3133  m_pAllocationCallbacks(pAllocationCallbacks),
    3134  m_ItemsPerBlock(itemsPerBlock),
    3135  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
    3136 {
    3137  VMA_ASSERT(itemsPerBlock > 0);
    3138 }
    3139 
    3140 template<typename T>
    3141 VmaPoolAllocator<T>::~VmaPoolAllocator()
    3142 {
    3143  Clear();
    3144 }
    3145 
    3146 template<typename T>
    3147 void VmaPoolAllocator<T>::Clear()
    3148 {
    3149  for(size_t i = m_ItemBlocks.size(); i--; )
    3150  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    3151  m_ItemBlocks.clear();
    3152 }
    3153 
    3154 template<typename T>
    3155 T* VmaPoolAllocator<T>::Alloc()
    3156 {
    3157  for(size_t i = m_ItemBlocks.size(); i--; )
    3158  {
    3159  ItemBlock& block = m_ItemBlocks[i];
    3160  // This block has some free items: Use first one.
    3161  if(block.FirstFreeIndex != UINT32_MAX)
    3162  {
    3163  Item* const pItem = &block.pItems[block.FirstFreeIndex];
    3164  block.FirstFreeIndex = pItem->NextFreeIndex;
    3165  return &pItem->Value;
    3166  }
    3167  }
    3168 
3169  // No block has a free item: Create a new one and use it.
    3170  ItemBlock& newBlock = CreateNewBlock();
    3171  Item* const pItem = &newBlock.pItems[0];
    3172  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    3173  return &pItem->Value;
    3174 }
    3175 
    3176 template<typename T>
    3177 void VmaPoolAllocator<T>::Free(T* ptr)
    3178 {
    3179  // Search all memory blocks to find ptr.
    3180  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    3181  {
    3182  ItemBlock& block = m_ItemBlocks[i];
    3183 
    3184  // Casting to union.
    3185  Item* pItemPtr;
    3186  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
    3187 
    3188  // Check if pItemPtr is in address range of this block.
    3189  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
    3190  {
    3191  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
    3192  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
    3193  block.FirstFreeIndex = index;
    3194  return;
    3195  }
    3196  }
    3197  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
    3198 }
    3199 
    3200 template<typename T>
    3201 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
    3202 {
    3203  ItemBlock newBlock = {
    3204  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
    3205 
    3206  m_ItemBlocks.push_back(newBlock);
    3207 
    3208  // Setup singly-linked list of all free items in this block.
    3209  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
    3210  newBlock.pItems[i].NextFreeIndex = i + 1;
    3211  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    3212  return m_ItemBlocks.back();
    3213 }
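// Illustrative usage sketch (MyItem is a hypothetical POD type). Note that
// Alloc() returns raw storage from the pool; the caller is responsible for
// initializing the object's fields, as VmaRawList does below.
/*
VmaPoolAllocator<MyItem> itemAllocator(pAllocationCallbacks, 128);
MyItem* const pItem = itemAllocator.Alloc(); // Reuses a free slot or creates a new block.
// ... initialize and use *pItem ...
itemAllocator.Free(pItem);                   // Returns the slot to its block's free list.
*/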
    3214 
    3216 // class VmaRawList, VmaList
    3217 
    3218 #if VMA_USE_STL_LIST
    3219 
    3220 #define VmaList std::list
    3221 
    3222 #else // #if VMA_USE_STL_LIST
    3223 
    3224 template<typename T>
    3225 struct VmaListItem
    3226 {
    3227  VmaListItem* pPrev;
    3228  VmaListItem* pNext;
    3229  T Value;
    3230 };
    3231 
    3232 // Doubly linked list.
    3233 template<typename T>
    3234 class VmaRawList
    3235 {
    3236  VMA_CLASS_NO_COPY(VmaRawList)
    3237 public:
    3238  typedef VmaListItem<T> ItemType;
    3239 
    3240  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    3241  ~VmaRawList();
    3242  void Clear();
    3243 
    3244  size_t GetCount() const { return m_Count; }
    3245  bool IsEmpty() const { return m_Count == 0; }
    3246 
    3247  ItemType* Front() { return m_pFront; }
    3248  const ItemType* Front() const { return m_pFront; }
    3249  ItemType* Back() { return m_pBack; }
    3250  const ItemType* Back() const { return m_pBack; }
    3251 
    3252  ItemType* PushBack();
    3253  ItemType* PushFront();
    3254  ItemType* PushBack(const T& value);
    3255  ItemType* PushFront(const T& value);
    3256  void PopBack();
    3257  void PopFront();
    3258 
    3259  // Item can be null - it means PushBack.
    3260  ItemType* InsertBefore(ItemType* pItem);
    3261  // Item can be null - it means PushFront.
    3262  ItemType* InsertAfter(ItemType* pItem);
    3263 
    3264  ItemType* InsertBefore(ItemType* pItem, const T& value);
    3265  ItemType* InsertAfter(ItemType* pItem, const T& value);
    3266 
    3267  void Remove(ItemType* pItem);
    3268 
    3269 private:
    3270  const VkAllocationCallbacks* const m_pAllocationCallbacks;
    3271  VmaPoolAllocator<ItemType> m_ItemAllocator;
    3272  ItemType* m_pFront;
    3273  ItemType* m_pBack;
    3274  size_t m_Count;
    3275 };
    3276 
    3277 template<typename T>
    3278 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    3279  m_pAllocationCallbacks(pAllocationCallbacks),
    3280  m_ItemAllocator(pAllocationCallbacks, 128),
    3281  m_pFront(VMA_NULL),
    3282  m_pBack(VMA_NULL),
    3283  m_Count(0)
    3284 {
    3285 }
    3286 
    3287 template<typename T>
    3288 VmaRawList<T>::~VmaRawList()
    3289 {
3290  // Intentionally not calling Clear, because that would waste computation
3291  // returning all items to m_ItemAllocator as free.
    3292 }
    3293 
    3294 template<typename T>
    3295 void VmaRawList<T>::Clear()
    3296 {
    3297  if(IsEmpty() == false)
    3298  {
    3299  ItemType* pItem = m_pBack;
    3300  while(pItem != VMA_NULL)
    3301  {
    3302  ItemType* const pPrevItem = pItem->pPrev;
    3303  m_ItemAllocator.Free(pItem);
    3304  pItem = pPrevItem;
    3305  }
    3306  m_pFront = VMA_NULL;
    3307  m_pBack = VMA_NULL;
    3308  m_Count = 0;
    3309  }
    3310 }
    3311 
    3312 template<typename T>
    3313 VmaListItem<T>* VmaRawList<T>::PushBack()
    3314 {
    3315  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    3316  pNewItem->pNext = VMA_NULL;
    3317  if(IsEmpty())
    3318  {
    3319  pNewItem->pPrev = VMA_NULL;
    3320  m_pFront = pNewItem;
    3321  m_pBack = pNewItem;
    3322  m_Count = 1;
    3323  }
    3324  else
    3325  {
    3326  pNewItem->pPrev = m_pBack;
    3327  m_pBack->pNext = pNewItem;
    3328  m_pBack = pNewItem;
    3329  ++m_Count;
    3330  }
    3331  return pNewItem;
    3332 }
    3333 
    3334 template<typename T>
    3335 VmaListItem<T>* VmaRawList<T>::PushFront()
    3336 {
    3337  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    3338  pNewItem->pPrev = VMA_NULL;
    3339  if(IsEmpty())
    3340  {
    3341  pNewItem->pNext = VMA_NULL;
    3342  m_pFront = pNewItem;
    3343  m_pBack = pNewItem;
    3344  m_Count = 1;
    3345  }
    3346  else
    3347  {
    3348  pNewItem->pNext = m_pFront;
    3349  m_pFront->pPrev = pNewItem;
    3350  m_pFront = pNewItem;
    3351  ++m_Count;
    3352  }
    3353  return pNewItem;
    3354 }
    3355 
    3356 template<typename T>
    3357 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
    3358 {
    3359  ItemType* const pNewItem = PushBack();
    3360  pNewItem->Value = value;
    3361  return pNewItem;
    3362 }
    3363 
    3364 template<typename T>
    3365 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
    3366 {
    3367  ItemType* const pNewItem = PushFront();
    3368  pNewItem->Value = value;
    3369  return pNewItem;
    3370 }
    3371 
    3372 template<typename T>
    3373 void VmaRawList<T>::PopBack()
    3374 {
    3375  VMA_HEAVY_ASSERT(m_Count > 0);
    3376  ItemType* const pBackItem = m_pBack;
    3377  ItemType* const pPrevItem = pBackItem->pPrev;
    3378  if(pPrevItem != VMA_NULL)
    3379  {
    3380  pPrevItem->pNext = VMA_NULL;
    3381  }
    3382  m_pBack = pPrevItem;
    3383  m_ItemAllocator.Free(pBackItem);
    3384  --m_Count;
    3385 }
    3386 
    3387 template<typename T>
    3388 void VmaRawList<T>::PopFront()
    3389 {
    3390  VMA_HEAVY_ASSERT(m_Count > 0);
    3391  ItemType* const pFrontItem = m_pFront;
    3392  ItemType* const pNextItem = pFrontItem->pNext;
    3393  if(pNextItem != VMA_NULL)
    3394  {
    3395  pNextItem->pPrev = VMA_NULL;
    3396  }
    3397  m_pFront = pNextItem;
    3398  m_ItemAllocator.Free(pFrontItem);
    3399  --m_Count;
    3400 }
    3401 
    3402 template<typename T>
    3403 void VmaRawList<T>::Remove(ItemType* pItem)
    3404 {
    3405  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    3406  VMA_HEAVY_ASSERT(m_Count > 0);
    3407 
    3408  if(pItem->pPrev != VMA_NULL)
    3409  {
    3410  pItem->pPrev->pNext = pItem->pNext;
    3411  }
    3412  else
    3413  {
    3414  VMA_HEAVY_ASSERT(m_pFront == pItem);
    3415  m_pFront = pItem->pNext;
    3416  }
    3417 
    3418  if(pItem->pNext != VMA_NULL)
    3419  {
    3420  pItem->pNext->pPrev = pItem->pPrev;
    3421  }
    3422  else
    3423  {
    3424  VMA_HEAVY_ASSERT(m_pBack == pItem);
    3425  m_pBack = pItem->pPrev;
    3426  }
    3427 
    3428  m_ItemAllocator.Free(pItem);
    3429  --m_Count;
    3430 }
    3431 
    3432 template<typename T>
    3433 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
    3434 {
    3435  if(pItem != VMA_NULL)
    3436  {
    3437  ItemType* const prevItem = pItem->pPrev;
    3438  ItemType* const newItem = m_ItemAllocator.Alloc();
    3439  newItem->pPrev = prevItem;
    3440  newItem->pNext = pItem;
    3441  pItem->pPrev = newItem;
    3442  if(prevItem != VMA_NULL)
    3443  {
    3444  prevItem->pNext = newItem;
    3445  }
    3446  else
    3447  {
    3448  VMA_HEAVY_ASSERT(m_pFront == pItem);
    3449  m_pFront = newItem;
    3450  }
    3451  ++m_Count;
    3452  return newItem;
    3453  }
    3454  else
    3455  return PushBack();
    3456 }
    3457 
    3458 template<typename T>
    3459 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
    3460 {
    3461  if(pItem != VMA_NULL)
    3462  {
    3463  ItemType* const nextItem = pItem->pNext;
    3464  ItemType* const newItem = m_ItemAllocator.Alloc();
    3465  newItem->pNext = nextItem;
    3466  newItem->pPrev = pItem;
    3467  pItem->pNext = newItem;
    3468  if(nextItem != VMA_NULL)
    3469  {
    3470  nextItem->pPrev = newItem;
    3471  }
    3472  else
    3473  {
    3474  VMA_HEAVY_ASSERT(m_pBack == pItem);
    3475  m_pBack = newItem;
    3476  }
    3477  ++m_Count;
    3478  return newItem;
    3479  }
    3480  else
    3481  return PushFront();
    3482 }
    3483 
    3484 template<typename T>
    3485 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
    3486 {
    3487  ItemType* const newItem = InsertBefore(pItem);
    3488  newItem->Value = value;
    3489  return newItem;
    3490 }
    3491 
    3492 template<typename T>
    3493 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
    3494 {
    3495  ItemType* const newItem = InsertAfter(pItem);
    3496  newItem->Value = value;
    3497  return newItem;
    3498 }
    3499 
    3500 template<typename T, typename AllocatorT>
    3501 class VmaList
    3502 {
    3503  VMA_CLASS_NO_COPY(VmaList)
    3504 public:
    3505  class iterator
    3506  {
    3507  public:
    3508  iterator() :
    3509  m_pList(VMA_NULL),
    3510  m_pItem(VMA_NULL)
    3511  {
    3512  }
    3513 
    3514  T& operator*() const
    3515  {
    3516  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    3517  return m_pItem->Value;
    3518  }
    3519  T* operator->() const
    3520  {
    3521  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    3522  return &m_pItem->Value;
    3523  }
    3524 
    3525  iterator& operator++()
    3526  {
    3527  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    3528  m_pItem = m_pItem->pNext;
    3529  return *this;
    3530  }
    3531  iterator& operator--()
    3532  {
    3533  if(m_pItem != VMA_NULL)
    3534  {
    3535  m_pItem = m_pItem->pPrev;
    3536  }
    3537  else
    3538  {
    3539  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
    3540  m_pItem = m_pList->Back();
    3541  }
    3542  return *this;
    3543  }
    3544 
    3545  iterator operator++(int)
    3546  {
    3547  iterator result = *this;
    3548  ++*this;
    3549  return result;
    3550  }
    3551  iterator operator--(int)
    3552  {
    3553  iterator result = *this;
    3554  --*this;
    3555  return result;
    3556  }
    3557 
    3558  bool operator==(const iterator& rhs) const
    3559  {
    3560  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    3561  return m_pItem == rhs.m_pItem;
    3562  }
    3563  bool operator!=(const iterator& rhs) const
    3564  {
    3565  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    3566  return m_pItem != rhs.m_pItem;
    3567  }
    3568 
    3569  private:
    3570  VmaRawList<T>* m_pList;
    3571  VmaListItem<T>* m_pItem;
    3572 
    3573  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
    3574  m_pList(pList),
    3575  m_pItem(pItem)
    3576  {
    3577  }
    3578 
    3579  friend class VmaList<T, AllocatorT>;
    3580  };
    3581 
    3582  class const_iterator
    3583  {
    3584  public:
    3585  const_iterator() :
    3586  m_pList(VMA_NULL),
    3587  m_pItem(VMA_NULL)
    3588  {
    3589  }
    3590 
    3591  const_iterator(const iterator& src) :
    3592  m_pList(src.m_pList),
    3593  m_pItem(src.m_pItem)
    3594  {
    3595  }
    3596 
    3597  const T& operator*() const
    3598  {
    3599  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    3600  return m_pItem->Value;
    3601  }
    3602  const T* operator->() const
    3603  {
    3604  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    3605  return &m_pItem->Value;
    3606  }
    3607 
    3608  const_iterator& operator++()
    3609  {
    3610  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    3611  m_pItem = m_pItem->pNext;
    3612  return *this;
    3613  }
    3614  const_iterator& operator--()
    3615  {
    3616  if(m_pItem != VMA_NULL)
    3617  {
    3618  m_pItem = m_pItem->pPrev;
    3619  }
    3620  else
    3621  {
    3622  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
    3623  m_pItem = m_pList->Back();
    3624  }
    3625  return *this;
    3626  }
    3627 
    3628  const_iterator operator++(int)
    3629  {
    3630  const_iterator result = *this;
    3631  ++*this;
    3632  return result;
    3633  }
    3634  const_iterator operator--(int)
    3635  {
    3636  const_iterator result = *this;
    3637  --*this;
    3638  return result;
    3639  }
    3640 
    3641  bool operator==(const const_iterator& rhs) const
    3642  {
    3643  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    3644  return m_pItem == rhs.m_pItem;
    3645  }
    3646  bool operator!=(const const_iterator& rhs) const
    3647  {
    3648  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    3649  return m_pItem != rhs.m_pItem;
    3650  }
    3651 
    3652  private:
    3653  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
    3654  m_pList(pList),
    3655  m_pItem(pItem)
    3656  {
    3657  }
    3658 
    3659  const VmaRawList<T>* m_pList;
    3660  const VmaListItem<T>* m_pItem;
    3661 
    3662  friend class VmaList<T, AllocatorT>;
    3663  };
    3664 
    3665  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
    3666 
    3667  bool empty() const { return m_RawList.IsEmpty(); }
    3668  size_t size() const { return m_RawList.GetCount(); }
    3669 
    3670  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    3671  iterator end() { return iterator(&m_RawList, VMA_NULL); }
    3672 
    3673  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    3674  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
    3675 
    3676  void clear() { m_RawList.Clear(); }
    3677  void push_back(const T& value) { m_RawList.PushBack(value); }
    3678  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    3679  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
    3680 
    3681 private:
    3682  VmaRawList<T> m_RawList;
    3683 };
    3684 
    3685 #endif // #if VMA_USE_STL_LIST
    3686 
    3688 // class VmaMap
    3689 
    3690 // Unused in this version.
    3691 #if 0
    3692 
    3693 #if VMA_USE_STL_UNORDERED_MAP
    3694 
    3695 #define VmaPair std::pair
    3696 
    3697 #define VMA_MAP_TYPE(KeyT, ValueT) \
    3698  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    3699 
    3700 #else // #if VMA_USE_STL_UNORDERED_MAP
    3701 
    3702 template<typename T1, typename T2>
    3703 struct VmaPair
    3704 {
    3705  T1 first;
    3706  T2 second;
    3707 
    3708  VmaPair() : first(), second() { }
    3709  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
    3710 };
    3711 
    3712 /* Class compatible with a subset of the interface of std::unordered_map.
    3713 KeyT, ValueT must be POD because they will be stored in VmaVector.
    3714 (A standalone sketch of the sorted-vector technique follows this disabled block.) */
    3715 template<typename KeyT, typename ValueT>
    3716 class VmaMap
    3717 {
    3718 public:
    3719  typedef VmaPair<KeyT, ValueT> PairType;
    3720  typedef PairType* iterator;
    3721 
    3722  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
    3723 
    3724  iterator begin() { return m_Vector.begin(); }
    3725  iterator end() { return m_Vector.end(); }
    3726 
    3727  void insert(const PairType& pair);
    3728  iterator find(const KeyT& key);
    3729  void erase(iterator it);
    3730 
    3731 private:
    3732  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
    3733 };
    3734 
    3735 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    3736 
    3737 template<typename FirstT, typename SecondT>
    3738 struct VmaPairFirstLess
    3739 {
    3740  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    3741  {
    3742  return lhs.first < rhs.first;
    3743  }
    3744  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    3745  {
    3746  return lhs.first < rhsFirst;
    3747  }
    3748 };
    3749 
    3750 template<typename KeyT, typename ValueT>
    3751 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
    3752 {
    3753  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    3754  m_Vector.data(),
    3755  m_Vector.data() + m_Vector.size(),
    3756  pair,
    3757  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    3758  VmaVectorInsert(m_Vector, indexToInsert, pair);
    3759 }
    3760 
    3761 template<typename KeyT, typename ValueT>
    3762 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
    3763 {
    3764  PairType* it = VmaBinaryFindFirstNotLess(
    3765  m_Vector.data(),
    3766  m_Vector.data() + m_Vector.size(),
    3767  key,
    3768  VmaPairFirstLess<KeyT, ValueT>());
    3769  if((it != m_Vector.end()) && (it->first == key))
    3770  {
    3771  return it;
    3772  }
    3773  else
    3774  {
    3775  return m_Vector.end();
    3776  }
    3777 }
    3778 
    3779 template<typename KeyT, typename ValueT>
    3780 void VmaMap<KeyT, ValueT>::erase(iterator it)
    3781 {
    3782  VmaVectorRemove(m_Vector, it - m_Vector.begin());
    3783 }
    3784 
    3785 #endif // #if VMA_USE_STL_UNORDERED_MAP
    3786 
    3787 #endif // #if 0
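
#if 0 // Editor's sketch: the sorted-vector map technique that VmaMap (disabled
// above) implements with VmaVector and VmaBinaryFindFirstNotLess, shown
// standalone with std::vector and std::lower_bound. All names here are
// hypothetical illustrations, not part of the library.
#include <vector>
#include <algorithm>
#include <utility>

template<typename KeyT, typename ValueT>
class SortedVectorMap
{
public:
    typedef std::pair<KeyT, ValueT> PairType;
    typedef typename std::vector<PairType>::iterator iterator;

    // Insertion keeps the vector sorted by key, so lookups can binary-search.
    void insert(const PairType& pair)
    {
        const iterator it = std::lower_bound(m_Vector.begin(), m_Vector.end(), pair,
            [](const PairType& lhs, const PairType& rhs) { return lhs.first < rhs.first; });
        m_Vector.insert(it, pair);
    }

    // O(log n) lookup, analogous to VmaMap::find.
    iterator find(const KeyT& key)
    {
        const iterator it = std::lower_bound(m_Vector.begin(), m_Vector.end(), key,
            [](const PairType& lhs, const KeyT& rhsKey) { return lhs.first < rhsKey; });
        return (it != m_Vector.end() && it->first == key) ? it : m_Vector.end();
    }

    iterator end() { return m_Vector.end(); }

private:
    std::vector<PairType> m_Vector;
};
#endif // Editor's sketch
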
    3788 
    3790 
    3791 class VmaDeviceMemoryBlock;
    3792 
    3793 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
    3794 
    3795 struct VmaAllocation_T
    3796 {
    3797  VMA_CLASS_NO_COPY(VmaAllocation_T)
    3798 private:
    3799  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
    3800 
    3801  enum FLAGS
    3802  {
    3803  FLAG_USER_DATA_STRING = 0x01,
    3804  };
    3805 
    3806 public:
    3807  enum ALLOCATION_TYPE
    3808  {
    3809  ALLOCATION_TYPE_NONE,
    3810  ALLOCATION_TYPE_BLOCK,
    3811  ALLOCATION_TYPE_DEDICATED,
    3812  };
    3813 
    3814  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
    3815  m_Alignment(1),
    3816  m_Size(0),
    3817  m_pUserData(VMA_NULL),
    3818  m_LastUseFrameIndex(currentFrameIndex),
    3819  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
    3820  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
    3821  m_MapCount(0),
    3822  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    3823  {
    3824 #if VMA_STATS_STRING_ENABLED
    3825  m_CreationFrameIndex = currentFrameIndex;
    3826  m_BufferImageUsage = 0;
    3827 #endif
    3828  }
    3829 
    3830  ~VmaAllocation_T()
    3831  {
    3832  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
    3833 
    3834  // Check that the owned string was freed.
    3835  VMA_ASSERT(m_pUserData == VMA_NULL);
    3836  }
    3837 
    3838  void InitBlockAllocation(
    3839  VmaPool hPool,
    3840  VmaDeviceMemoryBlock* block,
    3841  VkDeviceSize offset,
    3842  VkDeviceSize alignment,
    3843  VkDeviceSize size,
    3844  VmaSuballocationType suballocationType,
    3845  bool mapped,
    3846  bool canBecomeLost)
    3847  {
    3848  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    3849  VMA_ASSERT(block != VMA_NULL);
    3850  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
    3851  m_Alignment = alignment;
    3852  m_Size = size;
    3853  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
    3854  m_SuballocationType = (uint8_t)suballocationType;
    3855  m_BlockAllocation.m_hPool = hPool;
    3856  m_BlockAllocation.m_Block = block;
    3857  m_BlockAllocation.m_Offset = offset;
    3858  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    3859  }
    3860 
    3861  void InitLost()
    3862  {
    3863  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    3864  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
    3865  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
    3866  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
    3867  m_BlockAllocation.m_Block = VMA_NULL;
    3868  m_BlockAllocation.m_Offset = 0;
    3869  m_BlockAllocation.m_CanBecomeLost = true;
    3870  }
    3871 
    3872  void ChangeBlockAllocation(
    3873  VmaAllocator hAllocator,
    3874  VmaDeviceMemoryBlock* block,
    3875  VkDeviceSize offset);
    3876 
    3877  // A non-null pMappedData means the allocation was created with the MAPPED flag.
    3878  void InitDedicatedAllocation(
    3879  uint32_t memoryTypeIndex,
    3880  VkDeviceMemory hMemory,
    3881  VmaSuballocationType suballocationType,
    3882  void* pMappedData,
    3883  VkDeviceSize size)
    3884  {
    3885  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    3886  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
    3887  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
    3888  m_Alignment = 0;
    3889  m_Size = size;
    3890  m_SuballocationType = (uint8_t)suballocationType;
    3891  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
    3892  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
    3893  m_DedicatedAllocation.m_hMemory = hMemory;
    3894  m_DedicatedAllocation.m_pMappedData = pMappedData;
    3895  }
    3896 
    3897  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    3898  VkDeviceSize GetAlignment() const { return m_Alignment; }
    3899  VkDeviceSize GetSize() const { return m_Size; }
    3900  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    3901  void* GetUserData() const { return m_pUserData; }
    3902  void SetUserData(VmaAllocator hAllocator, void* pUserData);
    3903  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
    3904 
    3905  VmaDeviceMemoryBlock* GetBlock() const
    3906  {
    3907  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    3908  return m_BlockAllocation.m_Block;
    3909  }
    3910  VkDeviceSize GetOffset() const;
    3911  VkDeviceMemory GetMemory() const;
    3912  uint32_t GetMemoryTypeIndex() const;
    3913  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    3914  void* GetMappedData() const;
    3915  bool CanBecomeLost() const;
    3916  VmaPool GetPool() const;
    3917 
    3918  uint32_t GetLastUseFrameIndex() const
    3919  {
    3920  return m_LastUseFrameIndex.load();
    3921  }
    3922  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    3923  {
    3924  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    3925  }
    3926  /*
    3927  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
    3928  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    3929  - Else, returns false.
    3930 
    3931  If hAllocation is already lost, this function asserts - you should not call it then.
    3932  If hAllocation was not created with CAN_BECOME_LOST_BIT, it asserts as well.
    3933  (The frame-index test is illustrated in the sketch after this struct.) */
    3934  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    3935 
    3936  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    3937  {
    3938  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
    3939  outInfo.blockCount = 1;
    3940  outInfo.allocationCount = 1;
    3941  outInfo.unusedRangeCount = 0;
    3942  outInfo.usedBytes = m_Size;
    3943  outInfo.unusedBytes = 0;
    3944  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
    3945  outInfo.unusedRangeSizeMin = UINT64_MAX;
    3946  outInfo.unusedRangeSizeMax = 0;
    3947  }
    3948 
    3949  void BlockAllocMap();
    3950  void BlockAllocUnmap();
    3951  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    3952  void DedicatedAllocUnmap(VmaAllocator hAllocator);
    3953 
    3954 #if VMA_STATS_STRING_ENABLED
    3955  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    3956  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
    3957 
    3958  void InitBufferImageUsage(uint32_t bufferImageUsage)
    3959  {
    3960  VMA_ASSERT(m_BufferImageUsage == 0);
    3961  m_BufferImageUsage = bufferImageUsage;
    3962  }
    3963 
    3964  void PrintParameters(class VmaJsonWriter& json) const;
    3965 #endif
    3966 
    3967 private:
    3968  VkDeviceSize m_Alignment;
    3969  VkDeviceSize m_Size;
    3970  void* m_pUserData;
    3971  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    3972  uint8_t m_Type; // ALLOCATION_TYPE
    3973  uint8_t m_SuballocationType; // VmaSuballocationType
    3974  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    3975  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    3976  uint8_t m_MapCount;
    3977  uint8_t m_Flags; // enum FLAGS
    3978 
    3979  // Allocation out of VmaDeviceMemoryBlock.
    3980  struct BlockAllocation
    3981  {
    3982  VmaPool m_hPool; // Null if belongs to general memory.
    3983  VmaDeviceMemoryBlock* m_Block;
    3984  VkDeviceSize m_Offset;
    3985  bool m_CanBecomeLost;
    3986  };
    3987 
    3988  // Allocation for an object that has its own private VkDeviceMemory.
    3989  struct DedicatedAllocation
    3990  {
    3991  uint32_t m_MemoryTypeIndex;
    3992  VkDeviceMemory m_hMemory;
    3993  void* m_pMappedData; // Not null means memory is mapped.
    3994  };
    3995 
    3996  union
    3997  {
    3998  // Allocation out of VmaDeviceMemoryBlock.
    3999  BlockAllocation m_BlockAllocation;
    4000  // Allocation for an object that has its own private VkDeviceMemory.
    4001  DedicatedAllocation m_DedicatedAllocation;
    4002  };
    4003 
    4004 #if VMA_STATS_STRING_ENABLED
    4005  uint32_t m_CreationFrameIndex;
    4006  uint32_t m_BufferImageUsage; // 0 if unknown.
    4007 #endif
    4008 
    4009  void FreeUserDataString(VmaAllocator hAllocator);
    4010 };
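
#if 0 // Editor's sketch: the frame-index test documented at MakeLost() above,
// standalone. An allocation may only be made lost when its last use is more
// than frameInUseCount frames behind the current frame. Names are
// hypothetical; not part of the library.
#include <cstdint>

static bool IsOldEnoughToMakeLost(
    uint32_t lastUseFrameIndex, uint32_t frameInUseCount, uint32_t currentFrameIndex)
{
    // Example: lastUse = 10, frameInUseCount = 2, current = 13.
    // Frames 11 and 12 may still reference the allocation on the GPU,
    // but 10 + 2 < 13, so frame 13 may reclaim it.
    return lastUseFrameIndex + frameInUseCount < currentFrameIndex;
}
#endif // Editor's sketch
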
    4011 
    4012 /*
    4013 Represents a region of a VmaDeviceMemoryBlock that is either assigned to an
    4014 allocation and returned as allocated memory, or free.
    4015 */
    4016 struct VmaSuballocation
    4017 {
    4018  VkDeviceSize offset;
    4019  VkDeviceSize size;
    4020  VmaAllocation hAllocation;
    4021  VmaSuballocationType type;
    4022 };
    4023 
    4024 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
    4025 
    4026 // Cost of making one more allocation lost, expressed as an equivalent number of bytes.
    4027 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    4028 
    4029 /*
    4030 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
    4031 
    4032 If canMakeOtherLost was false:
    4033 - item points to a FREE suballocation.
    4034 - itemsToMakeLostCount is 0.
    4035 
    4036 If canMakeOtherLost was true:
    4037 - item points to the first of a sequence of suballocations, which are either FREE
    4038  or point to VmaAllocations that can become lost.
    4039 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
    4040  the requested allocation to succeed.
    4041 */
    4042 struct VmaAllocationRequest
    4043 {
    4044  VkDeviceSize offset;
    4045  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    4046  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    4047  VmaSuballocationList::iterator item;
    4048  size_t itemsToMakeLostCount;
    4049 
    4050  VkDeviceSize CalcCost() const
    4051  {
    4052  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    4053  }
    4054 };
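
#if 0 // Editor's sketch: a worked example of VmaAllocationRequest::CalcCost().
// With VMA_LOST_ALLOCATION_COST = 1048576 (1 MiB), a candidate request that
// overlaps 262144 bytes of allocations to be made lost, losing 2 of them,
// costs 262144 + 2 * 1048576 = 2359296 - so each lost allocation is weighted
// like wasting 1 MiB, and the cheapest candidate wins. Standalone
// illustration; names are hypothetical.
#include <cstdint>
#include <cstddef>

static const uint64_t kLostAllocationCost = 1048576;

static uint64_t CalcRequestCost(uint64_t sumItemSize, size_t itemsToMakeLostCount)
{
    return sumItemSize + itemsToMakeLostCount * kLostAllocationCost;
}
#endif // Editor's sketch
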
    4055 
    4056 /*
    4057 Data structure used for bookkeeping of allocations and unused ranges of memory
    4058 in a single VkDeviceMemory block.
    4059 */
    4060 class VmaBlockMetadata
    4061 {
    4062  VMA_CLASS_NO_COPY(VmaBlockMetadata)
    4063 public:
    4064  VmaBlockMetadata(VmaAllocator hAllocator);
    4065  ~VmaBlockMetadata();
    4066  void Init(VkDeviceSize size);
    4067 
    4068  // Validates all data structures inside this object. If not valid, returns false.
    4069  bool Validate() const;
    4070  VkDeviceSize GetSize() const { return m_Size; }
    4071  size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    4072  VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    4073  VkDeviceSize GetUnusedRangeSizeMax() const;
    4074  // Returns true if this block is empty - i.e. contains only a single free suballocation.
    4075  bool IsEmpty() const;
    4076 
    4077  void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    4078  void AddPoolStats(VmaPoolStats& inoutStats) const;
    4079 
    4080 #if VMA_STATS_STRING_ENABLED
    4081  void PrintDetailedMap(class VmaJsonWriter& json) const;
    4082 #endif
    4083 
    4084  // Creates a trivial request for the case when the block is empty.
    4085  void CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest);
    4086 
    4087  // Tries to find a place for suballocation with given parameters inside this block.
    4088  // If succeeded, fills pAllocationRequest and returns true.
    4089  // If failed, returns false.
    4090  bool CreateAllocationRequest(
    4091  uint32_t currentFrameIndex,
    4092  uint32_t frameInUseCount,
    4093  VkDeviceSize bufferImageGranularity,
    4094  VkDeviceSize allocSize,
    4095  VkDeviceSize allocAlignment,
    4096  VmaSuballocationType allocType,
    4097  bool canMakeOtherLost,
    4098  VmaAllocationRequest* pAllocationRequest);
    4099 
    4100  bool MakeRequestedAllocationsLost(
    4101  uint32_t currentFrameIndex,
    4102  uint32_t frameInUseCount,
    4103  VmaAllocationRequest* pAllocationRequest);
    4104 
    4105  uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    4106 
    4107  // Makes actual allocation based on request. Request must already be checked and valid.
    4108  void Alloc(
    4109  const VmaAllocationRequest& request,
    4110  VmaSuballocationType type,
    4111  VkDeviceSize allocSize,
    4112  VmaAllocation hAllocation);
    4113 
    4114  // Frees suballocation assigned to given memory region.
    4115  void Free(const VmaAllocation allocation);
    4116  void FreeAtOffset(VkDeviceSize offset);
    4117 
    4118 private:
    4119  VkDeviceSize m_Size;
    4120  uint32_t m_FreeCount;
    4121  VkDeviceSize m_SumFreeSize;
    4122  VmaSuballocationList m_Suballocations;
    4123  // Suballocations that are free and have size greater than certain threshold.
    4124  // Sorted by size, ascending - enabling binary-search best-fit lookup (see the sketch after this class).
    4125  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
    4126 
    4127  bool ValidateFreeSuballocationList() const;
    4128 
    4129  // Checks if a requested suballocation with the given parameters can be placed at the given suballocItem.
    4130  // If yes, fills pOffset and returns true. If no, returns false.
    4131  bool CheckAllocation(
    4132  uint32_t currentFrameIndex,
    4133  uint32_t frameInUseCount,
    4134  VkDeviceSize bufferImageGranularity,
    4135  VkDeviceSize allocSize,
    4136  VkDeviceSize allocAlignment,
    4137  VmaSuballocationType allocType,
    4138  VmaSuballocationList::const_iterator suballocItem,
    4139  bool canMakeOtherLost,
    4140  VkDeviceSize* pOffset,
    4141  size_t* itemsToMakeLostCount,
    4142  VkDeviceSize* pSumFreeSize,
    4143  VkDeviceSize* pSumItemSize) const;
    4144  // Merges the given free suballocation with the following one, which must also be free.
    4145  void MergeFreeWithNext(VmaSuballocationList::iterator item);
    4146  // Releases given suballocation, making it free.
    4147  // Merges it with adjacent free suballocations if applicable.
    4148  // Returns iterator to new free suballocation at this place.
    4149  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    4150  // Inserts the given free suballocation into the sorted list
    4151  // m_FreeSuballocationsBySize, if it qualifies by size.
    4152  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    4153  // Removes the given free suballocation from the sorted list
    4154  // m_FreeSuballocationsBySize, if it was registered there.
    4155  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
    4156 };
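
#if 0 // Editor's sketch: why m_FreeSuballocationsBySize above is kept sorted
// by size ascending - the smallest free range that still fits allocSize is
// then found with one binary search. Standalone illustration with
// std::vector; names are hypothetical.
#include <vector>
#include <algorithm>
#include <cstdint>
#include <cstddef>

// Returns the index of the smallest free size >= allocSize, or SIZE_MAX if none fits.
static size_t FindBestFitFreeRange(const std::vector<uint64_t>& freeSizesAscending, uint64_t allocSize)
{
    const std::vector<uint64_t>::const_iterator it =
        std::lower_bound(freeSizesAscending.begin(), freeSizesAscending.end(), allocSize);
    return (it != freeSizesAscending.end()) ? (size_t)(it - freeSizesAscending.begin()) : SIZE_MAX;
}
#endif // Editor's sketch
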
    4157 
    4158 /*
    4159 Represents a single block of device memory (`VkDeviceMemory`) with all the
    4160 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
    4161 
    4162 Thread-safety: This class must be externally synchronized.
    4163 */
    4164 class VmaDeviceMemoryBlock
    4165 {
    4166  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
    4167 public:
    4168  VmaBlockMetadata m_Metadata;
    4169 
    4170  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
    4171 
    4172  ~VmaDeviceMemoryBlock()
    4173  {
    4174  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
    4175  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    4176  }
    4177 
    4178  // Always call after construction.
    4179  void Init(
    4180  uint32_t newMemoryTypeIndex,
    4181  VkDeviceMemory newMemory,
    4182  VkDeviceSize newSize,
    4183  uint32_t id);
    4184  // Always call before destruction.
    4185  void Destroy(VmaAllocator allocator);
    4186 
    4187  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    4188  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    4189  uint32_t GetId() const { return m_Id; }
    4190  void* GetMappedData() const { return m_pMappedData; }
    4191 
    4192  // Validates all data structures inside this object. If not valid, returns false.
    4193  bool Validate() const;
    4194 
    4195  // ppData can be null.
    4196  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    4197  void Unmap(VmaAllocator hAllocator, uint32_t count);
    4198 
    4199  VkResult BindBufferMemory(
    4200  const VmaAllocator hAllocator,
    4201  const VmaAllocation hAllocation,
    4202  VkBuffer hBuffer);
    4203  VkResult BindImageMemory(
    4204  const VmaAllocator hAllocator,
    4205  const VmaAllocation hAllocation,
    4206  VkImage hImage);
    4207 
    4208 private:
    4209  uint32_t m_MemoryTypeIndex;
    4210  uint32_t m_Id;
    4211  VkDeviceMemory m_hMemory;
    4212 
    4213  // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    4214  // Also protects m_MapCount, m_pMappedData.
    4215  VMA_MUTEX m_Mutex;
    4216  uint32_t m_MapCount;
    4217  void* m_pMappedData;
    4218 };
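
#if 0 // Editor's sketch: the reference-counted mapping scheme behind
// VmaDeviceMemoryBlock::Map/Unmap, standalone. Memory is mapped on the first
// request, stays mapped while the count is non-zero, and is unmapped when the
// count returns to zero; a mutex guards the count and the pointer. All names
// are hypothetical; PlatformMap/PlatformUnmap stand in for
// vkMapMemory/vkUnmapMemory.
#include <mutex>
#include <cstdint>

class MappedRegion
{
public:
    void* Map()
    {
        std::lock_guard<std::mutex> lock(m_Mutex);
        if(m_MapCount++ == 0)
        {
            m_pMappedData = PlatformMap();
        }
        return m_pMappedData;
    }
    void Unmap()
    {
        std::lock_guard<std::mutex> lock(m_Mutex);
        if(m_MapCount == 0)
        {
            return; // Unbalanced Unmap - the library asserts in this case.
        }
        if(--m_MapCount == 0)
        {
            PlatformUnmap();
            m_pMappedData = nullptr;
        }
    }
private:
    void* PlatformMap() { return &m_Backing; } // Placeholder backing store.
    void PlatformUnmap() { }
    std::mutex m_Mutex;
    uint32_t m_MapCount = 0;
    void* m_pMappedData = nullptr;
    char m_Backing = 0;
};
#endif // Editor's sketch
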
    4219 
    4220 struct VmaPointerLess
    4221 {
    4222  bool operator()(const void* lhs, const void* rhs) const
    4223  {
    4224  return lhs < rhs;
    4225  }
    4226 };
    4227 
    4228 class VmaDefragmentator;
    4229 
    4230 /*
    4231 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
    4232 Vulkan memory type.
    4233 
    4234 Synchronized internally with a mutex.
    4235 */
    4236 struct VmaBlockVector
    4237 {
    4238  VMA_CLASS_NO_COPY(VmaBlockVector)
    4239 public:
    4240  VmaBlockVector(
    4241  VmaAllocator hAllocator,
    4242  uint32_t memoryTypeIndex,
    4243  VkDeviceSize preferredBlockSize,
    4244  size_t minBlockCount,
    4245  size_t maxBlockCount,
    4246  VkDeviceSize bufferImageGranularity,
    4247  uint32_t frameInUseCount,
    4248  bool isCustomPool);
    4249  ~VmaBlockVector();
    4250 
    4251  VkResult CreateMinBlocks();
    4252 
    4253  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    4254  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    4255  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    4256  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    4257 
    4258  void GetPoolStats(VmaPoolStats* pStats);
    4259 
    4260  bool IsEmpty() const { return m_Blocks.empty(); }
    4261 
    4262  VkResult Allocate(
    4263  VmaPool hCurrentPool,
    4264  uint32_t currentFrameIndex,
    4265  VkDeviceSize size,
    4266  VkDeviceSize alignment,
    4267  const VmaAllocationCreateInfo& createInfo,
    4268  VmaSuballocationType suballocType,
    4269  VmaAllocation* pAllocation);
    4270 
    4271  void Free(
    4272  VmaAllocation hAllocation);
    4273 
    4274  // Adds statistics of this BlockVector to pStats.
    4275  void AddStats(VmaStats* pStats);
    4276 
    4277 #if VMA_STATS_STRING_ENABLED
    4278  void PrintDetailedMap(class VmaJsonWriter& json);
    4279 #endif
    4280 
    4281  void MakePoolAllocationsLost(
    4282  uint32_t currentFrameIndex,
    4283  size_t* pLostAllocationCount);
    4284 
    4285  VmaDefragmentator* EnsureDefragmentator(
    4286  VmaAllocator hAllocator,
    4287  uint32_t currentFrameIndex);
    4288 
    4289  VkResult Defragment(
    4290  VmaDefragmentationStats* pDefragmentationStats,
    4291  VkDeviceSize& maxBytesToMove,
    4292  uint32_t& maxAllocationsToMove);
    4293 
    4294  void DestroyDefragmentator();
    4295 
    4296 private:
    4297  friend class VmaDefragmentator;
    4298 
    4299  const VmaAllocator m_hAllocator;
    4300  const uint32_t m_MemoryTypeIndex;
    4301  const VkDeviceSize m_PreferredBlockSize;
    4302  const size_t m_MinBlockCount;
    4303  const size_t m_MaxBlockCount;
    4304  const VkDeviceSize m_BufferImageGranularity;
    4305  const uint32_t m_FrameInUseCount;
    4306  const bool m_IsCustomPool;
    4307  VMA_MUTEX m_Mutex;
    4308  // Incrementally sorted by sumFreeSize, ascending.
    4309  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    4310  /* There can be at most one block that is completely empty - a
    4311  hysteresis to avoid the pessimistic case of alternating creation and
    4312  destruction of a VkDeviceMemory. */
    4313  bool m_HasEmptyBlock;
    4314  VmaDefragmentator* m_pDefragmentator;
    4315  uint32_t m_NextBlockId;
    4316 
    4317  VkDeviceSize CalcMaxBlockSize() const;
    4318 
    4319  // Finds and removes given block from vector.
    4320  void Remove(VmaDeviceMemoryBlock* pBlock);
    4321 
    4322  // Performs a single step in sorting m_Blocks. They may not be fully sorted
    4323  // after this call; the order converges over repeated calls (see the sketch after this struct).
    4324  void IncrementallySortBlocks();
    4325 
    4326  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
    4327 };
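
#if 0 // Editor's sketch: the amortized sorting that IncrementallySortBlocks()
// describes - one adjacent swap per call moves the sequence toward ascending
// order by sumFreeSize without paying for a full sort on every allocation.
// Standalone illustration over plain integers; names are hypothetical.
#include <vector>
#include <utility>
#include <cstdint>
#include <cstddef>

static void IncrementalSortStep(std::vector<uint64_t>& sumFreeSizes)
{
    for(size_t i = 1; i < sumFreeSizes.size(); ++i)
    {
        if(sumFreeSizes[i - 1] > sumFreeSizes[i])
        {
            std::swap(sumFreeSizes[i - 1], sumFreeSizes[i]);
            return; // At most one swap per call; order converges over repeated calls.
        }
    }
}
#endif // Editor's sketch
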
    4328 
    4329 struct VmaPool_T
    4330 {
    4331  VMA_CLASS_NO_COPY(VmaPool_T)
    4332 public:
    4333  VmaBlockVector m_BlockVector;
    4334 
    4335  VmaPool_T(
    4336  VmaAllocator hAllocator,
    4337  const VmaPoolCreateInfo& createInfo);
    4338  ~VmaPool_T();
    4339 
    4340  VmaBlockVector& GetBlockVector() { return m_BlockVector; }
    4341  uint32_t GetId() const { return m_Id; }
    4342  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
    4343 
    4344 #if VMA_STATS_STRING_ENABLED
    4345  //void PrintDetailedMap(class VmaStringBuilder& sb);
    4346 #endif
    4347 
    4348 private:
    4349  uint32_t m_Id;
    4350 };
    4351 
    4352 class VmaDefragmentator
    4353 {
    4354  VMA_CLASS_NO_COPY(VmaDefragmentator)
    4355 private:
    4356  const VmaAllocator m_hAllocator;
    4357  VmaBlockVector* const m_pBlockVector;
    4358  uint32_t m_CurrentFrameIndex;
    4359  VkDeviceSize m_BytesMoved;
    4360  uint32_t m_AllocationsMoved;
    4361 
    4362  struct AllocationInfo
    4363  {
    4364  VmaAllocation m_hAllocation;
    4365  VkBool32* m_pChanged;
    4366 
    4367  AllocationInfo() :
    4368  m_hAllocation(VK_NULL_HANDLE),
    4369  m_pChanged(VMA_NULL)
    4370  {
    4371  }
    4372  };
    4373 
    4374  struct AllocationInfoSizeGreater
    4375  {
    4376  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
    4377  {
    4378  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
    4379  }
    4380  };
    4381 
    4382  // Used between AddAllocation and Defragment.
    4383  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
    4384 
    4385  struct BlockInfo
    4386  {
    4387  VmaDeviceMemoryBlock* m_pBlock;
    4388  bool m_HasNonMovableAllocations;
    4389  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
    4390 
    4391  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
    4392  m_pBlock(VMA_NULL),
    4393  m_HasNonMovableAllocations(true),
    4394  m_Allocations(pAllocationCallbacks),
    4395  m_pMappedDataForDefragmentation(VMA_NULL)
    4396  {
    4397  }
    4398 
    4399  void CalcHasNonMovableAllocations()
    4400  {
    4401  const size_t blockAllocCount = m_pBlock->m_Metadata.GetAllocationCount();
    4402  const size_t defragmentAllocCount = m_Allocations.size();
    4403  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
    4404  }
    4405 
    4406  void SortAllocationsBySizeDescecnding()
    4407  {
    4408  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
    4409  }
    4410 
    4411  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
    4412  void Unmap(VmaAllocator hAllocator);
    4413 
    4414  private:
    4415  // Not null if mapped for defragmentation only, not originally mapped.
    4416  void* m_pMappedDataForDefragmentation;
    4417  };
    4418 
    4419  struct BlockPointerLess
    4420  {
    4421  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
    4422  {
    4423  return pLhsBlockInfo->m_pBlock < pRhsBlock;
    4424  }
    4425  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
    4426  {
    4427  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
    4428  }
    4429  };
    4430 
    4431  // 1. Blocks with some non-movable allocations go first.
    4432  // 2. Blocks with smaller sumFreeSize go first.
    4433  struct BlockInfoCompareMoveDestination
    4434  {
    4435  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
    4436  {
    4437  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
    4438  {
    4439  return true;
    4440  }
    4441  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
    4442  {
    4443  return false;
    4444  }
    4445  if(pLhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize())
    4446  {
    4447  return true;
    4448  }
    4449  return false;
    4450  }
    4451  };
    4452 
    4453  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    4454  BlockInfoVector m_Blocks;
    4455 
    4456  VkResult DefragmentRound(
    4457  VkDeviceSize maxBytesToMove,
    4458  uint32_t maxAllocationsToMove);
    4459 
    4460  static bool MoveMakesSense(
    4461  size_t dstBlockIndex, VkDeviceSize dstOffset,
    4462  size_t srcBlockIndex, VkDeviceSize srcOffset);
    4463 
    4464 public:
    4465  VmaDefragmentator(
    4466  VmaAllocator hAllocator,
    4467  VmaBlockVector* pBlockVector,
    4468  uint32_t currentFrameIndex);
    4469 
    4470  ~VmaDefragmentator();
    4471 
    4472  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    4473  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
    4474 
    4475  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    4476 
    4477  VkResult Defragment(
    4478  VkDeviceSize maxBytesToMove,
    4479  uint32_t maxAllocationsToMove);
    4480 };
    4481 
    4482 // Main allocator object.
    4483 struct VmaAllocator_T
    4484 {
    4485  VMA_CLASS_NO_COPY(VmaAllocator_T)
    4486 public:
    4487  bool m_UseMutex;
    4488  bool m_UseKhrDedicatedAllocation;
    4489  VkDevice m_hDevice;
    4490  bool m_AllocationCallbacksSpecified;
    4491  VkAllocationCallbacks m_AllocationCallbacks;
    4492  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
    4493 
    4494  // Number of bytes free out of the limit, or VK_WHOLE_SIZE if there is no limit for that heap.
    4495  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    4496  VMA_MUTEX m_HeapSizeLimitMutex;
    4497 
    4498  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    4499  VkPhysicalDeviceMemoryProperties m_MemProps;
    4500 
    4501  // Default pools.
    4502  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
    4503 
    4504  // Each vector is sorted by memory (handle value).
    4505  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    4506  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    4507  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
    4508 
    4509  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    4510  ~VmaAllocator_T();
    4511 
    4512  const VkAllocationCallbacks* GetAllocationCallbacks() const
    4513  {
    4514  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    4515  }
    4516  const VmaVulkanFunctions& GetVulkanFunctions() const
    4517  {
    4518  return m_VulkanFunctions;
    4519  }
    4520 
    4521  VkDeviceSize GetBufferImageGranularity() const
    4522  {
    4523  return VMA_MAX(
    4524  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
    4525  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    4526  }
    4527 
    4528  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    4529  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
    4530 
    4531  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    4532  {
    4533  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
    4534  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    4535  }
    4536  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    4537  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    4538  {
    4539  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
    4540  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    4541  }
    4542  // Minimum alignment for all allocations in a specific memory type; for non-coherent types at least nonCoherentAtomSize (see the sketch after this struct).
    4543  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    4544  {
    4545  return IsMemoryTypeNonCoherent(memTypeIndex) ?
    4546  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
    4547  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    4548  }
    4549 
    4550  bool IsIntegratedGpu() const
    4551  {
    4552  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    4553  }
    4554 
    4555  void GetBufferMemoryRequirements(
    4556  VkBuffer hBuffer,
    4557  VkMemoryRequirements& memReq,
    4558  bool& requiresDedicatedAllocation,
    4559  bool& prefersDedicatedAllocation) const;
    4560  void GetImageMemoryRequirements(
    4561  VkImage hImage,
    4562  VkMemoryRequirements& memReq,
    4563  bool& requiresDedicatedAllocation,
    4564  bool& prefersDedicatedAllocation) const;
    4565 
    4566  // Main allocation function.
    4567  VkResult AllocateMemory(
    4568  const VkMemoryRequirements& vkMemReq,
    4569  bool requiresDedicatedAllocation,
    4570  bool prefersDedicatedAllocation,
    4571  VkBuffer dedicatedBuffer,
    4572  VkImage dedicatedImage,
    4573  const VmaAllocationCreateInfo& createInfo,
    4574  VmaSuballocationType suballocType,
    4575  VmaAllocation* pAllocation);
    4576 
    4577  // Main deallocation function.
    4578  void FreeMemory(const VmaAllocation allocation);
    4579 
    4580  void CalculateStats(VmaStats* pStats);
    4581 
    4582 #if VMA_STATS_STRING_ENABLED
    4583  void PrintDetailedMap(class VmaJsonWriter& json);
    4584 #endif
    4585 
    4586  VkResult Defragment(
    4587  VmaAllocation* pAllocations,
    4588  size_t allocationCount,
    4589  VkBool32* pAllocationsChanged,
    4590  const VmaDefragmentationInfo* pDefragmentationInfo,
    4591  VmaDefragmentationStats* pDefragmentationStats);
    4592 
    4593  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    4594  bool TouchAllocation(VmaAllocation hAllocation);
    4595 
    4596  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    4597  void DestroyPool(VmaPool pool);
    4598  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
    4599 
    4600  void SetCurrentFrameIndex(uint32_t frameIndex);
    4601 
    4602  void MakePoolAllocationsLost(
    4603  VmaPool hPool,
    4604  size_t* pLostAllocationCount);
    4605 
    4606  void CreateLostAllocation(VmaAllocation* pAllocation);
    4607 
    4608  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    4609  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
    4610 
    4611  VkResult Map(VmaAllocation hAllocation, void** ppData);
    4612  void Unmap(VmaAllocation hAllocation);
    4613 
    4614  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
    4615  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
    4616 
    4617  void FlushOrInvalidateAllocation(
    4618  VmaAllocation hAllocation,
    4619  VkDeviceSize offset, VkDeviceSize size,
    4620  VMA_CACHE_OPERATION op);
    4621 
    4622 private:
    4623  VkDeviceSize m_PreferredLargeHeapBlockSize;
    4624 
    4625  VkPhysicalDevice m_PhysicalDevice;
    4626  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
    4627 
    4628  VMA_MUTEX m_PoolsMutex;
    4629  // Protected by m_PoolsMutex. Sorted by pointer value.
    4630  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    4631  uint32_t m_NextPoolId;
    4632 
    4633  VmaVulkanFunctions m_VulkanFunctions;
    4634 
    4635  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
    4636 
    4637  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
    4638 
    4639  VkResult AllocateMemoryOfType(
    4640  VkDeviceSize size,
    4641  VkDeviceSize alignment,
    4642  bool dedicatedAllocation,
    4643  VkBuffer dedicatedBuffer,
    4644  VkImage dedicatedImage,
    4645  const VmaAllocationCreateInfo& createInfo,
    4646  uint32_t memTypeIndex,
    4647  VmaSuballocationType suballocType,
    4648  VmaAllocation* pAllocation);
    4649 
    4650  // Allocates and registers new VkDeviceMemory specifically for single allocation.
    4651  VkResult AllocateDedicatedMemory(
    4652  VkDeviceSize size,
    4653  VmaSuballocationType suballocType,
    4654  uint32_t memTypeIndex,
    4655  bool map,
    4656  bool isUserDataString,
    4657  void* pUserData,
    4658  VkBuffer dedicatedBuffer,
    4659  VkImage dedicatedImage,
    4660  VmaAllocation* pAllocation);
    4661 
    4662  // Unregisters the given allocation from dedicated allocations and frees its VkDeviceMemory.
    4663  void FreeDedicatedMemory(VmaAllocation allocation);
    4664 };
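
#if 0 // Editor's sketch: the range rounding that flushing/invalidating
// non-coherent memory requires (see IsMemoryTypeNonCoherent and
// GetMemoryTypeMinAlignment above). vkFlushMappedMemoryRanges and
// vkInvalidateMappedMemoryRanges need offset and size aligned to
// nonCoherentAtomSize, so FlushOrInvalidateAllocation must expand the
// requested range. A minimal standalone sketch; names are hypothetical and
// the clamp to allocSize is a simplification.
#include <cstdint>

static uint64_t AlignDown(uint64_t value, uint64_t alignment) { return value / alignment * alignment; }
static uint64_t AlignUp(uint64_t value, uint64_t alignment) { return (value + alignment - 1) / alignment * alignment; }

// Expands [offset, offset + size) to atom boundaries, clamped to the allocation end.
static void ExpandRangeToNonCoherentAtoms(
    uint64_t atomSize, uint64_t allocSize, uint64_t& inoutOffset, uint64_t& inoutSize)
{
    const uint64_t rangeEnd = inoutOffset + inoutSize;       // e.g. 100 + 50 = 150
    inoutOffset = AlignDown(inoutOffset, atomSize);          // atom 64: 100 -> 64
    const uint64_t alignedEnd = AlignUp(rangeEnd, atomSize); // 150 -> 192
    inoutSize = (alignedEnd < allocSize ? alignedEnd : allocSize) - inoutOffset; // 192 - 64 = 128
}
#endif // Editor's sketch
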
    4665 
    4667 // Memory allocation #2 after VmaAllocator_T definition
    4668 
    4669 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
    4670 {
    4671  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
    4672 }
    4673 
    4674 static void VmaFree(VmaAllocator hAllocator, void* ptr)
    4675 {
    4676  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
    4677 }
    4678 
    4679 template<typename T>
    4680 static T* VmaAllocate(VmaAllocator hAllocator)
    4681 {
    4682  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
    4683 }
    4684 
    4685 template<typename T>
    4686 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
    4687 {
    4688  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
    4689 }
    4690 
    4691 template<typename T>
    4692 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    4693 {
    4694  if(ptr != VMA_NULL)
    4695  {
    4696  ptr->~T();
    4697  VmaFree(hAllocator, ptr);
    4698  }
    4699 }
    4700 
    4701 template<typename T>
    4702 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    4703 {
    4704  if(ptr != VMA_NULL)
    4705  {
    4706  for(size_t i = count; i--; )
    4707  ptr[i].~T();
    4708  VmaFree(hAllocator, ptr);
    4709  }
    4710 }
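
#if 0 // Editor's sketch: the allocate / placement-new / explicit-destructor
// pattern used by VmaAllocate, vma_delete and vma_delete_array above, shown
// standalone with malloc and free in place of the Vulkan allocation
// callbacks. Names are hypothetical.
#include <cstdlib>
#include <new>

template<typename T>
static T* RawNew()
{
    void* const ptr = std::malloc(sizeof(T));
    return (ptr != nullptr) ? new(ptr) T() : nullptr; // Construct in raw memory.
}

template<typename T>
static void RawDelete(T* ptr)
{
    if(ptr != nullptr)
    {
        ptr->~T();      // Run the destructor explicitly...
        std::free(ptr); // ...then release the raw memory.
    }
}
#endif // Editor's sketch
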
    4711 
    4713 // VmaStringBuilder
    4714 
    4715 #if VMA_STATS_STRING_ENABLED
    4716 
    4717 class VmaStringBuilder
    4718 {
    4719 public:
    4720  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    4721  size_t GetLength() const { return m_Data.size(); }
    4722  const char* GetData() const { return m_Data.data(); }
    4723 
    4724  void Add(char ch) { m_Data.push_back(ch); }
    4725  void Add(const char* pStr);
    4726  void AddNewLine() { Add('\n'); }
    4727  void AddNumber(uint32_t num);
    4728  void AddNumber(uint64_t num);
    4729  void AddPointer(const void* ptr);
    4730 
    4731 private:
    4732  VmaVector< char, VmaStlAllocator<char> > m_Data;
    4733 };
    4734 
    4735 void VmaStringBuilder::Add(const char* pStr)
    4736 {
    4737  const size_t strLen = strlen(pStr);
    4738  if(strLen > 0)
    4739  {
    4740  const size_t oldCount = m_Data.size();
    4741  m_Data.resize(oldCount + strLen);
    4742  memcpy(m_Data.data() + oldCount, pStr, strLen);
    4743  }
    4744 }
    4745 
    4746 void VmaStringBuilder::AddNumber(uint32_t num)
    4747 {
    4748  char buf[11]; // Max uint32_t has 10 decimal digits + a terminating null.
    4749  VmaUint32ToStr(buf, sizeof(buf), num);
    4750  Add(buf);
    4751 }
    4752 
    4753 void VmaStringBuilder::AddNumber(uint64_t num)
    4754 {
    4755  char buf[21]; // Max uint64_t has 20 decimal digits + a terminating null.
    4756  VmaUint64ToStr(buf, sizeof(buf), num);
    4757  Add(buf);
    4758 }
    4759 
    4760 void VmaStringBuilder::AddPointer(const void* ptr)
    4761 {
    4762  char buf[21];
    4763  VmaPtrToStr(buf, sizeof(buf), ptr);
    4764  Add(buf);
    4765 }
    4766 
    4767 #endif // #if VMA_STATS_STRING_ENABLED
    4768 
    4770 // VmaJsonWriter
    4771 
    4772 #if VMA_STATS_STRING_ENABLED
    4773 
    4774 class VmaJsonWriter
    4775 {
    4776  VMA_CLASS_NO_COPY(VmaJsonWriter)
    4777 public:
    4778  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    4779  ~VmaJsonWriter();
    4780 
    4781  void BeginObject(bool singleLine = false);
    4782  void EndObject();
    4783 
    4784  void BeginArray(bool singleLine = false);
    4785  void EndArray();
    4786 
    4787  void WriteString(const char* pStr);
    4788  void BeginString(const char* pStr = VMA_NULL);
    4789  void ContinueString(const char* pStr);
    4790  void ContinueString(uint32_t n);
    4791  void ContinueString(uint64_t n);
    4792  void ContinueString_Pointer(const void* ptr);
    4793  void EndString(const char* pStr = VMA_NULL);
    4794 
    4795  void WriteNumber(uint32_t n);
    4796  void WriteNumber(uint64_t n);
    4797  void WriteBool(bool b);
    4798  void WriteNull();
    4799 
    4800 private:
    4801  static const char* const INDENT;
    4802 
    4803  enum COLLECTION_TYPE
    4804  {
    4805  COLLECTION_TYPE_OBJECT,
    4806  COLLECTION_TYPE_ARRAY,
    4807  };
    4808  struct StackItem
    4809  {
    4810  COLLECTION_TYPE type;
    4811  uint32_t valueCount;
    4812  bool singleLineMode;
    4813  };
    4814 
    4815  VmaStringBuilder& m_SB;
    4816  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    4817  bool m_InsideString;
    4818 
    4819  void BeginValue(bool isString);
    4820  void WriteIndent(bool oneLess = false);
    4821 };
    4822 
    4823 const char* const VmaJsonWriter::INDENT = " ";
    4824 
    4825 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    4826  m_SB(sb),
    4827  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    4828  m_InsideString(false)
    4829 {
    4830 }
    4831 
    4832 VmaJsonWriter::~VmaJsonWriter()
    4833 {
    4834  VMA_ASSERT(!m_InsideString);
    4835  VMA_ASSERT(m_Stack.empty());
    4836 }
    4837 
    4838 void VmaJsonWriter::BeginObject(bool singleLine)
    4839 {
    4840  VMA_ASSERT(!m_InsideString);
    4841 
    4842  BeginValue(false);
    4843  m_SB.Add('{');
    4844 
    4845  StackItem item;
    4846  item.type = COLLECTION_TYPE_OBJECT;
    4847  item.valueCount = 0;
    4848  item.singleLineMode = singleLine;
    4849  m_Stack.push_back(item);
    4850 }
    4851 
    4852 void VmaJsonWriter::EndObject()
    4853 {
    4854  VMA_ASSERT(!m_InsideString);
    4855 
    4856  WriteIndent(true);
    4857  m_SB.Add('}');
    4858 
    4859  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    4860  m_Stack.pop_back();
    4861 }
    4862 
    4863 void VmaJsonWriter::BeginArray(bool singleLine)
    4864 {
    4865  VMA_ASSERT(!m_InsideString);
    4866 
    4867  BeginValue(false);
    4868  m_SB.Add('[');
    4869 
    4870  StackItem item;
    4871  item.type = COLLECTION_TYPE_ARRAY;
    4872  item.valueCount = 0;
    4873  item.singleLineMode = singleLine;
    4874  m_Stack.push_back(item);
    4875 }
    4876 
    4877 void VmaJsonWriter::EndArray()
    4878 {
    4879  VMA_ASSERT(!m_InsideString);
    4880 
    4881  WriteIndent(true);
    4882  m_SB.Add(']');
    4883 
    4884  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    4885  m_Stack.pop_back();
    4886 }
    4887 
    4888 void VmaJsonWriter::WriteString(const char* pStr)
    4889 {
    4890  BeginString(pStr);
    4891  EndString();
    4892 }
    4893 
    4894 void VmaJsonWriter::BeginString(const char* pStr)
    4895 {
    4896  VMA_ASSERT(!m_InsideString);
    4897 
    4898  BeginValue(true);
    4899  m_SB.Add('"');
    4900  m_InsideString = true;
    4901  if(pStr != VMA_NULL && pStr[0] != '\0')
    4902  {
    4903  ContinueString(pStr);
    4904  }
    4905 }
    4906 
    4907 void VmaJsonWriter::ContinueString(const char* pStr)
    4908 {
    4909  VMA_ASSERT(m_InsideString);
    4910 
    4911  const size_t strLen = strlen(pStr);
    4912  for(size_t i = 0; i < strLen; ++i)
    4913  {
    4914  char ch = pStr[i];
    4915  if(ch == '\\')
    4916  {
    4917  m_SB.Add("\\\\"); // A backslash must be escaped in JSON strings.
    4918  }
    4919  else if(ch == '"')
    4920  {
    4921  m_SB.Add("\\\"");
    4922  }
    4923  else if(ch >= 32)
    4924  {
    4925  m_SB.Add(ch);
    4926  }
    4927  else switch(ch)
    4928  {
    4929  case '\b':
    4930  m_SB.Add("\\b");
    4931  break;
    4932  case '\f':
    4933  m_SB.Add("\\f");
    4934  break;
    4935  case '\n':
    4936  m_SB.Add("\\n");
    4937  break;
    4938  case '\r':
    4939  m_SB.Add("\\r");
    4940  break;
    4941  case '\t':
    4942  m_SB.Add("\\t");
    4943  break;
    4944  default:
    4945  VMA_ASSERT(0 && "Character not currently supported.");
    4946  break;
    4947  }
    4948  }
    4949 }
    4950 
    4951 void VmaJsonWriter::ContinueString(uint32_t n)
    4952 {
    4953  VMA_ASSERT(m_InsideString);
    4954  m_SB.AddNumber(n);
    4955 }
    4956 
    4957 void VmaJsonWriter::ContinueString(uint64_t n)
    4958 {
    4959  VMA_ASSERT(m_InsideString);
    4960  m_SB.AddNumber(n);
    4961 }
    4962 
    4963 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
    4964 {
    4965  VMA_ASSERT(m_InsideString);
    4966  m_SB.AddPointer(ptr);
    4967 }
    4968 
    4969 void VmaJsonWriter::EndString(const char* pStr)
    4970 {
    4971  VMA_ASSERT(m_InsideString);
    4972  if(pStr != VMA_NULL && pStr[0] != '\0')
    4973  {
    4974  ContinueString(pStr);
    4975  }
    4976  m_SB.Add('"');
    4977  m_InsideString = false;
    4978 }
    4979 
    4980 void VmaJsonWriter::WriteNumber(uint32_t n)
    4981 {
    4982  VMA_ASSERT(!m_InsideString);
    4983  BeginValue(false);
    4984  m_SB.AddNumber(n);
    4985 }
    4986 
    4987 void VmaJsonWriter::WriteNumber(uint64_t n)
    4988 {
    4989  VMA_ASSERT(!m_InsideString);
    4990  BeginValue(false);
    4991  m_SB.AddNumber(n);
    4992 }
    4993 
    4994 void VmaJsonWriter::WriteBool(bool b)
    4995 {
    4996  VMA_ASSERT(!m_InsideString);
    4997  BeginValue(false);
    4998  m_SB.Add(b ? "true" : "false");
    4999 }
    5000 
    5001 void VmaJsonWriter::WriteNull()
    5002 {
    5003  VMA_ASSERT(!m_InsideString);
    5004  BeginValue(false);
    5005  m_SB.Add("null");
    5006 }
    5007 
    // Inside an object, names and values must alternate; BeginValue enforces this via the parity of valueCount (usage is sketched after this section).
    5008 void VmaJsonWriter::BeginValue(bool isString)
    5009 {
    5010  if(!m_Stack.empty())
    5011  {
    5012  StackItem& currItem = m_Stack.back();
    5013  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    5014  currItem.valueCount % 2 == 0)
    5015  {
    5016  VMA_ASSERT(isString);
    5017  }
    5018 
    5019  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    5020  currItem.valueCount % 2 != 0)
    5021  {
    5022  m_SB.Add(": ");
    5023  }
    5024  else if(currItem.valueCount > 0)
    5025  {
    5026  m_SB.Add(", ");
    5027  WriteIndent();
    5028  }
    5029  else
    5030  {
    5031  WriteIndent();
    5032  }
    5033  ++currItem.valueCount;
    5034  }
    5035 }
    5036 
    5037 void VmaJsonWriter::WriteIndent(bool oneLess)
    5038 {
    5039  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    5040  {
    5041  m_SB.AddNewLine();
    5042 
    5043  size_t count = m_Stack.size();
    5044  if(count > 0 && oneLess)
    5045  {
    5046  --count;
    5047  }
    5048  for(size_t i = 0; i < count; ++i)
    5049  {
    5050  m_SB.Add(INDENT);
    5051  }
    5052  }
    5053 }
    5054 
    5055 #endif // #if VMA_STATS_STRING_ENABLED
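
#if 0 // Editor's sketch: hypothetical usage of VmaJsonWriter, not code from
// the library. Inside an object, calls must alternate name/value;
// BeginValue() checks this via the parity of valueCount and emits ": " or
// ", " accordingly.
static void WriteExampleJson(VmaAllocator allocator, VmaStringBuilder& sb)
{
    VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
    json.BeginObject();
    json.WriteString("Blocks"); // Name: valueCount becomes odd.
    json.WriteNumber(3u);       // Value: ": " was emitted before it.
    json.EndObject();
    // sb now holds: '{' + newline + INDENT + "\"Blocks\": 3" + newline + '}'
}
#endif // Editor's sketch
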
    5056 
    5058 
    5059 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
    5060 {
    5061  if(IsUserDataString())
    5062  {
    5063  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
    5064 
    5065  FreeUserDataString(hAllocator);
    5066 
    5067  if(pUserData != VMA_NULL)
    5068  {
    5069  const char* const newStrSrc = (char*)pUserData;
    5070  const size_t newStrLen = strlen(newStrSrc);
    5071  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
    5072  memcpy(newStrDst, newStrSrc, newStrLen + 1);
    5073  m_pUserData = newStrDst;
    5074  }
    5075  }
    5076  else
    5077  {
    5078  m_pUserData = pUserData;
    5079  }
    5080 }
    5081 
    5082 void VmaAllocation_T::ChangeBlockAllocation(
    5083  VmaAllocator hAllocator,
    5084  VmaDeviceMemoryBlock* block,
    5085  VkDeviceSize offset)
    5086 {
    5087  VMA_ASSERT(block != VMA_NULL);
    5088  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    5089 
    5090  // Move mapping reference counter from old block to new block.
    5091  if(block != m_BlockAllocation.m_Block)
    5092  {
    5093  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
    5094  if(IsPersistentMap())
    5095  ++mapRefCount;
    5096  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
    5097  block->Map(hAllocator, mapRefCount, VMA_NULL);
    5098  }
    5099 
    5100  m_BlockAllocation.m_Block = block;
    5101  m_BlockAllocation.m_Offset = offset;
    5102 }
    5103 
    5104 VkDeviceSize VmaAllocation_T::GetOffset() const
    5105 {
    5106  switch(m_Type)
    5107  {
    5108  case ALLOCATION_TYPE_BLOCK:
    5109  return m_BlockAllocation.m_Offset;
    5110  case ALLOCATION_TYPE_DEDICATED:
    5111  return 0;
    5112  default:
    5113  VMA_ASSERT(0);
    5114  return 0;
    5115  }
    5116 }
    5117 
    5118 VkDeviceMemory VmaAllocation_T::GetMemory() const
    5119 {
    5120  switch(m_Type)
    5121  {
    5122  case ALLOCATION_TYPE_BLOCK:
    5123  return m_BlockAllocation.m_Block->GetDeviceMemory();
    5124  case ALLOCATION_TYPE_DEDICATED:
    5125  return m_DedicatedAllocation.m_hMemory;
    5126  default:
    5127  VMA_ASSERT(0);
    5128  return VK_NULL_HANDLE;
    5129  }
    5130 }
    5131 
    5132 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    5133 {
    5134  switch(m_Type)
    5135  {
    5136  case ALLOCATION_TYPE_BLOCK:
    5137  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
    5138  case ALLOCATION_TYPE_DEDICATED:
    5139  return m_DedicatedAllocation.m_MemoryTypeIndex;
    5140  default:
    5141  VMA_ASSERT(0);
    5142  return UINT32_MAX;
    5143  }
    5144 }
    5145 
    5146 void* VmaAllocation_T::GetMappedData() const
    5147 {
    5148  switch(m_Type)
    5149  {
    5150  case ALLOCATION_TYPE_BLOCK:
    5151  if(m_MapCount != 0)
    5152  {
    5153  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
    5154  VMA_ASSERT(pBlockData != VMA_NULL);
    5155  return (char*)pBlockData + m_BlockAllocation.m_Offset;
    5156  }
    5157  else
    5158  {
    5159  return VMA_NULL;
    5160  }
    5161  break;
    5162  case ALLOCATION_TYPE_DEDICATED:
    5163  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
    5164  return m_DedicatedAllocation.m_pMappedData;
    5165  default:
    5166  VMA_ASSERT(0);
    5167  return VMA_NULL;
    5168  }
    5169 }
    5170 
    5171 bool VmaAllocation_T::CanBecomeLost() const
    5172 {
    5173  switch(m_Type)
    5174  {
    5175  case ALLOCATION_TYPE_BLOCK:
    5176  return m_BlockAllocation.m_CanBecomeLost;
    5177  case ALLOCATION_TYPE_DEDICATED:
    5178  return false;
    5179  default:
    5180  VMA_ASSERT(0);
    5181  return false;
    5182  }
    5183 }
    5184 
    5185 VmaPool VmaAllocation_T::GetPool() const
    5186 {
    5187  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    5188  return m_BlockAllocation.m_hPool;
    5189 }
    5190 
    5191 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    5192 {
    5193  VMA_ASSERT(CanBecomeLost());
    5194 
    5195  /*
    5196  Warning: This is a carefully designed algorithm.
    5197  Do not modify unless you really know what you're doing :)
    5198  */
    5199  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    5200  for(;;)
    5201  {
    5202  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    5203  {
    5204  VMA_ASSERT(0);
    5205  return false;
    5206  }
    5207  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
    5208  {
    5209  return false;
    5210  }
    5211  else // Last use time earlier than current time.
    5212  {
    5213  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
    5214  {
    5215  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
    5216  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
    5217  return true;
    5218  }
    5219  }
    5220  }
    5221 }
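
#if 0 // Editor's sketch: the lock-free pattern MakeLost() uses above, shown
// standalone with std::atomic. compare_exchange_weak reloads the expected
// value on failure, so the loop retries until the allocation either turns out
// to be too recently used (or already lost) or is successfully stamped as
// lost. kFrameIndexLost is a hypothetical stand-in for VMA_FRAME_INDEX_LOST.
#include <atomic>
#include <cstdint>

static const uint32_t kFrameIndexLost = 0xFFFFFFFF;

static bool TryMakeLost(std::atomic<uint32_t>& lastUseFrameIndex,
    uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    uint32_t localLastUse = lastUseFrameIndex.load();
    for(;;)
    {
        if(localLastUse == kFrameIndexLost)
        {
            return false; // Already lost (the library asserts in this case).
        }
        if(localLastUse + frameInUseCount >= currentFrameIndex)
        {
            return false; // Still potentially in use by in-flight frames.
        }
        if(lastUseFrameIndex.compare_exchange_weak(localLastUse, kFrameIndexLost))
        {
            return true; // Successfully marked lost.
        }
        // On failure localLastUse was reloaded; loop and re-check.
    }
}
#endif // Editor's sketch
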
    5222 
    5223 #if VMA_STATS_STRING_ENABLED
    5224 
    5225 // Names corresponding to the values of enum VmaSuballocationType.
    5226 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    5227  "FREE",
    5228  "UNKNOWN",
    5229  "BUFFER",
    5230  "IMAGE_UNKNOWN",
    5231  "IMAGE_LINEAR",
    5232  "IMAGE_OPTIMAL",
    5233 };
    5234 
    5235 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
    5236 {
    5237  json.WriteString("Type");
    5238  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
    5239 
    5240  json.WriteString("Size");
    5241  json.WriteNumber(m_Size);
    5242 
    5243  if(m_pUserData != VMA_NULL)
    5244  {
    5245  json.WriteString("UserData");
    5246  if(IsUserDataString())
    5247  {
    5248  json.WriteString((const char*)m_pUserData);
    5249  }
    5250  else
    5251  {
    5252  json.BeginString();
    5253  json.ContinueString_Pointer(m_pUserData);
    5254  json.EndString();
    5255  }
    5256  }
    5257 
    5258  json.WriteString("CreationFrameIndex");
    5259  json.WriteNumber(m_CreationFrameIndex);
    5260 
    5261  json.WriteString("LastUseFrameIndex");
    5262  json.WriteNumber(GetLastUseFrameIndex());
    5263 
    5264  if(m_BufferImageUsage != 0)
    5265  {
    5266  json.WriteString("Usage");
    5267  json.WriteNumber(m_BufferImageUsage);
    5268  }
    5269 }
    5270 
    5271 #endif
    5272 
    5273 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
    5274 {
    5275  VMA_ASSERT(IsUserDataString());
    5276  if(m_pUserData != VMA_NULL)
    5277  {
    5278  char* const oldStr = (char*)m_pUserData;
    5279  const size_t oldStrLen = strlen(oldStr);
    5280  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
    5281  m_pUserData = VMA_NULL;
    5282  }
    5283 }
    5284 
    5285 void VmaAllocation_T::BlockAllocMap()
    5286 {
    5287  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    5288 
    5289  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    5290  {
    5291  ++m_MapCount;
    5292  }
    5293  else
    5294  {
    5295  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    5296  }
    5297 }
    5298 
    5299 void VmaAllocation_T::BlockAllocUnmap()
    5300 {
    5301  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    5302 
    5303  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    5304  {
    5305  --m_MapCount;
    5306  }
    5307  else
    5308  {
    5309  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    5310  }
    5311 }
    5312 
    5313 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
    5314 {
    5315  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    5316 
    5317  if(m_MapCount != 0)
    5318  {
    5319  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    5320  {
    5321  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
    5322  *ppData = m_DedicatedAllocation.m_pMappedData;
    5323  ++m_MapCount;
    5324  return VK_SUCCESS;
    5325  }
    5326  else
    5327  {
    5328  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
    5329  return VK_ERROR_MEMORY_MAP_FAILED;
    5330  }
    5331  }
    5332  else
    5333  {
    5334  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    5335  hAllocator->m_hDevice,
    5336  m_DedicatedAllocation.m_hMemory,
    5337  0, // offset
    5338  VK_WHOLE_SIZE,
    5339  0, // flags
    5340  ppData);
    5341  if(result == VK_SUCCESS)
    5342  {
    5343  m_DedicatedAllocation.m_pMappedData = *ppData;
    5344  m_MapCount = 1;
    5345  }
    5346  return result;
    5347  }
    5348 }
    5349 
    5350 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
    5351 {
    5352  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    5353 
    5354  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    5355  {
    5356  --m_MapCount;
    5357  if(m_MapCount == 0)
    5358  {
    5359  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
    5360  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
    5361  hAllocator->m_hDevice,
    5362  m_DedicatedAllocation.m_hMemory);
    5363  }
    5364  }
    5365  else
    5366  {
    5367  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    5368  }
    5369 }
    5370 
    5371 #if VMA_STATS_STRING_ENABLED
    5372 
    5373 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
    5374 {
    5375  json.BeginObject();
    5376 
    5377  json.WriteString("Blocks");
    5378  json.WriteNumber(stat.blockCount);
    5379 
    5380  json.WriteString("Allocations");
    5381  json.WriteNumber(stat.allocationCount);
    5382 
    5383  json.WriteString("UnusedRanges");
    5384  json.WriteNumber(stat.unusedRangeCount);
    5385 
    5386  json.WriteString("UsedBytes");
    5387  json.WriteNumber(stat.usedBytes);
    5388 
    5389  json.WriteString("UnusedBytes");
    5390  json.WriteNumber(stat.unusedBytes);
    5391 
    5392  if(stat.allocationCount > 1)
    5393  {
    5394  json.WriteString("AllocationSize");
    5395  json.BeginObject(true);
    5396  json.WriteString("Min");
    5397  json.WriteNumber(stat.allocationSizeMin);
    5398  json.WriteString("Avg");
    5399  json.WriteNumber(stat.allocationSizeAvg);
    5400  json.WriteString("Max");
    5401  json.WriteNumber(stat.allocationSizeMax);
    5402  json.EndObject();
    5403  }
    5404 
    5405  if(stat.unusedRangeCount > 1)
    5406  {
    5407  json.WriteString("UnusedRangeSize");
    5408  json.BeginObject(true);
    5409  json.WriteString("Min");
    5410  json.WriteNumber(stat.unusedRangeSizeMin);
    5411  json.WriteString("Avg");
    5412  json.WriteNumber(stat.unusedRangeSizeAvg);
    5413  json.WriteString("Max");
    5414  json.WriteNumber(stat.unusedRangeSizeMax);
    5415  json.EndObject();
    5416  }
    5417 
    5418  json.EndObject();
    5419 }
    5420 
    5421 #endif // #if VMA_STATS_STRING_ENABLED
    5422 
    5423 struct VmaSuballocationItemSizeLess
    5424 {
    5425  bool operator()(
    5426  const VmaSuballocationList::iterator lhs,
    5427  const VmaSuballocationList::iterator rhs) const
    5428  {
    5429  return lhs->size < rhs->size;
    5430  }
    5431  bool operator()(
    5432  const VmaSuballocationList::iterator lhs,
    5433  VkDeviceSize rhsSize) const
    5434  {
    5435  return lhs->size < rhsSize;
    5436  }
    5437 };
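// The second operator() overload is what lets VmaBinaryFindFirstNotLess()
// compare a stored iterator directly against a bare VkDeviceSize key, so a
// lookup needs no temporary suballocation object. Illustrative call, matching
// the usage in CreateAllocationRequest() below:
//
//   VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
//       m_FreeSuballocationsBySize.data(),
//       m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
//       allocSize, // passed as plain VkDeviceSize
//       VmaSuballocationItemSizeLess());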
    5438 
    5439 ////////////////////////////////////////////////////////////////////////////////
    5440 // class VmaBlockMetadata
    5442 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    5443  m_Size(0),
    5444  m_FreeCount(0),
    5445  m_SumFreeSize(0),
    5446  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    5447  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
    5448 {
    5449 }
    5450 
    5451 VmaBlockMetadata::~VmaBlockMetadata()
    5452 {
    5453 }
    5454 
    5455 void VmaBlockMetadata::Init(VkDeviceSize size)
    5456 {
    5457  m_Size = size;
    5458  m_FreeCount = 1;
    5459  m_SumFreeSize = size;
    5460 
    5461  VmaSuballocation suballoc = {};
    5462  suballoc.offset = 0;
    5463  suballoc.size = size;
    5464  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    5465  suballoc.hAllocation = VK_NULL_HANDLE;
    5466 
    5467  m_Suballocations.push_back(suballoc);
    5468  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    5469  --suballocItem;
    5470  m_FreeSuballocationsBySize.push_back(suballocItem);
    5471 }
    5472 
    5473 bool VmaBlockMetadata::Validate() const
    5474 {
    5475  if(m_Suballocations.empty())
    5476  {
    5477  return false;
    5478  }
    5479 
    5480  // Expected offset of new suballocation as calculated from previous ones.
    5481  VkDeviceSize calculatedOffset = 0;
    5482  // Expected number of free suballocations as calculated from traversing their list.
    5483  uint32_t calculatedFreeCount = 0;
    5484  // Expected sum size of free suballocations as calculated from traversing their list.
    5485  VkDeviceSize calculatedSumFreeSize = 0;
    5486  // Expected number of free suballocations that should be registered in
    5487  // m_FreeSuballocationsBySize calculated from traversing their list.
    5488  size_t freeSuballocationsToRegister = 0;
    5489  // True if previous visited suballocation was free.
    5490  bool prevFree = false;
    5491 
    5492  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    5493  suballocItem != m_Suballocations.cend();
    5494  ++suballocItem)
    5495  {
    5496  const VmaSuballocation& subAlloc = *suballocItem;
    5497 
    5498  // Actual offset of this suballocation doesn't match expected one.
    5499  if(subAlloc.offset != calculatedOffset)
    5500  {
    5501  return false;
    5502  }
    5503 
    5504  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
    5505  // Two adjacent free suballocations are invalid. They should be merged.
    5506  if(prevFree && currFree)
    5507  {
    5508  return false;
    5509  }
    5510 
    5511  if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
    5512  {
    5513  return false;
    5514  }
    5515 
    5516  if(currFree)
    5517  {
    5518  calculatedSumFreeSize += subAlloc.size;
    5519  ++calculatedFreeCount;
    5520  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    5521  {
    5522  ++freeSuballocationsToRegister;
    5523  }
    5524  }
    5525  else
    5526  {
    5527  if(subAlloc.hAllocation->GetOffset() != subAlloc.offset)
    5528  {
    5529  return false;
    5530  }
    5531  if(subAlloc.hAllocation->GetSize() != subAlloc.size)
    5532  {
    5533  return false;
    5534  }
    5535  }
    5536 
    5537  calculatedOffset += subAlloc.size;
    5538  prevFree = currFree;
    5539  }
    5540 
    5541  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    5542  // match expected one.
    5543  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
    5544  {
    5545  return false;
    5546  }
    5547 
    5548  VkDeviceSize lastSize = 0;
    5549  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    5550  {
    5551  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
    5552 
    5553  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
    5554  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
    5555  {
    5556  return false;
    5557  }
    5558  // They must be sorted by size ascending.
    5559  if(suballocItem->size < lastSize)
    5560  {
    5561  return false;
    5562  }
    5563 
    5564  lastSize = suballocItem->size;
    5565  }
    5566 
    5567  // Check if totals match calculated values.
    5568  if(!ValidateFreeSuballocationList() ||
    5569  (calculatedOffset != m_Size) ||
    5570  (calculatedSumFreeSize != m_SumFreeSize) ||
    5571  (calculatedFreeCount != m_FreeCount))
    5572  {
    5573  return false;
    5574  }
    5575 
    5576  return true;
    5577 }
    5578 
    5579 VkDeviceSize VmaBlockMetadata::GetUnusedRangeSizeMax() const
    5580 {
    5581  if(!m_FreeSuballocationsBySize.empty())
    5582  {
    5583  return m_FreeSuballocationsBySize.back()->size;
    5584  }
    5585  else
    5586  {
    5587  return 0;
    5588  }
    5589 }
    5590 
    5591 bool VmaBlockMetadata::IsEmpty() const
    5592 {
    5593  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
    5594 }
    5595 
    5596 void VmaBlockMetadata::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    5597 {
    5598  outInfo.blockCount = 1;
    5599 
    5600  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    5601  outInfo.allocationCount = rangeCount - m_FreeCount;
    5602  outInfo.unusedRangeCount = m_FreeCount;
    5603 
    5604  outInfo.unusedBytes = m_SumFreeSize;
    5605  outInfo.usedBytes = m_Size - outInfo.unusedBytes;
    5606 
    5607  outInfo.allocationSizeMin = UINT64_MAX;
    5608  outInfo.allocationSizeMax = 0;
    5609  outInfo.unusedRangeSizeMin = UINT64_MAX;
    5610  outInfo.unusedRangeSizeMax = 0;
    5611 
    5612  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    5613  suballocItem != m_Suballocations.cend();
    5614  ++suballocItem)
    5615  {
    5616  const VmaSuballocation& suballoc = *suballocItem;
    5617  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    5618  {
    5619  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    5620  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    5621  }
    5622  else
    5623  {
    5624  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
    5625  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
    5626  }
    5627  }
    5628 }
    5629 
    5630 void VmaBlockMetadata::AddPoolStats(VmaPoolStats& inoutStats) const
    5631 {
    5632  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    5633 
    5634  inoutStats.size += m_Size;
    5635  inoutStats.unusedSize += m_SumFreeSize;
    5636  inoutStats.allocationCount += rangeCount - m_FreeCount;
    5637  inoutStats.unusedRangeCount += m_FreeCount;
    5638  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    5639 }
    5640 
    5641 #if VMA_STATS_STRING_ENABLED
    5642 
    5643 void VmaBlockMetadata::PrintDetailedMap(class VmaJsonWriter& json) const
    5644 {
    5645  json.BeginObject();
    5646 
    5647  json.WriteString("TotalBytes");
    5648  json.WriteNumber(m_Size);
    5649 
    5650  json.WriteString("UnusedBytes");
    5651  json.WriteNumber(m_SumFreeSize);
    5652 
    5653  json.WriteString("Allocations");
    5654  json.WriteNumber((uint64_t)m_Suballocations.size() - m_FreeCount);
    5655 
    5656  json.WriteString("UnusedRanges");
    5657  json.WriteNumber(m_FreeCount);
    5658 
    5659  json.WriteString("Suballocations");
    5660  json.BeginArray();
    5661  size_t i = 0;
    5662  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    5663  suballocItem != m_Suballocations.cend();
    5664  ++suballocItem, ++i)
    5665  {
    5666  json.BeginObject(true);
    5667 
    5668  json.WriteString("Offset");
    5669  json.WriteNumber(suballocItem->offset);
    5670 
    5671  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    5672  {
    5673  json.WriteString("Type");
    5674  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
    5675 
    5676  json.WriteString("Size");
    5677  json.WriteNumber(suballocItem->size);
    5678  }
    5679  else
    5680  {
    5681  suballocItem->hAllocation->PrintParameters(json);
    5682  }
    5683 
    5684  json.EndObject();
    5685  }
    5686  json.EndArray();
    5687 
    5688  json.EndObject();
    5689 }
    5690 
    5691 #endif // #if VMA_STATS_STRING_ENABLED
    5692 
    5693 /*
    5694 How many suitable free suballocations to analyze before choosing the best one.
    5695 - Set to 1 to use First-Fit algorithm - first suitable free suballocation will
    5696  be chosen.
    5697 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
    5698  suballocations will be analyzed and the best one will be chosen.
    5699 - Any other value is also acceptable.
    5700 */
    5701 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
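// In practice this trade-off shows up below as the VMA_BEST_FIT branch of
// CreateAllocationRequest(): when enabled, the smallest free range that still
// fits is chosen (Best-Fit); when disabled, the search walks from the biggest
// free range downward (a Worst-Fit flavor).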
    5702 
    5703 void VmaBlockMetadata::CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest)
    5704 {
    5705  VMA_ASSERT(IsEmpty());
    5706  pAllocationRequest->offset = 0;
    5707  pAllocationRequest->sumFreeSize = m_SumFreeSize;
    5708  pAllocationRequest->sumItemSize = 0;
    5709  pAllocationRequest->item = m_Suballocations.begin();
    5710  pAllocationRequest->itemsToMakeLostCount = 0;
    5711 }
    5712 
    5713 bool VmaBlockMetadata::CreateAllocationRequest(
    5714  uint32_t currentFrameIndex,
    5715  uint32_t frameInUseCount,
    5716  VkDeviceSize bufferImageGranularity,
    5717  VkDeviceSize allocSize,
    5718  VkDeviceSize allocAlignment,
    5719  VmaSuballocationType allocType,
    5720  bool canMakeOtherLost,
    5721  VmaAllocationRequest* pAllocationRequest)
    5722 {
    5723  VMA_ASSERT(allocSize > 0);
    5724  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    5725  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    5726  VMA_HEAVY_ASSERT(Validate());
    5727 
    5728  // There is not enough total free space in this block to fulfill the request: Early return.
    5729  if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
    5730  {
    5731  return false;
    5732  }
    5733 
    5734  // New algorithm, efficiently searching freeSuballocationsBySize.
    5735  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    5736  if(freeSuballocCount > 0)
    5737  {
    5738  if(VMA_BEST_FIT)
    5739  {
    5740  // Find first free suballocation with size not less than allocSize.
    5741  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    5742  m_FreeSuballocationsBySize.data(),
    5743  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    5744  allocSize,
    5745  VmaSuballocationItemSizeLess());
    5746  size_t index = it - m_FreeSuballocationsBySize.data();
    5747  for(; index < freeSuballocCount; ++index)
    5748  {
    5749  if(CheckAllocation(
    5750  currentFrameIndex,
    5751  frameInUseCount,
    5752  bufferImageGranularity,
    5753  allocSize,
    5754  allocAlignment,
    5755  allocType,
    5756  m_FreeSuballocationsBySize[index],
    5757  false, // canMakeOtherLost
    5758  &pAllocationRequest->offset,
    5759  &pAllocationRequest->itemsToMakeLostCount,
    5760  &pAllocationRequest->sumFreeSize,
    5761  &pAllocationRequest->sumItemSize))
    5762  {
    5763  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    5764  return true;
    5765  }
    5766  }
    5767  }
    5768  else
    5769  {
    5770  // Search starting from the biggest suballocations.
    5771  for(size_t index = freeSuballocCount; index--; )
    5772  {
    5773  if(CheckAllocation(
    5774  currentFrameIndex,
    5775  frameInUseCount,
    5776  bufferImageGranularity,
    5777  allocSize,
    5778  allocAlignment,
    5779  allocType,
    5780  m_FreeSuballocationsBySize[index],
    5781  false, // canMakeOtherLost
    5782  &pAllocationRequest->offset,
    5783  &pAllocationRequest->itemsToMakeLostCount,
    5784  &pAllocationRequest->sumFreeSize,
    5785  &pAllocationRequest->sumItemSize))
    5786  {
    5787  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    5788  return true;
    5789  }
    5790  }
    5791  }
    5792  }
    5793 
    5794  if(canMakeOtherLost)
    5795  {
    5796  // Brute-force algorithm. TODO: Come up with something better.
    5797 
    5798  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
    5799  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
    5800 
    5801  VmaAllocationRequest tmpAllocRequest = {};
    5802  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    5803  suballocIt != m_Suballocations.end();
    5804  ++suballocIt)
    5805  {
    5806  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    5807  suballocIt->hAllocation->CanBecomeLost())
    5808  {
    5809  if(CheckAllocation(
    5810  currentFrameIndex,
    5811  frameInUseCount,
    5812  bufferImageGranularity,
    5813  allocSize,
    5814  allocAlignment,
    5815  allocType,
    5816  suballocIt,
    5817  canMakeOtherLost,
    5818  &tmpAllocRequest.offset,
    5819  &tmpAllocRequest.itemsToMakeLostCount,
    5820  &tmpAllocRequest.sumFreeSize,
    5821  &tmpAllocRequest.sumItemSize))
    5822  {
    5823  tmpAllocRequest.item = suballocIt;
    5824 
    5825  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
    5826  {
    5827  *pAllocationRequest = tmpAllocRequest;
    5828  }
    5829  }
    5830  }
    5831  }
    5832 
    5833  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
    5834  {
    5835  return true;
    5836  }
    5837  }
    5838 
    5839  return false;
    5840 }
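// Note on the brute-force path above: CalcCost() is assumed here to weigh a
// candidate by the total size of allocations that would have to be made lost
// (sumItemSize), so a request with cost 0 consumes only free space and the
// bestRequestCost == 0 early-out in VmaBlockVector::Allocate() can fire.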
    5841 
    5842 bool VmaBlockMetadata::MakeRequestedAllocationsLost(
    5843  uint32_t currentFrameIndex,
    5844  uint32_t frameInUseCount,
    5845  VmaAllocationRequest* pAllocationRequest)
    5846 {
    5847  while(pAllocationRequest->itemsToMakeLostCount > 0)
    5848  {
    5849  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
    5850  {
    5851  ++pAllocationRequest->item;
    5852  }
    5853  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    5854  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
    5855  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
    5856  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    5857  {
    5858  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
    5859  --pAllocationRequest->itemsToMakeLostCount;
    5860  }
    5861  else
    5862  {
    5863  return false;
    5864  }
    5865  }
    5866 
    5867  VMA_HEAVY_ASSERT(Validate());
    5868  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    5869  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
    5870 
    5871  return true;
    5872 }
    5873 
    5874 uint32_t VmaBlockMetadata::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    5875 {
    5876  uint32_t lostAllocationCount = 0;
    5877  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    5878  it != m_Suballocations.end();
    5879  ++it)
    5880  {
    5881  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
    5882  it->hAllocation->CanBecomeLost() &&
    5883  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    5884  {
    5885  it = FreeSuballocation(it);
    5886  ++lostAllocationCount;
    5887  }
    5888  }
    5889  return lostAllocationCount;
    5890 }
    5891 
    5892 void VmaBlockMetadata::Alloc(
    5893  const VmaAllocationRequest& request,
    5894  VmaSuballocationType type,
    5895  VkDeviceSize allocSize,
    5896  VmaAllocation hAllocation)
    5897 {
    5898  VMA_ASSERT(request.item != m_Suballocations.end());
    5899  VmaSuballocation& suballoc = *request.item;
    5900  // Given suballocation is a free block.
    5901  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    5902  // Given offset is inside this suballocation.
    5903  VMA_ASSERT(request.offset >= suballoc.offset);
    5904  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    5905  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    5906  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
    5907 
    5908  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    5909  // it to become used.
    5910  UnregisterFreeSuballocation(request.item);
    5911 
    5912  suballoc.offset = request.offset;
    5913  suballoc.size = allocSize;
    5914  suballoc.type = type;
    5915  suballoc.hAllocation = hAllocation;
    5916 
    5917  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    5918  if(paddingEnd)
    5919  {
    5920  VmaSuballocation paddingSuballoc = {};
    5921  paddingSuballoc.offset = request.offset + allocSize;
    5922  paddingSuballoc.size = paddingEnd;
    5923  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    5924  VmaSuballocationList::iterator next = request.item;
    5925  ++next;
    5926  const VmaSuballocationList::iterator paddingEndItem =
    5927  m_Suballocations.insert(next, paddingSuballoc);
    5928  RegisterFreeSuballocation(paddingEndItem);
    5929  }
    5930 
    5931  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    5932  if(paddingBegin)
    5933  {
    5934  VmaSuballocation paddingSuballoc = {};
    5935  paddingSuballoc.offset = request.offset - paddingBegin;
    5936  paddingSuballoc.size = paddingBegin;
    5937  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    5938  const VmaSuballocationList::iterator paddingBeginItem =
    5939  m_Suballocations.insert(request.item, paddingSuballoc);
    5940  RegisterFreeSuballocation(paddingBeginItem);
    5941  }
    5942 
    5943  // Update totals.
    5944  m_FreeCount = m_FreeCount - 1;
    5945  if(paddingBegin > 0)
    5946  {
    5947  ++m_FreeCount;
    5948  }
    5949  if(paddingEnd > 0)
    5950  {
    5951  ++m_FreeCount;
    5952  }
    5953  m_SumFreeSize -= allocSize;
    5954 }
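// Visualizing the split performed above on the chosen free range:
//
//   before: |<------------------- FREE ------------------->|
//   after:  |<-paddingBegin->|<-- allocSize -->|<-paddingEnd->|
//
// Non-zero paddings become new FREE suballocations, which is why m_FreeCount
// is first decremented for the consumed range and then re-incremented once
// per surviving padding.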
    5955 
    5956 void VmaBlockMetadata::Free(const VmaAllocation allocation)
    5957 {
    5958  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    5959  suballocItem != m_Suballocations.end();
    5960  ++suballocItem)
    5961  {
    5962  VmaSuballocation& suballoc = *suballocItem;
    5963  if(suballoc.hAllocation == allocation)
    5964  {
    5965  FreeSuballocation(suballocItem);
    5966  VMA_HEAVY_ASSERT(Validate());
    5967  return;
    5968  }
    5969  }
    5970  VMA_ASSERT(0 && "Not found!");
    5971 }
    5972 
    5973 void VmaBlockMetadata::FreeAtOffset(VkDeviceSize offset)
    5974 {
    5975  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    5976  suballocItem != m_Suballocations.end();
    5977  ++suballocItem)
    5978  {
    5979  VmaSuballocation& suballoc = *suballocItem;
    5980  if(suballoc.offset == offset)
    5981  {
    5982  FreeSuballocation(suballocItem);
    5983  return;
    5984  }
    5985  }
    5986  VMA_ASSERT(0 && "Not found!");
    5987 }
    5988 
    5989 bool VmaBlockMetadata::ValidateFreeSuballocationList() const
    5990 {
    5991  VkDeviceSize lastSize = 0;
    5992  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    5993  {
    5994  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    5995 
    5996  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    5997  {
    5998  VMA_ASSERT(0);
    5999  return false;
    6000  }
    6001  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    6002  {
    6003  VMA_ASSERT(0);
    6004  return false;
    6005  }
    6006  if(it->size < lastSize)
    6007  {
    6008  VMA_ASSERT(0);
    6009  return false;
    6010  }
    6011 
    6012  lastSize = it->size;
    6013  }
    6014  return true;
    6015 }
    6016 
    6017 bool VmaBlockMetadata::CheckAllocation(
    6018  uint32_t currentFrameIndex,
    6019  uint32_t frameInUseCount,
    6020  VkDeviceSize bufferImageGranularity,
    6021  VkDeviceSize allocSize,
    6022  VkDeviceSize allocAlignment,
    6023  VmaSuballocationType allocType,
    6024  VmaSuballocationList::const_iterator suballocItem,
    6025  bool canMakeOtherLost,
    6026  VkDeviceSize* pOffset,
    6027  size_t* itemsToMakeLostCount,
    6028  VkDeviceSize* pSumFreeSize,
    6029  VkDeviceSize* pSumItemSize) const
    6030 {
    6031  VMA_ASSERT(allocSize > 0);
    6032  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    6033  VMA_ASSERT(suballocItem != m_Suballocations.cend());
    6034  VMA_ASSERT(pOffset != VMA_NULL);
    6035 
    6036  *itemsToMakeLostCount = 0;
    6037  *pSumFreeSize = 0;
    6038  *pSumItemSize = 0;
    6039 
    6040  if(canMakeOtherLost)
    6041  {
    6042  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    6043  {
    6044  *pSumFreeSize = suballocItem->size;
    6045  }
    6046  else
    6047  {
    6048  if(suballocItem->hAllocation->CanBecomeLost() &&
    6049  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    6050  {
    6051  ++*itemsToMakeLostCount;
    6052  *pSumItemSize = suballocItem->size;
    6053  }
    6054  else
    6055  {
    6056  return false;
    6057  }
    6058  }
    6059 
    6060  // Remaining size is too small for this request: Early return.
    6061  if(m_Size - suballocItem->offset < allocSize)
    6062  {
    6063  return false;
    6064  }
    6065 
    6066  // Start from offset equal to beginning of this suballocation.
    6067  *pOffset = suballocItem->offset;
    6068 
    6069  // Apply VMA_DEBUG_MARGIN at the beginning.
    6070  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
    6071  {
    6072  *pOffset += VMA_DEBUG_MARGIN;
    6073  }
    6074 
    6075  // Apply alignment.
    6076  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
    6077 
    6078  // Check previous suballocations for BufferImageGranularity conflicts.
    6079  // Make bigger alignment if necessary.
    6080  if(bufferImageGranularity > 1)
    6081  {
    6082  bool bufferImageGranularityConflict = false;
    6083  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
    6084  while(prevSuballocItem != m_Suballocations.cbegin())
    6085  {
    6086  --prevSuballocItem;
    6087  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
    6088  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
    6089  {
    6090  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    6091  {
    6092  bufferImageGranularityConflict = true;
    6093  break;
    6094  }
    6095  }
    6096  else
    6097  // Already on previous page.
    6098  break;
    6099  }
    6100  if(bufferImageGranularityConflict)
    6101  {
    6102  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
    6103  }
    6104  }
    6105 
    6106  // Now that we have final *pOffset, check if we are past suballocItem.
    6107  // If yes, return false - this function should be called for another suballocItem as starting point.
    6108  if(*pOffset >= suballocItem->offset + suballocItem->size)
    6109  {
    6110  return false;
    6111  }
    6112 
    6113  // Calculate padding at the beginning based on current offset.
    6114  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
    6115 
    6116  // Calculate required margin at the end if this is not last suballocation.
    6117  VmaSuballocationList::const_iterator next = suballocItem;
    6118  ++next;
    6119  const VkDeviceSize requiredEndMargin =
    6120  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
    6121 
    6122  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
    6123  // Another early return check.
    6124  if(suballocItem->offset + totalSize > m_Size)
    6125  {
    6126  return false;
    6127  }
    6128 
    6129  // Advance lastSuballocItem until desired size is reached.
    6130  // Update itemsToMakeLostCount.
    6131  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
    6132  if(totalSize > suballocItem->size)
    6133  {
    6134  VkDeviceSize remainingSize = totalSize - suballocItem->size;
    6135  while(remainingSize > 0)
    6136  {
    6137  ++lastSuballocItem;
    6138  if(lastSuballocItem == m_Suballocations.cend())
    6139  {
    6140  return false;
    6141  }
    6142  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    6143  {
    6144  *pSumFreeSize += lastSuballocItem->size;
    6145  }
    6146  else
    6147  {
    6148  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
    6149  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
    6150  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    6151  {
    6152  ++*itemsToMakeLostCount;
    6153  *pSumItemSize += lastSuballocItem->size;
    6154  }
    6155  else
    6156  {
    6157  return false;
    6158  }
    6159  }
    6160  remainingSize = (lastSuballocItem->size < remainingSize) ?
    6161  remainingSize - lastSuballocItem->size : 0;
    6162  }
    6163  }
    6164 
    6165  // Check next suballocations for BufferImageGranularity conflicts.
    6166  // If conflict exists, we must mark more allocations lost or fail.
    6167  if(bufferImageGranularity > 1)
    6168  {
    6169  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
    6170  ++nextSuballocItem;
    6171  while(nextSuballocItem != m_Suballocations.cend())
    6172  {
    6173  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
    6174  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    6175  {
    6176  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    6177  {
    6178  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
    6179  if(nextSuballoc.hAllocation->CanBecomeLost() &&
    6180  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    6181  {
    6182  ++*itemsToMakeLostCount;
    6183  }
    6184  else
    6185  {
    6186  return false;
    6187  }
    6188  }
    6189  }
    6190  else
    6191  {
    6192  // Already on next page.
    6193  break;
    6194  }
    6195  ++nextSuballocItem;
    6196  }
    6197  }
    6198  }
    6199  else
    6200  {
    6201  const VmaSuballocation& suballoc = *suballocItem;
    6202  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    6203 
    6204  *pSumFreeSize = suballoc.size;
    6205 
    6206  // Size of this suballocation is too small for this request: Early return.
    6207  if(suballoc.size < allocSize)
    6208  {
    6209  return false;
    6210  }
    6211 
    6212  // Start from offset equal to beginning of this suballocation.
    6213  *pOffset = suballoc.offset;
    6214 
    6215  // Apply VMA_DEBUG_MARGIN at the beginning.
    6216  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
    6217  {
    6218  *pOffset += VMA_DEBUG_MARGIN;
    6219  }
    6220 
    6221  // Apply alignment.
    6222  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
    6223 
    6224  // Check previous suballocations for BufferImageGranularity conflicts.
    6225  // Make bigger alignment if necessary.
    6226  if(bufferImageGranularity > 1)
    6227  {
    6228  bool bufferImageGranularityConflict = false;
    6229  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
    6230  while(prevSuballocItem != m_Suballocations.cbegin())
    6231  {
    6232  --prevSuballocItem;
    6233  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
    6234  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
    6235  {
    6236  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    6237  {
    6238  bufferImageGranularityConflict = true;
    6239  break;
    6240  }
    6241  }
    6242  else
    6243  // Already on previous page.
    6244  break;
    6245  }
    6246  if(bufferImageGranularityConflict)
    6247  {
    6248  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
    6249  }
    6250  }
    6251 
    6252  // Calculate padding at the beginning based on current offset.
    6253  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
    6254 
    6255  // Calculate required margin at the end if this is not last suballocation.
    6256  VmaSuballocationList::const_iterator next = suballocItem;
    6257  ++next;
    6258  const VkDeviceSize requiredEndMargin =
    6259  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
    6260 
    6261  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
    6262  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
    6263  {
    6264  return false;
    6265  }
    6266 
    6267  // Check next suballocations for BufferImageGranularity conflicts.
    6268  // If conflict exists, allocation cannot be made here.
    6269  if(bufferImageGranularity > 1)
    6270  {
    6271  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
    6272  ++nextSuballocItem;
    6273  while(nextSuballocItem != m_Suballocations.cend())
    6274  {
    6275  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
    6276  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    6277  {
    6278  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    6279  {
    6280  return false;
    6281  }
    6282  }
    6283  else
    6284  {
    6285  // Already on next page.
    6286  break;
    6287  }
    6288  ++nextSuballocItem;
    6289  }
    6290  }
    6291  }
    6292 
    6293  // All tests passed: Success. pOffset is already filled.
    6294  return true;
    6295 }
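// BufferImageGranularity illustration for the checks above: with granularity
// 4096, a linear buffer ending at offset 4999 and an optimal-tiled image
// placed at offset 6000 share the page [4096, 8192) and therefore conflict;
// aligning *pOffset up to the next granularity boundary (8192) resolves it.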
    6296 
    6297 void VmaBlockMetadata::MergeFreeWithNext(VmaSuballocationList::iterator item)
    6298 {
    6299  VMA_ASSERT(item != m_Suballocations.end());
    6300  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    6301 
    6302  VmaSuballocationList::iterator nextItem = item;
    6303  ++nextItem;
    6304  VMA_ASSERT(nextItem != m_Suballocations.end());
    6305  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    6306 
    6307  item->size += nextItem->size;
    6308  --m_FreeCount;
    6309  m_Suballocations.erase(nextItem);
    6310 }
    6311 
    6312 VmaSuballocationList::iterator VmaBlockMetadata::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
    6313 {
    6314  // Change this suballocation to be marked as free.
    6315  VmaSuballocation& suballoc = *suballocItem;
    6316  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    6317  suballoc.hAllocation = VK_NULL_HANDLE;
    6318 
    6319  // Update totals.
    6320  ++m_FreeCount;
    6321  m_SumFreeSize += suballoc.size;
    6322 
    6323  // Merge with previous and/or next suballocation if it's also free.
    6324  bool mergeWithNext = false;
    6325  bool mergeWithPrev = false;
    6326 
    6327  VmaSuballocationList::iterator nextItem = suballocItem;
    6328  ++nextItem;
    6329  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    6330  {
    6331  mergeWithNext = true;
    6332  }
    6333 
    6334  VmaSuballocationList::iterator prevItem = suballocItem;
    6335  if(suballocItem != m_Suballocations.begin())
    6336  {
    6337  --prevItem;
    6338  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    6339  {
    6340  mergeWithPrev = true;
    6341  }
    6342  }
    6343 
    6344  if(mergeWithNext)
    6345  {
    6346  UnregisterFreeSuballocation(nextItem);
    6347  MergeFreeWithNext(suballocItem);
    6348  }
    6349 
    6350  if(mergeWithPrev)
    6351  {
    6352  UnregisterFreeSuballocation(prevItem);
    6353  MergeFreeWithNext(prevItem);
    6354  RegisterFreeSuballocation(prevItem);
    6355  return prevItem;
    6356  }
    6357  else
    6358  {
    6359  RegisterFreeSuballocation(suballocItem);
    6360  return suballocItem;
    6361  }
    6362 }
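// Merge cases handled above, with X being the suballocation just freed:
//
//   [FREE][X][FREE] -> both merges fire; the result is registered at prevItem
//   [USED][X][FREE] -> only mergeWithNext
//   [FREE][X][USED] -> only mergeWithPrev
//
// This preserves the invariant checked in Validate(): no two adjacent FREE
// suballocations ever exist.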
    6363 
    6364 void VmaBlockMetadata::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    6365 {
    6366  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    6367  VMA_ASSERT(item->size > 0);
    6368 
    6369  // You may want to enable this validation at the beginning or at the end of
    6370  // this function, depending on what you want to check.
    6371  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    6372 
    6373  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    6374  {
    6375  if(m_FreeSuballocationsBySize.empty())
    6376  {
    6377  m_FreeSuballocationsBySize.push_back(item);
    6378  }
    6379  else
    6380  {
    6381  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    6382  }
    6383  }
    6384 
    6385  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    6386 }
    6387 
    6388 
    6389 void VmaBlockMetadata::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
    6390 {
    6391  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    6392  VMA_ASSERT(item->size > 0);
    6393 
    6394  // You may want to enable this validation at the beginning or at the end of
    6395  // this function, depending on what you want to check.
    6396  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    6397 
    6398  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    6399  {
    6400  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    6401  m_FreeSuballocationsBySize.data(),
    6402  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
    6403  item,
    6404  VmaSuballocationItemSizeLess());
    6405  for(size_t index = it - m_FreeSuballocationsBySize.data();
    6406  index < m_FreeSuballocationsBySize.size();
    6407  ++index)
    6408  {
    6409  if(m_FreeSuballocationsBySize[index] == item)
    6410  {
    6411  VmaVectorRemove(m_FreeSuballocationsBySize, index);
    6412  return;
    6413  }
    6414  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
    6415  }
    6416  VMA_ASSERT(0 && "Not found.");
    6417  }
    6418 
    6419  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    6420 }
    6421 
    6422 ////////////////////////////////////////////////////////////////////////////////
    6423 // class VmaDeviceMemoryBlock
    6424 
    6425 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    6426  m_Metadata(hAllocator),
    6427  m_MemoryTypeIndex(UINT32_MAX),
    6428  m_Id(0),
    6429  m_hMemory(VK_NULL_HANDLE),
    6430  m_MapCount(0),
    6431  m_pMappedData(VMA_NULL)
    6432 {
    6433 }
    6434 
    6435 void VmaDeviceMemoryBlock::Init(
    6436  uint32_t newMemoryTypeIndex,
    6437  VkDeviceMemory newMemory,
    6438  VkDeviceSize newSize,
    6439  uint32_t id)
    6440 {
    6441  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    6442 
    6443  m_MemoryTypeIndex = newMemoryTypeIndex;
    6444  m_Id = id;
    6445  m_hMemory = newMemory;
    6446 
    6447  m_Metadata.Init(newSize);
    6448 }
    6449 
    6450 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
    6451 {
    6452  // This is the most important assert in the entire library.
    6453  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    6454  VMA_ASSERT(m_Metadata.IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
    6455 
    6456  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    6457  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Metadata.GetSize(), m_hMemory);
    6458  m_hMemory = VK_NULL_HANDLE;
    6459 }
    6460 
    6461 bool VmaDeviceMemoryBlock::Validate() const
    6462 {
    6463  if((m_hMemory == VK_NULL_HANDLE) ||
    6464  (m_Metadata.GetSize() == 0))
    6465  {
    6466  return false;
    6467  }
    6468 
    6469  return m_Metadata.Validate();
    6470 }
    6471 
    6472 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
    6473 {
    6474  if(count == 0)
    6475  {
    6476  return VK_SUCCESS;
    6477  }
    6478 
    6479  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    6480  if(m_MapCount != 0)
    6481  {
    6482  m_MapCount += count;
    6483  VMA_ASSERT(m_pMappedData != VMA_NULL);
    6484  if(ppData != VMA_NULL)
    6485  {
    6486  *ppData = m_pMappedData;
    6487  }
    6488  return VK_SUCCESS;
    6489  }
    6490  else
    6491  {
    6492  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    6493  hAllocator->m_hDevice,
    6494  m_hMemory,
    6495  0, // offset
    6496  VK_WHOLE_SIZE,
    6497  0, // flags
    6498  &m_pMappedData);
    6499  if(result == VK_SUCCESS)
    6500  {
    6501  if(ppData != VMA_NULL)
    6502  {
    6503  *ppData = m_pMappedData;
    6504  }
    6505  m_MapCount = count;
    6506  }
    6507  return result;
    6508  }
    6509 }
    6510 
    6511 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
    6512 {
    6513  if(count == 0)
    6514  {
    6515  return;
    6516  }
    6517 
    6518  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    6519  if(m_MapCount >= count)
    6520  {
    6521  m_MapCount -= count;
    6522  if(m_MapCount == 0)
    6523  {
    6524  m_pMappedData = VMA_NULL;
    6525  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
    6526  }
    6527  }
    6528  else
    6529  {
    6530  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    6531  }
    6532 }
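// Map()/Unmap() above are reference-counted per block: vkMapMemory runs only
// on the 0 -> 1 transition and vkUnmapMemory only on 1 -> 0. Illustrative
// nested use (hypothetical caller code):
//
//   void* p1 = VMA_NULL;
//   void* p2 = VMA_NULL;
//   pBlock->Map(hAllocator, 1, &p1);  // actually calls vkMapMemory
//   pBlock->Map(hAllocator, 1, &p2);  // only bumps the counter; p2 == p1
//   pBlock->Unmap(hAllocator, 1);     // counter 2 -> 1, memory stays mapped
//   pBlock->Unmap(hAllocator, 1);     // counter 1 -> 0, calls vkUnmapMemory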
    6533 
    6534 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    6535  const VmaAllocator hAllocator,
    6536  const VmaAllocation hAllocation,
    6537  VkBuffer hBuffer)
    6538 {
    6539  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    6540  hAllocation->GetBlock() == this);
    6541  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    6542  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    6543  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
    6544  hAllocator->m_hDevice,
    6545  hBuffer,
    6546  m_hMemory,
    6547  hAllocation->GetOffset());
    6548 }
    6549 
    6550 VkResult VmaDeviceMemoryBlock::BindImageMemory(
    6551  const VmaAllocator hAllocator,
    6552  const VmaAllocation hAllocation,
    6553  VkImage hImage)
    6554 {
    6555  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    6556  hAllocation->GetBlock() == this);
    6557  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    6558  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    6559  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
    6560  hAllocator->m_hDevice,
    6561  hImage,
    6562  m_hMemory,
    6563  hAllocation->GetOffset());
    6564 }
    6565 
    6566 static void InitStatInfo(VmaStatInfo& outInfo)
    6567 {
    6568  memset(&outInfo, 0, sizeof(outInfo));
    6569  outInfo.allocationSizeMin = UINT64_MAX;
    6570  outInfo.unusedRangeSizeMin = UINT64_MAX;
    6571 }
    6572 
    6573 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
    6574 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
    6575 {
    6576  inoutInfo.blockCount += srcInfo.blockCount;
    6577  inoutInfo.allocationCount += srcInfo.allocationCount;
    6578  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    6579  inoutInfo.usedBytes += srcInfo.usedBytes;
    6580  inoutInfo.unusedBytes += srcInfo.unusedBytes;
    6581  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    6582  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    6583  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    6584  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
    6585 }
    6586 
    6587 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    6588 {
    6589  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    6590  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    6591  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    6592  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    6593 }
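// Worked example of the averaging above, assuming VmaRoundDiv() rounds to
// nearest: usedBytes = 1001 over allocationCount = 2 gives
// allocationSizeAvg = 501, where plain truncating division would give 500.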
    6594 
    6595 VmaPool_T::VmaPool_T(
    6596  VmaAllocator hAllocator,
    6597  const VmaPoolCreateInfo& createInfo) :
    6598  m_BlockVector(
    6599  hAllocator,
    6600  createInfo.memoryTypeIndex,
    6601  createInfo.blockSize,
    6602  createInfo.minBlockCount,
    6603  createInfo.maxBlockCount,
    6604  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
    6605  createInfo.frameInUseCount,
    6606  true), // isCustomPool
    6607  m_Id(0)
    6608 {
    6609 }
    6610 
    6611 VmaPool_T::~VmaPool_T()
    6612 {
    6613 }
    6614 
    6615 #if VMA_STATS_STRING_ENABLED
    6616 
    6617 #endif // #if VMA_STATS_STRING_ENABLED
    6618 
    6619 VmaBlockVector::VmaBlockVector(
    6620  VmaAllocator hAllocator,
    6621  uint32_t memoryTypeIndex,
    6622  VkDeviceSize preferredBlockSize,
    6623  size_t minBlockCount,
    6624  size_t maxBlockCount,
    6625  VkDeviceSize bufferImageGranularity,
    6626  uint32_t frameInUseCount,
    6627  bool isCustomPool) :
    6628  m_hAllocator(hAllocator),
    6629  m_MemoryTypeIndex(memoryTypeIndex),
    6630  m_PreferredBlockSize(preferredBlockSize),
    6631  m_MinBlockCount(minBlockCount),
    6632  m_MaxBlockCount(maxBlockCount),
    6633  m_BufferImageGranularity(bufferImageGranularity),
    6634  m_FrameInUseCount(frameInUseCount),
    6635  m_IsCustomPool(isCustomPool),
    6636  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    6637  m_HasEmptyBlock(false),
    6638  m_pDefragmentator(VMA_NULL),
    6639  m_NextBlockId(0)
    6640 {
    6641 }
    6642 
    6643 VmaBlockVector::~VmaBlockVector()
    6644 {
    6645  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
    6646 
    6647  for(size_t i = m_Blocks.size(); i--; )
    6648  {
    6649  m_Blocks[i]->Destroy(m_hAllocator);
    6650  vma_delete(m_hAllocator, m_Blocks[i]);
    6651  }
    6652 }
    6653 
    6654 VkResult VmaBlockVector::CreateMinBlocks()
    6655 {
    6656  for(size_t i = 0; i < m_MinBlockCount; ++i)
    6657  {
    6658  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    6659  if(res != VK_SUCCESS)
    6660  {
    6661  return res;
    6662  }
    6663  }
    6664  return VK_SUCCESS;
    6665 }
    6666 
    6667 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    6668 {
    6669  pStats->size = 0;
    6670  pStats->unusedSize = 0;
    6671  pStats->allocationCount = 0;
    6672  pStats->unusedRangeCount = 0;
    6673  pStats->unusedRangeSizeMax = 0;
    6674 
    6675  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    6676 
    6677  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    6678  {
    6679  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    6680  VMA_ASSERT(pBlock);
    6681  VMA_HEAVY_ASSERT(pBlock->Validate());
    6682  pBlock->m_Metadata.AddPoolStats(*pStats);
    6683  }
    6684 }
    6685 
    6686 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    6687 
    6688 VkResult VmaBlockVector::Allocate(
    6689  VmaPool hCurrentPool,
    6690  uint32_t currentFrameIndex,
    6691  VkDeviceSize size,
    6692  VkDeviceSize alignment,
    6693  const VmaAllocationCreateInfo& createInfo,
    6694  VmaSuballocationType suballocType,
    6695  VmaAllocation* pAllocation)
    6696 {
    6697  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    6698  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    6699 
    6700  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    6701 
    6702  // 1. Search existing allocations. Try to allocate without making other allocations lost.
    6703  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    6704  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    6705  {
    6706  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    6707  VMA_ASSERT(pCurrBlock);
    6708  VmaAllocationRequest currRequest = {};
    6709  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
    6710  currentFrameIndex,
    6711  m_FrameInUseCount,
    6712  m_BufferImageGranularity,
    6713  size,
    6714  alignment,
    6715  suballocType,
    6716  false, // canMakeOtherLost
    6717  &currRequest))
    6718  {
    6719  // Allocate from pCurrBlock.
    6720  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
    6721 
    6722  if(mapped)
    6723  {
    6724  VkResult res = pCurrBlock->Map(m_hAllocator, 1, VMA_NULL);
    6725  if(res != VK_SUCCESS)
    6726  {
    6727  return res;
    6728  }
    6729  }
    6730 
    6731  // We no longer have an empty Allocation.
    6732  if(pCurrBlock->m_Metadata.IsEmpty())
    6733  {
    6734  m_HasEmptyBlock = false;
    6735  }
    6736 
    6737  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
    6738  pCurrBlock->m_Metadata.Alloc(currRequest, suballocType, size, *pAllocation);
    6739  (*pAllocation)->InitBlockAllocation(
    6740  hCurrentPool,
    6741  pCurrBlock,
    6742  currRequest.offset,
    6743  alignment,
    6744  size,
    6745  suballocType,
    6746  mapped,
    6747  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    6748  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
    6749  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
    6750  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
    6751  return VK_SUCCESS;
    6752  }
    6753  }
    6754 
    6755  const bool canCreateNewBlock =
    6756  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
    6757  (m_Blocks.size() < m_MaxBlockCount);
    6758 
    6759  // 2. Try to create new block.
    6760  if(canCreateNewBlock)
    6761  {
    6762  // Calculate optimal size for new block.
    6763  VkDeviceSize newBlockSize = m_PreferredBlockSize;
    6764  uint32_t newBlockSizeShift = 0;
    6765  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
    6766 
    6767  // Allocating blocks of other sizes is allowed only in default pools.
    6768  // In custom pools block size is fixed.
    6769  if(m_IsCustomPool == false)
    6770  {
    6771  // Allocate 1/8, 1/4, 1/2 as first blocks.
    6772  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
    6773  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
    6774  {
    6775  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    6776  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
    6777  {
    6778  newBlockSize = smallerNewBlockSize;
    6779  ++newBlockSizeShift;
    6780  }
    6781  else
    6782  {
    6783  break;
    6784  }
    6785  }
    6786  }
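 // Example, assuming m_PreferredBlockSize = 256 MiB and no existing blocks:
 // for an 8 MiB request the loop above settles on a 32 MiB first block
 // (256 -> 128 -> 64 -> 32), since each halved size still satisfies
 // smallerNewBlockSize >= size * 2.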
    6787 
    6788  size_t newBlockIndex = 0;
    6789  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
    6790  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
    6791  if(m_IsCustomPool == false)
    6792  {
    6793  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
    6794  {
    6795  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    6796  if(smallerNewBlockSize >= size)
    6797  {
    6798  newBlockSize = smallerNewBlockSize;
    6799  ++newBlockSizeShift;
    6800  res = CreateBlock(newBlockSize, &newBlockIndex);
    6801  }
    6802  else
    6803  {
    6804  break;
    6805  }
    6806  }
    6807  }
    6808 
    6809  if(res == VK_SUCCESS)
    6810  {
    6811  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
    6812  VMA_ASSERT(pBlock->m_Metadata.GetSize() >= size);
    6813 
    6814  if(mapped)
    6815  {
    6816  res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
    6817  if(res != VK_SUCCESS)
    6818  {
    6819  return res;
    6820  }
    6821  }
    6822 
    6823  // Allocate from pBlock. Because it is empty, allocRequest can be trivially filled.
    6824  VmaAllocationRequest allocRequest;
    6825  pBlock->m_Metadata.CreateFirstAllocationRequest(&allocRequest);
    6826  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
    6827  pBlock->m_Metadata.Alloc(allocRequest, suballocType, size, *pAllocation);
    6828  (*pAllocation)->InitBlockAllocation(
    6829  hCurrentPool,
    6830  pBlock,
    6831  allocRequest.offset,
    6832  alignment,
    6833  size,
    6834  suballocType,
    6835  mapped,
    6836  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    6837  VMA_HEAVY_ASSERT(pBlock->Validate());
    6838  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
    6839  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
    6840  return VK_SUCCESS;
    6841  }
    6842  }
    6843 
    6844  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    6845 
    6846  // 3. Try to allocate from existing blocks with making other allocations lost.
    6847  if(canMakeOtherLost)
    6848  {
    6849  uint32_t tryIndex = 0;
    6850  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
    6851  {
    6852  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
    6853  VmaAllocationRequest bestRequest = {};
    6854  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
    6855 
    6856  // 1. Search existing allocations.
    6857  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    6858  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    6859  {
    6860  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    6861  VMA_ASSERT(pCurrBlock);
    6862  VmaAllocationRequest currRequest = {};
    6863  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
    6864  currentFrameIndex,
    6865  m_FrameInUseCount,
    6866  m_BufferImageGranularity,
    6867  size,
    6868  alignment,
    6869  suballocType,
    6870  canMakeOtherLost,
    6871  &currRequest))
    6872  {
    6873  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    6874  if(pBestRequestBlock == VMA_NULL ||
    6875  currRequestCost < bestRequestCost)
    6876  {
    6877  pBestRequestBlock = pCurrBlock;
    6878  bestRequest = currRequest;
    6879  bestRequestCost = currRequestCost;
    6880 
    6881  if(bestRequestCost == 0)
    6882  {
    6883  break;
    6884  }
    6885  }
    6886  }
    6887  }
    6888 
    6889  if(pBestRequestBlock != VMA_NULL)
    6890  {
    6891  if(mapped)
    6892  {
    6893  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
    6894  if(res != VK_SUCCESS)
    6895  {
    6896  return res;
    6897  }
    6898  }
    6899 
    6900  if(pBestRequestBlock->m_Metadata.MakeRequestedAllocationsLost(
    6901  currentFrameIndex,
    6902  m_FrameInUseCount,
    6903  &bestRequest))
    6904  {
    6905  // We no longer have an empty block.
    6906  if(pBestRequestBlock->m_Metadata.IsEmpty())
    6907  {
    6908  m_HasEmptyBlock = false;
    6909  }
    6910  // Allocate from this pBlock.
    6911  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
    6912  pBestRequestBlock->m_Metadata.Alloc(bestRequest, suballocType, size, *pAllocation);
    6913  (*pAllocation)->InitBlockAllocation(
    6914  hCurrentPool,
    6915  pBestRequestBlock,
    6916  bestRequest.offset,
    6917  alignment,
    6918  size,
    6919  suballocType,
    6920  mapped,
    6921  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    6922  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
    6923  VMA_DEBUG_LOG(" Returned from existing allocation");
    6924  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
    6925  return VK_SUCCESS;
    6926  }
    6927  // else: Some allocations must have been touched while we are here. Next try.
    6928  }
    6929  else
    6930  {
    6931  // Could not find place in any of the blocks - break outer loop.
    6932  break;
    6933  }
    6934  }
    6935  /* Maximum number of tries exceeded - a very unlikely event when many other
    6936  threads are simultaneously touching allocations, making it impossible to mark
    6937  them as lost at the same moment we try to allocate. */
    6938  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
    6939  {
    6940  return VK_ERROR_TOO_MANY_OBJECTS;
    6941  }
    6942  }
    6943 
    6944  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6945 }
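// A hedged usage sketch (hypothetical caller code; `allocator` and `bufCreateInfo` are
// assumed to exist). The lost-allocation path above (step 3) is driven by these public
// flags: CAN_BECOME_LOST lets an allocation be reclaimed, CAN_MAKE_OTHER_LOST lets a new
// allocation reclaim others.
#if 0 // Illustrative sketch only, not part of the library.
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT |
    VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;

VkBuffer buf;
VmaAllocation alloc;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
    &buf, &alloc, nullptr);
#endif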
    6946 
    6947 void VmaBlockVector::Free(
    6948  VmaAllocation hAllocation)
    6949 {
    6950  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
    6951 
    6952  // Scope for lock.
    6953  {
    6954  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    6955 
    6956  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    6957 
    6958  if(hAllocation->IsPersistentMap())
    6959  {
    6960  pBlock->Unmap(m_hAllocator, 1);
    6961  }
    6962 
    6963  pBlock->m_Metadata.Free(hAllocation);
    6964  VMA_HEAVY_ASSERT(pBlock->Validate());
    6965 
    6966  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
    6967 
    6968  // pBlock became empty after this deallocation.
    6969  if(pBlock->m_Metadata.IsEmpty())
    6970  {
    6971  // We already have an empty block - we don't want two, so delete this one.
    6972  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
    6973  {
    6974  pBlockToDelete = pBlock;
    6975  Remove(pBlock);
    6976  }
    6977  // We now have our first empty block.
    6978  else
    6979  {
    6980  m_HasEmptyBlock = true;
    6981  }
    6982  }
    6983  // pBlock didn't become empty, but we have another empty block - find and free that one.
    6984  // (This is an optional heuristic.)
    6985  else if(m_HasEmptyBlock)
    6986  {
    6987  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
    6988  if(pLastBlock->m_Metadata.IsEmpty() && m_Blocks.size() > m_MinBlockCount)
    6989  {
    6990  pBlockToDelete = pLastBlock;
    6991  m_Blocks.pop_back();
    6992  m_HasEmptyBlock = false;
    6993  }
    6994  }
    6995 
    6996  IncrementallySortBlocks();
    6997  }
    6998 
    6999  // Destruction of a free block. Deferred until this point, outside of the mutex
    7000  // lock, for performance reasons.
    7001  if(pBlockToDelete != VMA_NULL)
    7002  {
    7003  VMA_DEBUG_LOG(" Deleted empty block");
    7004  pBlockToDelete->Destroy(m_hAllocator);
    7005  vma_delete(m_hAllocator, pBlockToDelete);
    7006  }
    7007 }
    7008 
    7009 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
    7010 {
    7011  VkDeviceSize result = 0;
    7012  for(size_t i = m_Blocks.size(); i--; )
    7013  {
    7014  result = VMA_MAX(result, m_Blocks[i]->m_Metadata.GetSize());
    7015  if(result >= m_PreferredBlockSize)
    7016  {
    7017  break;
    7018  }
    7019  }
    7020  return result;
    7021 }
    7022 
    7023 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    7024 {
    7025  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    7026  {
    7027  if(m_Blocks[blockIndex] == pBlock)
    7028  {
    7029  VmaVectorRemove(m_Blocks, blockIndex);
    7030  return;
    7031  }
    7032  }
    7033  VMA_ASSERT(0);
    7034 }
    7035 
    7036 void VmaBlockVector::IncrementallySortBlocks()
    7037 {
    7038  // Bubble sort only until first swap.
    7039  for(size_t i = 1; i < m_Blocks.size(); ++i)
    7040  {
    7041  if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() > m_Blocks[i]->m_Metadata.GetSumFreeSize())
    7042  {
    7043  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    7044  return;
    7045  }
    7046  }
    7047 }
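// Note: each call above performs at most one adjacent swap, so repeated calls keep
// m_Blocks approximately sorted by ascending free space. Combined with the forward
// search order in Allocate(), this prefers filling the most-occupied blocks first.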
    7048 
    7049 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    7050 {
    7051  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    7052  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    7053  allocInfo.allocationSize = blockSize;
    7054  VkDeviceMemory mem = VK_NULL_HANDLE;
    7055  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    7056  if(res < 0)
    7057  {
    7058  return res;
    7059  }
    7060 
    7061  // New VkDeviceMemory successfully created.
    7062 
    7063  // Create a new block object for it.
    7064  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    7065  pBlock->Init(
    7066  m_MemoryTypeIndex,
    7067  mem,
    7068  allocInfo.allocationSize,
    7069  m_NextBlockId++);
    7070 
    7071  m_Blocks.push_back(pBlock);
    7072  if(pNewBlockIndex != VMA_NULL)
    7073  {
    7074  *pNewBlockIndex = m_Blocks.size() - 1;
    7075  }
    7076 
    7077  return VK_SUCCESS;
    7078 }
    7079 
    7080 #if VMA_STATS_STRING_ENABLED
    7081 
    7082 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
    7083 {
    7084  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    7085 
    7086  json.BeginObject();
    7087 
    7088  if(m_IsCustomPool)
    7089  {
    7090  json.WriteString("MemoryTypeIndex");
    7091  json.WriteNumber(m_MemoryTypeIndex);
    7092 
    7093  json.WriteString("BlockSize");
    7094  json.WriteNumber(m_PreferredBlockSize);
    7095 
    7096  json.WriteString("BlockCount");
    7097  json.BeginObject(true);
    7098  if(m_MinBlockCount > 0)
    7099  {
    7100  json.WriteString("Min");
    7101  json.WriteNumber((uint64_t)m_MinBlockCount);
    7102  }
    7103  if(m_MaxBlockCount < SIZE_MAX)
    7104  {
    7105  json.WriteString("Max");
    7106  json.WriteNumber((uint64_t)m_MaxBlockCount);
    7107  }
    7108  json.WriteString("Cur");
    7109  json.WriteNumber((uint64_t)m_Blocks.size());
    7110  json.EndObject();
    7111 
    7112  if(m_FrameInUseCount > 0)
    7113  {
    7114  json.WriteString("FrameInUseCount");
    7115  json.WriteNumber(m_FrameInUseCount);
    7116  }
    7117  }
    7118  else
    7119  {
    7120  json.WriteString("PreferredBlockSize");
    7121  json.WriteNumber(m_PreferredBlockSize);
    7122  }
    7123 
    7124  json.WriteString("Blocks");
    7125  json.BeginObject();
    7126  for(size_t i = 0; i < m_Blocks.size(); ++i)
    7127  {
    7128  json.BeginString();
    7129  json.ContinueString(m_Blocks[i]->GetId());
    7130  json.EndString();
    7131 
    7132  m_Blocks[i]->m_Metadata.PrintDetailedMap(json);
    7133  }
    7134  json.EndObject();
    7135 
    7136  json.EndObject();
    7137 }
    7138 
    7139 #endif // #if VMA_STATS_STRING_ENABLED
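// A hedged usage sketch (hypothetical caller code): the JSON emitted by
// PrintDetailedMap() above is exposed publicly through vmaBuildStatsString().
#if 0 // Illustrative sketch only, not part of the library.
char* statsString = nullptr;
vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map.
// ... write statsString to a log or file ...
vmaFreeStatsString(allocator, statsString);
#endif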
    7140 
    7141 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
    7142  VmaAllocator hAllocator,
    7143  uint32_t currentFrameIndex)
    7144 {
    7145  if(m_pDefragmentator == VMA_NULL)
    7146  {
    7147  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
    7148  hAllocator,
    7149  this,
    7150  currentFrameIndex);
    7151  }
    7152 
    7153  return m_pDefragmentator;
    7154 }
    7155 
    7156 VkResult VmaBlockVector::Defragment(
    7157  VmaDefragmentationStats* pDefragmentationStats,
    7158  VkDeviceSize& maxBytesToMove,
    7159  uint32_t& maxAllocationsToMove)
    7160 {
    7161  if(m_pDefragmentator == VMA_NULL)
    7162  {
    7163  return VK_SUCCESS;
    7164  }
    7165 
    7166  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    7167 
    7168  // Defragment.
    7169  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
    7170 
    7171  // Accumulate statistics.
    7172  if(pDefragmentationStats != VMA_NULL)
    7173  {
    7174  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
    7175  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
    7176  pDefragmentationStats->bytesMoved += bytesMoved;
    7177  pDefragmentationStats->allocationsMoved += allocationsMoved;
    7178  VMA_ASSERT(bytesMoved <= maxBytesToMove);
    7179  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
    7180  maxBytesToMove -= bytesMoved;
    7181  maxAllocationsToMove -= allocationsMoved;
    7182  }
    7183 
    7184  // Free empty blocks.
    7185  m_HasEmptyBlock = false;
    7186  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    7187  {
    7188  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    7189  if(pBlock->m_Metadata.IsEmpty())
    7190  {
    7191  if(m_Blocks.size() > m_MinBlockCount)
    7192  {
    7193  if(pDefragmentationStats != VMA_NULL)
    7194  {
    7195  ++pDefragmentationStats->deviceMemoryBlocksFreed;
    7196  pDefragmentationStats->bytesFreed += pBlock->m_Metadata.GetSize();
    7197  }
    7198 
    7199  VmaVectorRemove(m_Blocks, blockIndex);
    7200  pBlock->Destroy(m_hAllocator);
    7201  vma_delete(m_hAllocator, pBlock);
    7202  }
    7203  else
    7204  {
    7205  m_HasEmptyBlock = true;
    7206  }
    7207  }
    7208  }
    7209 
    7210  return result;
    7211 }
    7212 
    7213 void VmaBlockVector::DestroyDefragmentator()
    7214 {
    7215  if(m_pDefragmentator != VMA_NULL)
    7216  {
    7217  vma_delete(m_hAllocator, m_pDefragmentator);
    7218  m_pDefragmentator = VMA_NULL;
    7219  }
    7220 }
    7221 
    7222 void VmaBlockVector::MakePoolAllocationsLost(
    7223  uint32_t currentFrameIndex,
    7224  size_t* pLostAllocationCount)
    7225 {
    7226  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    7227  size_t lostAllocationCount = 0;
    7228  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    7229  {
    7230  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    7231  VMA_ASSERT(pBlock);
    7232  lostAllocationCount += pBlock->m_Metadata.MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    7233  }
    7234  if(pLostAllocationCount != VMA_NULL)
    7235  {
    7236  *pLostAllocationCount = lostAllocationCount;
    7237  }
    7238 }
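// A hedged usage sketch (hypothetical caller code): this member function backs the
// public vmaMakePoolAllocationsLost() entry point.
#if 0 // Illustrative sketch only, not part of the library.
size_t lostCount = 0;
vmaMakePoolAllocationsLost(allocator, pool, &lostCount);
// lostCount now holds the number of allocations marked as lost by this call.
#endif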
    7239 
    7240 void VmaBlockVector::AddStats(VmaStats* pStats)
    7241 {
    7242  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    7243  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    7244 
    7245  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    7246 
    7247  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    7248  {
    7249  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    7250  VMA_ASSERT(pBlock);
    7251  VMA_HEAVY_ASSERT(pBlock->Validate());
    7252  VmaStatInfo allocationStatInfo;
    7253  pBlock->m_Metadata.CalcAllocationStatInfo(allocationStatInfo);
    7254  VmaAddStatInfo(pStats->total, allocationStatInfo);
    7255  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    7256  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    7257  }
    7258 }
    7259 
    7260 ////////////////////////////////////////////////////////////////////////////////
    7261 // VmaDefragmentator members definition
    7262 
    7263 VmaDefragmentator::VmaDefragmentator(
    7264  VmaAllocator hAllocator,
    7265  VmaBlockVector* pBlockVector,
    7266  uint32_t currentFrameIndex) :
    7267  m_hAllocator(hAllocator),
    7268  m_pBlockVector(pBlockVector),
    7269  m_CurrentFrameIndex(currentFrameIndex),
    7270  m_BytesMoved(0),
    7271  m_AllocationsMoved(0),
    7272  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    7273  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
    7274 {
    7275 }
    7276 
    7277 VmaDefragmentator::~VmaDefragmentator()
    7278 {
    7279  for(size_t i = m_Blocks.size(); i--; )
    7280  {
    7281  vma_delete(m_hAllocator, m_Blocks[i]);
    7282  }
    7283 }
    7284 
    7285 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    7286 {
    7287  AllocationInfo allocInfo;
    7288  allocInfo.m_hAllocation = hAlloc;
    7289  allocInfo.m_pChanged = pChanged;
    7290  m_Allocations.push_back(allocInfo);
    7291 }
    7292 
    7293 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
    7294 {
    7295  // It has already been mapped for defragmentation.
    7296  if(m_pMappedDataForDefragmentation)
    7297  {
    7298  *ppMappedData = m_pMappedDataForDefragmentation;
    7299  return VK_SUCCESS;
    7300  }
    7301 
    7302  // It is already mapped, e.g. persistently by the user.
    7303  if(m_pBlock->GetMappedData())
    7304  {
    7305  *ppMappedData = m_pBlock->GetMappedData();
    7306  return VK_SUCCESS;
    7307  }
    7308 
    7309  // Map on first usage.
    7310  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
    7311  *ppMappedData = m_pMappedDataForDefragmentation;
    7312  return res;
    7313 }
    7314 
    7315 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
    7316 {
    7317  if(m_pMappedDataForDefragmentation != VMA_NULL)
    7318  {
    7319  m_pBlock->Unmap(hAllocator, 1);
    7320  }
    7321 }
    7322 
    7323 VkResult VmaDefragmentator::DefragmentRound(
    7324  VkDeviceSize maxBytesToMove,
    7325  uint32_t maxAllocationsToMove)
    7326 {
    7327  if(m_Blocks.empty())
    7328  {
    7329  return VK_SUCCESS;
    7330  }
    7331 
    7332  size_t srcBlockIndex = m_Blocks.size() - 1;
    7333  size_t srcAllocIndex = SIZE_MAX;
    7334  for(;;)
    7335  {
    7336  // 1. Find next allocation to move.
    7337  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
    7338  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
    7339  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
    7340  {
    7341  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
    7342  {
    7343  // Finished: no more allocations to process.
    7344  if(srcBlockIndex == 0)
    7345  {
    7346  return VK_SUCCESS;
    7347  }
    7348  else
    7349  {
    7350  --srcBlockIndex;
    7351  srcAllocIndex = SIZE_MAX;
    7352  }
    7353  }
    7354  else
    7355  {
    7356  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
    7357  }
    7358  }
    7359 
    7360  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
    7361  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
    7362 
    7363  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
    7364  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
    7365  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
    7366  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
    7367 
    7368  // 2. Try to find new place for this allocation in preceding or current block.
    7369  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
    7370  {
    7371  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
    7372  VmaAllocationRequest dstAllocRequest;
    7373  if(pDstBlockInfo->m_pBlock->m_Metadata.CreateAllocationRequest(
    7374  m_CurrentFrameIndex,
    7375  m_pBlockVector->GetFrameInUseCount(),
    7376  m_pBlockVector->GetBufferImageGranularity(),
    7377  size,
    7378  alignment,
    7379  suballocType,
    7380  false, // canMakeOtherLost
    7381  &dstAllocRequest) &&
    7382  MoveMakesSense(
    7383  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
    7384  {
    7385  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
    7386 
    7387  // Reached limit on number of allocations or bytes to move.
    7388  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
    7389  (m_BytesMoved + size > maxBytesToMove))
    7390  {
    7391  return VK_INCOMPLETE;
    7392  }
    7393 
    7394  void* pDstMappedData = VMA_NULL;
    7395  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
    7396  if(res != VK_SUCCESS)
    7397  {
    7398  return res;
    7399  }
    7400 
    7401  void* pSrcMappedData = VMA_NULL;
    7402  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
    7403  if(res != VK_SUCCESS)
    7404  {
    7405  return res;
    7406  }
    7407 
    7408  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
    7409  memcpy(
    7410  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
    7411  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
    7412  static_cast<size_t>(size));
    7413 
    7414  pDstBlockInfo->m_pBlock->m_Metadata.Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
    7415  pSrcBlockInfo->m_pBlock->m_Metadata.FreeAtOffset(srcOffset);
    7416 
    7417  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
    7418 
    7419  if(allocInfo.m_pChanged != VMA_NULL)
    7420  {
    7421  *allocInfo.m_pChanged = VK_TRUE;
    7422  }
    7423 
    7424  ++m_AllocationsMoved;
    7425  m_BytesMoved += size;
    7426 
    7427  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
    7428 
    7429  break;
    7430  }
    7431  }
    7432 
    7433  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for the next round.
    7434 
    7435  if(srcAllocIndex > 0)
    7436  {
    7437  --srcAllocIndex;
    7438  }
    7439  else
    7440  {
    7441  if(srcBlockIndex > 0)
    7442  {
    7443  --srcBlockIndex;
    7444  srcAllocIndex = SIZE_MAX;
    7445  }
    7446  else
    7447  {
    7448  return VK_SUCCESS;
    7449  }
    7450  }
    7451  }
    7452 }
    7453 
    7454 VkResult VmaDefragmentator::Defragment(
    7455  VkDeviceSize maxBytesToMove,
    7456  uint32_t maxAllocationsToMove)
    7457 {
    7458  if(m_Allocations.empty())
    7459  {
    7460  return VK_SUCCESS;
    7461  }
    7462 
    7463  // Create block info for each block.
    7464  const size_t blockCount = m_pBlockVector->m_Blocks.size();
    7465  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    7466  {
    7467  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
    7468  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
    7469  m_Blocks.push_back(pBlockInfo);
    7470  }
    7471 
    7472  // Sort them by m_pBlock pointer value.
    7473  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
    7474 
    7475  // Move allocation infos from m_Allocations to m_Allocations of the corresponding element of m_Blocks.
    7476  for(size_t allocIndex = 0, allocCount = m_Allocations.size(); allocIndex < allocCount; ++allocIndex)
    7477  {
    7478  AllocationInfo& allocInfo = m_Allocations[allocIndex];
    7479  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check whether this allocation was not lost.
    7480  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
    7481  {
    7482  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
    7483  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
    7484  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
    7485  {
    7486  (*it)->m_Allocations.push_back(allocInfo);
    7487  }
    7488  else
    7489  {
    7490  VMA_ASSERT(0);
    7491  }
    7492  }
    7493  }
    7494  m_Allocations.clear();
    7495 
    7496  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    7497  {
    7498  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
    7499  pBlockInfo->CalcHasNonMovableAllocations();
    7500  pBlockInfo->SortAllocationsBySizeDescecnding();
    7501  }
    7502 
    7503  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
    7504  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
    7505 
    7506  // Execute defragmentation rounds (the main part).
    7507  VkResult result = VK_SUCCESS;
    7508  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
    7509  {
    7510  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
    7511  }
    7512 
    7513  // Unmap blocks that were mapped for defragmentation.
    7514  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    7515  {
    7516  m_Blocks[blockIndex]->Unmap(m_hAllocator);
    7517  }
    7518 
    7519  return result;
    7520 }
    7521 
    7522 bool VmaDefragmentator::MoveMakesSense(
    7523  size_t dstBlockIndex, VkDeviceSize dstOffset,
    7524  size_t srcBlockIndex, VkDeviceSize srcOffset)
    7525 {
    7526  if(dstBlockIndex < srcBlockIndex)
    7527  {
    7528  return true;
    7529  }
    7530  if(dstBlockIndex > srcBlockIndex)
    7531  {
    7532  return false;
    7533  }
    7534  if(dstOffset < srcOffset)
    7535  {
    7536  return true;
    7537  }
    7538  return false;
    7539 }
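// In other words: a move makes sense if and only if the destination position precedes
// the source position in lexicographic (blockIndex, offset) order, so data only ever
// moves toward the front of the block list, or toward a lower offset within one block.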
    7540 
    7541 ////////////////////////////////////////////////////////////////////////////////
    7542 // VmaAllocator_T
    7543 
    7544 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    7545  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    7546  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    7547  m_hDevice(pCreateInfo->device),
    7548  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    7549  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
    7550  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    7551  m_PreferredLargeHeapBlockSize(0),
    7552  m_PhysicalDevice(pCreateInfo->physicalDevice),
    7553  m_CurrentFrameIndex(0),
    7554  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
    7555  m_NextPoolId(0)
    7556 {
    7557  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
    7558 
    7559 #if !(VMA_DEDICATED_ALLOCATION)
    7560  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
    7561  {
    7562  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
    7563  }
    7564 #endif
    7565 
    7566  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
    7567  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    7568  memset(&m_MemProps, 0, sizeof(m_MemProps));
    7569 
    7570  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    7571  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
    7572 
    7573  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    7574  {
    7575  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    7576  }
    7577 
    7578  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    7579  {
    7580  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
    7581  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    7582  }
    7583 
    7584  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
    7585 
    7586  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    7587  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
    7588 
    7589  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
    7590  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    7591 
    7592  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    7593  {
    7594  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
    7595  {
    7596  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
    7597  if(limit != VK_WHOLE_SIZE)
    7598  {
    7599  m_HeapSizeLimit[heapIndex] = limit;
    7600  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
    7601  {
    7602  m_MemProps.memoryHeaps[heapIndex].size = limit;
    7603  }
    7604  }
    7605  }
    7606  }
    7607 
    7608  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    7609  {
    7610  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
    7611 
    7612  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
    7613  this,
    7614  memTypeIndex,
    7615  preferredBlockSize,
    7616  0,
    7617  SIZE_MAX,
    7618  GetBufferImageGranularity(),
    7619  pCreateInfo->frameInUseCount,
    7620  false); // isCustomPool
    7621  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
    7622  // because minBlockCount is 0.
    7623  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    7624 
    7625  }
    7626 }
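// A hedged usage sketch (hypothetical caller code; `physicalDevice` and `device` are
// assumed to exist): this constructor is normally reached through vmaCreateAllocator().
#if 0 // Illustrative sketch only, not part of the library.
VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;
allocatorInfo.preferredLargeHeapBlockSize = 0; // 0 = use the library default.

VmaAllocator allocator;
VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
#endif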
    7627 
    7628 VmaAllocator_T::~VmaAllocator_T()
    7629 {
    7630  VMA_ASSERT(m_Pools.empty());
    7631 
    7632  for(size_t i = GetMemoryTypeCount(); i--; )
    7633  {
    7634  vma_delete(this, m_pDedicatedAllocations[i]);
    7635  vma_delete(this, m_pBlockVectors[i]);
    7636  }
    7637 }
    7638 
    7639 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
    7640 {
    7641 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
    7642  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    7643  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    7644  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    7645  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    7646  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    7647  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    7648  m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
    7649  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
    7650  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    7651  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    7652  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    7653  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    7654  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    7655  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    7656  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    7657  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
    7658 #if VMA_DEDICATED_ALLOCATION
    7659  if(m_UseKhrDedicatedAllocation)
    7660  {
    7661  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
    7662  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
    7663  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
    7664  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    7665  }
    7666 #endif // #if VMA_DEDICATED_ALLOCATION
    7667 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
    7668 
    7669 #define VMA_COPY_IF_NOT_NULL(funcName) \
    7670  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
    7671 
    7672  if(pVulkanFunctions != VMA_NULL)
    7673  {
    7674  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
    7675  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
    7676  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
    7677  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
    7678  VMA_COPY_IF_NOT_NULL(vkMapMemory);
    7679  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
    7680  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
    7681  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
    7682  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
    7683  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
    7684  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
    7685  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
    7686  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
    7687  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
    7688  VMA_COPY_IF_NOT_NULL(vkCreateImage);
    7689  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
    7690 #if VMA_DEDICATED_ALLOCATION
    7691  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
    7692  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
    7693 #endif
    7694  }
    7695 
    7696 #undef VMA_COPY_IF_NOT_NULL
    7697 
    7698  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    7699  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    7700  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    7701  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    7702  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    7703  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    7704  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    7705  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    7706  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    7707  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    7708  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    7709  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    7710  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    7711  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    7712  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    7713  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    7714  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    7715  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
    7716 #if VMA_DEDICATED_ALLOCATION
    7717  if(m_UseKhrDedicatedAllocation)
    7718  {
    7719  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
    7720  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    7721  }
    7722 #endif
    7723 }
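// A hedged usage sketch (hypothetical caller code): supplying the function pointers
// manually instead of relying on VMA_STATIC_VULKAN_FUNCTIONS, including the
// vkFlushMappedMemoryRanges / vkInvalidateMappedMemoryRanges members added for
// non-coherent memory support. Fetching via vkGetDeviceProcAddr is one possible way.
#if 0 // Illustrative sketch only, not part of the library.
VmaVulkanFunctions vulkanFunctions = {};
vulkanFunctions.vkMapMemory =
    (PFN_vkMapMemory)vkGetDeviceProcAddr(device, "vkMapMemory");
vulkanFunctions.vkUnmapMemory =
    (PFN_vkUnmapMemory)vkGetDeviceProcAddr(device, "vkUnmapMemory");
vulkanFunctions.vkFlushMappedMemoryRanges =
    (PFN_vkFlushMappedMemoryRanges)vkGetDeviceProcAddr(device, "vkFlushMappedMemoryRanges");
vulkanFunctions.vkInvalidateMappedMemoryRanges =
    (PFN_vkInvalidateMappedMemoryRanges)vkGetDeviceProcAddr(device, "vkInvalidateMappedMemoryRanges");
// ... fill the remaining members similarly (instance-level ones via vkGetInstanceProcAddr) ...

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;
allocatorInfo.pVulkanFunctions = &vulkanFunctions;
#endif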
    7724 
    7725 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    7726 {
    7727  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    7728  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    7729  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
    7730  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
    7731 }
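// Worked example (assuming the library defaults of VMA_SMALL_HEAP_MAX_SIZE = 1 GiB and
// VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE = 256 MiB): a 256 MiB heap counts as small, so its
// preferred block size is 256 MiB / 8 = 32 MiB; an 8 GiB heap is large, so 256 MiB.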
    7732 
    7733 VkResult VmaAllocator_T::AllocateMemoryOfType(
    7734  VkDeviceSize size,
    7735  VkDeviceSize alignment,
    7736  bool dedicatedAllocation,
    7737  VkBuffer dedicatedBuffer,
    7738  VkImage dedicatedImage,
    7739  const VmaAllocationCreateInfo& createInfo,
    7740  uint32_t memTypeIndex,
    7741  VmaSuballocationType suballocType,
    7742  VmaAllocation* pAllocation)
    7743 {
    7744  VMA_ASSERT(pAllocation != VMA_NULL);
    7745  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, size);
    7746 
    7747  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    7748 
    7749  // If memory type is not HOST_VISIBLE, disable MAPPED.
    7750  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    7751  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    7752  {
    7753  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    7754  }
    7755 
    7756  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    7757  VMA_ASSERT(blockVector);
    7758 
    7759  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    7760  bool preferDedicatedMemory =
    7761  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    7762  dedicatedAllocation ||
    7763  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
    7764  size > preferredBlockSize / 2;
    7765 
    7766  if(preferDedicatedMemory &&
    7767  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    7768  finalCreateInfo.pool == VK_NULL_HANDLE)
    7769  {
    7770  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
    7771  }
    7772 
    7773  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    7774  {
    7775  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    7776  {
    7777  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    7778  }
    7779  else
    7780  {
    7781  return AllocateDedicatedMemory(
    7782  size,
    7783  suballocType,
    7784  memTypeIndex,
    7785  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    7786  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    7787  finalCreateInfo.pUserData,
    7788  dedicatedBuffer,
    7789  dedicatedImage,
    7790  pAllocation);
    7791  }
    7792  }
    7793  else
    7794  {
    7795  VkResult res = blockVector->Allocate(
    7796  VK_NULL_HANDLE, // hCurrentPool
    7797  m_CurrentFrameIndex.load(),
    7798  size,
    7799  alignment,
    7800  finalCreateInfo,
    7801  suballocType,
    7802  pAllocation);
    7803  if(res == VK_SUCCESS)
    7804  {
    7805  return res;
    7806  }
    7807 
    7808  // 5. Try dedicated memory.
    7809  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    7810  {
    7811  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    7812  }
    7813  else
    7814  {
    7815  res = AllocateDedicatedMemory(
    7816  size,
    7817  suballocType,
    7818  memTypeIndex,
    7819  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    7820  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    7821  finalCreateInfo.pUserData,
    7822  dedicatedBuffer,
    7823  dedicatedImage,
    7824  pAllocation);
    7825  if(res == VK_SUCCESS)
    7826  {
    7827  // Succeeded: AllocateDedicatedMemory function already filled pAllocation, nothing more to do here.
    7828  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    7829  return VK_SUCCESS;
    7830  }
    7831  else
    7832  {
    7833  // Everything failed: Return error code.
    7834  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    7835  return res;
    7836  }
    7837  }
    7838  }
    7839 }
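// A hedged usage sketch (hypothetical caller code; `bufCreateInfo`, `srcData` and
// `srcDataSize` are assumed): persistently mapped host-visible memory, combined with the
// newly added vmaFlushAllocation() for non-HOST_COHERENT memory types.
// nonCoherentAtomSize is respected automatically by the library.
#if 0 // Illustrative sketch only, not part of the library.
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;

VkBuffer buf;
VmaAllocation alloc;
VmaAllocationInfo allocInfo;
vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);

memcpy(allocInfo.pMappedData, srcData, (size_t)srcDataSize);
vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE); // Flush the whole allocation.
#endif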
    7840 
    7841 VkResult VmaAllocator_T::AllocateDedicatedMemory(
    7842  VkDeviceSize size,
    7843  VmaSuballocationType suballocType,
    7844  uint32_t memTypeIndex,
    7845  bool map,
    7846  bool isUserDataString,
    7847  void* pUserData,
    7848  VkBuffer dedicatedBuffer,
    7849  VkImage dedicatedImage,
    7850  VmaAllocation* pAllocation)
    7851 {
    7852  VMA_ASSERT(pAllocation);
    7853 
    7854  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    7855  allocInfo.memoryTypeIndex = memTypeIndex;
    7856  allocInfo.allocationSize = size;
    7857 
    7858 #if VMA_DEDICATED_ALLOCATION
    7859  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    7860  if(m_UseKhrDedicatedAllocation)
    7861  {
    7862  if(dedicatedBuffer != VK_NULL_HANDLE)
    7863  {
    7864  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
    7865  dedicatedAllocInfo.buffer = dedicatedBuffer;
    7866  allocInfo.pNext = &dedicatedAllocInfo;
    7867  }
    7868  else if(dedicatedImage != VK_NULL_HANDLE)
    7869  {
    7870  dedicatedAllocInfo.image = dedicatedImage;
    7871  allocInfo.pNext = &dedicatedAllocInfo;
    7872  }
    7873  }
    7874 #endif // #if VMA_DEDICATED_ALLOCATION
    7875 
    7876  // Allocate VkDeviceMemory.
    7877  VkDeviceMemory hMemory = VK_NULL_HANDLE;
    7878  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    7879  if(res < 0)
    7880  {
    7881  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    7882  return res;
    7883  }
    7884 
    7885  void* pMappedData = VMA_NULL;
    7886  if(map)
    7887  {
    7888  res = (*m_VulkanFunctions.vkMapMemory)(
    7889  m_hDevice,
    7890  hMemory,
    7891  0,
    7892  VK_WHOLE_SIZE,
    7893  0,
    7894  &pMappedData);
    7895  if(res < 0)
    7896  {
    7897  VMA_DEBUG_LOG(" vkMapMemory FAILED");
    7898  FreeVulkanMemory(memTypeIndex, size, hMemory);
    7899  return res;
    7900  }
    7901  }
    7902 
    7903  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
    7904  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    7905  (*pAllocation)->SetUserData(this, pUserData);
    7906 
    7907  // Register it in m_pDedicatedAllocations.
    7908  {
    7909  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    7910  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    7911  VMA_ASSERT(pDedicatedAllocations);
    7912  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    7913  }
    7914 
    7915  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
    7916 
    7917  return VK_SUCCESS;
    7918 }
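// A hedged usage sketch (hypothetical caller code; `imageCreateInfo` is assumed):
// forcing the dedicated-memory path above from the public API, e.g. for large render
// targets.
#if 0 // Illustrative sketch only, not part of the library.
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; // Own VkDeviceMemory.

VkImage image;
VmaAllocation alloc;
VkResult res = vmaCreateImage(allocator, &imageCreateInfo, &allocCreateInfo,
    &image, &alloc, nullptr);
#endif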
    7919 
    7920 void VmaAllocator_T::GetBufferMemoryRequirements(
    7921  VkBuffer hBuffer,
    7922  VkMemoryRequirements& memReq,
    7923  bool& requiresDedicatedAllocation,
    7924  bool& prefersDedicatedAllocation) const
    7925 {
    7926 #if VMA_DEDICATED_ALLOCATION
    7927  if(m_UseKhrDedicatedAllocation)
    7928  {
    7929  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
    7930  memReqInfo.buffer = hBuffer;
    7931 
    7932  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    7933 
    7934  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    7935  memReq2.pNext = &memDedicatedReq;
    7936 
    7937  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    7938 
    7939  memReq = memReq2.memoryRequirements;
    7940  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    7941  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    7942  }
    7943  else
    7944 #endif // #if VMA_DEDICATED_ALLOCATION
    7945  {
    7946  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
    7947  requiresDedicatedAllocation = false;
    7948  prefersDedicatedAllocation = false;
    7949  }
    7950 }
    7951 
    7952 void VmaAllocator_T::GetImageMemoryRequirements(
    7953  VkImage hImage,
    7954  VkMemoryRequirements& memReq,
    7955  bool& requiresDedicatedAllocation,
    7956  bool& prefersDedicatedAllocation) const
    7957 {
    7958 #if VMA_DEDICATED_ALLOCATION
    7959  if(m_UseKhrDedicatedAllocation)
    7960  {
    7961  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
    7962  memReqInfo.image = hImage;
    7963 
    7964  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    7965 
    7966  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    7967  memReq2.pNext = &memDedicatedReq;
    7968 
    7969  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    7970 
    7971  memReq = memReq2.memoryRequirements;
    7972  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    7973  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    7974  }
    7975  else
    7976 #endif // #if VMA_DEDICATED_ALLOCATION
    7977  {
    7978  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
    7979  requiresDedicatedAllocation = false;
    7980  prefersDedicatedAllocation = false;
    7981  }
    7982 }
    7983 
    7984 VkResult VmaAllocator_T::AllocateMemory(
    7985  const VkMemoryRequirements& vkMemReq,
    7986  bool requiresDedicatedAllocation,
    7987  bool prefersDedicatedAllocation,
    7988  VkBuffer dedicatedBuffer,
    7989  VkImage dedicatedImage,
    7990  const VmaAllocationCreateInfo& createInfo,
    7991  VmaSuballocationType suballocType,
    7992  VmaAllocation* pAllocation)
    7993 {
    7994  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
    7995  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    7996  {
    7997  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    7998  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    7999  }
    8000  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    8001  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
    8002  {
    8003  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
    8004  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    8005  }
    8006  if(requiresDedicatedAllocation)
    8007  {
    8008  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    8009  {
    8010  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
    8011  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    8012  }
    8013  if(createInfo.pool != VK_NULL_HANDLE)
    8014  {
    8015  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
    8016  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    8017  }
    8018  }
    8019  if((createInfo.pool != VK_NULL_HANDLE) &&
    8020  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    8021  {
    8022  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
    8023  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    8024  }
    8025 
    8026  if(createInfo.pool != VK_NULL_HANDLE)
    8027  {
    8028  const VkDeviceSize alignmentForPool = VMA_MAX(
    8029  vkMemReq.alignment,
    8030  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
    8031  return createInfo.pool->m_BlockVector.Allocate(
    8032  createInfo.pool,
    8033  m_CurrentFrameIndex.load(),
    8034  vkMemReq.size,
    8035  alignmentForPool,
    8036  createInfo,
    8037  suballocType,
    8038  pAllocation);
    8039  }
    8040  else
    8041  {
    8042  // Bit mask of Vulkan memory types acceptable for this allocation.
    8043  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    8044  uint32_t memTypeIndex = UINT32_MAX;
    8045  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    8046  if(res == VK_SUCCESS)
    8047  {
    8048  VkDeviceSize alignmentForMemType = VMA_MAX(
    8049  vkMemReq.alignment,
    8050  GetMemoryTypeMinAlignment(memTypeIndex));
    8051 
    8052  res = AllocateMemoryOfType(
    8053  vkMemReq.size,
    8054  alignmentForMemType,
    8055  requiresDedicatedAllocation || prefersDedicatedAllocation,
    8056  dedicatedBuffer,
    8057  dedicatedImage,
    8058  createInfo,
    8059  memTypeIndex,
    8060  suballocType,
    8061  pAllocation);
    8062  // Succeeded on first try.
    8063  if(res == VK_SUCCESS)
    8064  {
    8065  return res;
    8066  }
    8067  // Allocation from this memory type failed. Try other compatible memory types.
    8068  else
    8069  {
    8070  for(;;)
    8071  {
    8072  // Remove old memTypeIndex from list of possibilities.
    8073  memoryTypeBits &= ~(1u << memTypeIndex);
    8074  // Find alternative memTypeIndex.
    8075  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    8076  if(res == VK_SUCCESS)
    8077  {
    8078  alignmentForMemType = VMA_MAX(
    8079  vkMemReq.alignment,
    8080  GetMemoryTypeMinAlignment(memTypeIndex));
    8081 
    8082  res = AllocateMemoryOfType(
    8083  vkMemReq.size,
    8084  alignmentForMemType,
    8085  requiresDedicatedAllocation || prefersDedicatedAllocation,
    8086  dedicatedBuffer,
    8087  dedicatedImage,
    8088  createInfo,
    8089  memTypeIndex,
    8090  suballocType,
    8091  pAllocation);
    8092  // Allocation from this alternative memory type succeeded.
    8093  if(res == VK_SUCCESS)
    8094  {
    8095  return res;
    8096  }
    8097  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    8098  }
    8099  // No other matching memory type index could be found.
    8100  else
    8101  {
    8102  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    8103  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    8104  }
    8105  }
    8106  }
    8107  }
    8108  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    8109  else
    8110  return res;
    8111  }
    8112 }
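// A hedged usage sketch (hypothetical caller code; `memReq` is assumed to come from
// vkGetBufferMemoryRequirements or similar): the same memory-type search used above is
// available directly as vmaFindMemoryTypeIndex().
#if 0 // Illustrative sketch only, not part of the library.
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

uint32_t memTypeIndex = UINT32_MAX;
VkResult res = vmaFindMemoryTypeIndex(allocator, memReq.memoryTypeBits,
    &allocCreateInfo, &memTypeIndex);
#endif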
    8113 
    8114 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
    8115 {
    8116  VMA_ASSERT(allocation);
    8117 
    8118  if(allocation->CanBecomeLost() == false ||
    8119  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
    8120  {
    8121  switch(allocation->GetType())
    8122  {
    8123  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    8124  {
    8125  VmaBlockVector* pBlockVector = VMA_NULL;
    8126  VmaPool hPool = allocation->GetPool();
    8127  if(hPool != VK_NULL_HANDLE)
    8128  {
    8129  pBlockVector = &hPool->m_BlockVector;
    8130  }
    8131  else
    8132  {
    8133  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    8134  pBlockVector = m_pBlockVectors[memTypeIndex];
    8135  }
    8136  pBlockVector->Free(allocation);
    8137  }
    8138  break;
    8139  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    8140  FreeDedicatedMemory(allocation);
    8141  break;
    8142  default:
    8143  VMA_ASSERT(0);
    8144  }
    8145  }
    8146 
    8147  allocation->SetUserData(this, VMA_NULL);
    8148  vma_delete(this, allocation);
    8149 }
    8150 
    8151 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
    8152 {
    8153  // Initialize.
    8154  InitStatInfo(pStats->total);
    8155  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
    8156  InitStatInfo(pStats->memoryType[i]);
    8157  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    8158  InitStatInfo(pStats->memoryHeap[i]);
    8159 
    8160  // Process default pools.
    8161  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    8162  {
    8163  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    8164  VMA_ASSERT(pBlockVector);
    8165  pBlockVector->AddStats(pStats);
    8166  }
    8167 
    8168  // Process custom pools.
    8169  {
    8170  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    8171  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    8172  {
    8173  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
    8174  }
    8175  }
    8176 
    8177  // Process dedicated allocations.
    8178  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    8179  {
    8180  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    8181  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    8182  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    8183  VMA_ASSERT(pDedicatedAllocVector);
    8184  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
    8185  {
    8186  VmaStatInfo allocationStatInfo;
    8187  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
    8188  VmaAddStatInfo(pStats->total, allocationStatInfo);
    8189  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    8190  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    8191  }
    8192  }
    8193 
    8194  // Postprocess.
    8195  VmaPostprocessCalcStatInfo(pStats->total);
    8196  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    8197  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    8198  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
    8199  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
    8200 }
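// A hedged usage sketch (hypothetical caller code): this aggregation backs the public
// vmaCalculateStats() entry point.
#if 0 // Illustrative sketch only, not part of the library.
VmaStats stats;
vmaCalculateStats(allocator, &stats);
printf("Used: %llu B, unused: %llu B, allocations: %u\n",
    (unsigned long long)stats.total.usedBytes,
    (unsigned long long)stats.total.unusedBytes,
    stats.total.allocationCount);
#endif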
    8201 
    8202 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    8203 
    8204 VkResult VmaAllocator_T::Defragment(
    8205  VmaAllocation* pAllocations,
    8206  size_t allocationCount,
    8207  VkBool32* pAllocationsChanged,
    8208  const VmaDefragmentationInfo* pDefragmentationInfo,
    8209  VmaDefragmentationStats* pDefragmentationStats)
    8210 {
    8211  if(pAllocationsChanged != VMA_NULL)
    8212  {
    8213  memset(pAllocationsChanged, 0, allocationCount * sizeof(*pAllocationsChanged));
    8214  }
    8215  if(pDefragmentationStats != VMA_NULL)
    8216  {
    8217  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
    8218  }
    8219 
    8220  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
    8221 
    8222  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
    8223 
    8224  const size_t poolCount = m_Pools.size();
    8225 
    8226  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
    8227  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    8228  {
    8229  VmaAllocation hAlloc = pAllocations[allocIndex];
    8230  VMA_ASSERT(hAlloc);
    8231  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
    8232  // DedicatedAlloc cannot be defragmented.
    8233  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
    8234  // Only HOST_VISIBLE memory types can be defragmented.
    8235  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
    8236  // Lost allocation cannot be defragmented.
    8237  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
    8238  {
    8239  VmaBlockVector* pAllocBlockVector = VMA_NULL;
    8240 
    8241  const VmaPool hAllocPool = hAlloc->GetPool();
    8242  // This allocation belongs to a custom pool.
    8243  if(hAllocPool != VK_NULL_HANDLE)
    8244  {
    8245  pAllocBlockVector = &hAllocPool->GetBlockVector();
    8246  }
    8247  // This allocation belongs to the general pool.
    8248  else
    8249  {
    8250  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
    8251  }
    8252 
    8253  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
    8254 
    8255  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
    8256  &pAllocationsChanged[allocIndex] : VMA_NULL;
    8257  pDefragmentator->AddAllocation(hAlloc, pChanged);
    8258  }
    8259  }
    8260 
    8261  VkResult result = VK_SUCCESS;
    8262 
    8263  // ======== Main processing.
    8264 
    8265  VkDeviceSize maxBytesToMove = SIZE_MAX;
    8266  uint32_t maxAllocationsToMove = UINT32_MAX;
    8267  if(pDefragmentationInfo != VMA_NULL)
    8268  {
    8269  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
    8270  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    8271  }
    8272 
    8273  // Process standard memory.
    8274  for(uint32_t memTypeIndex = 0;
    8275  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
    8276  ++memTypeIndex)
    8277  {
    8278  // Only HOST_VISIBLE memory types can be defragmented.
    8279  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    8280  {
    8281  result = m_pBlockVectors[memTypeIndex]->Defragment(
    8282  pDefragmentationStats,
    8283  maxBytesToMove,
    8284  maxAllocationsToMove);
    8285  }
    8286  }
    8287 
    8288  // Process custom pools.
    8289  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
    8290  {
    8291  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
    8292  pDefragmentationStats,
    8293  maxBytesToMove,
    8294  maxAllocationsToMove);
    8295  }
    8296 
    8297  // ======== Destroy defragmentators.
    8298 
    8299  // Process custom pools.
    8300  for(size_t poolIndex = poolCount; poolIndex--; )
    8301  {
    8302  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
    8303  }
    8304 
    8305  // Process standard memory.
    8306  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
    8307  {
    8308  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    8309  {
    8310  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
    8311  }
    8312  }
    8313 
    8314  return result;
    8315 }
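// A hedged usage sketch (hypothetical caller code; the `allocations` and
// `allocationsChanged` arrays are assumed): driving the routine above through the
// public vmaDefragment() entry point with optional limits.
#if 0 // Illustrative sketch only, not part of the library.
VmaDefragmentationInfo defragInfo = {};
defragInfo.maxBytesToMove = 64ull * 1024 * 1024; // Move at most 64 MiB.
defragInfo.maxAllocationsToMove = 100;

VmaDefragmentationStats defragStats = {};
VkResult res = vmaDefragment(allocator, allocations, allocationCount,
    allocationsChanged, &defragInfo, &defragStats);
// Buffers/images bound to allocations with allocationsChanged[i] == VK_TRUE must be
// recreated and rebound, because their memory and/or offset may have changed.
#endif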
    8316 
    8317 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
    8318 {
    8319  if(hAllocation->CanBecomeLost())
    8320  {
    8321  /*
    8322  Warning: This is a carefully designed algorithm.
    8323  Do not modify unless you really know what you're doing :)
    8324  */
    8325  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    8326  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    8327  for(;;)
    8328  {
    8329  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    8330  {
    8331  pAllocationInfo->memoryType = UINT32_MAX;
    8332  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
    8333  pAllocationInfo->offset = 0;
    8334  pAllocationInfo->size = hAllocation->GetSize();
    8335  pAllocationInfo->pMappedData = VMA_NULL;
    8336  pAllocationInfo->pUserData = hAllocation->GetUserData();
    8337  return;
    8338  }
    8339  else if(localLastUseFrameIndex == localCurrFrameIndex)
    8340  {
    8341  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    8342  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    8343  pAllocationInfo->offset = hAllocation->GetOffset();
    8344  pAllocationInfo->size = hAllocation->GetSize();
    8345  pAllocationInfo->pMappedData = VMA_NULL;
    8346  pAllocationInfo->pUserData = hAllocation->GetUserData();
    8347  return;
    8348  }
    8349  else // Last use time earlier than current time.
    8350  {
    8351  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    8352  {
    8353  localLastUseFrameIndex = localCurrFrameIndex;
    8354  }
    8355  }
    8356  }
    8357  }
    8358  else
    8359  {
    8360 #if VMA_STATS_STRING_ENABLED
    8361  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    8362  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    8363  for(;;)
    8364  {
    8365  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
    8366  if(localLastUseFrameIndex == localCurrFrameIndex)
    8367  {
    8368  break;
    8369  }
    8370  else // Last use time earlier than current time.
    8371  {
    8372  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    8373  {
    8374  localLastUseFrameIndex = localCurrFrameIndex;
    8375  }
    8376  }
    8377  }
    8378 #endif
    8379 
    8380  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    8381  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    8382  pAllocationInfo->offset = hAllocation->GetOffset();
    8383  pAllocationInfo->size = hAllocation->GetSize();
    8384  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
    8385  pAllocationInfo->pUserData = hAllocation->GetUserData();
    8386  }
    8387 }
    8388 
    8389 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
    8390 {
    8391  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    8392  if(hAllocation->CanBecomeLost())
    8393  {
    8394  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    8395  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    8396  for(;;)
    8397  {
    8398  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    8399  {
    8400  return false;
    8401  }
    8402  else if(localLastUseFrameIndex == localCurrFrameIndex)
    8403  {
    8404  return true;
    8405  }
    8406  else // Last use time earlier than current time.
    8407  {
    8408  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    8409  {
    8410  localLastUseFrameIndex = localCurrFrameIndex;
    8411  }
    8412  }
    8413  }
    8414  }
    8415  else
    8416  {
    8417 #if VMA_STATS_STRING_ENABLED
    8418  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    8419  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    8420  for(;;)
    8421  {
    8422  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
    8423  if(localLastUseFrameIndex == localCurrFrameIndex)
    8424  {
    8425  break;
    8426  }
    8427  else // Last use time earlier than current time.
    8428  {
    8429  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    8430  {
    8431  localLastUseFrameIndex = localCurrFrameIndex;
    8432  }
    8433  }
    8434  }
    8435 #endif
    8436 
    8437  return true;
    8438  }
    8439 }
    8440 
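// A minimal usage sketch of the lost-allocation protocol implemented above,
// assuming an allocation created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT;
// RecreateResource() is a hypothetical application callback.
void UseAllocationEachFrame(VmaAllocator allocator, VmaAllocation alloc, uint32_t frameIndex)
{
    // CompareExchangeLastUseFrameIndex() above races against this counter.
    vmaSetCurrentFrameIndex(allocator, frameIndex);

    // Atomically marks the allocation as used in the current frame,
    // or returns VK_FALSE if it has already been made lost.
    if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
    {
        // RecreateResource(alloc);
    }
}
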
    8441 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    8442 {
    8443  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
    8444 
    8445  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    8446 
    8447  if(newCreateInfo.maxBlockCount == 0)
    8448  {
    8449  newCreateInfo.maxBlockCount = SIZE_MAX;
    8450  }
    8451  if(newCreateInfo.blockSize == 0)
    8452  {
    8453  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    8454  }
    8455 
    8456  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
    8457 
    8458  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    8459  if(res != VK_SUCCESS)
    8460  {
    8461  vma_delete(this, *pPool);
    8462  *pPool = VMA_NULL;
    8463  return res;
    8464  }
    8465 
    8466  // Add to m_Pools.
    8467  {
    8468  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    8469  (*pPool)->SetId(m_NextPoolId++);
    8470  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    8471  }
    8472 
    8473  return VK_SUCCESS;
    8474 }
    8475 
    8476 void VmaAllocator_T::DestroyPool(VmaPool pool)
    8477 {
    8478  // Remove from m_Pools.
    8479  {
    8480  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    8481  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    8482  VMA_ASSERT(success && "Pool not found in Allocator.");
    8483  }
    8484 
    8485  vma_delete(this, pool);
    8486 }
    8487 
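// A minimal sketch of the pool lifetime handled by CreatePool()/DestroyPool();
// `allocator` and `memTypeIndex` are assumed to exist. Zero-initialized
// blockSize and maxBlockCount take the defaults computed above
// (CalcPreferredBlockSize() and SIZE_MAX respectively).
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex; // e.g. from vmaFindMemoryTypeIndex()
poolCreateInfo.minBlockCount = 1;              // keep one block alive even when empty

VmaPool pool = VK_NULL_HANDLE;
if(vmaCreatePool(allocator, &poolCreateInfo, &pool) == VK_SUCCESS)
{
    // ... allocate with VmaAllocationCreateInfo::pool = pool ...
    vmaDestroyPool(allocator, pool);
}
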
    8488 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
    8489 {
    8490  pool->m_BlockVector.GetPoolStats(pPoolStats);
    8491 }
    8492 
    8493 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
    8494 {
    8495  m_CurrentFrameIndex.store(frameIndex);
    8496 }
    8497 
    8498 void VmaAllocator_T::MakePoolAllocationsLost(
    8499  VmaPool hPool,
    8500  size_t* pLostAllocationCount)
    8501 {
    8502  hPool->m_BlockVector.MakePoolAllocationsLost(
    8503  m_CurrentFrameIndex.load(),
    8504  pLostAllocationCount);
    8505 }
    8506 
    8507 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
    8508 {
    8509  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
    8510  (*pAllocation)->InitLost();
    8511 }
    8512 
    8513 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
    8514 {
    8515  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
    8516 
    8517  VkResult res;
    8518  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    8519  {
    8520  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    8521  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
    8522  {
    8523  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    8524  if(res == VK_SUCCESS)
    8525  {
    8526  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
    8527  }
    8528  }
    8529  else
    8530  {
    8531  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
    8532  }
    8533  }
    8534  else
    8535  {
    8536  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    8537  }
    8538 
    8539  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    8540  {
    8541  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    8542  }
    8543 
    8544  return res;
    8545 }
    8546 
    8547 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
    8548 {
    8549  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    8550  {
    8551  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    8552  }
    8553 
    8554  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
    8555 
    8556  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    8557  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    8558  {
    8559  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    8560  m_HeapSizeLimit[heapIndex] += size;
    8561  }
    8562 }
    8563 
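// A sketch of where m_HeapSizeLimit comes from: VmaAllocatorCreateInfo::pHeapSizeLimit,
// set at allocator creation. Heaps left at VK_WHOLE_SIZE are untracked and skip the
// mutex-guarded branch above; `physicalDevice` and `device` are assumed to exist.
VkDeviceSize heapSizeLimit[VK_MAX_MEMORY_HEAPS];
for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    heapSizeLimit[i] = VK_WHOLE_SIZE;           // default: no limit
heapSizeLimit[0] = 256ull * 1024 * 1024;        // example: cap heap 0 at 256 MiB

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;
allocatorInfo.pHeapSizeLimit = heapSizeLimit;

VmaAllocator allocator = VK_NULL_HANDLE;
vmaCreateAllocator(&allocatorInfo, &allocator);
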
    8564 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
    8565 {
    8566  if(hAllocation->CanBecomeLost())
    8567  {
    8568  return VK_ERROR_MEMORY_MAP_FAILED;
    8569  }
    8570 
    8571  switch(hAllocation->GetType())
    8572  {
    8573  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    8574  {
    8575  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    8576  char *pBytes = VMA_NULL;
    8577  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
    8578  if(res == VK_SUCCESS)
    8579  {
    8580  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
    8581  hAllocation->BlockAllocMap();
    8582  }
    8583  return res;
    8584  }
    8585  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    8586  return hAllocation->DedicatedAllocMap(this, ppData);
    8587  default:
    8588  VMA_ASSERT(0);
    8589  return VK_ERROR_MEMORY_MAP_FAILED;
    8590  }
    8591 }
    8592 
    8593 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
    8594 {
    8595  switch(hAllocation->GetType())
    8596  {
    8597  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    8598  {
    8599  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    8600  hAllocation->BlockAllocUnmap();
    8601  pBlock->Unmap(this, 1);
    8602  }
    8603  break;
    8604  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    8605  hAllocation->DedicatedAllocUnmap(this);
    8606  break;
    8607  default:
    8608  VMA_ASSERT(0);
    8609  }
    8610 }
    8611 
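// A minimal sketch of the public mapping API backed by Map()/Unmap() above.
// Because block mappings are reference-counted, several allocations from the
// same VkDeviceMemory block may be mapped concurrently; `srcData` and `srcSize`
// are assumed.
void* pData = VMA_NULL;
if(vmaMapMemory(allocator, allocation, &pData) == VK_SUCCESS)
{
    memcpy(pData, srcData, srcSize); // pData already points at this allocation's offset
    vmaUnmapMemory(allocator, allocation);
}
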
    8612 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
    8613 {
    8614  VkResult res = VK_SUCCESS;
    8615  switch(hAllocation->GetType())
    8616  {
    8617  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    8618  res = GetVulkanFunctions().vkBindBufferMemory(
    8619  m_hDevice,
    8620  hBuffer,
    8621  hAllocation->GetMemory(),
    8622  0); //memoryOffset
    8623  break;
    8624  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    8625  {
    8626  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    8627  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
    8628  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
    8629  break;
    8630  }
    8631  default:
    8632  VMA_ASSERT(0);
    8633  }
    8634  return res;
    8635 }
    8636 
    8637 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
    8638 {
    8639  VkResult res = VK_SUCCESS;
    8640  switch(hAllocation->GetType())
    8641  {
    8642  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    8643  res = GetVulkanFunctions().vkBindImageMemory(
    8644  m_hDevice,
    8645  hImage,
    8646  hAllocation->GetMemory(),
    8647  0); //memoryOffset
    8648  break;
    8649  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    8650  {
    8651  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    8652  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
    8653  res = pBlock->BindImageMemory(this, hAllocation, hImage);
    8654  break;
    8655  }
    8656  default:
    8657  VMA_ASSERT(0);
    8658  }
    8659  return res;
    8660 }
    8661 
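// A sketch of the manual create-allocate-bind path served by the two functions
// above; `device`, `allocator`, and a filled `bufCreateInfo` are assumed.
VkBuffer buf = VK_NULL_HANDLE;
vkCreateBuffer(device, &bufCreateInfo, VMA_NULL, &buf);

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation alloc = VK_NULL_HANDLE;
VkResult res = vmaAllocateMemoryForBuffer(allocator, buf, &allocCreateInfo, &alloc, VMA_NULL);
if(res == VK_SUCCESS)
{
    // The allocation's offset within its block is applied internally.
    res = vmaBindBufferMemory(allocator, alloc, buf);
}
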
    8662 void VmaAllocator_T::FlushOrInvalidateAllocation(
    8663  VmaAllocation hAllocation,
    8664  VkDeviceSize offset, VkDeviceSize size,
    8665  VMA_CACHE_OPERATION op)
    8666 {
    8667  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    8668  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    8669  {
    8670  const VkDeviceSize allocationSize = hAllocation->GetSize();
    8671  VMA_ASSERT(offset <= allocationSize);
    8672 
    8673  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
    8674 
    8675  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
    8676  memRange.memory = hAllocation->GetMemory();
    8677 
    8678  switch(hAllocation->GetType())
    8679  {
    8680  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    8681  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
    8682  if(size == VK_WHOLE_SIZE)
    8683  {
    8684  memRange.size = allocationSize - memRange.offset;
    8685  }
    8686  else
    8687  {
    8688  VMA_ASSERT(offset + size <= allocationSize);
    8689  memRange.size = VMA_MIN(
    8690  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
    8691  allocationSize - memRange.offset);
    8692  }
    8693  break;
    8694 
    8695  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    8696  {
    8697  // 1. Still within this allocation.
    8698  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
    8699  if(size == VK_WHOLE_SIZE)
    8700  {
    8701  size = allocationSize - offset;
    8702  }
    8703  else
    8704  {
    8705  VMA_ASSERT(offset + size <= allocationSize);
    8706  }
    8707  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
    8708 
    8709  // 2. Adjust to whole block.
    8710  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
    8711  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
    8712  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_Metadata.GetSize();
    8713  memRange.offset += allocationOffset;
    8714  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
    8715 
    8716  break;
    8717  }
    8718 
    8719  default:
    8720  VMA_ASSERT(0);
    8721  }
    8722 
    8723  switch(op)
    8724  {
    8725  case VMA_CACHE_FLUSH:
    8726  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
    8727  break;
    8728  case VMA_CACHE_INVALIDATE:
    8729  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
    8730  break;
    8731  default:
    8732  VMA_ASSERT(0);
    8733  }
    8734  }
    8735  // else: Just ignore this call.
    8736 }
    8737 
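// A worked example of the rounding above, assuming nonCoherentAtomSize = 64:
// the caller flushes offset = 70, size = 60 of a block allocation that starts
// at block offset 256.
constexpr VkDeviceSize atom        = 64;
constexpr VkDeviceSize rangeOffset = 70 / atom * atom;                    // VmaAlignDown(70, 64) == 64
constexpr VkDeviceSize rangeSize   = (60 + (70 - rangeOffset) + atom - 1)
                                     / atom * atom;                       // VmaAlignUp(66, 64)   == 128
static_assert(rangeOffset == 64 && rangeSize == 128, "atom rounding");
// After adding the allocation's block offset, vkFlushMappedMemoryRanges receives
// offset 320 and size min(128, blockSize - 320): a superset of the requested
// bytes, aligned to nonCoherentAtomSize as the Vulkan spec requires.
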
    8738 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
    8739 {
    8740  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
    8741 
    8742  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    8743  {
    8744  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    8745  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    8746  VMA_ASSERT(pDedicatedAllocations);
    8747  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
    8748  VMA_ASSERT(success);
    8749  }
    8750 
    8751  VkDeviceMemory hMemory = allocation->GetMemory();
    8752 
    8753  if(allocation->GetMappedData() != VMA_NULL)
    8754  {
    8755  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    8756  }
    8757 
    8758  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
    8759 
    8760  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
    8761 }
    8762 
    8763 #if VMA_STATS_STRING_ENABLED
    8764 
    8765 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
    8766 {
    8767  bool dedicatedAllocationsStarted = false;
    8768  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    8769  {
    8770  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    8771  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    8772  VMA_ASSERT(pDedicatedAllocVector);
    8773  if(pDedicatedAllocVector->empty() == false)
    8774  {
    8775  if(dedicatedAllocationsStarted == false)
    8776  {
    8777  dedicatedAllocationsStarted = true;
    8778  json.WriteString("DedicatedAllocations");
    8779  json.BeginObject();
    8780  }
    8781 
    8782  json.BeginString("Type ");
    8783  json.ContinueString(memTypeIndex);
    8784  json.EndString();
    8785 
    8786  json.BeginArray();
    8787 
    8788  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
    8789  {
    8790  json.BeginObject(true);
    8791  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
    8792  hAlloc->PrintParameters(json);
    8793  json.EndObject();
    8794  }
    8795 
    8796  json.EndArray();
    8797  }
    8798  }
    8799  if(dedicatedAllocationsStarted)
    8800  {
    8801  json.EndObject();
    8802  }
    8803 
    8804  {
    8805  bool allocationsStarted = false;
    8806  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    8807  {
    8808  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
    8809  {
    8810  if(allocationsStarted == false)
    8811  {
    8812  allocationsStarted = true;
    8813  json.WriteString("DefaultPools");
    8814  json.BeginObject();
    8815  }
    8816 
    8817  json.BeginString("Type ");
    8818  json.ContinueString(memTypeIndex);
    8819  json.EndString();
    8820 
    8821  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
    8822  }
    8823  }
    8824  if(allocationsStarted)
    8825  {
    8826  json.EndObject();
    8827  }
    8828  }
    8829 
    8830  {
    8831  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    8832  const size_t poolCount = m_Pools.size();
    8833  if(poolCount > 0)
    8834  {
    8835  json.WriteString("Pools");
    8836  json.BeginObject();
    8837  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
    8838  {
    8839  json.BeginString();
    8840  json.ContinueString(m_Pools[poolIndex]->GetId());
    8841  json.EndString();
    8842 
    8843  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
    8844  }
    8845  json.EndObject();
    8846  }
    8847  }
    8848 }
    8849 
    8850 #endif // #if VMA_STATS_STRING_ENABLED
    8851 
    8852 static VkResult AllocateMemoryForImage(
    8853  VmaAllocator allocator,
    8854  VkImage image,
    8855  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    8856  VmaSuballocationType suballocType,
    8857  VmaAllocation* pAllocation)
    8858 {
    8859  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
    8860 
    8861  VkMemoryRequirements vkMemReq = {};
    8862  bool requiresDedicatedAllocation = false;
    8863  bool prefersDedicatedAllocation = false;
    8864  allocator->GetImageMemoryRequirements(image, vkMemReq,
    8865  requiresDedicatedAllocation, prefersDedicatedAllocation);
    8866 
    8867  return allocator->AllocateMemory(
    8868  vkMemReq,
    8869  requiresDedicatedAllocation,
    8870  prefersDedicatedAllocation,
    8871  VK_NULL_HANDLE, // dedicatedBuffer
    8872  image, // dedicatedImage
    8873  *pAllocationCreateInfo,
    8874  suballocType,
    8875  pAllocation);
    8876 }
    8877 
8878 ////////////////////////////////////////////////////////////////////////////////
8879 // Public interface
    8880 
    8881 VkResult vmaCreateAllocator(
    8882  const VmaAllocatorCreateInfo* pCreateInfo,
    8883  VmaAllocator* pAllocator)
    8884 {
    8885  VMA_ASSERT(pCreateInfo && pAllocator);
    8886  VMA_DEBUG_LOG("vmaCreateAllocator");
    8887  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    8888  return VK_SUCCESS;
    8889 }
    8890 
    8891 void vmaDestroyAllocator(
    8892  VmaAllocator allocator)
    8893 {
    8894  if(allocator != VK_NULL_HANDLE)
    8895  {
    8896  VMA_DEBUG_LOG("vmaDestroyAllocator");
    8897  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
    8898  vma_delete(&allocationCallbacks, allocator);
    8899  }
    8900 }
    8901 
8902 void vmaGetPhysicalDeviceProperties(
8903  VmaAllocator allocator,
    8904  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    8905 {
    8906  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    8907  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    8908 }
    8909 
8910 void vmaGetMemoryProperties(
8911  VmaAllocator allocator,
    8912  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    8913 {
    8914  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    8915  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    8916 }
    8917 
8918 void vmaGetMemoryTypeProperties(
8919  VmaAllocator allocator,
    8920  uint32_t memoryTypeIndex,
    8921  VkMemoryPropertyFlags* pFlags)
    8922 {
    8923  VMA_ASSERT(allocator && pFlags);
    8924  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    8925  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    8926 }
    8927 
8928 void vmaSetCurrentFrameIndex(
8929  VmaAllocator allocator,
    8930  uint32_t frameIndex)
    8931 {
    8932  VMA_ASSERT(allocator);
    8933  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    8934 
    8935  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8936 
    8937  allocator->SetCurrentFrameIndex(frameIndex);
    8938 }
    8939 
    8940 void vmaCalculateStats(
    8941  VmaAllocator allocator,
    8942  VmaStats* pStats)
    8943 {
    8944  VMA_ASSERT(allocator && pStats);
    8945  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8946  allocator->CalculateStats(pStats);
    8947 }
    8948 
    8949 #if VMA_STATS_STRING_ENABLED
    8950 
    8951 void vmaBuildStatsString(
    8952  VmaAllocator allocator,
    8953  char** ppStatsString,
    8954  VkBool32 detailedMap)
    8955 {
    8956  VMA_ASSERT(allocator && ppStatsString);
    8957  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8958 
    8959  VmaStringBuilder sb(allocator);
    8960  {
    8961  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
    8962  json.BeginObject();
    8963 
    8964  VmaStats stats;
    8965  allocator->CalculateStats(&stats);
    8966 
    8967  json.WriteString("Total");
    8968  VmaPrintStatInfo(json, stats.total);
    8969 
    8970  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
    8971  {
    8972  json.BeginString("Heap ");
    8973  json.ContinueString(heapIndex);
    8974  json.EndString();
    8975  json.BeginObject();
    8976 
    8977  json.WriteString("Size");
    8978  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
    8979 
    8980  json.WriteString("Flags");
    8981  json.BeginArray(true);
    8982  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
    8983  {
    8984  json.WriteString("DEVICE_LOCAL");
    8985  }
    8986  json.EndArray();
    8987 
    8988  if(stats.memoryHeap[heapIndex].blockCount > 0)
    8989  {
    8990  json.WriteString("Stats");
    8991  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
    8992  }
    8993 
    8994  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
    8995  {
    8996  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
    8997  {
    8998  json.BeginString("Type ");
    8999  json.ContinueString(typeIndex);
    9000  json.EndString();
    9001 
    9002  json.BeginObject();
    9003 
    9004  json.WriteString("Flags");
    9005  json.BeginArray(true);
    9006  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
    9007  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
    9008  {
    9009  json.WriteString("DEVICE_LOCAL");
    9010  }
    9011  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    9012  {
    9013  json.WriteString("HOST_VISIBLE");
    9014  }
    9015  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
    9016  {
    9017  json.WriteString("HOST_COHERENT");
    9018  }
    9019  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
    9020  {
    9021  json.WriteString("HOST_CACHED");
    9022  }
    9023  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
    9024  {
    9025  json.WriteString("LAZILY_ALLOCATED");
    9026  }
    9027  json.EndArray();
    9028 
    9029  if(stats.memoryType[typeIndex].blockCount > 0)
    9030  {
    9031  json.WriteString("Stats");
    9032  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
    9033  }
    9034 
    9035  json.EndObject();
    9036  }
    9037  }
    9038 
    9039  json.EndObject();
    9040  }
    9041  if(detailedMap == VK_TRUE)
    9042  {
    9043  allocator->PrintDetailedMap(json);
    9044  }
    9045 
    9046  json.EndObject();
    9047  }
    9048 
    9049  const size_t len = sb.GetLength();
    9050  char* const pChars = vma_new_array(allocator, char, len + 1);
    9051  if(len > 0)
    9052  {
    9053  memcpy(pChars, sb.GetData(), len);
    9054  }
    9055  pChars[len] = '\0';
    9056  *ppStatsString = pChars;
    9057 }
    9058 
    9059 void vmaFreeStatsString(
    9060  VmaAllocator allocator,
    9061  char* pStatsString)
    9062 {
    9063  if(pStatsString != VMA_NULL)
    9064  {
    9065  VMA_ASSERT(allocator);
    9066  size_t len = strlen(pStatsString);
    9067  vma_delete_array(allocator, pStatsString, len + 1);
    9068  }
    9069 }
    9070 
    9071 #endif // #if VMA_STATS_STRING_ENABLED
    9072 
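// A minimal round-trip for the JSON produced above, as a sketch (requires
// <cstdio>; only available when VMA_STATS_STRING_ENABLED is 1):
char* statsString = VMA_NULL;
vmaBuildStatsString(allocator, &statsString, VK_TRUE /*detailedMap*/);
printf("%s\n", statsString);
vmaFreeStatsString(allocator, statsString);
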
    9073 /*
    9074 This function is not protected by any mutex because it just reads immutable data.
    9075 */
    9076 VkResult vmaFindMemoryTypeIndex(
    9077  VmaAllocator allocator,
    9078  uint32_t memoryTypeBits,
    9079  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    9080  uint32_t* pMemoryTypeIndex)
    9081 {
    9082  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    9083  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    9084  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    9085 
    9086  if(pAllocationCreateInfo->memoryTypeBits != 0)
    9087  {
    9088  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
    9089  }
    9090 
    9091  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    9092  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    9093 
    9094  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    9095  if(mapped)
    9096  {
    9097  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    9098  }
    9099 
    9100  // Convert usage to requiredFlags and preferredFlags.
    9101  switch(pAllocationCreateInfo->usage)
    9102  {
9103  case VMA_MEMORY_USAGE_UNKNOWN:
9104  break;
9105  case VMA_MEMORY_USAGE_GPU_ONLY:
    9106  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    9107  {
    9108  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    9109  }
    9110  break;
9111  case VMA_MEMORY_USAGE_CPU_ONLY:
9112  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    9113  break;
9114  case VMA_MEMORY_USAGE_CPU_TO_GPU:
9115  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    9116  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    9117  {
    9118  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    9119  }
    9120  break;
9121  case VMA_MEMORY_USAGE_GPU_TO_CPU:
9122  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    9123  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    9124  break;
    9125  default:
    9126  break;
    9127  }
    9128 
    9129  *pMemoryTypeIndex = UINT32_MAX;
    9130  uint32_t minCost = UINT32_MAX;
    9131  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    9132  memTypeIndex < allocator->GetMemoryTypeCount();
    9133  ++memTypeIndex, memTypeBit <<= 1)
    9134  {
    9135  // This memory type is acceptable according to memoryTypeBits bitmask.
    9136  if((memTypeBit & memoryTypeBits) != 0)
    9137  {
    9138  const VkMemoryPropertyFlags currFlags =
    9139  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    9140  // This memory type contains requiredFlags.
    9141  if((requiredFlags & ~currFlags) == 0)
    9142  {
    9143  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    9144  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
    9145  // Remember memory type with lowest cost.
    9146  if(currCost < minCost)
    9147  {
    9148  *pMemoryTypeIndex = memTypeIndex;
    9149  if(currCost == 0)
    9150  {
    9151  return VK_SUCCESS;
    9152  }
    9153  minCost = currCost;
    9154  }
    9155  }
    9156  }
    9157  }
    9158  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    9159 }
    9160 
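// A sketch of the cost function above resolving a staging-buffer memory type;
// in real code memoryTypeBits comes from vkGetBufferMemoryRequirements(), and
// UINT32_MAX here accepts every type.
VmaAllocationCreateInfo stagingCreateInfo = {};
stagingCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY; // requires HOST_VISIBLE + HOST_COHERENT

uint32_t memTypeIndex = UINT32_MAX;
VkResult res = vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &stagingCreateInfo, &memTypeIndex);
// VK_ERROR_FEATURE_NOT_PRESENT is returned only when no type carries all requiredFlags.
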
9161 VkResult vmaFindMemoryTypeIndexForBufferInfo(
9162  VmaAllocator allocator,
    9163  const VkBufferCreateInfo* pBufferCreateInfo,
    9164  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    9165  uint32_t* pMemoryTypeIndex)
    9166 {
    9167  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    9168  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    9169  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    9170  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    9171 
    9172  const VkDevice hDev = allocator->m_hDevice;
    9173  VkBuffer hBuffer = VK_NULL_HANDLE;
    9174  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
    9175  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
    9176  if(res == VK_SUCCESS)
    9177  {
    9178  VkMemoryRequirements memReq = {};
    9179  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
    9180  hDev, hBuffer, &memReq);
    9181 
    9182  res = vmaFindMemoryTypeIndex(
    9183  allocator,
    9184  memReq.memoryTypeBits,
    9185  pAllocationCreateInfo,
    9186  pMemoryTypeIndex);
    9187 
    9188  allocator->GetVulkanFunctions().vkDestroyBuffer(
    9189  hDev, hBuffer, allocator->GetAllocationCallbacks());
    9190  }
    9191  return res;
    9192 }
    9193 
9194 VkResult vmaFindMemoryTypeIndexForImageInfo(
9195  VmaAllocator allocator,
    9196  const VkImageCreateInfo* pImageCreateInfo,
    9197  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    9198  uint32_t* pMemoryTypeIndex)
    9199 {
    9200  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    9201  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    9202  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    9203  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    9204 
    9205  const VkDevice hDev = allocator->m_hDevice;
    9206  VkImage hImage = VK_NULL_HANDLE;
    9207  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
    9208  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
    9209  if(res == VK_SUCCESS)
    9210  {
    9211  VkMemoryRequirements memReq = {};
    9212  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
    9213  hDev, hImage, &memReq);
    9214 
    9215  res = vmaFindMemoryTypeIndex(
    9216  allocator,
    9217  memReq.memoryTypeBits,
    9218  pAllocationCreateInfo,
    9219  pMemoryTypeIndex);
    9220 
    9221  allocator->GetVulkanFunctions().vkDestroyImage(
    9222  hDev, hImage, allocator->GetAllocationCallbacks());
    9223  }
    9224  return res;
    9225 }
    9226 
    9227 VkResult vmaCreatePool(
    9228  VmaAllocator allocator,
    9229  const VmaPoolCreateInfo* pCreateInfo,
    9230  VmaPool* pPool)
    9231 {
    9232  VMA_ASSERT(allocator && pCreateInfo && pPool);
    9233 
    9234  VMA_DEBUG_LOG("vmaCreatePool");
    9235 
    9236  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    9237 
    9238  return allocator->CreatePool(pCreateInfo, pPool);
    9239 }
    9240 
    9241 void vmaDestroyPool(
    9242  VmaAllocator allocator,
    9243  VmaPool pool)
    9244 {
    9245  VMA_ASSERT(allocator);
    9246 
    9247  if(pool == VK_NULL_HANDLE)
    9248  {
    9249  return;
    9250  }
    9251 
    9252  VMA_DEBUG_LOG("vmaDestroyPool");
    9253 
    9254  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    9255 
    9256  allocator->DestroyPool(pool);
    9257 }
    9258 
    9259 void vmaGetPoolStats(
    9260  VmaAllocator allocator,
    9261  VmaPool pool,
    9262  VmaPoolStats* pPoolStats)
    9263 {
    9264  VMA_ASSERT(allocator && pool && pPoolStats);
    9265 
    9266  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    9267 
    9268  allocator->GetPoolStats(pool, pPoolStats);
    9269 }
    9270 
9271 void vmaMakePoolAllocationsLost(
9272  VmaAllocator allocator,
    9273  VmaPool pool,
    9274  size_t* pLostAllocationCount)
    9275 {
    9276  VMA_ASSERT(allocator && pool);
    9277 
    9278  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    9279 
    9280  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    9281 }
    9282 
    9283 VkResult vmaAllocateMemory(
    9284  VmaAllocator allocator,
    9285  const VkMemoryRequirements* pVkMemoryRequirements,
    9286  const VmaAllocationCreateInfo* pCreateInfo,
    9287  VmaAllocation* pAllocation,
    9288  VmaAllocationInfo* pAllocationInfo)
    9289 {
    9290  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
    9291 
    9292  VMA_DEBUG_LOG("vmaAllocateMemory");
    9293 
    9294  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    9295 
    9296  VkResult result = allocator->AllocateMemory(
    9297  *pVkMemoryRequirements,
    9298  false, // requiresDedicatedAllocation
    9299  false, // prefersDedicatedAllocation
    9300  VK_NULL_HANDLE, // dedicatedBuffer
    9301  VK_NULL_HANDLE, // dedicatedImage
    9302  *pCreateInfo,
    9303  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    9304  pAllocation);
    9305 
    9306  if(pAllocationInfo && result == VK_SUCCESS)
    9307  {
    9308  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    9309  }
    9310 
    9311  return result;
    9312 }
    9313 
9314 VkResult vmaAllocateMemoryForBuffer(
9315  VmaAllocator allocator,
    9316  VkBuffer buffer,
    9317  const VmaAllocationCreateInfo* pCreateInfo,
    9318  VmaAllocation* pAllocation,
    9319  VmaAllocationInfo* pAllocationInfo)
    9320 {
    9321  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    9322 
    9323  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    9324 
    9325  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    9326 
    9327  VkMemoryRequirements vkMemReq = {};
    9328  bool requiresDedicatedAllocation = false;
    9329  bool prefersDedicatedAllocation = false;
    9330  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
    9331  requiresDedicatedAllocation,
    9332  prefersDedicatedAllocation);
    9333 
    9334  VkResult result = allocator->AllocateMemory(
    9335  vkMemReq,
    9336  requiresDedicatedAllocation,
    9337  prefersDedicatedAllocation,
    9338  buffer, // dedicatedBuffer
    9339  VK_NULL_HANDLE, // dedicatedImage
    9340  *pCreateInfo,
    9341  VMA_SUBALLOCATION_TYPE_BUFFER,
    9342  pAllocation);
    9343 
    9344  if(pAllocationInfo && result == VK_SUCCESS)
    9345  {
    9346  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    9347  }
    9348 
    9349  return result;
    9350 }
    9351 
    9352 VkResult vmaAllocateMemoryForImage(
    9353  VmaAllocator allocator,
    9354  VkImage image,
    9355  const VmaAllocationCreateInfo* pCreateInfo,
    9356  VmaAllocation* pAllocation,
    9357  VmaAllocationInfo* pAllocationInfo)
    9358 {
    9359  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    9360 
    9361  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
    9362 
    9363  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    9364 
    9365  VkResult result = AllocateMemoryForImage(
    9366  allocator,
    9367  image,
    9368  pCreateInfo,
    9369  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
    9370  pAllocation);
    9371 
    9372  if(pAllocationInfo && result == VK_SUCCESS)
    9373  {
    9374  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    9375  }
    9376 
    9377  return result;
    9378 }
    9379 
    9380 void vmaFreeMemory(
    9381  VmaAllocator allocator,
    9382  VmaAllocation allocation)
    9383 {
    9384  VMA_ASSERT(allocator);
    9385  VMA_DEBUG_LOG("vmaFreeMemory");
    9386  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    9387  if(allocation != VK_NULL_HANDLE)
    9388  {
    9389  allocator->FreeMemory(allocation);
    9390  }
    9391 }
    9392 
9393 void vmaGetAllocationInfo(
9394  VmaAllocator allocator,
    9395  VmaAllocation allocation,
    9396  VmaAllocationInfo* pAllocationInfo)
    9397 {
    9398  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    9399 
    9400  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    9401 
    9402  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    9403 }
    9404 
    9405 VkBool32 vmaTouchAllocation(
    9406  VmaAllocator allocator,
    9407  VmaAllocation allocation)
    9408 {
    9409  VMA_ASSERT(allocator && allocation);
    9410 
    9411  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    9412 
    9413  return allocator->TouchAllocation(allocation);
    9414 }
    9415 
9416 void vmaSetAllocationUserData(
9417  VmaAllocator allocator,
    9418  VmaAllocation allocation,
    9419  void* pUserData)
    9420 {
    9421  VMA_ASSERT(allocator && allocation);
    9422 
    9423  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    9424 
    9425  allocation->SetUserData(allocator, pUserData);
    9426 }
    9427 
9428 void vmaCreateLostAllocation(
9429  VmaAllocator allocator,
    9430  VmaAllocation* pAllocation)
    9431 {
    9432  VMA_ASSERT(allocator && pAllocation);
    9433 
9434  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    9435 
    9436  allocator->CreateLostAllocation(pAllocation);
    9437 }
    9438 
    9439 VkResult vmaMapMemory(
    9440  VmaAllocator allocator,
    9441  VmaAllocation allocation,
    9442  void** ppData)
    9443 {
    9444  VMA_ASSERT(allocator && allocation && ppData);
    9445 
    9446  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    9447 
    9448  return allocator->Map(allocation, ppData);
    9449 }
    9450 
    9451 void vmaUnmapMemory(
    9452  VmaAllocator allocator,
    9453  VmaAllocation allocation)
    9454 {
    9455  VMA_ASSERT(allocator && allocation);
    9456 
    9457  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    9458 
    9459  allocator->Unmap(allocation);
    9460 }
    9461 
    9462 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    9463 {
    9464  VMA_ASSERT(allocator && allocation);
    9465 
    9466  VMA_DEBUG_LOG("vmaFlushAllocation");
    9467 
    9468  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    9469 
    9470  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
    9471 }
    9472 
    9473 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    9474 {
    9475  VMA_ASSERT(allocator && allocation);
    9476 
    9477  VMA_DEBUG_LOG("vmaInvalidateAllocation");
    9478 
    9479  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    9480 
    9481  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
    9482 }
    9483 
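// The intended non-coherent workflow for the two new functions, as a sketch
// (`srcData`, `srcSize`, `dstData`, `dstSize` assumed). Both calls reduce to
// no-ops on HOST_COHERENT memory types, so they are safe to call unconditionally.
void* pData = VMA_NULL;
if(vmaMapMemory(allocator, allocation, &pData) == VK_SUCCESS)
{
    memcpy(pData, srcData, srcSize);                                  // CPU writes
    vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);      // make writes visible to GPU
    // ... submit and wait for GPU work that writes the buffer ...
    vmaInvalidateAllocation(allocator, allocation, 0, VK_WHOLE_SIZE); // make GPU writes visible to CPU
    memcpy(dstData, pData, dstSize);                                  // CPU reads
    vmaUnmapMemory(allocator, allocation);
}
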
    9484 VkResult vmaDefragment(
    9485  VmaAllocator allocator,
    9486  VmaAllocation* pAllocations,
    9487  size_t allocationCount,
    9488  VkBool32* pAllocationsChanged,
    9489  const VmaDefragmentationInfo *pDefragmentationInfo,
    9490  VmaDefragmentationStats* pDefragmentationStats)
    9491 {
    9492  VMA_ASSERT(allocator && pAllocations);
    9493 
    9494  VMA_DEBUG_LOG("vmaDefragment");
    9495 
    9496  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    9497 
    9498  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
    9499 }
    9500 
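// A minimal sketch of driving the defragmentation implemented above; the
// allocations must not be in use by the GPU while this runs. `allocations` and
// `allocCount` are assumed; requires <vector>.
VmaDefragmentationInfo defragInfo = {};
defragInfo.maxBytesToMove = VK_WHOLE_SIZE;    // no byte budget
defragInfo.maxAllocationsToMove = UINT32_MAX; // no count budget

std::vector<VkBool32> changed(allocCount, VK_FALSE);
VmaDefragmentationStats stats = {};
vmaDefragment(allocator, allocations, allocCount, changed.data(), &defragInfo, &stats);
// For every changed[i] == VK_TRUE, destroy and recreate the buffer/image that
// was bound to allocations[i], then re-upload its contents.
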
    9501 VkResult vmaBindBufferMemory(
    9502  VmaAllocator allocator,
    9503  VmaAllocation allocation,
    9504  VkBuffer buffer)
    9505 {
    9506  VMA_ASSERT(allocator && allocation && buffer);
    9507 
    9508  VMA_DEBUG_LOG("vmaBindBufferMemory");
    9509 
    9510  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    9511 
    9512  return allocator->BindBufferMemory(allocation, buffer);
    9513 }
    9514 
    9515 VkResult vmaBindImageMemory(
    9516  VmaAllocator allocator,
    9517  VmaAllocation allocation,
    9518  VkImage image)
    9519 {
    9520  VMA_ASSERT(allocator && allocation && image);
    9521 
    9522  VMA_DEBUG_LOG("vmaBindImageMemory");
    9523 
    9524  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    9525 
    9526  return allocator->BindImageMemory(allocation, image);
    9527 }
    9528 
    9529 VkResult vmaCreateBuffer(
    9530  VmaAllocator allocator,
    9531  const VkBufferCreateInfo* pBufferCreateInfo,
    9532  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    9533  VkBuffer* pBuffer,
    9534  VmaAllocation* pAllocation,
    9535  VmaAllocationInfo* pAllocationInfo)
    9536 {
    9537  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
    9538 
    9539  VMA_DEBUG_LOG("vmaCreateBuffer");
    9540 
    9541  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    9542 
    9543  *pBuffer = VK_NULL_HANDLE;
    9544  *pAllocation = VK_NULL_HANDLE;
    9545 
    9546  // 1. Create VkBuffer.
    9547  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
    9548  allocator->m_hDevice,
    9549  pBufferCreateInfo,
    9550  allocator->GetAllocationCallbacks(),
    9551  pBuffer);
    9552  if(res >= 0)
    9553  {
    9554  // 2. vkGetBufferMemoryRequirements.
    9555  VkMemoryRequirements vkMemReq = {};
    9556  bool requiresDedicatedAllocation = false;
    9557  bool prefersDedicatedAllocation = false;
    9558  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
    9559  requiresDedicatedAllocation, prefersDedicatedAllocation);
    9560 
    9561  // Make sure alignment requirements for specific buffer usages reported
    9562  // in Physical Device Properties are included in alignment reported by memory requirements.
    9563  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
    9564  {
    9565  VMA_ASSERT(vkMemReq.alignment %
    9566  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
    9567  }
    9568  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
    9569  {
    9570  VMA_ASSERT(vkMemReq.alignment %
    9571  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
    9572  }
    9573  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
    9574  {
    9575  VMA_ASSERT(vkMemReq.alignment %
    9576  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
    9577  }
    9578 
    9579  // 3. Allocate memory using allocator.
    9580  res = allocator->AllocateMemory(
    9581  vkMemReq,
    9582  requiresDedicatedAllocation,
    9583  prefersDedicatedAllocation,
    9584  *pBuffer, // dedicatedBuffer
    9585  VK_NULL_HANDLE, // dedicatedImage
    9586  *pAllocationCreateInfo,
    9587  VMA_SUBALLOCATION_TYPE_BUFFER,
    9588  pAllocation);
    9589  if(res >= 0)
    9590  {
9591  // 4. Bind buffer with memory.
    9592  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
    9593  if(res >= 0)
    9594  {
    9595  // All steps succeeded.
    9596  #if VMA_STATS_STRING_ENABLED
    9597  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
    9598  #endif
    9599  if(pAllocationInfo != VMA_NULL)
    9600  {
    9601  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    9602  }
    9603  return VK_SUCCESS;
    9604  }
    9605  allocator->FreeMemory(*pAllocation);
    9606  *pAllocation = VK_NULL_HANDLE;
    9607  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    9608  *pBuffer = VK_NULL_HANDLE;
    9609  return res;
    9610  }
    9611  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    9612  *pBuffer = VK_NULL_HANDLE;
    9613  return res;
    9614  }
    9615  return res;
    9616 }
    9617 
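// A sketch tying steps 1-4 above together: one call that creates a persistently
// mapped staging buffer (`srcData` assumed).
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT; // persistently mapped

VkBuffer buffer = VK_NULL_HANDLE;
VmaAllocation allocation = VK_NULL_HANDLE;
VmaAllocationInfo allocInfo = {};
if(vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
    &buffer, &allocation, &allocInfo) == VK_SUCCESS)
{
    // allocInfo.pMappedData remains valid for the allocation's lifetime.
    memcpy(allocInfo.pMappedData, srcData, (size_t)bufCreateInfo.size);
    vmaDestroyBuffer(allocator, buffer, allocation);
}
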
    9618 void vmaDestroyBuffer(
    9619  VmaAllocator allocator,
    9620  VkBuffer buffer,
    9621  VmaAllocation allocation)
    9622 {
    9623  VMA_ASSERT(allocator);
    9624  VMA_DEBUG_LOG("vmaDestroyBuffer");
    9625  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    9626  if(buffer != VK_NULL_HANDLE)
    9627  {
    9628  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    9629  }
    9630  if(allocation != VK_NULL_HANDLE)
    9631  {
    9632  allocator->FreeMemory(allocation);
    9633  }
    9634 }
    9635 
    9636 VkResult vmaCreateImage(
    9637  VmaAllocator allocator,
    9638  const VkImageCreateInfo* pImageCreateInfo,
    9639  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    9640  VkImage* pImage,
    9641  VmaAllocation* pAllocation,
    9642  VmaAllocationInfo* pAllocationInfo)
    9643 {
    9644  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
    9645 
    9646  VMA_DEBUG_LOG("vmaCreateImage");
    9647 
    9648  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    9649 
    9650  *pImage = VK_NULL_HANDLE;
    9651  *pAllocation = VK_NULL_HANDLE;
    9652 
    9653  // 1. Create VkImage.
    9654  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
    9655  allocator->m_hDevice,
    9656  pImageCreateInfo,
    9657  allocator->GetAllocationCallbacks(),
    9658  pImage);
    9659  if(res >= 0)
    9660  {
    9661  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
    9662  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
    9663  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
    9664 
    9665  // 2. Allocate memory using allocator.
    9666  res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
    9667  if(res >= 0)
    9668  {
    9669  // 3. Bind image with memory.
    9670  res = allocator->BindImageMemory(*pAllocation, *pImage);
    9671  if(res >= 0)
    9672  {
    9673  // All steps succeeded.
    9674  #if VMA_STATS_STRING_ENABLED
    9675  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
    9676  #endif
    9677  if(pAllocationInfo != VMA_NULL)
    9678  {
    9679  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    9680  }
    9681  return VK_SUCCESS;
    9682  }
    9683  allocator->FreeMemory(*pAllocation);
    9684  *pAllocation = VK_NULL_HANDLE;
    9685  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    9686  *pImage = VK_NULL_HANDLE;
    9687  return res;
    9688  }
    9689  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    9690  *pImage = VK_NULL_HANDLE;
    9691  return res;
    9692  }
    9693  return res;
    9694 }
    9695 
    9696 void vmaDestroyImage(
    9697  VmaAllocator allocator,
    9698  VkImage image,
    9699  VmaAllocation allocation)
    9700 {
    9701  VMA_ASSERT(allocator);
    9702  VMA_DEBUG_LOG("vmaDestroyImage");
    9703  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    9704  if(image != VK_NULL_HANDLE)
    9705  {
    9706  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    9707  }
    9708  if(allocation != VK_NULL_HANDLE)
    9709  {
    9710  allocator->FreeMemory(allocation);
    9711  }
    9712 }
    9713 
    9714 #endif // #ifdef VMA_IMPLEMENTATION
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
    Definition: vk_mem_alloc.h:1171
    +
    Set this flag if the allocation should have its own memory block.
    Definition: vk_mem_alloc.h:1437
    void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
    Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
    -
    VkPhysicalDevice physicalDevice
    Vulkan physical device.
    Definition: vk_mem_alloc.h:1206
    +
    VkPhysicalDevice physicalDevice
    Vulkan physical device.
    Definition: vk_mem_alloc.h:1200
    VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
    Compacts memory by moving allocations.
    +
    void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    Invalidates memory of given allocation.
    Represents single memory allocation.
    -
    PFN_vkCreateBuffer vkCreateBuffer
    Definition: vk_mem_alloc.h:1189
    +
    PFN_vkCreateBuffer vkCreateBuffer
    Definition: vk_mem_alloc.h:1183
    void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
    struct VmaStats VmaStats
    General statistics from current state of Allocator.
    -
    Definition: vk_mem_alloc.h:1400
    -
    PFN_vkMapMemory vkMapMemory
    Definition: vk_mem_alloc.h:1183
    -
    VkDeviceMemory deviceMemory
    Handle to Vulkan memory object.
    Definition: vk_mem_alloc.h:1773
    -
    VmaAllocatorCreateFlags flags
    Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1203
    -
    uint32_t maxAllocationsToMove
    Maximum number of allocations that can be moved to different place.
    Definition: vk_mem_alloc.h:1972
    -
    Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
    Definition: vk_mem_alloc.h:1619
    +
    Definition: vk_mem_alloc.h:1394
    +
    PFN_vkMapMemory vkMapMemory
    Definition: vk_mem_alloc.h:1175
    +
    VkDeviceMemory deviceMemory
    Handle to Vulkan memory object.
    Definition: vk_mem_alloc.h:1767
    +
    VmaAllocatorCreateFlags flags
    Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1197
    +
    uint32_t maxAllocationsToMove
    Maximum number of allocations that can be moved to different place.
    Definition: vk_mem_alloc.h:1994
    +
    Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
    Definition: vk_mem_alloc.h:1613
    void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
    Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
    -
    VkDeviceSize size
    Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    Definition: vk_mem_alloc.h:1673
    -
    Definition: vk_mem_alloc.h:1480
    -
    VkFlags VmaAllocatorCreateFlags
    Definition: vk_mem_alloc.h:1172
    -
    VkMemoryPropertyFlags preferredFlags
    Flags that preferably should be set in a memory type chosen for an allocation.
    Definition: vk_mem_alloc.h:1518
    -
    Definition: vk_mem_alloc.h:1427
    -
    const VkAllocationCallbacks * pAllocationCallbacks
    Custom CPU memory allocation callbacks. Optional.
    Definition: vk_mem_alloc.h:1215
    +
    VkDeviceSize size
    Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    Definition: vk_mem_alloc.h:1667
    +
    Definition: vk_mem_alloc.h:1474
    +
    VkFlags VmaAllocatorCreateFlags
    Definition: vk_mem_alloc.h:1164
    +
    VkMemoryPropertyFlags preferredFlags
    Flags that preferably should be set in a memory type chosen for an allocation.
    Definition: vk_mem_alloc.h:1512
    +
    Definition: vk_mem_alloc.h:1421
    +
    const VkAllocationCallbacks * pAllocationCallbacks
    Custom CPU memory allocation callbacks. Optional.
    Definition: vk_mem_alloc.h:1209
    void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
    Retrieves statistics from current state of the Allocator.
    -
    const VmaVulkanFunctions * pVulkanFunctions
    Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
    Definition: vk_mem_alloc.h:1268
    -
    Description of a Allocator to be created.
    Definition: vk_mem_alloc.h:1200
    +
    const VmaVulkanFunctions * pVulkanFunctions
    Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
    Definition: vk_mem_alloc.h:1262
    +
    Description of a Allocator to be created.
    Definition: vk_mem_alloc.h:1194
    void vmaDestroyAllocator(VmaAllocator allocator)
    Destroys allocator object.
    -
    VmaAllocationCreateFlagBits
    Flags to be passed as VmaAllocationCreateInfo::flags.
    Definition: vk_mem_alloc.h:1431
    +
    VmaAllocationCreateFlagBits
    Flags to be passed as VmaAllocationCreateInfo::flags.
    Definition: vk_mem_alloc.h:1425
    void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
    Returns current information about specified allocation and atomically marks it as used in current fra...
    -
    VkDeviceSize allocationSizeMax
    Definition: vk_mem_alloc.h:1333
    -
    PFN_vkBindImageMemory vkBindImageMemory
    Definition: vk_mem_alloc.h:1186
    -
    VkDeviceSize unusedBytes
    Total number of bytes occupied by unused ranges.
    Definition: vk_mem_alloc.h:1332
    -
    Statistics returned by function vmaDefragment().
    Definition: vk_mem_alloc.h:1976
    +
    VkDeviceSize allocationSizeMax
    Definition: vk_mem_alloc.h:1327
    +
    PFN_vkBindImageMemory vkBindImageMemory
    Definition: vk_mem_alloc.h:1180
    +
    VkDeviceSize unusedBytes
    Total number of bytes occupied by unused ranges.
    Definition: vk_mem_alloc.h:1326
    +
    Statistics returned by function vmaDefragment().
    Definition: vk_mem_alloc.h:1998
    void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
    Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
    -
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:1232
    -
    VmaStatInfo total
    Definition: vk_mem_alloc.h:1342
    -
    uint32_t deviceMemoryBlocksFreed
    Number of empty VkDeviceMemory objects that have been released to the system.
    Definition: vk_mem_alloc.h:1984
    -
    VmaAllocationCreateFlags flags
    Use VmaAllocationCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1502
    -
    VkDeviceSize maxBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:1967
    -
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
    Definition: vk_mem_alloc.h:1187
    -
    void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called after successful vkAllocateMemory.
    Definition: vk_mem_alloc.h:1114
    +
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:1226
    +
    VmaStatInfo total
    Definition: vk_mem_alloc.h:1336
    +
    uint32_t deviceMemoryBlocksFreed
    Number of empty VkDeviceMemory objects that have been released to the system.
    Definition: vk_mem_alloc.h:2006
    +
    VmaAllocationCreateFlags flags
    Use VmaAllocationCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1496
    +
    VkDeviceSize maxBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:1989
    +
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
    Definition: vk_mem_alloc.h:1181
    +
    void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called after successful vkAllocateMemory.
    Definition: vk_mem_alloc.h:1106
    Represents main object of this library initialized.
    -
    VkDevice device
    Vulkan device.
    Definition: vk_mem_alloc.h:1209
    +
    VkDevice device
    Vulkan device.
    Definition: vk_mem_alloc.h:1203
    VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
    Binds buffer to allocation.
    -
    Describes parameter of created VmaPool.
    Definition: vk_mem_alloc.h:1627
    -
    Definition: vk_mem_alloc.h:1621
    -
    VkDeviceSize size
    Size of this allocation, in bytes.
    Definition: vk_mem_alloc.h:1783
    +
    Describes parameter of created VmaPool.
    Definition: vk_mem_alloc.h:1621
    +
    Definition: vk_mem_alloc.h:1615
    +
    VkDeviceSize size
    Size of this allocation, in bytes.
    Definition: vk_mem_alloc.h:1777
    void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
    Given Memory Type Index, returns Property Flags of this memory type.
    -
    PFN_vkUnmapMemory vkUnmapMemory
    Definition: vk_mem_alloc.h:1184
    -
    void * pUserData
    Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
    Definition: vk_mem_alloc.h:1539
    -
    size_t minBlockCount
    Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    Definition: vk_mem_alloc.h:1643
    -
    size_t allocationCount
    Number of VmaAllocation objects created from this pool that were not destroyed or lost...
    Definition: vk_mem_alloc.h:1679
    +
    PFN_vkUnmapMemory vkUnmapMemory
    Definition: vk_mem_alloc.h:1176
    +
    void * pUserData
    Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
    Definition: vk_mem_alloc.h:1533
    +
    size_t minBlockCount
    Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    Definition: vk_mem_alloc.h:1637
    +
    size_t allocationCount
    Number of VmaAllocation objects created from this pool that were not destroyed or lost...
    Definition: vk_mem_alloc.h:1673
    struct VmaVulkanFunctions VmaVulkanFunctions
    Pointers to some Vulkan functions - a subset used by the library.
    -
    Definition: vk_mem_alloc.h:1170
    -
    uint32_t memoryTypeIndex
    Vulkan memory type index to allocate this pool from.
    Definition: vk_mem_alloc.h:1630
    +
    Definition: vk_mem_alloc.h:1162
    +
    uint32_t memoryTypeIndex
    Vulkan memory type index to allocate this pool from.
    Definition: vk_mem_alloc.h:1624
    VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
    Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
    -
    VmaMemoryUsage
    Definition: vk_mem_alloc.h:1378
    +
    VmaMemoryUsage
    Definition: vk_mem_alloc.h:1372
    struct VmaAllocationInfo VmaAllocationInfo
    Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
    -
    Optional configuration parameters to be passed to function vmaDefragment().
    Definition: vk_mem_alloc.h:1962
    +
    void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    Flushes memory of given allocation.
    +
    Optional configuration parameters to be passed to function vmaDefragment().
    Definition: vk_mem_alloc.h:1984
    struct VmaPoolCreateInfo VmaPoolCreateInfo
    Describes parameter of created VmaPool.
    void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
    Destroys VmaPool object and frees Vulkan device memory.
    -
    VkDeviceSize bytesFreed
    Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
    Definition: vk_mem_alloc.h:1980
    -
    Definition: vk_mem_alloc.h:1417
    -
    uint32_t memoryTypeBits
    Bitmask containing one bit set for every memory type acceptable for this allocation.
    Definition: vk_mem_alloc.h:1526
    -
    PFN_vkBindBufferMemory vkBindBufferMemory
    Definition: vk_mem_alloc.h:1185
    +
    VkDeviceSize bytesFreed
    Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
    Definition: vk_mem_alloc.h:2002
    +
    Definition: vk_mem_alloc.h:1411
    +
    uint32_t memoryTypeBits
    Bitmask containing one bit set for every memory type acceptable for this allocation.
    Definition: vk_mem_alloc.h:1520
    +
    PFN_vkBindBufferMemory vkBindBufferMemory
    Definition: vk_mem_alloc.h:1179
    Represents custom memory pool.
    void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
    Retrieves statistics of existing VmaPool object.
    struct VmaDefragmentationInfo VmaDefragmentationInfo
    Optional configuration parameters to be passed to function vmaDefragment().
    -
    General statistics from current state of Allocator.
    Definition: vk_mem_alloc.h:1338
    -
    void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called before vkFreeMemory.
    Definition: vk_mem_alloc.h:1120
    +
    General statistics from current state of Allocator.
    Definition: vk_mem_alloc.h:1332
    +
    void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called before vkFreeMemory.
    Definition: vk_mem_alloc.h:1112
    void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
    Sets pUserData in given allocation to new value.
    VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
    Allocates Vulkan device memory and creates VmaPool object.
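    A sketch of the custom-pool lifecycle, tying vmaCreatePool together with vmaGetPoolStats and vmaDestroyPool; DemoPool and the block sizes are illustrative choices, not values from the patch:

```cpp
// Sketch: create a size-limited custom pool, inspect it, and destroy it.
#include "vk_mem_alloc.h"

VkResult DemoPool(VmaAllocator allocator, uint32_t memTypeIndex)
{
    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memTypeIndex;    // e.g. from vmaFindMemoryTypeIndex
    poolInfo.blockSize = 16ull * 1024 * 1024;   // 16 MiB per VkDeviceMemory block
    poolInfo.minBlockCount = 1;                 // keep one block alive even if empty
    poolInfo.maxBlockCount = 4;                 // pool never grows past 64 MiB

    VmaPool pool = VK_NULL_HANDLE;
    VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);
    if(res != VK_SUCCESS)
        return res;

    VmaPoolStats stats = {};
    vmaGetPoolStats(allocator, pool, &stats);
    // stats.allocationCount, stats.unusedSize etc. are now valid.

    vmaDestroyPool(allocator, pool);
    return VK_SUCCESS;
}
```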
    -
    VmaAllocatorCreateFlagBits
    Flags for created VmaAllocator.
    Definition: vk_mem_alloc.h:1141
    +
    VmaAllocatorCreateFlagBits
    Flags for created VmaAllocator.
    Definition: vk_mem_alloc.h:1133
    VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
    Binds image to allocation.
    struct VmaStatInfo VmaStatInfo
    Calculated statistics of memory usage in entire allocator.
    -
    Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
    Definition: vk_mem_alloc.h:1146
    -
    uint32_t allocationsMoved
    Number of allocations that have been moved to different places.
    Definition: vk_mem_alloc.h:1982
    +
    Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
    Definition: vk_mem_alloc.h:1138
    +
    uint32_t allocationsMoved
    Number of allocations that have been moved to different places.
    Definition: vk_mem_alloc.h:2004
    void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
    Creates new allocation that is in lost state from the beginning.
    -
    VkMemoryPropertyFlags requiredFlags
    Flags that must be set in a Memory Type chosen for an allocation.
    Definition: vk_mem_alloc.h:1513
    -
    VkDeviceSize unusedRangeSizeMax
    Size of the largest continuous free memory region.
    Definition: vk_mem_alloc.h:1689
    +
    VkMemoryPropertyFlags requiredFlags
    Flags that must be set in a Memory Type chosen for an allocation.
    Definition: vk_mem_alloc.h:1507
    +
    VkDeviceSize unusedRangeSizeMax
    Size of the largest continuous free memory region.
    Definition: vk_mem_alloc.h:1683
    void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
    Builds and returns statistics as string in JSON format.
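    A short sketch of dumping allocator state as JSON; the string must be released with the matching vmaFreeStatsString call:

```cpp
// Sketch: dump allocator statistics as JSON, then free the string.
#include "vk_mem_alloc.h"
#include <cstdio>

void DumpStats(VmaAllocator allocator)
{
    char* json = nullptr;
    vmaBuildStatsString(allocator, &json, VK_TRUE /* detailedMap */);
    if(json != nullptr)
    {
        printf("%s\n", json);
        vmaFreeStatsString(allocator, json);
    }
}
```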
    -
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
    Definition: vk_mem_alloc.h:1180
    -
    Calculated statistics of memory usage in entire allocator.
    Definition: vk_mem_alloc.h:1321
    -
    VkDeviceSize blockSize
    Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes.
    Definition: vk_mem_alloc.h:1638
    -
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
    Definition: vk_mem_alloc.h:1133
    +
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
    Definition: vk_mem_alloc.h:1172
    +
    Calculated statistics of memory usage in entire allocator.
    Definition: vk_mem_alloc.h:1315
    +
    VkDeviceSize blockSize
    Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes.
    Definition: vk_mem_alloc.h:1632
    +
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
    Definition: vk_mem_alloc.h:1125
    VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
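    A hedged sketch of the one-call buffer path, using the persistently mapped flag referenced below; CreateStagingBuffer is a hypothetical helper:

```cpp
// Sketch: create a persistently mapped staging buffer in one call.
#include "vk_mem_alloc.h"

VkResult CreateStagingBuffer(VmaAllocator allocator, VkDeviceSize size,
                             VkBuffer* outBuf, VmaAllocation* outAlloc,
                             VmaAllocationInfo* outInfo)
{
    VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufInfo.size = size;
    bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
    allocInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT; // outInfo->pMappedData stays valid

    return vmaCreateBuffer(allocator, &bufInfo, &allocInfo,
                           outBuf, outAlloc, outInfo);
}
```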
    -
    Definition: vk_mem_alloc.h:1487
    -
    VkDeviceSize unusedRangeSizeMin
    Definition: vk_mem_alloc.h:1334
    -
    PFN_vmaFreeDeviceMemoryFunction pfnFree
    Optional, can be null.
    Definition: vk_mem_alloc.h:1137
    -
    VmaPoolCreateFlags flags
    Use combination of VmaPoolCreateFlagBits.
    Definition: vk_mem_alloc.h:1633
    -
    Definition: vk_mem_alloc.h:1426
    +
    Definition: vk_mem_alloc.h:1481
    +
    VkDeviceSize unusedRangeSizeMin
    Definition: vk_mem_alloc.h:1328
    +
    PFN_vmaFreeDeviceMemoryFunction pfnFree
    Optional, can be null.
    Definition: vk_mem_alloc.h:1129
    +
    VmaPoolCreateFlags flags
    Use combination of VmaPoolCreateFlagBits.
    Definition: vk_mem_alloc.h:1627
    +
    Definition: vk_mem_alloc.h:1420
    +
    PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
    Definition: vk_mem_alloc.h:1178
    struct VmaPoolStats VmaPoolStats
    Describes parameter of existing VmaPool.
    VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    Function similar to vmaCreateBuffer().
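    The image analogue, sketched under the same caveats; CreateTexture2D and the format/usage choices are illustrative:

```cpp
// Sketch: create a sampled 2D image backed by device-local memory.
#include "vk_mem_alloc.h"

VkResult CreateTexture2D(VmaAllocator allocator, uint32_t width, uint32_t height,
                         VkImage* outImage, VmaAllocation* outAlloc)
{
    VkImageCreateInfo imgInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imgInfo.imageType = VK_IMAGE_TYPE_2D;
    imgInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imgInfo.extent = { width, height, 1 };
    imgInfo.mipLevels = 1;
    imgInfo.arrayLayers = 1;
    imgInfo.samples = VK_SAMPLE_COUNT_1_BIT;
    imgInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imgInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    return vmaCreateImage(allocator, &imgInfo, &allocInfo,
                          outImage, outAlloc, nullptr);
}
```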
    -
    VmaMemoryUsage usage
    Intended usage of memory.
    Definition: vk_mem_alloc.h:1508
    -
    Definition: vk_mem_alloc.h:1499
    +
    VmaMemoryUsage usage
    Intended usage of memory.
    Definition: vk_mem_alloc.h:1502
    +
    Definition: vk_mem_alloc.h:1493
    VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
    Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
    -
    uint32_t blockCount
    Number of VkDeviceMemory Vulkan memory blocks allocated.
    Definition: vk_mem_alloc.h:1324
    -
    PFN_vkFreeMemory vkFreeMemory
    Definition: vk_mem_alloc.h:1182
    -
    size_t maxBlockCount
    Maximum number of blocks that can be allocated in this pool. Optional.
    Definition: vk_mem_alloc.h:1651
    -
    const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
    Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
    Definition: vk_mem_alloc.h:1218
    -
    size_t unusedRangeCount
    Number of continuous memory ranges in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:1682
    -
    VkFlags VmaAllocationCreateFlags
    Definition: vk_mem_alloc.h:1497
    -
    VmaPool pool
    Pool that this allocation should be created in.
    Definition: vk_mem_alloc.h:1532
    +
    uint32_t blockCount
    Number of VkDeviceMemory Vulkan memory blocks allocated.
    Definition: vk_mem_alloc.h:1318
    +
    PFN_vkFreeMemory vkFreeMemory
    Definition: vk_mem_alloc.h:1174
    +
    size_t maxBlockCount
    Maximum number of blocks that can be allocated in this pool. Optional.
    Definition: vk_mem_alloc.h:1645
    +
    const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
    Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
    Definition: vk_mem_alloc.h:1212
    +
    size_t unusedRangeCount
    Number of continuous memory ranges in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:1676
    +
    VkFlags VmaAllocationCreateFlags
    Definition: vk_mem_alloc.h:1491
    +
    VmaPool pool
    Pool that this allocation should be created in.
    Definition: vk_mem_alloc.h:1526
    void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
    -
    const VkDeviceSize * pHeapSizeLimit
    Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
    Definition: vk_mem_alloc.h:1256
    -
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
    Definition: vk_mem_alloc.h:1340
    -
    Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
    Definition: vk_mem_alloc.h:1467
    -
    VkDeviceSize allocationSizeMin
    Definition: vk_mem_alloc.h:1333
    +
    const VkDeviceSize * pHeapSizeLimit
    Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
    Definition: vk_mem_alloc.h:1250
    +
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
    Definition: vk_mem_alloc.h:1334
    +
    Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
    Definition: vk_mem_alloc.h:1461
    +
    VkDeviceSize allocationSizeMin
    Definition: vk_mem_alloc.h:1327
    VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
    Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
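    A brief sketch of the buffer-info variant, which saves creating a dummy buffer just to obtain memoryTypeBits; PickTypeForUniformBuffer and its sizes are illustrative:

```cpp
// Sketch: derive the memory type from a VkBufferCreateInfo directly.
#include "vk_mem_alloc.h"

VkResult PickTypeForUniformBuffer(VmaAllocator allocator, uint32_t* outIndex)
{
    VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufInfo.size = 65536;
    bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

    return vmaFindMemoryTypeIndexForBufferInfo(allocator, &bufInfo,
                                               &allocInfo, outIndex);
}
```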
    -
    PFN_vkCreateImage vkCreateImage
    Definition: vk_mem_alloc.h:1191
    -
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
    Optional, can be null.
    Definition: vk_mem_alloc.h:1135
    -
    PFN_vkDestroyBuffer vkDestroyBuffer
    Definition: vk_mem_alloc.h:1190
    +
    PFN_vkCreateImage vkCreateImage
    Definition: vk_mem_alloc.h:1185
    +
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
    Optional, can be null.
    Definition: vk_mem_alloc.h:1127
    +
    PFN_vkDestroyBuffer vkDestroyBuffer
    Definition: vk_mem_alloc.h:1184
    VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
    Maps memory represented by given allocation and returns pointer to it.
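    A readback sketch pairing vmaMapMemory with the new vmaInvalidateAllocation; ReadBack is a hypothetical helper, and the invalidate is what makes device writes visible on non-coherent memory:

```cpp
// Sketch: read back device-written data through a mapping; for
// non-coherent memory the invalidate is required before the CPU reads.
#include "vk_mem_alloc.h"
#include <cstring>

VkResult ReadBack(VmaAllocator allocator, VmaAllocation allocation,
                  void* dst, size_t size)
{
    void* mapped = nullptr;
    VkResult res = vmaMapMemory(allocator, allocation, &mapped);
    if(res != VK_SUCCESS)
        return res;

    vmaInvalidateAllocation(allocator, allocation, 0, size);
    memcpy(dst, mapped, size);

    vmaUnmapMemory(allocator, allocation); // mapping is reference-counted
    return VK_SUCCESS;
}
```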
    -
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:1665
    +
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:1659
    +
    PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
    Definition: vk_mem_alloc.h:1177
    VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    Function similar to vmaAllocateMemoryForBuffer().
    struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
    Description of an Allocator to be created.
    -
    void * pUserData
    Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
    Definition: vk_mem_alloc.h:1797
    -
    VkDeviceSize preferredLargeHeapBlockSize
    Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
    Definition: vk_mem_alloc.h:1212
    -
    VkDeviceSize allocationSizeAvg
    Definition: vk_mem_alloc.h:1333
    -
    VkDeviceSize usedBytes
    Total number of bytes occupied by all allocations.
    Definition: vk_mem_alloc.h:1330
    +
    void * pUserData
    Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
    Definition: vk_mem_alloc.h:1791
    +
    VkDeviceSize preferredLargeHeapBlockSize
    Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
    Definition: vk_mem_alloc.h:1206
    +
    VkDeviceSize allocationSizeAvg
    Definition: vk_mem_alloc.h:1327
    +
    VkDeviceSize usedBytes
    Total number of bytes occupied by all allocations.
    Definition: vk_mem_alloc.h:1324
    struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
    -
    Describes parameter of existing VmaPool.
    Definition: vk_mem_alloc.h:1670
    -
    VkDeviceSize offset
    Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
    Definition: vk_mem_alloc.h:1778
    -
    Definition: vk_mem_alloc.h:1495
    -
    VkDeviceSize bytesMoved
    Total number of bytes that have been copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:1978
    -
    Pointers to some Vulkan functions - a subset used by the library.
    Definition: vk_mem_alloc.h:1178
    +
    Describes parameter of existing VmaPool.
    Definition: vk_mem_alloc.h:1664
    +
    VkDeviceSize offset
    Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
    Definition: vk_mem_alloc.h:1772
    +
    Definition: vk_mem_alloc.h:1489
    +
    VkDeviceSize bytesMoved
    Total number of bytes that have been copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2000
    +
    Pointers to some Vulkan functions - a subset used by the library.
    Definition: vk_mem_alloc.h:1170
    VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
    Creates Allocator object.
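    A minimal creation sketch, assuming Vulkan is linked statically so pVulkanFunctions may stay null; InitAllocator is a hypothetical helper:

```cpp
// Sketch: minimal allocator setup with statically linked Vulkan functions.
#include "vk_mem_alloc.h"

VkResult InitAllocator(VkPhysicalDevice physicalDevice, VkDevice device,
                       VmaAllocator* outAllocator)
{
    VmaAllocatorCreateInfo createInfo = {};
    createInfo.physicalDevice = physicalDevice;
    createInfo.device = device;
    // If importing pointers manually via pVulkanFunctions, remember the new
    // vkFlushMappedMemoryRanges / vkInvalidateMappedMemoryRanges members too.

    return vmaCreateAllocator(&createInfo, outAllocator);
}
```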
    -
    uint32_t unusedRangeCount
    Number of free ranges of memory between allocations.
    Definition: vk_mem_alloc.h:1328
    -
    Definition: vk_mem_alloc.h:1383
    -
    VkFlags VmaPoolCreateFlags
    Definition: vk_mem_alloc.h:1623
    +
    uint32_t unusedRangeCount
    Number of free ranges of memory between allocations.
    Definition: vk_mem_alloc.h:1322
    +
    Definition: vk_mem_alloc.h:1377
    +
    VkFlags VmaPoolCreateFlags
    Definition: vk_mem_alloc.h:1617
    void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    -
    uint32_t allocationCount
    Number of VmaAllocation allocation objects allocated.
    Definition: vk_mem_alloc.h:1326
    -
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
    Definition: vk_mem_alloc.h:1188
    -
    PFN_vkDestroyImage vkDestroyImage
    Definition: vk_mem_alloc.h:1192
    -
    Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
    Definition: vk_mem_alloc.h:1454
    -
    Definition: vk_mem_alloc.h:1410
    -
    void * pMappedData
    Pointer to the beginning of this allocation as mapped data.
    Definition: vk_mem_alloc.h:1792
    +
    uint32_t allocationCount
    Number of VmaAllocation allocation objects allocated.
    Definition: vk_mem_alloc.h:1320
    +
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
    Definition: vk_mem_alloc.h:1182
    +
    PFN_vkDestroyImage vkDestroyImage
    Definition: vk_mem_alloc.h:1186
    +
    Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
    Definition: vk_mem_alloc.h:1448
    +
    Definition: vk_mem_alloc.h:1404
    +
    void * pMappedData
    Pointer to the beginning of this allocation as mapped data.
    Definition: vk_mem_alloc.h:1786
    void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
    Destroys Vulkan image and frees allocated memory.
    -
    Enables usage of VK_KHR_dedicated_allocation extension.
    Definition: vk_mem_alloc.h:1168
    +
    Enables usage of VK_KHR_dedicated_allocation extension.
    Definition: vk_mem_alloc.h:1160
    struct VmaDefragmentationStats VmaDefragmentationStats
    Statistics returned by function vmaDefragment().
    -
    PFN_vkAllocateMemory vkAllocateMemory
    Definition: vk_mem_alloc.h:1181
    -
    Parameters of VmaAllocation objects that can be retrieved using function vmaGetAllocationInfo().
    Definition: vk_mem_alloc.h:1759
    +
    PFN_vkAllocateMemory vkAllocateMemory
    Definition: vk_mem_alloc.h:1173
    +
    Parameters of VmaAllocation objects that can be retrieved using function vmaGetAllocationInfo().
    Definition: vk_mem_alloc.h:1753
    VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    General purpose memory allocation.
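    A sketch of the general-purpose path, useful when a resource is created outside the vmaCreate* helpers; AllocateForRequirements is illustrative:

```cpp
// Sketch: allocation driven by raw VkMemoryRequirements.
#include "vk_mem_alloc.h"

VkResult AllocateForRequirements(VmaAllocator allocator,
                                 const VkMemoryRequirements& memReq,
                                 VmaAllocation* outAlloc,
                                 VmaAllocationInfo* outInfo)
{
    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    return vmaAllocateMemory(allocator, &memReq, &allocInfo, outAlloc, outInfo);
}
```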
    void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
    Sets index of the current frame.
    struct VmaAllocationCreateInfo VmaAllocationCreateInfo
    VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    -
    VmaPoolCreateFlagBits
    Flags to be passed as VmaPoolCreateInfo::flags.
    Definition: vk_mem_alloc.h:1601
    -
    VkDeviceSize unusedRangeSizeAvg
    Definition: vk_mem_alloc.h:1334
    +
    VmaPoolCreateFlagBits
    Flags to be passed as VmaPoolCreateInfo::flags.
    Definition: vk_mem_alloc.h:1595
    +
    VkDeviceSize unusedRangeSizeAvg
    Definition: vk_mem_alloc.h:1328
    VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
    Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame...
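    A per-frame sketch of the lost-allocation pattern, combining vmaSetCurrentFrameIndex with vmaTouchAllocation; UseOrRecreate is hypothetical and assumes the allocation was created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT:

```cpp
// Sketch: check a can-become-lost allocation once per frame.
#include "vk_mem_alloc.h"

bool UseOrRecreate(VmaAllocator allocator, VmaAllocation allocation,
                   uint32_t frameIndex)
{
    vmaSetCurrentFrameIndex(allocator, frameIndex);

    // VK_TRUE: still valid, and now marked as used in this frame.
    if(vmaTouchAllocation(allocator, allocation) == VK_TRUE)
        return true;

    // Lost: the caller should free the allocation and recreate the resource.
    return false;
}
```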
    -
    -
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
    Definition: vk_mem_alloc.h:1341
    +
    +
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
    Definition: vk_mem_alloc.h:1335
    void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
    Destroys Vulkan buffer and frees allocated memory.
    -
    VkDeviceSize unusedSize
    Total number of bytes in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:1676
    -
    VkDeviceSize unusedRangeSizeMax
    Definition: vk_mem_alloc.h:1334
    -
    uint32_t memoryType
    Memory type index that this allocation was allocated from.
    Definition: vk_mem_alloc.h:1764
    +
    VkDeviceSize unusedSize
    Total number of bytes in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:1670
    +
    VkDeviceSize unusedRangeSizeMax
    Definition: vk_mem_alloc.h:1328
    +
    uint32_t memoryType
    Memory type index that this allocation was allocated from.
    Definition: vk_mem_alloc.h:1758