From 0ef61c2fd5e6f3c1e9b6d20fc847e0243a287605 Mon Sep 17 00:00:00 2001
From: Adam Sawicki
Date: Tue, 18 Aug 2020 14:20:37 +0200
Subject: [PATCH] Fix in documentation chapter "Finding out if memory is
 mappable"

Fixes #143
---
 docs/html/index.html                    |     2 +-
 docs/html/memory_mapping.html           |     5 +-
 docs/html/vk__mem__alloc_8h_source.html | 29215 +++++++++++-----------
 src/vk_mem_alloc.h                      |     2 +-
 4 files changed, 14624 insertions(+), 14600 deletions(-)

diff --git a/docs/html/index.html b/docs/html/index.html
index b07b0c9..929bce2 100644
--- a/docs/html/index.html
+++ b/docs/html/index.html
@@ -65,7 +65,7 @@ $(function() {
Vulkan Memory Allocator
-

Version 3.0.0-development (2020-03-23)

+

Version 3.0.0-development (2020-06-24)

Copyright (c) 2017-2020 Advanced Micro Devices, Inc. All rights reserved.
License: MIT

Documentation of all members: vk_mem_alloc.h

diff --git a/docs/html/memory_mapping.html b/docs/html/memory_mapping.html
index 03f92b2..d5bfa77 100644
--- a/docs/html/memory_mapping.html
+++ b/docs/html/memory_mapping.html
@@ -143,7 +143,7 @@ Finding out if memory is mappable
VkMemoryPropertyFlags memFlags;
vmaGetMemoryTypeProperties(allocator, allocInfo.memoryType, &memFlags);
-
if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
+
if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
{
// Allocation ended up in mappable memory. You can map it and access it directly.
void* mappedData;
@@ -170,7 +170,7 @@ Finding out if memory is mappable
vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
-
if(allocInfo.pUserData != nullptr)
+
if(allocInfo.pMappedData != nullptr)
{
// Allocation ended up in mappable memory.
// It's persistently mapped. You can access it directly.
@@ -192,7 +192,6 @@ Finding out if memory is mappable
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:3112
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:2744
-
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:3117
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:3078
@ VMA_ALLOCATION_CREATE_MAPPED_BIT
Set this flag to use a memory that will be persistently mapped and retrieve pointer to it.
Definition: vk_mem_alloc.h:2655
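Taken together, the two corrections above turn the documented snippets from code that was silently wrong (testing == 0 on the HOST_VISIBLE bit, and testing the unrelated pUserData member) into checks that actually detect mappable memory. A minimal sketch of the corrected usage, assuming a valid allocator, allocation, the allocInfo returned by vmaCreateBuffer(), and a hypothetical myData value to upload:

#include <cstring> // memcpy

// Sketch only; myData stands in for whatever the application wants to upload.
void UploadIfMappable(VmaAllocator allocator, VmaAllocation allocation,
                      const VmaAllocationInfo& allocInfo, const float myData)
{
    VkMemoryPropertyFlags memFlags;
    vmaGetMemoryTypeProperties(allocator, allocInfo.memoryType, &memFlags);
    if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) // the bit must be SET
    {
        // Mappable memory: map, write, unmap.
        void* mappedData;
        vmaMapMemory(allocator, allocation, &mappedData);
        memcpy(mappedData, &myData, sizeof(myData));
        vmaUnmapMemory(allocator, allocation);
    }
    // With VMA_ALLOCATION_CREATE_MAPPED_BIT set at creation, test the pointer itself:
    if(allocInfo.pMappedData != nullptr) // pMappedData, not pUserData
    {
        // Persistently mapped: write directly, no map/unmap calls needed.
        memcpy(allocInfo.pMappedData, &myData, sizeof(myData));
    }
}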
diff --git a/docs/html/vk__mem__alloc_8h_source.html b/docs/html/vk__mem__alloc_8h_source.html
index 641ff65..7def349 100644
--- a/docs/html/vk__mem__alloc_8h_source.html
+++ b/docs/html/vk__mem__alloc_8h_source.html
@@ -923,7 +923,7 @@ $(function() {
3943 
3944 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
3945 #include <cstdlib>
-
3946 void *aligned_alloc(size_t alignment, size_t size)
+
3946 void *vma_aligned_alloc(size_t alignment, size_t size)
3947 {
3948  // alignment must be >= sizeof(void*)
3949  if(alignment < sizeof(void*))
@@ -935,5750 +935,5750 @@ $(function() {
3955 }
3956 #elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC))
3957 #include <cstdlib>
-
3958 void *aligned_alloc(size_t alignment, size_t size)
-
3959 {
-
3960  // alignment must be >= sizeof(void*)
-
3961  if(alignment < sizeof(void*))
-
3962  {
-
3963  alignment = sizeof(void*);
-
3964  }
-
3965 
-
3966  void *pointer;
-
3967  if(posix_memalign(&pointer, alignment, size) == 0)
-
3968  return pointer;
-
3969  return VMA_NULL;
-
3970 }
-
3971 #endif
-
3972 
-
3973 // If your compiler is not compatible with C++11 and the definition of the
-
3974 // aligned_alloc() function is missing, uncommenting the following line may help:
-
3975 
-
3976 //#include <malloc.h>
-
3977 
-
3978 // Normal assert to check for programmer's errors, especially in Debug configuration.
-
3979 #ifndef VMA_ASSERT
-
3980  #ifdef NDEBUG
-
3981  #define VMA_ASSERT(expr)
-
3982  #else
-
3983  #define VMA_ASSERT(expr) assert(expr)
-
3984  #endif
-
3985 #endif
-
3986 
-
3987 // Assert that will be called very often, like inside data structures e.g. operator[].
-
3988 // Making it non-empty can make program slow.
-
3989 #ifndef VMA_HEAVY_ASSERT
-
3990  #ifdef NDEBUG
-
3991  #define VMA_HEAVY_ASSERT(expr)
-
3992  #else
-
3993  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
-
3994  #endif
-
3995 #endif
-
3996 
-
3997 #ifndef VMA_ALIGN_OF
-
3998  #define VMA_ALIGN_OF(type) (__alignof(type))
-
3999 #endif
-
4000 
-
4001 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
-
4002  #if defined(_WIN32)
-
4003  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
-
4004  #else
-
4005  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
-
4006  #endif
-
4007 #endif
-
4008 
-
4009 #ifndef VMA_SYSTEM_FREE
-
4010  #if defined(_WIN32)
-
4011  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
-
4012  #else
-
4013  #define VMA_SYSTEM_FREE(ptr) free(ptr)
-
4014  #endif
-
4015 #endif
-
4016 
-
4017 #ifndef VMA_MIN
-
4018  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
-
4019 #endif
-
4020 
-
4021 #ifndef VMA_MAX
-
4022  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
-
4023 #endif
-
4024 
-
4025 #ifndef VMA_SWAP
-
4026  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
-
4027 #endif
-
4028 
-
4029 #ifndef VMA_SORT
-
4030  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
-
4031 #endif
-
4032 
-
4033 #ifndef VMA_DEBUG_LOG
-
4034  #define VMA_DEBUG_LOG(format, ...)
-
4035  /*
-
4036  #define VMA_DEBUG_LOG(format, ...) do { \
-
4037  printf(format, __VA_ARGS__); \
-
4038  printf("\n"); \
-
4039  } while(false)
-
4040  */
-
4041 #endif
-
4042 
-
4043 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
-
4044 #if VMA_STATS_STRING_ENABLED
-
4045  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
-
4046  {
-
4047  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
-
4048  }
-
4049  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
-
4050  {
-
4051  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
-
4052  }
-
4053  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
-
4054  {
-
4055  snprintf(outStr, strLen, "%p", ptr);
-
4056  }
-
4057 #endif
-
4058 
-
4059 #ifndef VMA_MUTEX
-
4060  class VmaMutex
-
4061  {
-
4062  public:
-
4063  void Lock() { m_Mutex.lock(); }
-
4064  void Unlock() { m_Mutex.unlock(); }
-
4065  bool TryLock() { return m_Mutex.try_lock(); }
-
4066  private:
-
4067  std::mutex m_Mutex;
-
4068  };
-
4069  #define VMA_MUTEX VmaMutex
-
4070 #endif
-
4071 
-
4072 // Read-write mutex, where "read" is shared access, "write" is exclusive access.
-
4073 #ifndef VMA_RW_MUTEX
-
4074  #if VMA_USE_STL_SHARED_MUTEX
-
4075  // Use std::shared_mutex from C++17.
-
4076  #include <shared_mutex>
-
4077  class VmaRWMutex
-
4078  {
-
4079  public:
-
4080  void LockRead() { m_Mutex.lock_shared(); }
-
4081  void UnlockRead() { m_Mutex.unlock_shared(); }
-
4082  bool TryLockRead() { return m_Mutex.try_lock_shared(); }
-
4083  void LockWrite() { m_Mutex.lock(); }
-
4084  void UnlockWrite() { m_Mutex.unlock(); }
-
4085  bool TryLockWrite() { return m_Mutex.try_lock(); }
-
4086  private:
-
4087  std::shared_mutex m_Mutex;
-
4088  };
-
4089  #define VMA_RW_MUTEX VmaRWMutex
-
4090  #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
-
4091  // Use SRWLOCK from WinAPI.
-
4092  // Minimum supported client = Windows Vista, server = Windows Server 2008.
-
4093  class VmaRWMutex
-
4094  {
-
4095  public:
-
4096  VmaRWMutex() { InitializeSRWLock(&m_Lock); }
-
4097  void LockRead() { AcquireSRWLockShared(&m_Lock); }
-
4098  void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
-
4099  bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; }
-
4100  void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
-
4101  void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
-
4102  bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; }
-
4103  private:
-
4104  SRWLOCK m_Lock;
-
4105  };
-
4106  #define VMA_RW_MUTEX VmaRWMutex
-
4107  #else
-
4108  // Less efficient fallback: Use normal mutex.
-
4109  class VmaRWMutex
-
4110  {
-
4111  public:
-
4112  void LockRead() { m_Mutex.Lock(); }
-
4113  void UnlockRead() { m_Mutex.Unlock(); }
-
4114  bool TryLockRead() { return m_Mutex.TryLock(); }
-
4115  void LockWrite() { m_Mutex.Lock(); }
-
4116  void UnlockWrite() { m_Mutex.Unlock(); }
-
4117  bool TryLockWrite() { return m_Mutex.TryLock(); }
-
4118  private:
-
4119  VMA_MUTEX m_Mutex;
-
4120  };
-
4121  #define VMA_RW_MUTEX VmaRWMutex
-
4122  #endif // #if VMA_USE_STL_SHARED_MUTEX
-
4123 #endif // #ifndef VMA_RW_MUTEX
-
4124 
-
4125 /*
-
4126 If providing your own implementation, you need to implement a subset of std::atomic.
-
4127 */
-
4128 #ifndef VMA_ATOMIC_UINT32
-
4129  #include <atomic>
-
4130  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
-
4131 #endif
-
4132 
-
4133 #ifndef VMA_ATOMIC_UINT64
-
4134  #include <atomic>
-
4135  #define VMA_ATOMIC_UINT64 std::atomic<uint64_t>
-
4136 #endif
-
4137 
-
4138 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
-
4139 
-
4143  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
-
4144 #endif
-
4145 
-
4146 #ifndef VMA_DEBUG_ALIGNMENT
+
3958 
+
3959 #if defined(__APPLE__)
+
3960 #include <AvailabilityMacros.h>
+
3961 #endif
+
3962 
+
3963 void *vma_aligned_alloc(size_t alignment, size_t size)
+
3964 {
+
3965 #if defined(__APPLE__) && (defined(MAC_OS_X_VERSION_10_16) || defined(__IPHONE_14_0))
+
3966 #if MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_16 || __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_14_0
+
3967  // For C++14, usr/include/malloc/_malloc.h declares aligned_alloc() only
+
3968  // with the MacOSX11.0 SDK in Xcode 12 (which is what adds
+
3969  // MAC_OS_X_VERSION_10_16), even though the function is marked
+
3970  // available for 10.15. That's why the preprocessor checks for 10.16 but
+
3971  // the __builtin_available checks for 10.15.
+
3972  // People who use C++17 could call aligned_alloc with the 10.15 SDK already.
+
3973  if (__builtin_available(macOS 10.15, iOS 13, *))
+
3974  return aligned_alloc(alignment, size);
+
3975 #endif
+
3976 #endif
+
3977  // alignment must be >= sizeof(void*)
+
3978  if(alignment < sizeof(void*))
+
3979  {
+
3980  alignment = sizeof(void*);
+
3981  }
+
3982 
+
3983  void *pointer;
+
3984  if(posix_memalign(&pointer, alignment, size) == 0)
+
3985  return pointer;
+
3986  return VMA_NULL;
+
3987 }
+
3988 #elif defined(_WIN32)
+
3989 void *vma_aligned_alloc(size_t alignment, size_t size)
+
3990 {
+
3991  return _aligned_malloc(size, alignment);
+
3992 }
+
3993 #else
+
3994 void *vma_aligned_alloc(size_t alignment, size_t size)
+
3995 {
+
3996  return aligned_alloc(alignment, size);
+
3997 }
+
3998 #endif
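The rewritten block above replaces the old globally named aligned_alloc() shim, which could clash with the identically named standard function (particularly on newer Apple SDKs that declare it themselves), with a library-private vma_aligned_alloc() that each platform implements appropriately. A hypothetical smoke test, not part of the patch, of the property the wrapper must provide (it assumes VMA_SYSTEM_FREE, defined a little later in this header):

#include <cassert>
#include <cstdint>
static void TestVmaAlignedAlloc()
{
    const size_t alignment = 64;
    void* p = vma_aligned_alloc(alignment, 256);
    assert(p != VMA_NULL);
    assert(reinterpret_cast<uintptr_t>(p) % alignment == 0); // correctly aligned
    VMA_SYSTEM_FREE(p); // _aligned_free() on Windows, free() elsewhere
}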
+
3999 
+
4000 // If your compiler is not compatible with C++11 and the definition of the
+
4001 // aligned_alloc() function is missing, uncommenting the following line may help:
+
4002 
+
4003 //#include <malloc.h>
+
4004 
+
4005 // Normal assert to check for programmer's errors, especially in Debug configuration.
+
4006 #ifndef VMA_ASSERT
+
4007  #ifdef NDEBUG
+
4008  #define VMA_ASSERT(expr)
+
4009  #else
+
4010  #define VMA_ASSERT(expr) assert(expr)
+
4011  #endif
+
4012 #endif
+
4013 
+
4014 // Assert that will be called very often, like inside data structures, e.g. operator[].
+
4015 // Making it non-empty can make the program slow.
+
4016 #ifndef VMA_HEAVY_ASSERT
+
4017  #ifdef NDEBUG
+
4018  #define VMA_HEAVY_ASSERT(expr)
+
4019  #else
+
4020  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
+
4021  #endif
+
4022 #endif
+
4023 
+
4024 #ifndef VMA_ALIGN_OF
+
4025  #define VMA_ALIGN_OF(type) (__alignof(type))
+
4026 #endif
+
4027 
+
4028 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
+
4029  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) vma_aligned_alloc((alignment), (size))
+
4030 #endif
+
4031 
+
4032 #ifndef VMA_SYSTEM_FREE
+
4033  #if defined(_WIN32)
+
4034  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
+
4035  #else
+
4036  #define VMA_SYSTEM_FREE(ptr) free(ptr)
+
4037  #endif
+
4038 #endif
+
4039 
+
4040 #ifndef VMA_MIN
+
4041  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
+
4042 #endif
+
4043 
+
4044 #ifndef VMA_MAX
+
4045  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
+
4046 #endif
+
4047 
+
4048 #ifndef VMA_SWAP
+
4049  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
+
4050 #endif
+
4051 
+
4052 #ifndef VMA_SORT
+
4053  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
+
4054 #endif
+
4055 
+
4056 #ifndef VMA_DEBUG_LOG
+
4057  #define VMA_DEBUG_LOG(format, ...)
+
4058  /*
+
4059  #define VMA_DEBUG_LOG(format, ...) do { \
+
4060  printf(format, __VA_ARGS__); \
+
4061  printf("\n"); \
+
4062  } while(false)
+
4063  */
+
4064 #endif
+
4065 
+
4066 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
+
4067 #if VMA_STATS_STRING_ENABLED
+
4068  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
+
4069  {
+
4070  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
+
4071  }
+
4072  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
+
4073  {
+
4074  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
+
4075  }
+
4076  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
+
4077  {
+
4078  snprintf(outStr, strLen, "%p", ptr);
+
4079  }
+
4080 #endif
+
4081 
+
4082 #ifndef VMA_MUTEX
+
4083  class VmaMutex
+
4084  {
+
4085  public:
+
4086  void Lock() { m_Mutex.lock(); }
+
4087  void Unlock() { m_Mutex.unlock(); }
+
4088  bool TryLock() { return m_Mutex.try_lock(); }
+
4089  private:
+
4090  std::mutex m_Mutex;
+
4091  };
+
4092  #define VMA_MUTEX VmaMutex
+
4093 #endif
+
4094 
+
4095 // Read-write mutex, where "read" is shared access, "write" is exclusive access.
+
4096 #ifndef VMA_RW_MUTEX
+
4097  #if VMA_USE_STL_SHARED_MUTEX
+
4098  // Use std::shared_mutex from C++17.
+
4099  #include <shared_mutex>
+
4100  class VmaRWMutex
+
4101  {
+
4102  public:
+
4103  void LockRead() { m_Mutex.lock_shared(); }
+
4104  void UnlockRead() { m_Mutex.unlock_shared(); }
+
4105  bool TryLockRead() { return m_Mutex.try_lock_shared(); }
+
4106  void LockWrite() { m_Mutex.lock(); }
+
4107  void UnlockWrite() { m_Mutex.unlock(); }
+
4108  bool TryLockWrite() { return m_Mutex.try_lock(); }
+
4109  private:
+
4110  std::shared_mutex m_Mutex;
+
4111  };
+
4112  #define VMA_RW_MUTEX VmaRWMutex
+
4113  #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
+
4114  // Use SRWLOCK from WinAPI.
+
4115  // Minimum supported client = Windows Vista, server = Windows Server 2008.
+
4116  class VmaRWMutex
+
4117  {
+
4118  public:
+
4119  VmaRWMutex() { InitializeSRWLock(&m_Lock); }
+
4120  void LockRead() { AcquireSRWLockShared(&m_Lock); }
+
4121  void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
+
4122  bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; }
+
4123  void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
+
4124  void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
+
4125  bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; }
+
4126  private:
+
4127  SRWLOCK m_Lock;
+
4128  };
+
4129  #define VMA_RW_MUTEX VmaRWMutex
+
4130  #else
+
4131  // Less efficient fallback: Use normal mutex.
+
4132  class VmaRWMutex
+
4133  {
+
4134  public:
+
4135  void LockRead() { m_Mutex.Lock(); }
+
4136  void UnlockRead() { m_Mutex.Unlock(); }
+
4137  bool TryLockRead() { return m_Mutex.TryLock(); }
+
4138  void LockWrite() { m_Mutex.Lock(); }
+
4139  void UnlockWrite() { m_Mutex.Unlock(); }
+
4140  bool TryLockWrite() { return m_Mutex.TryLock(); }
+
4141  private:
+
4142  VMA_MUTEX m_Mutex;
+
4143  };
+
4144  #define VMA_RW_MUTEX VmaRWMutex
+
4145  #endif // #if VMA_USE_STL_SHARED_MUTEX
+
4146 #endif // #ifndef VMA_RW_MUTEX
4147 
-
4151  #define VMA_DEBUG_ALIGNMENT (1)
-
4152 #endif
-
4153 
-
4154 #ifndef VMA_DEBUG_MARGIN
+
4148 /*
+
4149 If providing your own implementation, you need to implement a subset of std::atomic.
+
4150 */
+
4151 #ifndef VMA_ATOMIC_UINT32
+
4152  #include <atomic>
+
4153  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
+
4154 #endif
4155 
-
4159  #define VMA_DEBUG_MARGIN (0)
-
4160 #endif
-
4161 
-
4162 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
-
4163 
-
4167  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
-
4168 #endif
-
4169 
-
4170 #ifndef VMA_DEBUG_DETECT_CORRUPTION
-
4171 
-
4176  #define VMA_DEBUG_DETECT_CORRUPTION (0)
-
4177 #endif
+
4156 #ifndef VMA_ATOMIC_UINT64
+
4157  #include <atomic>
+
4158  #define VMA_ATOMIC_UINT64 std::atomic<uint64_t>
+
4159 #endif
+
4160 
+
4161 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
+
4162 
+
4166  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
+
4167 #endif
+
4168 
+
4169 #ifndef VMA_DEBUG_ALIGNMENT
+
4170 
+
4174  #define VMA_DEBUG_ALIGNMENT (1)
+
4175 #endif
+
4176 
+
4177 #ifndef VMA_DEBUG_MARGIN
4178 
-
4179 #ifndef VMA_DEBUG_GLOBAL_MUTEX
-
4180 
-
4184  #define VMA_DEBUG_GLOBAL_MUTEX (0)
-
4185 #endif
+
4182  #define VMA_DEBUG_MARGIN (0)
+
4183 #endif
+
4184 
+
4185 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
4186 
-
4187 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
-
4188 
-
4192  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
-
4193 #endif
+
4190  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
+
4191 #endif
+
4192 
+
4193 #ifndef VMA_DEBUG_DETECT_CORRUPTION
4194 
-
4195 #ifndef VMA_SMALL_HEAP_MAX_SIZE
-
4196  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
-
4198 #endif
-
4199 
-
4200 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
-
4201  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
-
4203 #endif
-
4204 
-
4205 #ifndef VMA_CLASS_NO_COPY
-
4206  #define VMA_CLASS_NO_COPY(className) \
-
4207  private: \
-
4208  className(const className&) = delete; \
-
4209  className& operator=(const className&) = delete;
-
4210 #endif
+
4199  #define VMA_DEBUG_DETECT_CORRUPTION (0)
+
4200 #endif
+
4201 
+
4202 #ifndef VMA_DEBUG_GLOBAL_MUTEX
+
4203 
+
4207  #define VMA_DEBUG_GLOBAL_MUTEX (0)
+
4208 #endif
+
4209 
+
4210 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
4211 
-
4212 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
-
4213 
-
4214 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
-
4215 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
-
4216 
-
4217 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
-
4218 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
-
4219 
-
4220 /*******************************************************************************
-
4221 END OF CONFIGURATION
-
4222 */
-
4223 
-
4224 // # Copy of some Vulkan definitions so we don't need to check their existence just to handle few constants.
-
4225 
-
4226 static const uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040;
-
4227 static const uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080;
-
4228 static const uint32_t VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY = 0x00020000;
-
4229 
-
4230 static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
-
4231 
-
4232 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
-
4233  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
+
4215  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
+
4216 #endif
+
4217 
+
4218 #ifndef VMA_SMALL_HEAP_MAX_SIZE
+
4219  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
+
4221 #endif
+
4222 
+
4223 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
+
4224  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
+
4226 #endif
+
4227 
+
4228 #ifndef VMA_CLASS_NO_COPY
+
4229  #define VMA_CLASS_NO_COPY(className) \
+
4230  private: \
+
4231  className(const className&) = delete; \
+
4232  className& operator=(const className&) = delete;
+
4233 #endif
4234 
-
4235 // Returns number of bits set to 1 in (v).
-
4236 static inline uint32_t VmaCountBitsSet(uint32_t v)
-
4237 {
-
4238  uint32_t c = v - ((v >> 1) & 0x55555555);
-
4239  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
-
4240  c = ((c >> 4) + c) & 0x0F0F0F0F;
-
4241  c = ((c >> 8) + c) & 0x00FF00FF;
-
4242  c = ((c >> 16) + c) & 0x0000FFFF;
-
4243  return c;
-
4244 }
-
4245 
-
4246 // Aligns given value up to the nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
-
4247 // Use types like uint32_t, uint64_t as T.
-
4248 template <typename T>
-
4249 static inline T VmaAlignUp(T val, T align)
-
4250 {
-
4251  return (val + align - 1) / align * align;
-
4252 }
-
4253 // Aligns given value down to the nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
-
4254 // Use types like uint32_t, uint64_t as T.
-
4255 template <typename T>
-
4256 static inline T VmaAlignDown(T val, T align)
-
4257 {
-
4258  return val / align * align;
-
4259 }
-
4260 
-
4261 // Division with mathematical rounding to nearest number.
-
4262 template <typename T>
-
4263 static inline T VmaRoundDiv(T x, T y)
-
4264 {
-
4265  return (x + (y / (T)2)) / y;
-
4266 }
-
4267 
-
4268 /*
-
4269 Returns true if given number is a power of two.
-
4270 T must be unsigned integer number or signed integer but always nonnegative.
-
4271 For 0 returns true.
-
4272 */
-
4273 template <typename T>
-
4274 inline bool VmaIsPow2(T x)
-
4275 {
-
4276  return (x & (x-1)) == 0;
-
4277 }
-
4278 
-
4279 // Returns smallest power of 2 greater or equal to v.
-
4280 static inline uint32_t VmaNextPow2(uint32_t v)
-
4281 {
-
4282  v--;
-
4283  v |= v >> 1;
-
4284  v |= v >> 2;
-
4285  v |= v >> 4;
-
4286  v |= v >> 8;
-
4287  v |= v >> 16;
-
4288  v++;
-
4289  return v;
-
4290 }
-
4291 static inline uint64_t VmaNextPow2(uint64_t v)
+
4235 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
+
4236 
+
4237 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
+
4238 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
+
4239 
+
4240 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
+
4241 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
+
4242 
+
4243 /*******************************************************************************
+
4244 END OF CONFIGURATION
+
4245 */
+
4246 
+
4247 // # Copy of some Vulkan definitions so we don't need to check their existence just to handle few constants.
+
4248 
+
4249 static const uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040;
+
4250 static const uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080;
+
4251 static const uint32_t VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY = 0x00020000;
+
4252 
+
4253 static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
+
4254 
+
4255 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
+
4256  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
+
4257 
+
4258 // Returns number of bits set to 1 in (v).
+
4259 static inline uint32_t VmaCountBitsSet(uint32_t v)
+
4260 {
+
4261  uint32_t c = v - ((v >> 1) & 0x55555555);
+
4262  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
+
4263  c = ((c >> 4) + c) & 0x0F0F0F0F;
+
4264  c = ((c >> 8) + c) & 0x00FF00FF;
+
4265  c = ((c >> 16) + c) & 0x0000FFFF;
+
4266  return c;
+
4267 }
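VmaCountBitsSet() above is a classic SWAR popcount: each step folds bit counts into progressively wider fields (2, 4, 8, 16, then 32 bits wide). A few hypothetical sanity checks, not part of the patch:

#include <cassert>
static void TestCountBitsSet()
{
    assert(VmaCountBitsSet(0x00000000u) ==  0);
    assert(VmaCountBitsSet(0x00000001u) ==  1);
    assert(VmaCountBitsSet(0xF0F0F0F0u) == 16); // sixteen bits set
    assert(VmaCountBitsSet(0xFFFFFFFFu) == 32);
}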
+
4268 
+
4269 /*
+
4270 Returns true if given number is a power of two.
+
4271 T must be unsigned integer number or signed integer but always nonnegative.
+
4272 For 0 returns true.
+
4273 */
+
4274 template <typename T>
+
4275 inline bool VmaIsPow2(T x)
+
4276 {
+
4277  return (x & (x-1)) == 0;
+
4278 }
+
4279 
+
4280 // Aligns given value up to the nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
+
4281 // Use types like uint32_t, uint64_t as T.
+
4282 template <typename T>
+
4283 static inline T VmaAlignUp(T val, T alignment)
+
4284 {
+
4285  VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
+
4286  return (val + alignment - 1) & ~(alignment - 1);
+
4287 }
+
4288 // Aligns given value down to the nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
+
4289 // Use types like uint32_t, uint64_t as T.
+
4290 template <typename T>
+
4291 static inline T VmaAlignDown(T val, T alignment)
4292 {
-
4293  v--;
-
4294  v |= v >> 1;
-
4295  v |= v >> 2;
-
4296  v |= v >> 4;
-
4297  v |= v >> 8;
-
4298  v |= v >> 16;
-
4299  v |= v >> 32;
-
4300  v++;
-
4301  return v;
+
4293  VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
+
4294  return val & ~(alignment - 1);
+
4295 }
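Note the functional change here: VmaAlignUp()/VmaAlignDown() switch from divide-and-multiply to bitmasking, which is faster but only correct for power-of-two alignments, hence the new VMA_HEAVY_ASSERT(VmaIsPow2(alignment)) guards. Hypothetical checks against the examples in the comments:

#include <cassert>
static void TestAlign()
{
    assert(VmaAlignUp<uint32_t>(11, 8) == 16);   // (11 + 7) & ~7 == 16
    assert(VmaAlignUp<uint32_t>(16, 8) == 16);   // already aligned values are unchanged
    assert(VmaAlignDown<uint32_t>(11, 8) == 8);  // 11 & ~7 == 8
}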
+
4296 
+
4297 // Division with mathematical rounding to nearest number.
+
4298 template <typename T>
+
4299 static inline T VmaRoundDiv(T x, T y)
+
4300 {
+
4301  return (x + (y / (T)2)) / y;
4302 }
4303 
-
4304 // Returns largest power of 2 less or equal to v.
-
4305 static inline uint32_t VmaPrevPow2(uint32_t v)
+
4304 // Returns the smallest power of 2 greater than or equal to v.
+
4305 static inline uint32_t VmaNextPow2(uint32_t v)
4306 {
-
4307  v |= v >> 1;
-
4308  v |= v >> 2;
-
4309  v |= v >> 4;
-
4310  v |= v >> 8;
-
4311  v |= v >> 16;
-
4312  v = v ^ (v >> 1);
-
4313  return v;
-
4314 }
-
4315 static inline uint64_t VmaPrevPow2(uint64_t v)
-
4316 {
-
4317  v |= v >> 1;
-
4318  v |= v >> 2;
-
4319  v |= v >> 4;
-
4320  v |= v >> 8;
-
4321  v |= v >> 16;
-
4322  v |= v >> 32;
-
4323  v = v ^ (v >> 1);
-
4324  return v;
-
4325 }
-
4326 
-
4327 static inline bool VmaStrIsEmpty(const char* pStr)
-
4328 {
-
4329  return pStr == VMA_NULL || *pStr == '\0';
-
4330 }
-
4331 
-
4332 #if VMA_STATS_STRING_ENABLED
-
4333 
-
4334 static const char* VmaAlgorithmToStr(uint32_t algorithm)
-
4335 {
-
4336  switch(algorithm)
-
4337  {
-
4338  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
-
4339  return "Linear";
-
4340  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
-
4341  return "Buddy";
-
4342  case 0:
-
4343  return "Default";
-
4344  default:
-
4345  VMA_ASSERT(0);
-
4346  return "";
-
4347  }
-
4348 }
-
4349 
-
4350 #endif // #if VMA_STATS_STRING_ENABLED
+
4307  v--;
+
4308  v |= v >> 1;
+
4309  v |= v >> 2;
+
4310  v |= v >> 4;
+
4311  v |= v >> 8;
+
4312  v |= v >> 16;
+
4313  v++;
+
4314  return v;
+
4315 }
+
4316 static inline uint64_t VmaNextPow2(uint64_t v)
+
4317 {
+
4318  v--;
+
4319  v |= v >> 1;
+
4320  v |= v >> 2;
+
4321  v |= v >> 4;
+
4322  v |= v >> 8;
+
4323  v |= v >> 16;
+
4324  v |= v >> 32;
+
4325  v++;
+
4326  return v;
+
4327 }
+
4328 
+
4329 // Returns the largest power of 2 less than or equal to v.
+
4330 static inline uint32_t VmaPrevPow2(uint32_t v)
+
4331 {
+
4332  v |= v >> 1;
+
4333  v |= v >> 2;
+
4334  v |= v >> 4;
+
4335  v |= v >> 8;
+
4336  v |= v >> 16;
+
4337  v = v ^ (v >> 1);
+
4338  return v;
+
4339 }
+
4340 static inline uint64_t VmaPrevPow2(uint64_t v)
+
4341 {
+
4342  v |= v >> 1;
+
4343  v |= v >> 2;
+
4344  v |= v >> 4;
+
4345  v |= v >> 8;
+
4346  v |= v >> 16;
+
4347  v |= v >> 32;
+
4348  v = v ^ (v >> 1);
+
4349  return v;
+
4350 }
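VmaNextPow2() smears the highest set bit into all lower positions and then increments (the initial v-- makes exact powers of two map to themselves); VmaPrevPow2() smears the same way and keeps only the top bit via v ^ (v >> 1). Hypothetical sanity checks, not part of the patch:

#include <cassert>
static void TestPow2()
{
    assert(VmaNextPow2(uint32_t(5)) == 8);
    assert(VmaNextPow2(uint32_t(8)) == 8); // exact powers of two are unchanged
    assert(VmaPrevPow2(uint32_t(5)) == 4);
    assert(VmaPrevPow2(uint32_t(8)) == 8);
}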
4351 
-
4352 #ifndef VMA_SORT
-
4353 
-
4354 template<typename Iterator, typename Compare>
-
4355 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
-
4356 {
-
4357  Iterator centerValue = end; --centerValue;
-
4358  Iterator insertIndex = beg;
-
4359  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
-
4360  {
-
4361  if(cmp(*memTypeIndex, *centerValue))
-
4362  {
-
4363  if(insertIndex != memTypeIndex)
-
4364  {
-
4365  VMA_SWAP(*memTypeIndex, *insertIndex);
-
4366  }
-
4367  ++insertIndex;
-
4368  }
-
4369  }
-
4370  if(insertIndex != centerValue)
-
4371  {
-
4372  VMA_SWAP(*insertIndex, *centerValue);
-
4373  }
-
4374  return insertIndex;
-
4375 }
+
4352 static inline bool VmaStrIsEmpty(const char* pStr)
+
4353 {
+
4354  return pStr == VMA_NULL || *pStr == '\0';
+
4355 }
+
4356 
+
4357 #if VMA_STATS_STRING_ENABLED
+
4358 
+
4359 static const char* VmaAlgorithmToStr(uint32_t algorithm)
+
4360 {
+
4361  switch(algorithm)
+
4362  {
+
4363  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
+
4364  return "Linear";
+
4365  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
+
4366  return "Buddy";
+
4367  case 0:
+
4368  return "Default";
+
4369  default:
+
4370  VMA_ASSERT(0);
+
4371  return "";
+
4372  }
+
4373 }
+
4374 
+
4375 #endif // #if VMA_STATS_STRING_ENABLED
4376 
-
4377 template<typename Iterator, typename Compare>
-
4378 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
-
4379 {
-
4380  if(beg < end)
-
4381  {
-
4382  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
-
4383  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
-
4384  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
-
4385  }
-
4386 }
-
4387 
-
4388 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
-
4389 
-
4390 #endif // #ifndef VMA_SORT
-
4391 
-
4392 /*
-
4393 Returns true if two memory blocks occupy overlapping pages.
-
4394 ResourceA must be at a lower memory offset than ResourceB.
-
4395 
-
4396 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
-
4397 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
-
4398 */
-
4399 static inline bool VmaBlocksOnSamePage(
-
4400  VkDeviceSize resourceAOffset,
-
4401  VkDeviceSize resourceASize,
-
4402  VkDeviceSize resourceBOffset,
-
4403  VkDeviceSize pageSize)
+
4377 #ifndef VMA_SORT
+
4378 
+
4379 template<typename Iterator, typename Compare>
+
4380 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
+
4381 {
+
4382  Iterator centerValue = end; --centerValue;
+
4383  Iterator insertIndex = beg;
+
4384  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
+
4385  {
+
4386  if(cmp(*memTypeIndex, *centerValue))
+
4387  {
+
4388  if(insertIndex != memTypeIndex)
+
4389  {
+
4390  VMA_SWAP(*memTypeIndex, *insertIndex);
+
4391  }
+
4392  ++insertIndex;
+
4393  }
+
4394  }
+
4395  if(insertIndex != centerValue)
+
4396  {
+
4397  VMA_SWAP(*insertIndex, *centerValue);
+
4398  }
+
4399  return insertIndex;
+
4400 }
+
4401 
+
4402 template<typename Iterator, typename Compare>
+
4403 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
4404 {
-
4405  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
-
4406  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
-
4407  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
-
4408  VkDeviceSize resourceBStart = resourceBOffset;
-
4409  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
-
4410  return resourceAEndPage == resourceBStartPage;
+
4405  if(beg < end)
+
4406  {
+
4407  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
+
4408  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
+
4409  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
+
4410  }
4411 }
4412 
-
4413 enum VmaSuballocationType
-
4414 {
-
4415  VMA_SUBALLOCATION_TYPE_FREE = 0,
-
4416  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
-
4417  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
-
4418  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
-
4419  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
-
4420  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
-
4421  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
-
4422 };
-
4423 
-
4424 /*
-
4425 Returns true if given suballocation types could conflict and must respect
-
4426 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
-
4427 or linear image and another one is optimal image. If type is unknown, behave
-
4428 conservatively.
-
4429 */
-
4430 static inline bool VmaIsBufferImageGranularityConflict(
-
4431  VmaSuballocationType suballocType1,
-
4432  VmaSuballocationType suballocType2)
-
4433 {
-
4434  if(suballocType1 > suballocType2)
-
4435  {
-
4436  VMA_SWAP(suballocType1, suballocType2);
-
4437  }
-
4438 
-
4439  switch(suballocType1)
-
4440  {
-
4441  case VMA_SUBALLOCATION_TYPE_FREE:
-
4442  return false;
-
4443  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
-
4444  return true;
-
4445  case VMA_SUBALLOCATION_TYPE_BUFFER:
-
4446  return
-
4447  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
-
4448  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
-
4449  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
-
4450  return
-
4451  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
-
4452  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
-
4453  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
-
4454  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
-
4455  return
-
4456  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
-
4457  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
-
4458  return false;
-
4459  default:
-
4460  VMA_ASSERT(0);
-
4461  return true;
+
4413 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
+
4414 
+
4415 #endif // #ifndef VMA_SORT
+
4416 
+
4417 /*
+
4418 Returns true if two memory blocks occupy overlapping pages.
+
4419 ResourceA must be at a lower memory offset than ResourceB.
+
4420 
+
4421 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
+
4422 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
+
4423 */
+
4424 static inline bool VmaBlocksOnSamePage(
+
4425  VkDeviceSize resourceAOffset,
+
4426  VkDeviceSize resourceASize,
+
4427  VkDeviceSize resourceBOffset,
+
4428  VkDeviceSize pageSize)
+
4429 {
+
4430  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
+
4431  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
+
4432  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
+
4433  VkDeviceSize resourceBStart = resourceBOffset;
+
4434  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
+
4435  return resourceAEndPage == resourceBStartPage;
+
4436 }
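VmaBlocksOnSamePage() masks the address of A's last byte and B's first byte down to their page starts and reports a conflict only when the two pages coincide; pageSize (bufferImageGranularity) must be a power of two for the mask to be valid. A hypothetical illustration with a 4 KiB granularity:

#include <cassert>
static void TestBlocksOnSamePage()
{
    // A occupies [0, 4096), B starts at 4096: A's last byte is on page 0,
    // B's first byte is on page 4096, so there is no conflict.
    assert(!VmaBlocksOnSamePage(0, 4096, 4096, 4096));
    // A occupies [0, 4000), B starts at 4000: both touch the page [0, 4096).
    assert(VmaBlocksOnSamePage(0, 4000, 4000, 4096));
}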
+
4437 
+
4438 enum VmaSuballocationType
+
4439 {
+
4440  VMA_SUBALLOCATION_TYPE_FREE = 0,
+
4441  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
+
4442  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
+
4443  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
+
4444  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
+
4445  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
+
4446  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
+
4447 };
+
4448 
+
4449 /*
+
4450 Returns true if given suballocation types could conflict and must respect
+
4451 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
+
4452 or linear image and another one is optimal image. If type is unknown, behave
+
4453 conservatively.
+
4454 */
+
4455 static inline bool VmaIsBufferImageGranularityConflict(
+
4456  VmaSuballocationType suballocType1,
+
4457  VmaSuballocationType suballocType2)
+
4458 {
+
4459  if(suballocType1 > suballocType2)
+
4460  {
+
4461  VMA_SWAP(suballocType1, suballocType2);
4462  }
-
4463 }
-
4464 
-
4465 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
-
4466 {
-
4467 #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
-
4468  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
-
4469  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
-
4470  for(size_t i = 0; i < numberCount; ++i, ++pDst)
-
4471  {
-
4472  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
-
4473  }
-
4474 #else
-
4475  // no-op
-
4476 #endif
-
4477 }
-
4478 
-
4479 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
-
4480 {
-
4481 #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
-
4482  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
-
4483  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
-
4484  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
-
4485  {
-
4486  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
-
4487  {
-
4488  return false;
-
4489  }
-
4490  }
-
4491 #endif
-
4492  return true;
-
4493 }
-
4494 
-
4495 /*
-
4496 Fills structure with parameters of an example buffer to be used for transfers
-
4497 during GPU memory defragmentation.
-
4498 */
-
4499 static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
-
4500 {
-
4501  memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
-
4502  outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
-
4503  outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
-
4504  outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
-
4505 }
-
4506 
-
4507 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
-
4508 struct VmaMutexLock
-
4509 {
-
4510  VMA_CLASS_NO_COPY(VmaMutexLock)
-
4511 public:
-
4512  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
-
4513  m_pMutex(useMutex ? &mutex : VMA_NULL)
-
4514  { if(m_pMutex) { m_pMutex->Lock(); } }
-
4515  ~VmaMutexLock()
-
4516  { if(m_pMutex) { m_pMutex->Unlock(); } }
-
4517 private:
-
4518  VMA_MUTEX* m_pMutex;
-
4519 };
-
4520 
-
4521 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
-
4522 struct VmaMutexLockRead
-
4523 {
-
4524  VMA_CLASS_NO_COPY(VmaMutexLockRead)
-
4525 public:
-
4526  VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
-
4527  m_pMutex(useMutex ? &mutex : VMA_NULL)
-
4528  { if(m_pMutex) { m_pMutex->LockRead(); } }
-
4529  ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
-
4530 private:
-
4531  VMA_RW_MUTEX* m_pMutex;
-
4532 };
-
4533 
-
4534 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
-
4535 struct VmaMutexLockWrite
-
4536 {
-
4537  VMA_CLASS_NO_COPY(VmaMutexLockWrite)
-
4538 public:
-
4539  VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
-
4540  m_pMutex(useMutex ? &mutex : VMA_NULL)
-
4541  { if(m_pMutex) { m_pMutex->LockWrite(); } }
-
4542  ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
-
4543 private:
-
4544  VMA_RW_MUTEX* m_pMutex;
-
4545 };
-
4546 
-
4547 #if VMA_DEBUG_GLOBAL_MUTEX
-
4548  static VMA_MUTEX gDebugGlobalMutex;
-
4549  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
-
4550 #else
-
4551  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
4552 #endif
-
4553 
-
4554 // Minimum size of a free suballocation to register it in the free suballocation collection.
-
4555 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
-
4556 
-
4557 /*
-
4558 Performs binary search and returns iterator to first element that is greater or
-
4559 equal to (key), according to comparison (cmp).
-
4560 
-
4561 Cmp should return true if first argument is less than second argument.
-
4562 
-
4563 Returned value is the found element, if present in the collection or place where
-
4564 new element with value (key) should be inserted.
-
4565 */
-
4566 template <typename CmpLess, typename IterT, typename KeyT>
-
4567 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, const CmpLess& cmp)
-
4568 {
-
4569  size_t down = 0, up = (end - beg);
-
4570  while(down < up)
-
4571  {
-
4572  const size_t mid = (down + up) / 2;
-
4573  if(cmp(*(beg+mid), key))
-
4574  {
-
4575  down = mid + 1;
-
4576  }
-
4577  else
-
4578  {
-
4579  up = mid;
-
4580  }
-
4581  }
-
4582  return beg + down;
-
4583 }
-
4584 
-
4585 template<typename CmpLess, typename IterT, typename KeyT>
-
4586 IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
-
4587 {
-
4588  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
-
4589  beg, end, value, cmp);
-
4590  if(it == end ||
-
4591  (!cmp(*it, value) && !cmp(value, *it)))
-
4592  {
-
4593  return it;
-
4594  }
-
4595  return end;
-
4596 }
-
4597 
-
4598 /*
-
4599 Returns true if all pointers in the array are not-null and unique.
-
4600 Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
-
4601 T must be pointer type, e.g. VmaAllocation, VmaPool.
-
4602 */
-
4603 template<typename T>
-
4604 static bool VmaValidatePointerArray(uint32_t count, const T* arr)
-
4605 {
-
4606  for(uint32_t i = 0; i < count; ++i)
-
4607  {
-
4608  const T iPtr = arr[i];
-
4609  if(iPtr == VMA_NULL)
-
4610  {
-
4611  return false;
-
4612  }
-
4613  for(uint32_t j = i + 1; j < count; ++j)
-
4614  {
-
4615  if(iPtr == arr[j])
-
4616  {
-
4617  return false;
-
4618  }
-
4619  }
-
4620  }
-
4621  return true;
-
4622 }
-
4623 
-
4624 template<typename MainT, typename NewT>
-
4625 static inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct)
-
4626 {
-
4627  newStruct->pNext = mainStruct->pNext;
-
4628  mainStruct->pNext = newStruct;
-
4629 }
-
4630 
-
4632 // Memory allocation
-
4633 
-
4634 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
-
4635 {
-
4636  void* result = VMA_NULL;
-
4637  if((pAllocationCallbacks != VMA_NULL) &&
-
4638  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
-
4639  {
-
4640  result = (*pAllocationCallbacks->pfnAllocation)(
-
4641  pAllocationCallbacks->pUserData,
-
4642  size,
-
4643  alignment,
-
4644  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+
4463 
+
4464  switch(suballocType1)
+
4465  {
+
4466  case VMA_SUBALLOCATION_TYPE_FREE:
+
4467  return false;
+
4468  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
+
4469  return true;
+
4470  case VMA_SUBALLOCATION_TYPE_BUFFER:
+
4471  return
+
4472  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
+
4473  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
+
4474  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
+
4475  return
+
4476  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
+
4477  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
+
4478  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
+
4479  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
+
4480  return
+
4481  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
+
4482  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
+
4483  return false;
+
4484  default:
+
4485  VMA_ASSERT(0);
+
4486  return true;
+
4487  }
+
4488 }
+
4489 
+
4490 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
+
4491 {
+
4492 #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
+
4493  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
+
4494  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
+
4495  for(size_t i = 0; i < numberCount; ++i, ++pDst)
+
4496  {
+
4497  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
+
4498  }
+
4499 #else
+
4500  // no-op
+
4501 #endif
+
4502 }
+
4503 
+
4504 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
+
4505 {
+
4506 #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
+
4507  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
+
4508  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
+
4509  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
+
4510  {
+
4511  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
+
4512  {
+
4513  return false;
+
4514  }
+
4515  }
+
4516 #endif
+
4517  return true;
+
4518 }
+
4519 
+
4520 /*
+
4521 Fills structure with parameters of an example buffer to be used for transfers
+
4522 during GPU memory defragmentation.
+
4523 */
+
4524 static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
+
4525 {
+
4526  memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
+
4527  outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+
4528  outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
4529  outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
+
4530 }
+
4531 
+
4532 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
+
4533 struct VmaMutexLock
+
4534 {
+
4535  VMA_CLASS_NO_COPY(VmaMutexLock)
+
4536 public:
+
4537  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
+
4538  m_pMutex(useMutex ? &mutex : VMA_NULL)
+
4539  { if(m_pMutex) { m_pMutex->Lock(); } }
+
4540  ~VmaMutexLock()
+
4541  { if(m_pMutex) { m_pMutex->Unlock(); } }
+
4542 private:
+
4543  VMA_MUTEX* m_pMutex;
+
4544 };
+
4545 
+
4546 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
+
4547 struct VmaMutexLockRead
+
4548 {
+
4549  VMA_CLASS_NO_COPY(VmaMutexLockRead)
+
4550 public:
+
4551  VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
+
4552  m_pMutex(useMutex ? &mutex : VMA_NULL)
+
4553  { if(m_pMutex) { m_pMutex->LockRead(); } }
+
4554  ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
+
4555 private:
+
4556  VMA_RW_MUTEX* m_pMutex;
+
4557 };
+
4558 
+
4559 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
+
4560 struct VmaMutexLockWrite
+
4561 {
+
4562  VMA_CLASS_NO_COPY(VmaMutexLockWrite)
+
4563 public:
+
4564  VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
+
4565  m_pMutex(useMutex ? &mutex : VMA_NULL)
+
4566  { if(m_pMutex) { m_pMutex->LockWrite(); } }
+
4567  ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
+
4568 private:
+
4569  VMA_RW_MUTEX* m_pMutex;
+
4570 };
+
4571 
+
4572 #if VMA_DEBUG_GLOBAL_MUTEX
+
4573  static VMA_MUTEX gDebugGlobalMutex;
+
4574  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
+
4575 #else
+
4576  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
4577 #endif
+
4578 
+
4579 // Minimum size of a free suballocation to register it in the free suballocation collection.
+
4580 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
+
4581 
+
4582 /*
+
4583 Performs binary search and returns iterator to first element that is greater or
+
4584 equal to (key), according to comparison (cmp).
+
4585 
+
4586 Cmp should return true if first argument is less than second argument.
+
4587 
+
4588 Returned value is the found element, if present in the collection or place where
+
4589 new element with value (key) should be inserted.
+
4590 */
+
4591 template <typename CmpLess, typename IterT, typename KeyT>
+
4592 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, const CmpLess& cmp)
+
4593 {
+
4594  size_t down = 0, up = (end - beg);
+
4595  while(down < up)
+
4596  {
+
4597  const size_t mid = (down + up) / 2;
+
4598  if(cmp(*(beg+mid), key))
+
4599  {
+
4600  down = mid + 1;
+
4601  }
+
4602  else
+
4603  {
+
4604  up = mid;
+
4605  }
+
4606  }
+
4607  return beg + down;
+
4608 }
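VmaBinaryFindFirstNotLess() behaves like std::lower_bound over the range [beg, end) with a user-supplied less-than comparison. A hypothetical usage sketch, not part of the patch:

#include <cassert>
static void TestBinaryFind()
{
    const uint32_t arr[] = { 1, 3, 3, 7 };
    const uint32_t* it = VmaBinaryFindFirstNotLess(
        arr, arr + 4, 3u, [](uint32_t a, uint32_t b) { return a < b; });
    assert(it == arr + 1); // first element that is not less than the key
}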
+
4609 
+
4610 template<typename CmpLess, typename IterT, typename KeyT>
+
4611 IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
+
4612 {
+
4613  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
+
4614  beg, end, value, cmp);
+
4615  if(it == end ||
+
4616  (!cmp(*it, value) && !cmp(value, *it)))
+
4617  {
+
4618  return it;
+
4619  }
+
4620  return end;
+
4621 }
+
4622 
+
4623 /*
+
4624 Returns true if all pointers in the array are non-null and unique.
+
4625 Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
+
4626 T must be pointer type, e.g. VmaAllocation, VmaPool.
+
4627 */
+
4628 template<typename T>
+
4629 static bool VmaValidatePointerArray(uint32_t count, const T* arr)
+
4630 {
+
4631  for(uint32_t i = 0; i < count; ++i)
+
4632  {
+
4633  const T iPtr = arr[i];
+
4634  if(iPtr == VMA_NULL)
+
4635  {
+
4636  return false;
+
4637  }
+
4638  for(uint32_t j = i + 1; j < count; ++j)
+
4639  {
+
4640  if(iPtr == arr[j])
+
4641  {
+
4642  return false;
+
4643  }
+
4644  }
4645  }
-
4646  else
-
4647  {
-
4648  result = VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
-
4649  }
-
4650  VMA_ASSERT(result != VMA_NULL && "CPU memory allocation failed.");
-
4651  return result;
-
4652 }
-
4653 
-
4654 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
-
4655 {
-
4656  if((pAllocationCallbacks != VMA_NULL) &&
-
4657  (pAllocationCallbacks->pfnFree != VMA_NULL))
-
4658  {
-
4659  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
-
4660  }
-
4661  else
-
4662  {
-
4663  VMA_SYSTEM_FREE(ptr);
-
4664  }
-
4665 }
-
4666 
-
4667 template<typename T>
-
4668 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
-
4669 {
-
4670  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
-
4671 }
-
4672 
-
4673 template<typename T>
-
4674 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
-
4675 {
-
4676  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
+
4646  return true;
+
4647 }
+
4648 
+
4649 template<typename MainT, typename NewT>
+
4650 static inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct)
+
4651 {
+
4652  newStruct->pNext = mainStruct->pNext;
+
4653  mainStruct->pNext = newStruct;
+
4654 }
+
4655 
+
4657 // Memory allocation
+
4658 
+
4659 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
+
4660 {
+
4661  void* result = VMA_NULL;
+
4662  if((pAllocationCallbacks != VMA_NULL) &&
+
4663  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
+
4664  {
+
4665  result = (*pAllocationCallbacks->pfnAllocation)(
+
4666  pAllocationCallbacks->pUserData,
+
4667  size,
+
4668  alignment,
+
4669  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+
4670  }
+
4671  else
+
4672  {
+
4673  result = VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
+
4674  }
+
4675  VMA_ASSERT(result != VMA_NULL && "CPU memory allocation failed.");
+
4676  return result;
4677 }
4678 
-
4679 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
-
4680 
-
4681 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
-
4682 
-
4683 template<typename T>
-
4684 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
-
4685 {
-
4686  ptr->~T();
-
4687  VmaFree(pAllocationCallbacks, ptr);
-
4688 }
-
4689 
-
4690 template<typename T>
-
4691 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
-
4692 {
-
4693  if(ptr != VMA_NULL)
-
4694  {
-
4695  for(size_t i = count; i--; )
-
4696  {
-
4697  ptr[i].~T();
-
4698  }
-
4699  VmaFree(pAllocationCallbacks, ptr);
-
4700  }
-
4701 }
-
4702 
-
4703 static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr)
-
4704 {
-
4705  if(srcStr != VMA_NULL)
-
4706  {
-
4707  const size_t len = strlen(srcStr);
-
4708  char* const result = vma_new_array(allocs, char, len + 1);
-
4709  memcpy(result, srcStr, len + 1);
-
4710  return result;
-
4711  }
-
4712  else
-
4713  {
-
4714  return VMA_NULL;
-
4715  }
-
4716 }
-
4717 
-
4718 static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str)
-
4719 {
-
4720  if(str != VMA_NULL)
-
4721  {
-
4722  const size_t len = strlen(str);
-
4723  vma_delete_array(allocs, str, len + 1);
-
4724  }
-
4725 }
-
4726 
-
4727 // STL-compatible allocator.
-
4728 template<typename T>
-
4729 class VmaStlAllocator
-
4730 {
-
4731 public:
-
4732  const VkAllocationCallbacks* const m_pCallbacks;
-
4733  typedef T value_type;
-
4734 
-
4735  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
-
4736  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
-
4737 
-
4738  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
-
4739  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
-
4740 
-
4741  template<typename U>
-
4742  bool operator==(const VmaStlAllocator<U>& rhs) const
-
4743  {
-
4744  return m_pCallbacks == rhs.m_pCallbacks;
-
4745  }
-
4746  template<typename U>
-
4747  bool operator!=(const VmaStlAllocator<U>& rhs) const
-
4748  {
-
4749  return m_pCallbacks != rhs.m_pCallbacks;
-
4750  }
+
4679 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
+
4680 {
+
4681  if((pAllocationCallbacks != VMA_NULL) &&
+
4682  (pAllocationCallbacks->pfnFree != VMA_NULL))
+
4683  {
+
4684  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
+
4685  }
+
4686  else
+
4687  {
+
4688  VMA_SYSTEM_FREE(ptr);
+
4689  }
+
4690 }
+
4691 
+
4692 template<typename T>
+
4693 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
+
4694 {
+
4695  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
+
4696 }
+
4697 
+
4698 template<typename T>
+
4699 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
+
4700 {
+
4701  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
+
4702 }
+
4703 
+
4704 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
+
4705 
+
4706 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
+
4707 
+
4708 template<typename T>
+
4709 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
+
4710 {
+
4711  ptr->~T();
+
4712  VmaFree(pAllocationCallbacks, ptr);
+
4713 }
+
4714 
+
4715 template<typename T>
+
4716 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
+
4717 {
+
4718  if(ptr != VMA_NULL)
+
4719  {
+
4720  for(size_t i = count; i--; )
+
4721  {
+
4722  ptr[i].~T();
+
4723  }
+
4724  VmaFree(pAllocationCallbacks, ptr);
+
4725  }
+
4726 }
+
4727 
+
4728 static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr)
+
4729 {
+
4730  if(srcStr != VMA_NULL)
+
4731  {
+
4732  const size_t len = strlen(srcStr);
+
4733  char* const result = vma_new_array(allocs, char, len + 1);
+
4734  memcpy(result, srcStr, len + 1);
+
4735  return result;
+
4736  }
+
4737  else
+
4738  {
+
4739  return VMA_NULL;
+
4740  }
+
4741 }
+
4742 
+
4743 static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str)
+
4744 {
+
4745  if(str != VMA_NULL)
+
4746  {
+
4747  const size_t len = strlen(str);
+
4748  vma_delete_array(allocs, str, len + 1);
+
4749  }
+
4750 }
4751 
-
4752  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
-
4753 };
-
4754 
-
4755 #if VMA_USE_STL_VECTOR
-
4756 
-
4757 #define VmaVector std::vector
-
4758 
-
4759 template<typename T, typename allocatorT>
-
4760 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
-
4761 {
-
4762  vec.insert(vec.begin() + index, item);
-
4763 }
-
4764 
-
4765 template<typename T, typename allocatorT>
-
4766 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
-
4767 {
-
4768  vec.erase(vec.begin() + index);
-
4769 }
-
4770 
-
4771 #else // #if VMA_USE_STL_VECTOR
-
4772 
-
4773 /* Class with interface compatible with subset of std::vector.
-
4774 T must be POD because constructors and destructors are not called and memcpy is
-
4775 used for these objects. */
-
4776 template<typename T, typename AllocatorT>
-
4777 class VmaVector
-
4778 {
-
4779 public:
-
4780  typedef T value_type;
+
4752 // STL-compatible allocator.
+
4753 template<typename T>
+
4754 class VmaStlAllocator
+
4755 {
+
4756 public:
+
4757  const VkAllocationCallbacks* const m_pCallbacks;
+
4758  typedef T value_type;
+
4759 
+
4760  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
+
4761  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
+
4762 
+
4763  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
+
4764  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
+
4765 
+
4766  template<typename U>
+
4767  bool operator==(const VmaStlAllocator<U>& rhs) const
+
4768  {
+
4769  return m_pCallbacks == rhs.m_pCallbacks;
+
4770  }
+
4771  template<typename U>
+
4772  bool operator!=(const VmaStlAllocator<U>& rhs) const
+
4773  {
+
4774  return m_pCallbacks != rhs.m_pCallbacks;
+
4775  }
+
4776 
+
4777  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
+
4778 };
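VmaStlAllocator satisfies the minimal C++11 allocator requirements (value_type, allocate, deallocate, rebinding copy constructor, equality operators), so standard containers can route their storage through the user's VkAllocationCallbacks; passing VMA_NULL falls back to VMA_SYSTEM_ALIGNED_MALLOC / VMA_SYSTEM_FREE. A hypothetical usage sketch:

#include <vector>
static void TestStlAllocator()
{
    const VkAllocationCallbacks* allocs = VMA_NULL; // use the system fallback
    const VmaStlAllocator<uint32_t> stlAllocator(allocs);
    std::vector< uint32_t, VmaStlAllocator<uint32_t> > v(stlAllocator);
    v.push_back(42u);
}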
+
4779 
+
4780 #if VMA_USE_STL_VECTOR
4781 
-
4782  VmaVector(const AllocatorT& allocator) :
-
4783  m_Allocator(allocator),
-
4784  m_pArray(VMA_NULL),
-
4785  m_Count(0),
-
4786  m_Capacity(0)
-
4787  {
-
4788  }
+
4782 #define VmaVector std::vector
+
4783 
+
4784 template<typename T, typename allocatorT>
+
4785 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
+
4786 {
+
4787  vec.insert(vec.begin() + index, item);
+
4788 }
4789 
-
4790  VmaVector(size_t count, const AllocatorT& allocator) :
-
4791  m_Allocator(allocator),
-
4792  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
-
4793  m_Count(count),
-
4794  m_Capacity(count)
-
4795  {
-
4796  }
-
4797 
-
4798  // This version of the constructor is here for compatibility with pre-C++14 std::vector.
-
4799  // value is unused.
-
4800  VmaVector(size_t count, const T& value, const AllocatorT& allocator)
-
4801  : VmaVector(count, allocator) {}
-
4802 
-
4803  VmaVector(const VmaVector<T, AllocatorT>& src) :
-
4804  m_Allocator(src.m_Allocator),
-
4805  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
-
4806  m_Count(src.m_Count),
-
4807  m_Capacity(src.m_Count)
-
4808  {
-
4809  if(m_Count != 0)
-
4810  {
-
4811  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
-
4812  }
+
4790 template<typename T, typename allocatorT>
+
4791 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
+
4792 {
+
4793  vec.erase(vec.begin() + index);
+
4794 }
+
4795 
+
4796 #else // #if VMA_USE_STL_VECTOR
+
4797 
+
4798 /* Class with an interface compatible with a subset of std::vector.
+
4799 T must be POD because constructors and destructors are not called and memcpy is
+
4800 used for these objects. */
+
4801 template<typename T, typename AllocatorT>
+
4802 class VmaVector
+
4803 {
+
4804 public:
+
4805  typedef T value_type;
+
4806 
+
4807  VmaVector(const AllocatorT& allocator) :
+
4808  m_Allocator(allocator),
+
4809  m_pArray(VMA_NULL),
+
4810  m_Count(0),
+
4811  m_Capacity(0)
+
4812  {
4813  }
-
4814 
-
4815  ~VmaVector()
-
4816  {
-
4817  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
-
4818  }
-
4819 
-
4820  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
-
4821  {
-
4822  if(&rhs != this)
-
4823  {
-
4824  resize(rhs.m_Count);
-
4825  if(m_Count != 0)
-
4826  {
-
4827  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
-
4828  }
-
4829  }
-
4830  return *this;
-
4831  }
-
4832 
-
4833  bool empty() const { return m_Count == 0; }
-
4834  size_t size() const { return m_Count; }
-
4835  T* data() { return m_pArray; }
-
4836  const T* data() const { return m_pArray; }
-
4837 
-
4838  T& operator[](size_t index)
-
4839  {
-
4840  VMA_HEAVY_ASSERT(index < m_Count);
-
4841  return m_pArray[index];
-
4842  }
-
4843  const T& operator[](size_t index) const
-
4844  {
-
4845  VMA_HEAVY_ASSERT(index < m_Count);
-
4846  return m_pArray[index];
-
4847  }
-
4848 
-
4849  T& front()
-
4850  {
-
4851  VMA_HEAVY_ASSERT(m_Count > 0);
-
4852  return m_pArray[0];
-
4853  }
-
4854  const T& front() const
-
4855  {
-
4856  VMA_HEAVY_ASSERT(m_Count > 0);
-
4857  return m_pArray[0];
-
4858  }
-
4859  T& back()
-
4860  {
-
4861  VMA_HEAVY_ASSERT(m_Count > 0);
-
4862  return m_pArray[m_Count - 1];
-
4863  }
-
4864  const T& back() const
-
4865  {
-
4866  VMA_HEAVY_ASSERT(m_Count > 0);
-
4867  return m_pArray[m_Count - 1];
-
4868  }
-
4869 
-
4870  void reserve(size_t newCapacity, bool freeMemory = false)
-
4871  {
-
4872  newCapacity = VMA_MAX(newCapacity, m_Count);
-
4873 
-
4874  if((newCapacity < m_Capacity) && !freeMemory)
-
4875  {
-
4876  newCapacity = m_Capacity;
-
4877  }
-
4878 
-
4879  if(newCapacity != m_Capacity)
-
4880  {
-
4881  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
-
4882  if(m_Count != 0)
-
4883  {
-
4884  memcpy(newArray, m_pArray, m_Count * sizeof(T));
-
4885  }
-
4886  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
-
4887  m_Capacity = newCapacity;
-
4888  m_pArray = newArray;
-
4889  }
-
4890  }
-
4891 
-
4892  void resize(size_t newCount, bool freeMemory = false)
-
4893  {
-
4894  size_t newCapacity = m_Capacity;
-
4895  if(newCount > m_Capacity)
-
4896  {
-
4897  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
-
4898  }
-
4899  else if(freeMemory)
+
4814 
+
4815  VmaVector(size_t count, const AllocatorT& allocator) :
+
4816  m_Allocator(allocator),
+
4817  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
+
4818  m_Count(count),
+
4819  m_Capacity(count)
+
4820  {
+
4821  }
+
4822 
+
4823  // This version of the constructor is here for compatibility with pre-C++14 std::vector.
+
4824  // value is unused.
+
4825  VmaVector(size_t count, const T& value, const AllocatorT& allocator)
+
4826  : VmaVector(count, allocator) {}
+
4827 
+
4828  VmaVector(const VmaVector<T, AllocatorT>& src) :
+
4829  m_Allocator(src.m_Allocator),
+
4830  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
+
4831  m_Count(src.m_Count),
+
4832  m_Capacity(src.m_Count)
+
4833  {
+
4834  if(m_Count != 0)
+
4835  {
+
4836  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
+
4837  }
+
4838  }
+
4839 
+
4840  ~VmaVector()
+
4841  {
+
4842  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
+
4843  }
+
4844 
+
4845  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
+
4846  {
+
4847  if(&rhs != this)
+
4848  {
+
4849  resize(rhs.m_Count);
+
4850  if(m_Count != 0)
+
4851  {
+
4852  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
+
4853  }
+
4854  }
+
4855  return *this;
+
4856  }
+
4857 
+
4858  bool empty() const { return m_Count == 0; }
+
4859  size_t size() const { return m_Count; }
+
4860  T* data() { return m_pArray; }
+
4861  const T* data() const { return m_pArray; }
+
4862 
+
4863  T& operator[](size_t index)
+
4864  {
+
4865  VMA_HEAVY_ASSERT(index < m_Count);
+
4866  return m_pArray[index];
+
4867  }
+
4868  const T& operator[](size_t index) const
+
4869  {
+
4870  VMA_HEAVY_ASSERT(index < m_Count);
+
4871  return m_pArray[index];
+
4872  }
+
4873 
+
4874  T& front()
+
4875  {
+
4876  VMA_HEAVY_ASSERT(m_Count > 0);
+
4877  return m_pArray[0];
+
4878  }
+
4879  const T& front() const
+
4880  {
+
4881  VMA_HEAVY_ASSERT(m_Count > 0);
+
4882  return m_pArray[0];
+
4883  }
+
4884  T& back()
+
4885  {
+
4886  VMA_HEAVY_ASSERT(m_Count > 0);
+
4887  return m_pArray[m_Count - 1];
+
4888  }
+
4889  const T& back() const
+
4890  {
+
4891  VMA_HEAVY_ASSERT(m_Count > 0);
+
4892  return m_pArray[m_Count - 1];
+
4893  }
+
4894 
+
4895  void reserve(size_t newCapacity, bool freeMemory = false)
+
4896  {
+
4897  newCapacity = VMA_MAX(newCapacity, m_Count);
+
4898 
+
4899  if((newCapacity < m_Capacity) && !freeMemory)
4900  {
-
4901  newCapacity = newCount;
+
4901  newCapacity = m_Capacity;
4902  }
-
4903 
+
4903 
4904  if(newCapacity != m_Capacity)
4905  {
-
4906  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
-
4907  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
-
4908  if(elementsToCopy != 0)
-
4909  {
-
4910  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
-
4911  }
-
4912  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
-
4913  m_Capacity = newCapacity;
-
4914  m_pArray = newArray;
-
4915  }
+
4906  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
+
4907  if(m_Count != 0)
+
4908  {
+
4909  memcpy(newArray, m_pArray, m_Count * sizeof(T));
+
4910  }
+
4911  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
+
4912  m_Capacity = newCapacity;
+
4913  m_pArray = newArray;
+
4914  }
+
4915  }
4916 
-
4917  m_Count = newCount;
-
4918  }
-
4919 
-
4920  void clear(bool freeMemory = false)
-
4921  {
-
4922  resize(0, freeMemory);
-
4923  }
-
4924 
-
4925  void insert(size_t index, const T& src)
-
4926  {
-
4927  VMA_HEAVY_ASSERT(index <= m_Count);
-
4928  const size_t oldCount = size();
-
4929  resize(oldCount + 1);
-
4930  if(index < oldCount)
-
4931  {
-
4932  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
-
4933  }
-
4934  m_pArray[index] = src;
-
4935  }
-
4936 
-
4937  void remove(size_t index)
-
4938  {
-
4939  VMA_HEAVY_ASSERT(index < m_Count);
-
4940  const size_t oldCount = size();
-
4941  if(index < oldCount - 1)
-
4942  {
-
4943  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
-
4944  }
-
4945  resize(oldCount - 1);
-
4946  }
-
4947 
-
4948  void push_back(const T& src)
-
4949  {
-
4950  const size_t newIndex = size();
-
4951  resize(newIndex + 1);
-
4952  m_pArray[newIndex] = src;
-
4953  }
-
4954 
-
4955  void pop_back()
-
4956  {
-
4957  VMA_HEAVY_ASSERT(m_Count > 0);
-
4958  resize(size() - 1);
-
4959  }
-
4960 
-
4961  void push_front(const T& src)
-
4962  {
-
4963  insert(0, src);
-
4964  }
-
4965 
-
4966  void pop_front()
-
4967  {
-
4968  VMA_HEAVY_ASSERT(m_Count > 0);
-
4969  remove(0);
-
4970  }
-
4971 
-
4972  typedef T* iterator;
-
4973 
-
4974  iterator begin() { return m_pArray; }
-
4975  iterator end() { return m_pArray + m_Count; }
-
4976 
-
4977 private:
-
4978  AllocatorT m_Allocator;
-
4979  T* m_pArray;
-
4980  size_t m_Count;
-
4981  size_t m_Capacity;
-
4982 };
-
4983 
-
4984 template<typename T, typename allocatorT>
-
4985 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
-
4986 {
-
4987  vec.insert(index, item);
-
4988 }
-
4989 
-
4990 template<typename T, typename allocatorT>
-
4991 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
-
4992 {
-
4993  vec.remove(index);
-
4994 }
-
4995 
-
4996 #endif // #if VMA_USE_STL_VECTOR
-
4997 
-
4998 template<typename CmpLess, typename VectorT>
-
4999 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
-
5000 {
-
5001  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
-
5002  vector.data(),
-
5003  vector.data() + vector.size(),
-
5004  value,
-
5005  CmpLess()) - vector.data();
-
5006  VmaVectorInsert(vector, indexToInsert, value);
-
5007  return indexToInsert;
-
5008 }
-
5009 
-
5010 template<typename CmpLess, typename VectorT>
-
5011 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
-
5012 {
-
5013  CmpLess comparator;
-
5014  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
-
5015  vector.begin(),
-
5016  vector.end(),
-
5017  value,
-
5018  comparator);
-
5019  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
-
5020  {
-
5021  size_t indexToRemove = it - vector.begin();
-
5022  VmaVectorRemove(vector, indexToRemove);
-
5023  return true;
-
5024  }
-
5025  return false;
-
5026 }
-
5027 
-
5029 // class VmaSmallVector
-
5030 
-
5031 /*
-
5032 This is a vector (a variable-sized array), optimized for the case when the array is small.
-
5033 
-
5034 It contains some number of elements in-place, which allows it to avoid heap allocation
-
5035 when the actual number of elements is below that threshold. This allows normal "small"
-
5036 cases to be fast without losing generality for large inputs.
-
5037 */
-
5038 
-
5039 template<typename T, typename AllocatorT, size_t N>
-
5040 class VmaSmallVector
-
5041 {
-
5042 public:
-
5043  typedef T value_type;
-
5044 
-
5045  VmaSmallVector(const AllocatorT& allocator) :
-
5046  m_Count(0),
-
5047  m_DynamicArray(allocator)
-
5048  {
+
4917  void resize(size_t newCount, bool freeMemory = false)
+
4918  {
+
4919  size_t newCapacity = m_Capacity;
+
4920  if(newCount > m_Capacity)
+
4921  {
+
4922  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
+
4923  }
+
4924  else if(freeMemory)
+
4925  {
+
4926  newCapacity = newCount;
+
4927  }
+
4928 
+
4929  if(newCapacity != m_Capacity)
+
4930  {
+
4931  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
+
4932  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
+
4933  if(elementsToCopy != 0)
+
4934  {
+
4935  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
+
4936  }
+
4937  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
+
4938  m_Capacity = newCapacity;
+
4939  m_pArray = newArray;
+
4940  }
+
4941 
+
4942  m_Count = newCount;
+
4943  }
+
4944 
+
4945  void clear(bool freeMemory = false)
+
4946  {
+
4947  resize(0, freeMemory);
+
4948  }
+
4949 
+
4950  void insert(size_t index, const T& src)
+
4951  {
+
4952  VMA_HEAVY_ASSERT(index <= m_Count);
+
4953  const size_t oldCount = size();
+
4954  resize(oldCount + 1);
+
4955  if(index < oldCount)
+
4956  {
+
4957  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
+
4958  }
+
4959  m_pArray[index] = src;
+
4960  }
+
4961 
+
4962  void remove(size_t index)
+
4963  {
+
4964  VMA_HEAVY_ASSERT(index < m_Count);
+
4965  const size_t oldCount = size();
+
4966  if(index < oldCount - 1)
+
4967  {
+
4968  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
+
4969  }
+
4970  resize(oldCount - 1);
+
4971  }
+
4972 
+
4973  void push_back(const T& src)
+
4974  {
+
4975  const size_t newIndex = size();
+
4976  resize(newIndex + 1);
+
4977  m_pArray[newIndex] = src;
+
4978  }
+
4979 
+
4980  void pop_back()
+
4981  {
+
4982  VMA_HEAVY_ASSERT(m_Count > 0);
+
4983  resize(size() - 1);
+
4984  }
+
4985 
+
4986  void push_front(const T& src)
+
4987  {
+
4988  insert(0, src);
+
4989  }
+
4990 
+
4991  void pop_front()
+
4992  {
+
4993  VMA_HEAVY_ASSERT(m_Count > 0);
+
4994  remove(0);
+
4995  }
+
4996 
+
4997  typedef T* iterator;
+
4998 
+
4999  iterator begin() { return m_pArray; }
+
5000  iterator end() { return m_pArray + m_Count; }
+
5001 
+
5002 private:
+
5003  AllocatorT m_Allocator;
+
5004  T* m_pArray;
+
5005  size_t m_Count;
+
5006  size_t m_Capacity;
+
5007 };
+
5008 
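One detail of VmaVector worth calling out is the growth policy in resize above: when the vector must grow, the new capacity becomes max(newCount, max(oldCapacity * 3 / 2, 8)), i.e. at least eight elements and at least 1.5x the previous capacity. A small standalone check of that arithmetic follows; growCapacity is my name for the inlined formula.

#include <algorithm>
#include <cassert>
#include <cstddef>

// Hypothetical helper mirroring the capacity formula in VmaVector::resize.
static size_t growCapacity(size_t oldCapacity, size_t newCount)
{
    return std::max(newCount, std::max(oldCapacity * 3 / 2, (size_t)8));
}

int main()
{
    assert(growCapacity(0, 1) == 8);      // small vectors jump straight to 8
    assert(growCapacity(8, 9) == 12);     // then grow by a factor of 3/2
    assert(growCapacity(12, 100) == 100); // large requests go directly to newCount
    return 0;
}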
+
5009 template<typename T, typename allocatorT>
+
5010 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
+
5011 {
+
5012  vec.insert(index, item);
+
5013 }
+
5014 
+
5015 template<typename T, typename allocatorT>
+
5016 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
+
5017 {
+
5018  vec.remove(index);
+
5019 }
+
5020 
+
5021 #endif // #if VMA_USE_STL_VECTOR
+
5022 
+
5023 template<typename CmpLess, typename VectorT>
+
5024 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
+
5025 {
+
5026  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
+
5027  vector.data(),
+
5028  vector.data() + vector.size(),
+
5029  value,
+
5030  CmpLess()) - vector.data();
+
5031  VmaVectorInsert(vector, indexToInsert, value);
+
5032  return indexToInsert;
+
5033 }
+
5034 
+
5035 template<typename CmpLess, typename VectorT>
+
5036 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
+
5037 {
+
5038  CmpLess comparator;
+
5039  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
+
5040  vector.begin(),
+
5041  vector.end(),
+
5042  value,
+
5043  comparator);
+
5044  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
+
5045  {
+
5046  size_t indexToRemove = it - vector.begin();
+
5047  VmaVectorRemove(vector, indexToRemove);
+
5048  return true;
5049  }
-
5050  VmaSmallVector(size_t count, const AllocatorT& allocator) :
-
5051  m_Count(count),
-
5052  m_DynamicArray(count > N ? count : 0, allocator)
-
5053  {
-
5054  }
-
5055  template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
-
5056  VmaSmallVector(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>& src) = delete;
-
5057  template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
-
5058  VmaSmallVector<T, AllocatorT, N>& operator=(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>& rhs) = delete;
-
5059 
-
5060  bool empty() const { return m_Count == 0; }
-
5061  size_t size() const { return m_Count; }
-
5062  T* data() { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
-
5063  const T* data() const { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
-
5064 
-
5065  T& operator[](size_t index)
-
5066  {
-
5067  VMA_HEAVY_ASSERT(index < m_Count);
-
5068  return data()[index];
-
5069  }
-
5070  const T& operator[](size_t index) const
-
5071  {
-
5072  VMA_HEAVY_ASSERT(index < m_Count);
-
5073  return data()[index];
+
5050  return false;
+
5051 }
+
5052 
+
5054 // class VmaSmallVector
+
5055 
+
5056 /*
+
5057 This is a vector (a variable-sized array), optimized for the case when the array is small.
+
5058 
+
5059 It stores up to N elements in-place, which allows it to avoid heap allocation
+
5060 as long as the actual number of elements stays at or below that threshold. This keeps
+
5061 the common "small" case fast without losing generality for large inputs.
+
5062 */
+
5063 
+
5064 template<typename T, typename AllocatorT, size_t N>
+
5065 class VmaSmallVector
+
5066 {
+
5067 public:
+
5068  typedef T value_type;
+
5069 
+
5070  VmaSmallVector(const AllocatorT& allocator) :
+
5071  m_Count(0),
+
5072  m_DynamicArray(allocator)
+
5073  {
5074  }
-
5075 
-
5076  T& front()
-
5077  {
-
5078  VMA_HEAVY_ASSERT(m_Count > 0);
-
5079  return data()[0];
-
5080  }
-
5081  const T& front() const
-
5082  {
-
5083  VMA_HEAVY_ASSERT(m_Count > 0);
-
5084  return data()[0];
-
5085  }
-
5086  T& back()
-
5087  {
-
5088  VMA_HEAVY_ASSERT(m_Count > 0);
-
5089  return data()[m_Count - 1];
-
5090  }
-
5091  const T& back() const
-
5092  {
-
5093  VMA_HEAVY_ASSERT(m_Count > 0);
-
5094  return data()[m_Count - 1];
-
5095  }
-
5096 
-
5097  void resize(size_t newCount, bool freeMemory = false)
-
5098  {
-
5099  if(newCount > N && m_Count > N)
-
5100  {
-
5101  // Any direction, staying in m_DynamicArray
-
5102  m_DynamicArray.resize(newCount, freeMemory);
-
5103  }
-
5104  else if(newCount > N && m_Count <= N)
-
5105  {
-
5106  // Growing, moving from m_StaticArray to m_DynamicArray
-
5107  m_DynamicArray.resize(newCount, freeMemory);
-
5108  if(m_Count > 0)
-
5109  {
-
5110  memcpy(m_DynamicArray.data(), m_StaticArray, m_Count * sizeof(T));
-
5111  }
-
5112  }
-
5113  else if(newCount <= N && m_Count > N)
-
5114  {
-
5115  // Shrinking, moving from m_DynamicArray to m_StaticArray
-
5116  if(newCount > 0)
-
5117  {
-
5118  memcpy(m_StaticArray, m_DynamicArray.data(), newCount * sizeof(T));
-
5119  }
-
5120  m_DynamicArray.resize(0, freeMemory);
-
5121  }
-
5122  else
-
5123  {
-
5124  // Any direction, staying in m_StaticArray - nothing to do here
-
5125  }
-
5126  m_Count = newCount;
-
5127  }
-
5128 
-
5129  void clear(bool freeMemory = false)
-
5130  {
-
5131  m_DynamicArray.clear(freeMemory);
-
5132  m_Count = 0;
-
5133  }
-
5134 
-
5135  void insert(size_t index, const T& src)
-
5136  {
-
5137  VMA_HEAVY_ASSERT(index <= m_Count);
-
5138  const size_t oldCount = size();
-
5139  resize(oldCount + 1);
-
5140  T* const dataPtr = data();
-
5141  if(index < oldCount)
-
5142  {
-
5143  // I know, this could be more optimal for case where memmove can be memcpy directly from m_StaticArray to m_DynamicArray.
-
5144  memmove(dataPtr + (index + 1), dataPtr + index, (oldCount - index) * sizeof(T));
-
5145  }
-
5146  dataPtr[index] = src;
-
5147  }
-
5148 
-
5149  void remove(size_t index)
-
5150  {
-
5151  VMA_HEAVY_ASSERT(index < m_Count);
-
5152  const size_t oldCount = size();
-
5153  if(index < oldCount - 1)
-
5154  {
-
5155  // I know, this could be more optimal for case where memmove can be memcpy directly from m_DynamicArray to m_StaticArray.
-
5156  T* const dataPtr = data();
-
5157  memmove(dataPtr + index, dataPtr + (index + 1), (oldCount - index - 1) * sizeof(T));
-
5158  }
-
5159  resize(oldCount - 1);
-
5160  }
-
5161 
-
5162  void push_back(const T& src)
-
5163  {
-
5164  const size_t newIndex = size();
-
5165  resize(newIndex + 1);
-
5166  data()[newIndex] = src;
-
5167  }
-
5168 
-
5169  void pop_back()
-
5170  {
-
5171  VMA_HEAVY_ASSERT(m_Count > 0);
-
5172  resize(size() - 1);
-
5173  }
-
5174 
-
5175  void push_front(const T& src)
-
5176  {
-
5177  insert(0, src);
-
5178  }
-
5179 
-
5180  void pop_front()
-
5181  {
-
5182  VMA_HEAVY_ASSERT(m_Count > 0);
-
5183  remove(0);
-
5184  }
-
5185 
-
5186  typedef T* iterator;
-
5187 
-
5188  iterator begin() { return data(); }
-
5189  iterator end() { return data() + m_Count; }
-
5190 
-
5191 private:
-
5192  size_t m_Count;
-
5193  T m_StaticArray[N]; // Used when m_Size <= N
-
5194  VmaVector<T, AllocatorT> m_DynamicArray; // Used when m_Size > N
-
5195 };
-
5196 
-
5198 // class VmaPoolAllocator
+
5075  VmaSmallVector(size_t count, const AllocatorT& allocator) :
+
5076  m_Count(count),
+
5077  m_DynamicArray(count > N ? count : 0, allocator)
+
5078  {
+
5079  }
+
5080  template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
+
5081  VmaSmallVector(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>& src) = delete;
+
5082  template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
+
5083  VmaSmallVector<T, AllocatorT, N>& operator=(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>& rhs) = delete;
+
5084 
+
5085  bool empty() const { return m_Count == 0; }
+
5086  size_t size() const { return m_Count; }
+
5087  T* data() { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
+
5088  const T* data() const { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
+
5089 
+
5090  T& operator[](size_t index)
+
5091  {
+
5092  VMA_HEAVY_ASSERT(index < m_Count);
+
5093  return data()[index];
+
5094  }
+
5095  const T& operator[](size_t index) const
+
5096  {
+
5097  VMA_HEAVY_ASSERT(index < m_Count);
+
5098  return data()[index];
+
5099  }
+
5100 
+
5101  T& front()
+
5102  {
+
5103  VMA_HEAVY_ASSERT(m_Count > 0);
+
5104  return data()[0];
+
5105  }
+
5106  const T& front() const
+
5107  {
+
5108  VMA_HEAVY_ASSERT(m_Count > 0);
+
5109  return data()[0];
+
5110  }
+
5111  T& back()
+
5112  {
+
5113  VMA_HEAVY_ASSERT(m_Count > 0);
+
5114  return data()[m_Count - 1];
+
5115  }
+
5116  const T& back() const
+
5117  {
+
5118  VMA_HEAVY_ASSERT(m_Count > 0);
+
5119  return data()[m_Count - 1];
+
5120  }
+
5121 
+
5122  void resize(size_t newCount, bool freeMemory = false)
+
5123  {
+
5124  if(newCount > N && m_Count > N)
+
5125  {
+
5126  // Any direction, staying in m_DynamicArray
+
5127  m_DynamicArray.resize(newCount, freeMemory);
+
5128  }
+
5129  else if(newCount > N && m_Count <= N)
+
5130  {
+
5131  // Growing, moving from m_StaticArray to m_DynamicArray
+
5132  m_DynamicArray.resize(newCount, freeMemory);
+
5133  if(m_Count > 0)
+
5134  {
+
5135  memcpy(m_DynamicArray.data(), m_StaticArray, m_Count * sizeof(T));
+
5136  }
+
5137  }
+
5138  else if(newCount <= N && m_Count > N)
+
5139  {
+
5140  // Shrinking, moving from m_DynamicArray to m_StaticArray
+
5141  if(newCount > 0)
+
5142  {
+
5143  memcpy(m_StaticArray, m_DynamicArray.data(), newCount * sizeof(T));
+
5144  }
+
5145  m_DynamicArray.resize(0, freeMemory);
+
5146  }
+
5147  else
+
5148  {
+
5149  // Any direction, staying in m_StaticArray - nothing to do here
+
5150  }
+
5151  m_Count = newCount;
+
5152  }
+
5153 
+
5154  void clear(bool freeMemory = false)
+
5155  {
+
5156  m_DynamicArray.clear(freeMemory);
+
5157  m_Count = 0;
+
5158  }
+
5159 
+
5160  void insert(size_t index, const T& src)
+
5161  {
+
5162  VMA_HEAVY_ASSERT(index <= m_Count);
+
5163  const size_t oldCount = size();
+
5164  resize(oldCount + 1);
+
5165  T* const dataPtr = data();
+
5166  if(index < oldCount)
+
5167  {
+
5168  // I know, this could be more optimal for case where memmove can be memcpy directly from m_StaticArray to m_DynamicArray.
+
5169  memmove(dataPtr + (index + 1), dataPtr + index, (oldCount - index) * sizeof(T));
+
5170  }
+
5171  dataPtr[index] = src;
+
5172  }
+
5173 
+
5174  void remove(size_t index)
+
5175  {
+
5176  VMA_HEAVY_ASSERT(index < m_Count);
+
5177  const size_t oldCount = size();
+
5178  if(index < oldCount - 1)
+
5179  {
+
5180  // This could be more optimal: when the resize below will move the data from m_DynamicArray to m_StaticArray, the memmove could be a memcpy directly from m_DynamicArray to m_StaticArray.
+
5181  T* const dataPtr = data();
+
5182  memmove(dataPtr + index, dataPtr + (index + 1), (oldCount - index - 1) * sizeof(T));
+
5183  }
+
5184  resize(oldCount - 1);
+
5185  }
+
5186 
+
5187  void push_back(const T& src)
+
5188  {
+
5189  const size_t newIndex = size();
+
5190  resize(newIndex + 1);
+
5191  data()[newIndex] = src;
+
5192  }
+
5193 
+
5194  void pop_back()
+
5195  {
+
5196  VMA_HEAVY_ASSERT(m_Count > 0);
+
5197  resize(size() - 1);
+
5198  }
5199 
-
5200 /*
-
5201 Allocator for objects of type T using a list of arrays (pools) to speed up
-
5202 allocation. Number of elements that can be allocated is not bounded because
-
5203 allocator can create multiple blocks.
-
5204 */
-
5205 template<typename T>
-
5206 class VmaPoolAllocator
-
5207 {
-
5208  VMA_CLASS_NO_COPY(VmaPoolAllocator)
-
5209 public:
-
5210  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
-
5211  ~VmaPoolAllocator();
-
5212  template<typename... Types> T* Alloc(Types... args);
-
5213  void Free(T* ptr);
-
5214 
-
5215 private:
-
5216  union Item
-
5217  {
-
5218  uint32_t NextFreeIndex;
-
5219  alignas(T) char Value[sizeof(T)];
-
5220  };
+
5200  void push_front(const T& src)
+
5201  {
+
5202  insert(0, src);
+
5203  }
+
5204 
+
5205  void pop_front()
+
5206  {
+
5207  VMA_HEAVY_ASSERT(m_Count > 0);
+
5208  remove(0);
+
5209  }
+
5210 
+
5211  typedef T* iterator;
+
5212 
+
5213  iterator begin() { return data(); }
+
5214  iterator end() { return data() + m_Count; }
+
5215 
+
5216 private:
+
5217  size_t m_Count;
+
5218  T m_StaticArray[N]; // Used when m_Count <= N
+
5219  VmaVector<T, AllocatorT> m_DynamicArray; // Used when m_Count > N
+
5220 };
5221 
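To make the four resize cases of VmaSmallVector concrete, here is a stripped-down, self-contained small-buffer container for trivially copyable element types. This is an illustration of the technique only, not the library's class: SmallBuffer and its members are my names, and std::vector stands in for VmaVector.

#include <cassert>
#include <cstddef>
#include <cstring>
#include <vector>

// Minimal small-buffer array: elements live in m_Static until the count
// exceeds N, then they are copied into the heap-backed std::vector.
template<typename T, size_t N>
class SmallBuffer
{
public:
    size_t size() const { return m_Count; }
    T* data() { return m_Count > N ? m_Dynamic.data() : m_Static; }

    void resize(size_t newCount)
    {
        if(newCount > N && m_Count <= N)      // growing: static -> dynamic
        {
            m_Dynamic.resize(newCount);
            memcpy(m_Dynamic.data(), m_Static, m_Count * sizeof(T));
        }
        else if(newCount <= N && m_Count > N) // shrinking: dynamic -> static
        {
            memcpy(m_Static, m_Dynamic.data(), newCount * sizeof(T));
            m_Dynamic.clear();
        }
        else if(newCount > N)                 // staying in the dynamic array
        {
            m_Dynamic.resize(newCount);
        }
        // else: staying in the static array - nothing to do
        m_Count = newCount;
    }

private:
    size_t m_Count = 0;
    T m_Static[N];
    std::vector<T> m_Dynamic;
};

int main()
{
    SmallBuffer<int, 4> b;
    b.resize(3);        // fits in-place: no heap allocation
    b.data()[0] = 7;
    b.resize(10);       // crosses N: contents move to the heap
    assert(b.data()[0] == 7);
    return 0;
}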
-
5222  struct ItemBlock
-
5223  {
-
5224  Item* pItems;
-
5225  uint32_t Capacity;
-
5226  uint32_t FirstFreeIndex;
-
5227  };
-
5228 
-
5229  const VkAllocationCallbacks* m_pAllocationCallbacks;
-
5230  const uint32_t m_FirstBlockCapacity;
-
5231  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
-
5232 
-
5233  ItemBlock& CreateNewBlock();
-
5234 };
-
5235 
-
5236 template<typename T>
-
5237 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
-
5238  m_pAllocationCallbacks(pAllocationCallbacks),
-
5239  m_FirstBlockCapacity(firstBlockCapacity),
-
5240  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
-
5241 {
-
5242  VMA_ASSERT(m_FirstBlockCapacity > 1);
-
5243 }
-
5244 
-
5245 template<typename T>
-
5246 VmaPoolAllocator<T>::~VmaPoolAllocator()
-
5247 {
-
5248  for(size_t i = m_ItemBlocks.size(); i--; )
-
5249  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
-
5250  m_ItemBlocks.clear();
-
5251 }
-
5252 
-
5253 template<typename T>
-
5254 template<typename... Types> T* VmaPoolAllocator<T>::Alloc(Types... args)
-
5255 {
-
5256  for(size_t i = m_ItemBlocks.size(); i--; )
-
5257  {
-
5258  ItemBlock& block = m_ItemBlocks[i];
-
5259  // This block has some free items: Use first one.
-
5260  if(block.FirstFreeIndex != UINT32_MAX)
-
5261  {
-
5262  Item* const pItem = &block.pItems[block.FirstFreeIndex];
-
5263  block.FirstFreeIndex = pItem->NextFreeIndex;
-
5264  T* result = (T*)&pItem->Value;
-
5265  new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
-
5266  return result;
-
5267  }
-
5268  }
+
5223 // class VmaPoolAllocator
+
5224 
+
5225 /*
+
5226 Allocator for objects of type T using a list of arrays (pools) to speed up
+
5227 allocation. Number of elements that can be allocated is not bounded because
+
5228 allocator can create multiple blocks.
+
5229 */
+
5230 template<typename T>
+
5231 class VmaPoolAllocator
+
5232 {
+
5233  VMA_CLASS_NO_COPY(VmaPoolAllocator)
+
5234 public:
+
5235  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
+
5236  ~VmaPoolAllocator();
+
5237  template<typename... Types> T* Alloc(Types... args);
+
5238  void Free(T* ptr);
+
5239 
+
5240 private:
+
5241  union Item
+
5242  {
+
5243  uint32_t NextFreeIndex;
+
5244  alignas(T) char Value[sizeof(T)];
+
5245  };
+
5246 
+
5247  struct ItemBlock
+
5248  {
+
5249  Item* pItems;
+
5250  uint32_t Capacity;
+
5251  uint32_t FirstFreeIndex;
+
5252  };
+
5253 
+
5254  const VkAllocationCallbacks* m_pAllocationCallbacks;
+
5255  const uint32_t m_FirstBlockCapacity;
+
5256  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
+
5257 
+
5258  ItemBlock& CreateNewBlock();
+
5259 };
+
5260 
+
5261 template<typename T>
+
5262 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
+
5263  m_pAllocationCallbacks(pAllocationCallbacks),
+
5264  m_FirstBlockCapacity(firstBlockCapacity),
+
5265  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
+
5266 {
+
5267  VMA_ASSERT(m_FirstBlockCapacity > 1);
+
5268 }
5269 
-
5270  // No block has free item: Create new one and use it.
-
5271  ItemBlock& newBlock = CreateNewBlock();
-
5272  Item* const pItem = &newBlock.pItems[0];
-
5273  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
-
5274  T* result = (T*)&pItem->Value;
-
5275  new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
-
5276  return result;
-
5277 }
-
5278 
-
5279 template<typename T>
-
5280 void VmaPoolAllocator<T>::Free(T* ptr)
-
5281 {
-
5282  // Search all memory blocks to find ptr.
-
5283  for(size_t i = m_ItemBlocks.size(); i--; )
-
5284  {
-
5285  ItemBlock& block = m_ItemBlocks[i];
-
5286 
-
5287  // Casting to union.
-
5288  Item* pItemPtr;
-
5289  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
-
5290 
-
5291  // Check if pItemPtr is in address range of this block.
-
5292  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
-
5293  {
-
5294  ptr->~T(); // Explicit destructor call.
-
5295  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
-
5296  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
-
5297  block.FirstFreeIndex = index;
-
5298  return;
-
5299  }
-
5300  }
-
5301  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
+
5270 template<typename T>
+
5271 VmaPoolAllocator<T>::~VmaPoolAllocator()
+
5272 {
+
5273  for(size_t i = m_ItemBlocks.size(); i--; )
+
5274  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
+
5275  m_ItemBlocks.clear();
+
5276 }
+
5277 
+
5278 template<typename T>
+
5279 template<typename... Types> T* VmaPoolAllocator<T>::Alloc(Types... args)
+
5280 {
+
5281  for(size_t i = m_ItemBlocks.size(); i--; )
+
5282  {
+
5283  ItemBlock& block = m_ItemBlocks[i];
+
5284  // This block has some free items: Use first one.
+
5285  if(block.FirstFreeIndex != UINT32_MAX)
+
5286  {
+
5287  Item* const pItem = &block.pItems[block.FirstFreeIndex];
+
5288  block.FirstFreeIndex = pItem->NextFreeIndex;
+
5289  T* result = (T*)&pItem->Value;
+
5290  new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
+
5291  return result;
+
5292  }
+
5293  }
+
5294 
+
5295  // No block has free item: Create new one and use it.
+
5296  ItemBlock& newBlock = CreateNewBlock();
+
5297  Item* const pItem = &newBlock.pItems[0];
+
5298  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
+
5299  T* result = (T*)&pItem->Value;
+
5300  new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
+
5301  return result;
5302 }
5303 
5304 template<typename T>
-
5305 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
+
5305 void VmaPoolAllocator<T>::Free(T* ptr)
5306 {
-
5307  const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
-
5308  m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;
-
5309 
-
5310  const ItemBlock newBlock = {
-
5311  vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
-
5312  newBlockCapacity,
-
5313  0 };
-
5314 
-
5315  m_ItemBlocks.push_back(newBlock);
-
5316 
-
5317  // Setup singly-linked list of all free items in this block.
-
5318  for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
-
5319  newBlock.pItems[i].NextFreeIndex = i + 1;
-
5320  newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
-
5321  return m_ItemBlocks.back();
-
5322 }
-
5323 
-
5325 // class VmaRawList, VmaList
-
5326 
-
5327 #if VMA_USE_STL_LIST
+
5307  // Search all memory blocks to find ptr.
+
5308  for(size_t i = m_ItemBlocks.size(); i--; )
+
5309  {
+
5310  ItemBlock& block = m_ItemBlocks[i];
+
5311 
+
5312  // Cast the pointer to the containing union; memcpy avoids a type-punning cast.
+
5313  Item* pItemPtr;
+
5314  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
+
5315 
+
5316  // Check if pItemPtr is in address range of this block.
+
5317  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
+
5318  {
+
5319  ptr->~T(); // Explicit destructor call.
+
5320  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
+
5321  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
+
5322  block.FirstFreeIndex = index;
+
5323  return;
+
5324  }
+
5325  }
+
5326  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
+
5327 }
5328 
-
5329 #define VmaList std::list
-
5330 
-
5331 #else // #if VMA_USE_STL_LIST
-
5332 
-
5333 template<typename T>
-
5334 struct VmaListItem
-
5335 {
-
5336  VmaListItem* pPrev;
-
5337  VmaListItem* pNext;
-
5338  T Value;
-
5339 };
-
5340 
-
5341 // Doubly linked list.
-
5342 template<typename T>
-
5343 class VmaRawList
-
5344 {
-
5345  VMA_CLASS_NO_COPY(VmaRawList)
-
5346 public:
-
5347  typedef VmaListItem<T> ItemType;
+
5329 template<typename T>
+
5330 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
+
5331 {
+
5332  const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
+
5333  m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;
+
5334 
+
5335  const ItemBlock newBlock = {
+
5336  vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
+
5337  newBlockCapacity,
+
5338  0 };
+
5339 
+
5340  m_ItemBlocks.push_back(newBlock);
+
5341 
+
5342  // Set up the singly-linked list of all free items in this block.
+
5343  for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
+
5344  newBlock.pItems[i].NextFreeIndex = i + 1;
+
5345  newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
+
5346  return m_ItemBlocks.back();
+
5347 }
5348 
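The core trick in VmaPoolAllocator is that a free slot and a live object share the same storage through the union, so the free list costs no memory beyond the items themselves, and UINT32_MAX marks the end of the list. A self-contained single-block sketch of that mechanism follows (one fixed block, no growth; FixedPool is my name).

#include <cassert>
#include <cstdint>
#include <new>

// One fixed-capacity pool block in the style of VmaPoolAllocator's ItemBlock:
// free slots form a singly-linked list threaded through the union.
template<typename T, uint32_t Capacity>
class FixedPool
{
    union Item
    {
        uint32_t NextFreeIndex;           // used while the slot is free
        alignas(T) char Value[sizeof(T)]; // used while the slot holds an object
    };

public:
    FixedPool()
    {
        for(uint32_t i = 0; i < Capacity - 1; ++i)
            m_Items[i].NextFreeIndex = i + 1;
        m_Items[Capacity - 1].NextFreeIndex = UINT32_MAX; // end of free list
        m_FirstFree = 0;
    }

    T* Alloc()
    {
        if(m_FirstFree == UINT32_MAX)
            return nullptr;               // block is full
        Item& item = m_Items[m_FirstFree];
        m_FirstFree = item.NextFreeIndex; // pop the slot off the free list
        return new(item.Value) T();       // explicit constructor call
    }

    void Free(T* ptr)
    {
        ptr->~T();                         // explicit destructor call
        Item* item = reinterpret_cast<Item*>(ptr);
        item->NextFreeIndex = m_FirstFree; // push the slot back (LIFO)
        m_FirstFree = static_cast<uint32_t>(item - m_Items);
    }

private:
    Item m_Items[Capacity];
    uint32_t m_FirstFree;
};

int main()
{
    FixedPool<int, 8> pool;
    int* a = pool.Alloc();
    int* b = pool.Alloc();
    pool.Free(a);
    int* c = pool.Alloc(); // reuses a's slot thanks to the LIFO free list
    assert(c == a);
    (void)b;
    return 0;
}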
-
5349  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
-
5350  ~VmaRawList();
-
5351  void Clear();
-
5352 
-
5353  size_t GetCount() const { return m_Count; }
-
5354  bool IsEmpty() const { return m_Count == 0; }
+
5350 // class VmaRawList, VmaList
+
5351 
+
5352 #if VMA_USE_STL_LIST
+
5353 
+
5354 #define VmaList std::list
5355 
-
5356  ItemType* Front() { return m_pFront; }
-
5357  const ItemType* Front() const { return m_pFront; }
-
5358  ItemType* Back() { return m_pBack; }
-
5359  const ItemType* Back() const { return m_pBack; }
-
5360 
-
5361  ItemType* PushBack();
-
5362  ItemType* PushFront();
-
5363  ItemType* PushBack(const T& value);
-
5364  ItemType* PushFront(const T& value);
-
5365  void PopBack();
-
5366  void PopFront();
-
5367 
-
5368  // Item can be null - it means PushBack.
-
5369  ItemType* InsertBefore(ItemType* pItem);
-
5370  // Item can be null - it means PushFront.
-
5371  ItemType* InsertAfter(ItemType* pItem);
-
5372 
-
5373  ItemType* InsertBefore(ItemType* pItem, const T& value);
-
5374  ItemType* InsertAfter(ItemType* pItem, const T& value);
-
5375 
-
5376  void Remove(ItemType* pItem);
+
5356 #else // #if VMA_USE_STL_LIST
+
5357 
+
5358 template<typename T>
+
5359 struct VmaListItem
+
5360 {
+
5361  VmaListItem* pPrev;
+
5362  VmaListItem* pNext;
+
5363  T Value;
+
5364 };
+
5365 
+
5366 // Doubly linked list.
+
5367 template<typename T>
+
5368 class VmaRawList
+
5369 {
+
5370  VMA_CLASS_NO_COPY(VmaRawList)
+
5371 public:
+
5372  typedef VmaListItem<T> ItemType;
+
5373 
+
5374  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
+
5375  ~VmaRawList();
+
5376  void Clear();
5377 
-
5378 private:
-
5379  const VkAllocationCallbacks* const m_pAllocationCallbacks;
-
5380  VmaPoolAllocator<ItemType> m_ItemAllocator;
-
5381  ItemType* m_pFront;
-
5382  ItemType* m_pBack;
-
5383  size_t m_Count;
-
5384 };
+
5378  size_t GetCount() const { return m_Count; }
+
5379  bool IsEmpty() const { return m_Count == 0; }
+
5380 
+
5381  ItemType* Front() { return m_pFront; }
+
5382  const ItemType* Front() const { return m_pFront; }
+
5383  ItemType* Back() { return m_pBack; }
+
5384  const ItemType* Back() const { return m_pBack; }
5385 
-
5386 template<typename T>
-
5387 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
-
5388  m_pAllocationCallbacks(pAllocationCallbacks),
-
5389  m_ItemAllocator(pAllocationCallbacks, 128),
-
5390  m_pFront(VMA_NULL),
-
5391  m_pBack(VMA_NULL),
-
5392  m_Count(0)
-
5393 {
-
5394 }
-
5395 
-
5396 template<typename T>
-
5397 VmaRawList<T>::~VmaRawList()
-
5398 {
-
5399  // Intentionally not calling Clear, because that would be unnecessary
-
5400  // computations to return all items to m_ItemAllocator as free.
-
5401 }
+
5386  ItemType* PushBack();
+
5387  ItemType* PushFront();
+
5388  ItemType* PushBack(const T& value);
+
5389  ItemType* PushFront(const T& value);
+
5390  void PopBack();
+
5391  void PopFront();
+
5392 
+
5393  // Item can be null - it means PushBack.
+
5394  ItemType* InsertBefore(ItemType* pItem);
+
5395  // Item can be null - it means PushFront.
+
5396  ItemType* InsertAfter(ItemType* pItem);
+
5397 
+
5398  ItemType* InsertBefore(ItemType* pItem, const T& value);
+
5399  ItemType* InsertAfter(ItemType* pItem, const T& value);
+
5400 
+
5401  void Remove(ItemType* pItem);
5402 
-
5403 template<typename T>
-
5404 void VmaRawList<T>::Clear()
-
5405 {
-
5406  if(IsEmpty() == false)
-
5407  {
-
5408  ItemType* pItem = m_pBack;
-
5409  while(pItem != VMA_NULL)
-
5410  {
-
5411  ItemType* const pPrevItem = pItem->pPrev;
-
5412  m_ItemAllocator.Free(pItem);
-
5413  pItem = pPrevItem;
-
5414  }
-
5415  m_pFront = VMA_NULL;
-
5416  m_pBack = VMA_NULL;
-
5417  m_Count = 0;
-
5418  }
+
5403 private:
+
5404  const VkAllocationCallbacks* const m_pAllocationCallbacks;
+
5405  VmaPoolAllocator<ItemType> m_ItemAllocator;
+
5406  ItemType* m_pFront;
+
5407  ItemType* m_pBack;
+
5408  size_t m_Count;
+
5409 };
+
5410 
+
5411 template<typename T>
+
5412 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
+
5413  m_pAllocationCallbacks(pAllocationCallbacks),
+
5414  m_ItemAllocator(pAllocationCallbacks, 128),
+
5415  m_pFront(VMA_NULL),
+
5416  m_pBack(VMA_NULL),
+
5417  m_Count(0)
+
5418 {
5419 }
5420 
5421 template<typename T>
-
5422 VmaListItem<T>* VmaRawList<T>::PushBack()
+
5422 VmaRawList<T>::~VmaRawList()
5423 {
-
5424  ItemType* const pNewItem = m_ItemAllocator.Alloc();
-
5425  pNewItem->pNext = VMA_NULL;
-
5426  if(IsEmpty())
-
5427  {
-
5428  pNewItem->pPrev = VMA_NULL;
-
5429  m_pFront = pNewItem;
-
5430  m_pBack = pNewItem;
-
5431  m_Count = 1;
-
5432  }
-
5433  else
-
5434  {
-
5435  pNewItem->pPrev = m_pBack;
-
5436  m_pBack->pNext = pNewItem;
-
5437  m_pBack = pNewItem;
-
5438  ++m_Count;
-
5439  }
-
5440  return pNewItem;
-
5441 }
-
5442 
-
5443 template<typename T>
-
5444 VmaListItem<T>* VmaRawList<T>::PushFront()
-
5445 {
-
5446  ItemType* const pNewItem = m_ItemAllocator.Alloc();
-
5447  pNewItem->pPrev = VMA_NULL;
-
5448  if(IsEmpty())
-
5449  {
-
5450  pNewItem->pNext = VMA_NULL;
-
5451  m_pFront = pNewItem;
-
5452  m_pBack = pNewItem;
-
5453  m_Count = 1;
-
5454  }
-
5455  else
-
5456  {
-
5457  pNewItem->pNext = m_pFront;
-
5458  m_pFront->pPrev = pNewItem;
-
5459  m_pFront = pNewItem;
-
5460  ++m_Count;
-
5461  }
-
5462  return pNewItem;
-
5463 }
-
5464 
-
5465 template<typename T>
-
5466 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
-
5467 {
-
5468  ItemType* const pNewItem = PushBack();
-
5469  pNewItem->Value = value;
-
5470  return pNewItem;
-
5471 }
-
5472 
-
5473 template<typename T>
-
5474 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
-
5475 {
-
5476  ItemType* const pNewItem = PushFront();
-
5477  pNewItem->Value = value;
-
5478  return pNewItem;
-
5479 }
-
5480 
-
5481 template<typename T>
-
5482 void VmaRawList<T>::PopBack()
-
5483 {
-
5484  VMA_HEAVY_ASSERT(m_Count > 0);
-
5485  ItemType* const pBackItem = m_pBack;
-
5486  ItemType* const pPrevItem = pBackItem->pPrev;
-
5487  if(pPrevItem != VMA_NULL)
-
5488  {
-
5489  pPrevItem->pNext = VMA_NULL;
-
5490  }
-
5491  m_pBack = pPrevItem;
-
5492  m_ItemAllocator.Free(pBackItem);
-
5493  --m_Count;
-
5494 }
-
5495 
-
5496 template<typename T>
-
5497 void VmaRawList<T>::PopFront()
-
5498 {
-
5499  VMA_HEAVY_ASSERT(m_Count > 0);
-
5500  ItemType* const pFrontItem = m_pFront;
-
5501  ItemType* const pNextItem = pFrontItem->pNext;
-
5502  if(pNextItem != VMA_NULL)
-
5503  {
-
5504  pNextItem->pPrev = VMA_NULL;
-
5505  }
-
5506  m_pFront = pNextItem;
-
5507  m_ItemAllocator.Free(pFrontItem);
-
5508  --m_Count;
-
5509 }
-
5510 
-
5511 template<typename T>
-
5512 void VmaRawList<T>::Remove(ItemType* pItem)
-
5513 {
-
5514  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
-
5515  VMA_HEAVY_ASSERT(m_Count > 0);
-
5516 
-
5517  if(pItem->pPrev != VMA_NULL)
-
5518  {
-
5519  pItem->pPrev->pNext = pItem->pNext;
-
5520  }
-
5521  else
-
5522  {
-
5523  VMA_HEAVY_ASSERT(m_pFront == pItem);
-
5524  m_pFront = pItem->pNext;
-
5525  }
-
5526 
-
5527  if(pItem->pNext != VMA_NULL)
+
5424  // Intentionally not calling Clear, because that would waste computation
+
5425  // returning all items to m_ItemAllocator as free - the allocator's destructor releases their memory anyway.
+
5426 }
+
5427 
+
5428 template<typename T>
+
5429 void VmaRawList<T>::Clear()
+
5430 {
+
5431  if(IsEmpty() == false)
+
5432  {
+
5433  ItemType* pItem = m_pBack;
+
5434  while(pItem != VMA_NULL)
+
5435  {
+
5436  ItemType* const pPrevItem = pItem->pPrev;
+
5437  m_ItemAllocator.Free(pItem);
+
5438  pItem = pPrevItem;
+
5439  }
+
5440  m_pFront = VMA_NULL;
+
5441  m_pBack = VMA_NULL;
+
5442  m_Count = 0;
+
5443  }
+
5444 }
+
5445 
+
5446 template<typename T>
+
5447 VmaListItem<T>* VmaRawList<T>::PushBack()
+
5448 {
+
5449  ItemType* const pNewItem = m_ItemAllocator.Alloc();
+
5450  pNewItem->pNext = VMA_NULL;
+
5451  if(IsEmpty())
+
5452  {
+
5453  pNewItem->pPrev = VMA_NULL;
+
5454  m_pFront = pNewItem;
+
5455  m_pBack = pNewItem;
+
5456  m_Count = 1;
+
5457  }
+
5458  else
+
5459  {
+
5460  pNewItem->pPrev = m_pBack;
+
5461  m_pBack->pNext = pNewItem;
+
5462  m_pBack = pNewItem;
+
5463  ++m_Count;
+
5464  }
+
5465  return pNewItem;
+
5466 }
+
5467 
+
5468 template<typename T>
+
5469 VmaListItem<T>* VmaRawList<T>::PushFront()
+
5470 {
+
5471  ItemType* const pNewItem = m_ItemAllocator.Alloc();
+
5472  pNewItem->pPrev = VMA_NULL;
+
5473  if(IsEmpty())
+
5474  {
+
5475  pNewItem->pNext = VMA_NULL;
+
5476  m_pFront = pNewItem;
+
5477  m_pBack = pNewItem;
+
5478  m_Count = 1;
+
5479  }
+
5480  else
+
5481  {
+
5482  pNewItem->pNext = m_pFront;
+
5483  m_pFront->pPrev = pNewItem;
+
5484  m_pFront = pNewItem;
+
5485  ++m_Count;
+
5486  }
+
5487  return pNewItem;
+
5488 }
+
5489 
+
5490 template<typename T>
+
5491 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
+
5492 {
+
5493  ItemType* const pNewItem = PushBack();
+
5494  pNewItem->Value = value;
+
5495  return pNewItem;
+
5496 }
+
5497 
+
5498 template<typename T>
+
5499 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
+
5500 {
+
5501  ItemType* const pNewItem = PushFront();
+
5502  pNewItem->Value = value;
+
5503  return pNewItem;
+
5504 }
+
5505 
+
5506 template<typename T>
+
5507 void VmaRawList<T>::PopBack()
+
5508 {
+
5509  VMA_HEAVY_ASSERT(m_Count > 0);
+
5510  ItemType* const pBackItem = m_pBack;
+
5511  ItemType* const pPrevItem = pBackItem->pPrev;
+
5512  if(pPrevItem != VMA_NULL)
+
5513  {
+
5514  pPrevItem->pNext = VMA_NULL;
+
5515  }
+
5516  m_pBack = pPrevItem;
+
5517  m_ItemAllocator.Free(pBackItem);
+
5518  --m_Count;
+
5519 }
+
5520 
+
5521 template<typename T>
+
5522 void VmaRawList<T>::PopFront()
+
5523 {
+
5524  VMA_HEAVY_ASSERT(m_Count > 0);
+
5525  ItemType* const pFrontItem = m_pFront;
+
5526  ItemType* const pNextItem = pFrontItem->pNext;
+
5527  if(pNextItem != VMA_NULL)
5528  {
-
5529  pItem->pNext->pPrev = pItem->pPrev;
+
5529  pNextItem->pPrev = VMA_NULL;
5530  }
-
5531  else
-
5532  {
-
5533  VMA_HEAVY_ASSERT(m_pBack == pItem);
-
5534  m_pBack = pItem->pPrev;
-
5535  }
-
5536 
-
5537  m_ItemAllocator.Free(pItem);
-
5538  --m_Count;
-
5539 }
-
5540 
-
5541 template<typename T>
-
5542 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
-
5543 {
-
5544  if(pItem != VMA_NULL)
-
5545  {
-
5546  ItemType* const prevItem = pItem->pPrev;
-
5547  ItemType* const newItem = m_ItemAllocator.Alloc();
-
5548  newItem->pPrev = prevItem;
-
5549  newItem->pNext = pItem;
-
5550  pItem->pPrev = newItem;
-
5551  if(prevItem != VMA_NULL)
-
5552  {
-
5553  prevItem->pNext = newItem;
-
5554  }
-
5555  else
-
5556  {
-
5557  VMA_HEAVY_ASSERT(m_pFront == pItem);
-
5558  m_pFront = newItem;
-
5559  }
-
5560  ++m_Count;
-
5561  return newItem;
-
5562  }
-
5563  else
-
5564  return PushBack();
-
5565 }
-
5566 
-
5567 template<typename T>
-
5568 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
-
5569 {
-
5570  if(pItem != VMA_NULL)
-
5571  {
-
5572  ItemType* const nextItem = pItem->pNext;
-
5573  ItemType* const newItem = m_ItemAllocator.Alloc();
-
5574  newItem->pNext = nextItem;
-
5575  newItem->pPrev = pItem;
-
5576  pItem->pNext = newItem;
-
5577  if(nextItem != VMA_NULL)
-
5578  {
-
5579  nextItem->pPrev = newItem;
-
5580  }
-
5581  else
-
5582  {
-
5583  VMA_HEAVY_ASSERT(m_pBack == pItem);
-
5584  m_pBack = newItem;
-
5585  }
-
5586  ++m_Count;
-
5587  return newItem;
-
5588  }
-
5589  else
-
5590  return PushFront();
-
5591 }
-
5592 
-
5593 template<typename T>
-
5594 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
-
5595 {
-
5596  ItemType* const newItem = InsertBefore(pItem);
-
5597  newItem->Value = value;
-
5598  return newItem;
-
5599 }
-
5600 
-
5601 template<typename T>
-
5602 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
-
5603 {
-
5604  ItemType* const newItem = InsertAfter(pItem);
-
5605  newItem->Value = value;
-
5606  return newItem;
-
5607 }
-
5608 
-
5609 template<typename T, typename AllocatorT>
-
5610 class VmaList
-
5611 {
-
5612  VMA_CLASS_NO_COPY(VmaList)
-
5613 public:
-
5614  class iterator
-
5615  {
-
5616  public:
-
5617  iterator() :
-
5618  m_pList(VMA_NULL),
-
5619  m_pItem(VMA_NULL)
-
5620  {
-
5621  }
-
5622 
-
5623  T& operator*() const
-
5624  {
-
5625  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
-
5626  return m_pItem->Value;
-
5627  }
-
5628  T* operator->() const
-
5629  {
-
5630  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
-
5631  return &m_pItem->Value;
-
5632  }
+
5531  m_pFront = pNextItem;
+
5532  m_ItemAllocator.Free(pFrontItem);
+
5533  --m_Count;
+
5534 }
+
5535 
+
5536 template<typename T>
+
5537 void VmaRawList<T>::Remove(ItemType* pItem)
+
5538 {
+
5539  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
+
5540  VMA_HEAVY_ASSERT(m_Count > 0);
+
5541 
+
5542  if(pItem->pPrev != VMA_NULL)
+
5543  {
+
5544  pItem->pPrev->pNext = pItem->pNext;
+
5545  }
+
5546  else
+
5547  {
+
5548  VMA_HEAVY_ASSERT(m_pFront == pItem);
+
5549  m_pFront = pItem->pNext;
+
5550  }
+
5551 
+
5552  if(pItem->pNext != VMA_NULL)
+
5553  {
+
5554  pItem->pNext->pPrev = pItem->pPrev;
+
5555  }
+
5556  else
+
5557  {
+
5558  VMA_HEAVY_ASSERT(m_pBack == pItem);
+
5559  m_pBack = pItem->pPrev;
+
5560  }
+
5561 
+
5562  m_ItemAllocator.Free(pItem);
+
5563  --m_Count;
+
5564 }
+
5565 
+
5566 template<typename T>
+
5567 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
+
5568 {
+
5569  if(pItem != VMA_NULL)
+
5570  {
+
5571  ItemType* const prevItem = pItem->pPrev;
+
5572  ItemType* const newItem = m_ItemAllocator.Alloc();
+
5573  newItem->pPrev = prevItem;
+
5574  newItem->pNext = pItem;
+
5575  pItem->pPrev = newItem;
+
5576  if(prevItem != VMA_NULL)
+
5577  {
+
5578  prevItem->pNext = newItem;
+
5579  }
+
5580  else
+
5581  {
+
5582  VMA_HEAVY_ASSERT(m_pFront == pItem);
+
5583  m_pFront = newItem;
+
5584  }
+
5585  ++m_Count;
+
5586  return newItem;
+
5587  }
+
5588  else
+
5589  return PushBack();
+
5590 }
+
5591 
+
5592 template<typename T>
+
5593 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
+
5594 {
+
5595  if(pItem != VMA_NULL)
+
5596  {
+
5597  ItemType* const nextItem = pItem->pNext;
+
5598  ItemType* const newItem = m_ItemAllocator.Alloc();
+
5599  newItem->pNext = nextItem;
+
5600  newItem->pPrev = pItem;
+
5601  pItem->pNext = newItem;
+
5602  if(nextItem != VMA_NULL)
+
5603  {
+
5604  nextItem->pPrev = newItem;
+
5605  }
+
5606  else
+
5607  {
+
5608  VMA_HEAVY_ASSERT(m_pBack == pItem);
+
5609  m_pBack = newItem;
+
5610  }
+
5611  ++m_Count;
+
5612  return newItem;
+
5613  }
+
5614  else
+
5615  return PushFront();
+
5616 }
+
5617 
+
5618 template<typename T>
+
5619 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
+
5620 {
+
5621  ItemType* const newItem = InsertBefore(pItem);
+
5622  newItem->Value = value;
+
5623  return newItem;
+
5624 }
+
5625 
+
5626 template<typename T>
+
5627 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
+
5628 {
+
5629  ItemType* const newItem = InsertAfter(pItem);
+
5630  newItem->Value = value;
+
5631  return newItem;
+
5632 }
5633 
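VmaRawList::Remove above is the standard doubly linked list unlink, with the null-neighbor branches updating the list's front/back pointers instead of a neighbor node. A tiny standalone illustration, using plain new/delete in place of the pool allocator:

#include <cassert>

struct Node { Node* pPrev; Node* pNext; int Value; };

// Unlink item from the list described by (front, back), mirroring VmaRawList::Remove.
static void Remove(Node*& front, Node*& back, Node* item)
{
    if(item->pPrev != nullptr) item->pPrev->pNext = item->pNext;
    else                       front = item->pNext; // item was the front
    if(item->pNext != nullptr) item->pNext->pPrev = item->pPrev;
    else                       back = item->pPrev;  // item was the back
    delete item;
}

int main()
{
    Node* a = new Node{nullptr, nullptr, 1};
    Node* b = new Node{a, nullptr, 2};
    a->pNext = b;
    Node* front = a;
    Node* back = b;
    Remove(front, back, a);
    assert(front == b && back == b && b->pPrev == nullptr);
    delete b;
    return 0;
}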
-
5634  iterator& operator++()
-
5635  {
-
5636  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
-
5637  m_pItem = m_pItem->pNext;
-
5638  return *this;
-
5639  }
-
5640  iterator& operator--()
-
5641  {
-
5642  if(m_pItem != VMA_NULL)
-
5643  {
-
5644  m_pItem = m_pItem->pPrev;
-
5645  }
-
5646  else
-
5647  {
-
5648  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
-
5649  m_pItem = m_pList->Back();
-
5650  }
-
5651  return *this;
+
5634 template<typename T, typename AllocatorT>
+
5635 class VmaList
+
5636 {
+
5637  VMA_CLASS_NO_COPY(VmaList)
+
5638 public:
+
5639  class iterator
+
5640  {
+
5641  public:
+
5642  iterator() :
+
5643  m_pList(VMA_NULL),
+
5644  m_pItem(VMA_NULL)
+
5645  {
+
5646  }
+
5647 
+
5648  T& operator*() const
+
5649  {
+
5650  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
+
5651  return m_pItem->Value;
5652  }
-
5653 
-
5654  iterator operator++(int)
-
5655  {
-
5656  iterator result = *this;
-
5657  ++*this;
-
5658  return result;
-
5659  }
-
5660  iterator operator--(int)
-
5661  {
-
5662  iterator result = *this;
-
5663  --*this;
-
5664  return result;
-
5665  }
-
5666 
-
5667  bool operator==(const iterator& rhs) const
-
5668  {
-
5669  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
-
5670  return m_pItem == rhs.m_pItem;
-
5671  }
-
5672  bool operator!=(const iterator& rhs) const
-
5673  {
-
5674  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
-
5675  return m_pItem != rhs.m_pItem;
-
5676  }
-
5677 
-
5678  private:
-
5679  VmaRawList<T>* m_pList;
-
5680  VmaListItem<T>* m_pItem;
-
5681 
-
5682  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
-
5683  m_pList(pList),
-
5684  m_pItem(pItem)
-
5685  {
-
5686  }
-
5687 
-
5688  friend class VmaList<T, AllocatorT>;
-
5689  };
-
5690 
-
5691  class const_iterator
-
5692  {
-
5693  public:
-
5694  const_iterator() :
-
5695  m_pList(VMA_NULL),
-
5696  m_pItem(VMA_NULL)
-
5697  {
-
5698  }
-
5699 
-
5700  const_iterator(const iterator& src) :
-
5701  m_pList(src.m_pList),
-
5702  m_pItem(src.m_pItem)
-
5703  {
-
5704  }
-
5705 
-
5706  const T& operator*() const
-
5707  {
-
5708  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
-
5709  return m_pItem->Value;
-
5710  }
-
5711  const T* operator->() const
-
5712  {
-
5713  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
-
5714  return &m_pItem->Value;
-
5715  }
-
5716 
-
5717  const_iterator& operator++()
-
5718  {
-
5719  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
-
5720  m_pItem = m_pItem->pNext;
-
5721  return *this;
-
5722  }
-
5723  const_iterator& operator--()
-
5724  {
-
5725  if(m_pItem != VMA_NULL)
-
5726  {
-
5727  m_pItem = m_pItem->pPrev;
-
5728  }
-
5729  else
-
5730  {
-
5731  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
-
5732  m_pItem = m_pList->Back();
-
5733  }
-
5734  return *this;
+
5653  T* operator->() const
+
5654  {
+
5655  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
+
5656  return &m_pItem->Value;
+
5657  }
+
5658 
+
5659  iterator& operator++()
+
5660  {
+
5661  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
+
5662  m_pItem = m_pItem->pNext;
+
5663  return *this;
+
5664  }
+
5665  iterator& operator--()
+
5666  {
+
5667  if(m_pItem != VMA_NULL)
+
5668  {
+
5669  m_pItem = m_pItem->pPrev;
+
5670  }
+
5671  else
+
5672  {
+
5673  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
+
5674  m_pItem = m_pList->Back();
+
5675  }
+
5676  return *this;
+
5677  }
+
5678 
+
5679  iterator operator++(int)
+
5680  {
+
5681  iterator result = *this;
+
5682  ++*this;
+
5683  return result;
+
5684  }
+
5685  iterator operator--(int)
+
5686  {
+
5687  iterator result = *this;
+
5688  --*this;
+
5689  return result;
+
5690  }
+
5691 
+
5692  bool operator==(const iterator& rhs) const
+
5693  {
+
5694  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
+
5695  return m_pItem == rhs.m_pItem;
+
5696  }
+
5697  bool operator!=(const iterator& rhs) const
+
5698  {
+
5699  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
+
5700  return m_pItem != rhs.m_pItem;
+
5701  }
+
5702 
+
5703  private:
+
5704  VmaRawList<T>* m_pList;
+
5705  VmaListItem<T>* m_pItem;
+
5706 
+
5707  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
+
5708  m_pList(pList),
+
5709  m_pItem(pItem)
+
5710  {
+
5711  }
+
5712 
+
5713  friend class VmaList<T, AllocatorT>;
+
5714  };
+
5715 
+
5716  class const_iterator
+
5717  {
+
5718  public:
+
5719  const_iterator() :
+
5720  m_pList(VMA_NULL),
+
5721  m_pItem(VMA_NULL)
+
5722  {
+
5723  }
+
5724 
+
5725  const_iterator(const iterator& src) :
+
5726  m_pList(src.m_pList),
+
5727  m_pItem(src.m_pItem)
+
5728  {
+
5729  }
+
5730 
+
5731  const T& operator*() const
+
5732  {
+
5733  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
+
5734  return m_pItem->Value;
5735  }
-
5736 
-
5737  const_iterator operator++(int)
-
5738  {
-
5739  const_iterator result = *this;
-
5740  ++*this;
-
5741  return result;
-
5742  }
-
5743  const_iterator operator--(int)
-
5744  {
-
5745  const_iterator result = *this;
-
5746  --*this;
-
5747  return result;
-
5748  }
-
5749 
-
5750  bool operator==(const const_iterator& rhs) const
-
5751  {
-
5752  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
-
5753  return m_pItem == rhs.m_pItem;
-
5754  }
-
5755  bool operator!=(const const_iterator& rhs) const
-
5756  {
-
5757  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
-
5758  return m_pItem != rhs.m_pItem;
-
5759  }
-
5760 
-
5761  private:
-
5762  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
-
5763  m_pList(pList),
-
5764  m_pItem(pItem)
-
5765  {
-
5766  }
-
5767 
-
5768  const VmaRawList<T>* m_pList;
-
5769  const VmaListItem<T>* m_pItem;
-
5770 
-
5771  friend class VmaList<T, AllocatorT>;
-
5772  };
-
5773 
-
5774  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
-
5775 
-
5776  bool empty() const { return m_RawList.IsEmpty(); }
-
5777  size_t size() const { return m_RawList.GetCount(); }
-
5778 
-
5779  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
-
5780  iterator end() { return iterator(&m_RawList, VMA_NULL); }
-
5781 
-
5782  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
-
5783  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
-
5784 
-
5785  void clear() { m_RawList.Clear(); }
-
5786  void push_back(const T& value) { m_RawList.PushBack(value); }
-
5787  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
-
5788  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
-
5789 
-
5790 private:
-
5791  VmaRawList<T> m_RawList;
-
5792 };
-
5793 
-
5794 #endif // #if VMA_USE_STL_LIST
+
5736  const T* operator->() const
+
5737  {
+
5738  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
+
5739  return &m_pItem->Value;
+
5740  }
+
5741 
+
5742  const_iterator& operator++()
+
5743  {
+
5744  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
+
5745  m_pItem = m_pItem->pNext;
+
5746  return *this;
+
5747  }
+
5748  const_iterator& operator--()
+
5749  {
+
5750  if(m_pItem != VMA_NULL)
+
5751  {
+
5752  m_pItem = m_pItem->pPrev;
+
5753  }
+
5754  else
+
5755  {
+
5756  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
+
5757  m_pItem = m_pList->Back();
+
5758  }
+
5759  return *this;
+
5760  }
+
5761 
+
5762  const_iterator operator++(int)
+
5763  {
+
5764  const_iterator result = *this;
+
5765  ++*this;
+
5766  return result;
+
5767  }
+
5768  const_iterator operator--(int)
+
5769  {
+
5770  const_iterator result = *this;
+
5771  --*this;
+
5772  return result;
+
5773  }
+
5774 
+
5775  bool operator==(const const_iterator& rhs) const
+
5776  {
+
5777  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
+
5778  return m_pItem == rhs.m_pItem;
+
5779  }
+
5780  bool operator!=(const const_iterator& rhs) const
+
5781  {
+
5782  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
+
5783  return m_pItem != rhs.m_pItem;
+
5784  }
+
5785 
+
5786  private:
+
5787  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
+
5788  m_pList(pList),
+
5789  m_pItem(pItem)
+
5790  {
+
5791  }
+
5792 
+
5793  const VmaRawList<T>* m_pList;
+
5794  const VmaListItem<T>* m_pItem;
5795 
-
5797 // class VmaMap
+
5796  friend class VmaList<T, AllocatorT>;
+
5797  };
5798 
-
5799 // Unused in this version.
-
5800 #if 0
-
5801 
-
5802 #if VMA_USE_STL_UNORDERED_MAP
+
5799  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
+
5800 
+
5801  bool empty() const { return m_RawList.IsEmpty(); }
+
5802  size_t size() const { return m_RawList.GetCount(); }
5803 
-
5804 #define VmaPair std::pair
-
5805 
-
5806 #define VMA_MAP_TYPE(KeyT, ValueT) \
-
5807  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
-
5808 
-
5809 #else // #if VMA_USE_STL_UNORDERED_MAP
-
5810 
-
5811 template<typename T1, typename T2>
-
5812 struct VmaPair
-
5813 {
-
5814  T1 first;
-
5815  T2 second;
-
5816 
-
5817  VmaPair() : first(), second() { }
-
5818  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
-
5819 };
+
5804  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
+
5805  iterator end() { return iterator(&m_RawList, VMA_NULL); }
+
5806 
+
5807  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
+
5808  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
+
5809 
+
5810  void clear() { m_RawList.Clear(); }
+
5811  void push_back(const T& value) { m_RawList.PushBack(value); }
+
5812  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
+
5813  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
+
5814 
+
5815 private:
+
5816  VmaRawList<T> m_RawList;
+
5817 };
+
5818 
+
5819 #endif // #if VMA_USE_STL_LIST
5820 
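// A minimal standalone sketch (not VMA code) of the iterator convention used by
// VmaList above: end() is represented by a null item pointer, so operator++()
// just follows pNext, while operator--() on end() must recover the last element
// from the list itself via Back(). The Item/List/ConstIterator names below are
// hypothetical stand-ins, assuming a doubly linked list with front/back pointers.
#include <cassert>
#include <cstdio>

struct Item { int Value; Item* pPrev; Item* pNext; };

struct List
{
    Item* pFront = nullptr;
    Item* pBack = nullptr;
    Item* Back() const { return pBack; }
    bool IsEmpty() const { return pFront == nullptr; }
};

struct ConstIterator
{
    const List* pList;
    const Item* pItem; // null represents end()

    ConstIterator& operator++() { assert(pItem != nullptr); pItem = pItem->pNext; return *this; }
    ConstIterator& operator--()
    {
        if(pItem != nullptr)
            pItem = pItem->pPrev;
        else
        {
            // Decrementing end(): a null item carries no pPrev link, so ask the list.
            assert(!pList->IsEmpty());
            pItem = pList->Back();
        }
        return *this;
    }
};

int main()
{
    Item a{1, nullptr, nullptr}, b{2, &a, nullptr};
    a.pNext = &b;
    List l; l.pFront = &a; l.pBack = &b;
    ConstIterator it{&l, nullptr}; // end()
    --it;                          // now points at the back item
    std::printf("%d\n", it.pItem->Value); // prints 2
    return 0;
}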
-
5821 /* Class compatible with subset of interface of std::unordered_map.
-
5822 KeyT, ValueT must be POD because they will be stored in VmaVector.
-
5823 */
-
5824 template<typename KeyT, typename ValueT>
-
5825 class VmaMap
-
5826 {
-
5827 public:
-
5828  typedef VmaPair<KeyT, ValueT> PairType;
-
5829  typedef PairType* iterator;
+
5822 // class VmaMap
+
5823 
+
5824 // Unused in this version.
+
5825 #if 0
+
5826 
+
5827 #if VMA_USE_STL_UNORDERED_MAP
+
5828 
+
5829 #define VmaPair std::pair
5830 
-
5831  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
-
5832 
-
5833  iterator begin() { return m_Vector.begin(); }
-
5834  iterator end() { return m_Vector.end(); }
+
5831 #define VMA_MAP_TYPE(KeyT, ValueT) \
+
5832  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
+
5833 
+
5834 #else // #if VMA_USE_STL_UNORDERED_MAP
5835 
-
5836  void insert(const PairType& pair);
-
5837  iterator find(const KeyT& key);
-
5838  void erase(iterator it);
-
5839 
-
5840 private:
-
5841  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
-
5842 };
-
5843 
-
5844 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
+
5836 template<typename T1, typename T2>
+
5837 struct VmaPair
+
5838 {
+
5839  T1 first;
+
5840  T2 second;
+
5841 
+
5842  VmaPair() : first(), second() { }
+
5843  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
+
5844 };
5845 
-
5846 template<typename FirstT, typename SecondT>
-
5847 struct VmaPairFirstLess
-
5848 {
-
5849  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
-
5850  {
-
5851  return lhs.first < rhs.first;
-
5852  }
-
5853  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
-
5854  {
-
5855  return lhs.first < rhsFirst;
-
5856  }
-
5857 };
-
5858 
-
5859 template<typename KeyT, typename ValueT>
-
5860 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
-
5861 {
-
5862  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
-
5863  m_Vector.data(),
-
5864  m_Vector.data() + m_Vector.size(),
-
5865  pair,
-
5866  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
-
5867  VmaVectorInsert(m_Vector, indexToInsert, pair);
-
5868 }
-
5869 
-
5870 template<typename KeyT, typename ValueT>
-
5871 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
-
5872 {
-
5873  PairType* it = VmaBinaryFindFirstNotLess(
-
5874  m_Vector.data(),
-
5875  m_Vector.data() + m_Vector.size(),
-
5876  key,
-
5877  VmaPairFirstLess<KeyT, ValueT>());
-
5878  if((it != m_Vector.end()) && (it->first == key))
-
5879  {
-
5880  return it;
+
5846 /* Class compatible with subset of interface of std::unordered_map.
+
5847 KeyT, ValueT must be POD because they will be stored in VmaVector.
+
5848 */
+
5849 template<typename KeyT, typename ValueT>
+
5850 class VmaMap
+
5851 {
+
5852 public:
+
5853  typedef VmaPair<KeyT, ValueT> PairType;
+
5854  typedef PairType* iterator;
+
5855 
+
5856  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
+
5857 
+
5858  iterator begin() { return m_Vector.begin(); }
+
5859  iterator end() { return m_Vector.end(); }
+
5860 
+
5861  void insert(const PairType& pair);
+
5862  iterator find(const KeyT& key);
+
5863  void erase(iterator it);
+
5864 
+
5865 private:
+
5866  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
+
5867 };
+
5868 
+
5869 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
+
5870 
+
5871 template<typename FirstT, typename SecondT>
+
5872 struct VmaPairFirstLess
+
5873 {
+
5874  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
+
5875  {
+
5876  return lhs.first < rhs.first;
+
5877  }
+
5878  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
+
5879  {
+
5880  return lhs.first < rhsFirst;
5881  }
-
5882  else
-
5883  {
-
5884  return m_Vector.end();
-
5885  }
-
5886 }
-
5887 
-
5888 template<typename KeyT, typename ValueT>
-
5889 void VmaMap<KeyT, ValueT>::erase(iterator it)
-
5890 {
-
5891  VmaVectorRemove(m_Vector, it - m_Vector.begin());
-
5892 }
-
5893 
-
5894 #endif // #if VMA_USE_STL_UNORDERED_MAP
-
5895 
-
5896 #endif // #if 0
-
5897 
-
5899 
-
5900 class VmaDeviceMemoryBlock;
-
5901 
-
5902 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
-
5903 
-
5904 struct VmaAllocation_T
-
5905 {
-
5906 private:
-
5907  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
-
5908 
-
5909  enum FLAGS
-
5910  {
-
5911  FLAG_USER_DATA_STRING = 0x01,
-
5912  };
-
5913 
-
5914 public:
-
5915  enum ALLOCATION_TYPE
-
5916  {
-
5917  ALLOCATION_TYPE_NONE,
-
5918  ALLOCATION_TYPE_BLOCK,
-
5919  ALLOCATION_TYPE_DEDICATED,
-
5920  };
-
5921 
-
5922  /*
-
5923  This struct is allocated using VmaPoolAllocator.
-
5924  */
-
5925 
-
5926  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
-
5927  m_Alignment{1},
-
5928  m_Size{0},
-
5929  m_pUserData{VMA_NULL},
-
5930  m_LastUseFrameIndex{currentFrameIndex},
-
5931  m_MemoryTypeIndex{0},
-
5932  m_Type{(uint8_t)ALLOCATION_TYPE_NONE},
-
5933  m_SuballocationType{(uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN},
-
5934  m_MapCount{0},
-
5935  m_Flags{userDataString ? (uint8_t)FLAG_USER_DATA_STRING : (uint8_t)0}
-
5936  {
-
5937 #if VMA_STATS_STRING_ENABLED
-
5938  m_CreationFrameIndex = currentFrameIndex;
-
5939  m_BufferImageUsage = 0;
-
5940 #endif
-
5941  }
-
5942 
-
5943  ~VmaAllocation_T()
-
5944  {
-
5945  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
+
5882 };
+
5883 
+
5884 template<typename KeyT, typename ValueT>
+
5885 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
+
5886 {
+
5887  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
+
5888  m_Vector.data(),
+
5889  m_Vector.data() + m_Vector.size(),
+
5890  pair,
+
5891  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
+
5892  VmaVectorInsert(m_Vector, indexToInsert, pair);
+
5893 }
+
5894 
+
5895 template<typename KeyT, typename ValueT>
+
5896 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
+
5897 {
+
5898  PairType* it = VmaBinaryFindFirstNotLess(
+
5899  m_Vector.data(),
+
5900  m_Vector.data() + m_Vector.size(),
+
5901  key,
+
5902  VmaPairFirstLess<KeyT, ValueT>());
+
5903  if((it != m_Vector.end()) && (it->first == key))
+
5904  {
+
5905  return it;
+
5906  }
+
5907  else
+
5908  {
+
5909  return m_Vector.end();
+
5910  }
+
5911 }
+
5912 
+
5913 template<typename KeyT, typename ValueT>
+
5914 void VmaMap<KeyT, ValueT>::erase(iterator it)
+
5915 {
+
5916  VmaVectorRemove(m_Vector, it - m_Vector.begin());
+
5917 }
+
5918 
+
5919 #endif // #if VMA_USE_STL_UNORDERED_MAP
+
5920 
+
5921 #endif // #if 0
+
5922 
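// A standalone sketch of the sorted-vector map technique that the (currently
// unused) VmaMap above implements: pairs kept sorted by key, with a binary
// search ("first not less", here std::lower_bound) used both to find the
// insert position and to look keys up. std::vector stands in for VmaVector;
// this is an illustration, not the library's API.
#include <algorithm>
#include <cstdio>
#include <utility>
#include <vector>

using Pair = std::pair<int, const char*>;

static void Insert(std::vector<Pair>& v, const Pair& p)
{
    // Equivalent in spirit to VmaBinaryFindFirstNotLess + VmaVectorInsert.
    auto it = std::lower_bound(v.begin(), v.end(), p,
        [](const Pair& l, const Pair& r) { return l.first < r.first; });
    v.insert(it, p); // keeps the vector sorted by key
}

static const Pair* Find(const std::vector<Pair>& v, int key)
{
    auto it = std::lower_bound(v.begin(), v.end(), key,
        [](const Pair& l, int k) { return l.first < k; });
    return (it != v.end() && it->first == key) ? &*it : nullptr;
}

int main()
{
    std::vector<Pair> m;
    Insert(m, {3, "three"});
    Insert(m, {1, "one"});
    Insert(m, {2, "two"});
    if(const Pair* p = Find(m, 2))
        std::printf("%d -> %s\n", p->first, p->second); // 2 -> two
    return 0;
}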
+
5924 
+
5925 class VmaDeviceMemoryBlock;
+
5926 
+
5927 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
+
5928 
+
5929 struct VmaAllocation_T
+
5930 {
+
5931 private:
+
5932  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
+
5933 
+
5934  enum FLAGS
+
5935  {
+
5936  FLAG_USER_DATA_STRING = 0x01,
+
5937  };
+
5938 
+
5939 public:
+
5940  enum ALLOCATION_TYPE
+
5941  {
+
5942  ALLOCATION_TYPE_NONE,
+
5943  ALLOCATION_TYPE_BLOCK,
+
5944  ALLOCATION_TYPE_DEDICATED,
+
5945  };
5946 
-
5947  // Check if owned string was freed.
-
5948  VMA_ASSERT(m_pUserData == VMA_NULL);
-
5949  }
+
5947  /*
+
5948  This struct is allocated using VmaPoolAllocator.
+
5949  */
5950 
-
5951  void InitBlockAllocation(
-
5952  VmaDeviceMemoryBlock* block,
-
5953  VkDeviceSize offset,
-
5954  VkDeviceSize alignment,
-
5955  VkDeviceSize size,
-
5956  uint32_t memoryTypeIndex,
-
5957  VmaSuballocationType suballocationType,
-
5958  bool mapped,
-
5959  bool canBecomeLost)
-
5960  {
-
5961  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
-
5962  VMA_ASSERT(block != VMA_NULL);
-
5963  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
-
5964  m_Alignment = alignment;
-
5965  m_Size = size;
-
5966  m_MemoryTypeIndex = memoryTypeIndex;
-
5967  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
-
5968  m_SuballocationType = (uint8_t)suballocationType;
-
5969  m_BlockAllocation.m_Block = block;
-
5970  m_BlockAllocation.m_Offset = offset;
-
5971  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
-
5972  }
-
5973 
-
5974  void InitLost()
-
5975  {
-
5976  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
-
5977  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
-
5978  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
-
5979  m_MemoryTypeIndex = 0;
-
5980  m_BlockAllocation.m_Block = VMA_NULL;
-
5981  m_BlockAllocation.m_Offset = 0;
-
5982  m_BlockAllocation.m_CanBecomeLost = true;
-
5983  }
-
5984 
-
5985  void ChangeBlockAllocation(
-
5986  VmaAllocator hAllocator,
-
5987  VmaDeviceMemoryBlock* block,
-
5988  VkDeviceSize offset);
-
5989 
-
5990  void ChangeOffset(VkDeviceSize newOffset);
-
5991 
-
5992  // pMappedData not null means allocation is created with MAPPED flag.
-
5993  void InitDedicatedAllocation(
-
5994  uint32_t memoryTypeIndex,
-
5995  VkDeviceMemory hMemory,
-
5996  VmaSuballocationType suballocationType,
-
5997  void* pMappedData,
-
5998  VkDeviceSize size)
-
5999  {
-
6000  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
-
6001  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
-
6002  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
-
6003  m_Alignment = 0;
-
6004  m_Size = size;
-
6005  m_MemoryTypeIndex = memoryTypeIndex;
-
6006  m_SuballocationType = (uint8_t)suballocationType;
-
6007  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
-
6008  m_DedicatedAllocation.m_hMemory = hMemory;
-
6009  m_DedicatedAllocation.m_pMappedData = pMappedData;
-
6010  }
-
6011 
-
6012  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
-
6013  VkDeviceSize GetAlignment() const { return m_Alignment; }
-
6014  VkDeviceSize GetSize() const { return m_Size; }
-
6015  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
-
6016  void* GetUserData() const { return m_pUserData; }
-
6017  void SetUserData(VmaAllocator hAllocator, void* pUserData);
-
6018  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
-
6019 
-
6020  VmaDeviceMemoryBlock* GetBlock() const
-
6021  {
-
6022  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
-
6023  return m_BlockAllocation.m_Block;
-
6024  }
-
6025  VkDeviceSize GetOffset() const;
-
6026  VkDeviceMemory GetMemory() const;
-
6027  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
-
6028  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
-
6029  void* GetMappedData() const;
-
6030  bool CanBecomeLost() const;
-
6031 
-
6032  uint32_t GetLastUseFrameIndex() const
-
6033  {
-
6034  return m_LastUseFrameIndex.load();
+
5951  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
+
5952  m_Alignment{1},
+
5953  m_Size{0},
+
5954  m_pUserData{VMA_NULL},
+
5955  m_LastUseFrameIndex{currentFrameIndex},
+
5956  m_MemoryTypeIndex{0},
+
5957  m_Type{(uint8_t)ALLOCATION_TYPE_NONE},
+
5958  m_SuballocationType{(uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN},
+
5959  m_MapCount{0},
+
5960  m_Flags{userDataString ? (uint8_t)FLAG_USER_DATA_STRING : (uint8_t)0}
+
5961  {
+
5962 #if VMA_STATS_STRING_ENABLED
+
5963  m_CreationFrameIndex = currentFrameIndex;
+
5964  m_BufferImageUsage = 0;
+
5965 #endif
+
5966  }
+
5967 
+
5968  ~VmaAllocation_T()
+
5969  {
+
5970  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
+
5971 
+
5972  // Check if owned string was freed.
+
5973  VMA_ASSERT(m_pUserData == VMA_NULL);
+
5974  }
+
5975 
+
5976  void InitBlockAllocation(
+
5977  VmaDeviceMemoryBlock* block,
+
5978  VkDeviceSize offset,
+
5979  VkDeviceSize alignment,
+
5980  VkDeviceSize size,
+
5981  uint32_t memoryTypeIndex,
+
5982  VmaSuballocationType suballocationType,
+
5983  bool mapped,
+
5984  bool canBecomeLost)
+
5985  {
+
5986  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
+
5987  VMA_ASSERT(block != VMA_NULL);
+
5988  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
+
5989  m_Alignment = alignment;
+
5990  m_Size = size;
+
5991  m_MemoryTypeIndex = memoryTypeIndex;
+
5992  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
+
5993  m_SuballocationType = (uint8_t)suballocationType;
+
5994  m_BlockAllocation.m_Block = block;
+
5995  m_BlockAllocation.m_Offset = offset;
+
5996  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
+
5997  }
+
5998 
+
5999  void InitLost()
+
6000  {
+
6001  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
+
6002  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
+
6003  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
+
6004  m_MemoryTypeIndex = 0;
+
6005  m_BlockAllocation.m_Block = VMA_NULL;
+
6006  m_BlockAllocation.m_Offset = 0;
+
6007  m_BlockAllocation.m_CanBecomeLost = true;
+
6008  }
+
6009 
+
6010  void ChangeBlockAllocation(
+
6011  VmaAllocator hAllocator,
+
6012  VmaDeviceMemoryBlock* block,
+
6013  VkDeviceSize offset);
+
6014 
+
6015  void ChangeOffset(VkDeviceSize newOffset);
+
6016 
+
6017  // pMappedData not null means allocation is created with MAPPED flag.
+
6018  void InitDedicatedAllocation(
+
6019  uint32_t memoryTypeIndex,
+
6020  VkDeviceMemory hMemory,
+
6021  VmaSuballocationType suballocationType,
+
6022  void* pMappedData,
+
6023  VkDeviceSize size)
+
6024  {
+
6025  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
+
6026  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
+
6027  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
+
6028  m_Alignment = 0;
+
6029  m_Size = size;
+
6030  m_MemoryTypeIndex = memoryTypeIndex;
+
6031  m_SuballocationType = (uint8_t)suballocationType;
+
6032  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
+
6033  m_DedicatedAllocation.m_hMemory = hMemory;
+
6034  m_DedicatedAllocation.m_pMappedData = pMappedData;
6035  }
-
6036  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
-
6037  {
-
6038  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
-
6039  }
-
6040  /*
-
6041  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
-
6042  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
-
6043  - Else, returns false.
-
6044 
-
6045  If hAllocation is already lost, assert - you should not call it then.
-
6046  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
-
6047  */
-
6048  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
-
6049 
-
6050  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
-
6051  {
-
6052  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
-
6053  outInfo.blockCount = 1;
-
6054  outInfo.allocationCount = 1;
-
6055  outInfo.unusedRangeCount = 0;
-
6056  outInfo.usedBytes = m_Size;
-
6057  outInfo.unusedBytes = 0;
-
6058  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
-
6059  outInfo.unusedRangeSizeMin = UINT64_MAX;
-
6060  outInfo.unusedRangeSizeMax = 0;
-
6061  }
-
6062 
-
6063  void BlockAllocMap();
-
6064  void BlockAllocUnmap();
-
6065  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
-
6066  void DedicatedAllocUnmap(VmaAllocator hAllocator);
-
6067 
-
6068 #if VMA_STATS_STRING_ENABLED
-
6069  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
-
6070  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
-
6071 
-
6072  void InitBufferImageUsage(uint32_t bufferImageUsage)
-
6073  {
-
6074  VMA_ASSERT(m_BufferImageUsage == 0);
-
6075  m_BufferImageUsage = bufferImageUsage;
-
6076  }
-
6077 
-
6078  void PrintParameters(class VmaJsonWriter& json) const;
-
6079 #endif
-
6080 
-
6081 private:
-
6082  VkDeviceSize m_Alignment;
-
6083  VkDeviceSize m_Size;
-
6084  void* m_pUserData;
-
6085  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
-
6086  uint32_t m_MemoryTypeIndex;
-
6087  uint8_t m_Type; // ALLOCATION_TYPE
-
6088  uint8_t m_SuballocationType; // VmaSuballocationType
-
6089  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
-
6090  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
-
6091  uint8_t m_MapCount;
-
6092  uint8_t m_Flags; // enum FLAGS
-
6093 
-
6094  // Allocation out of VmaDeviceMemoryBlock.
-
6095  struct BlockAllocation
-
6096  {
-
6097  VmaDeviceMemoryBlock* m_Block;
-
6098  VkDeviceSize m_Offset;
-
6099  bool m_CanBecomeLost;
-
6100  };
-
6101 
-
6102  // Allocation for an object that has its own private VkDeviceMemory.
-
6103  struct DedicatedAllocation
-
6104  {
-
6105  VkDeviceMemory m_hMemory;
-
6106  void* m_pMappedData; // Not null means memory is mapped.
-
6107  };
-
6108 
-
6109  union
-
6110  {
-
6111  // Allocation out of VmaDeviceMemoryBlock.
-
6112  BlockAllocation m_BlockAllocation;
-
6113  // Allocation for an object that has its own private VkDeviceMemory.
-
6114  DedicatedAllocation m_DedicatedAllocation;
-
6115  };
-
6116 
-
6117 #if VMA_STATS_STRING_ENABLED
-
6118  uint32_t m_CreationFrameIndex;
-
6119  uint32_t m_BufferImageUsage; // 0 if unknown.
-
6120 #endif
-
6121 
-
6122  void FreeUserDataString(VmaAllocator hAllocator);
-
6123 };
-
6124 
-
6125 /*
-
6126 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
-
6127 an allocated memory block, or free.
-
6128 */
-
6129 struct VmaSuballocation
-
6130 {
-
6131  VkDeviceSize offset;
-
6132  VkDeviceSize size;
-
6133  VmaAllocation hAllocation;
-
6134  VmaSuballocationType type;
-
6135 };
-
6136 
-
6137 // Comparator for offsets.
-
6138 struct VmaSuballocationOffsetLess
-
6139 {
-
6140  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
-
6141  {
-
6142  return lhs.offset < rhs.offset;
-
6143  }
-
6144 };
-
6145 struct VmaSuballocationOffsetGreater
-
6146 {
-
6147  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
-
6148  {
-
6149  return lhs.offset > rhs.offset;
-
6150  }
-
6151 };
-
6152 
-
6153 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
-
6154 
-
6155 // Cost of one additional allocation lost, as equivalent in bytes.
-
6156 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
-
6157 
-
6158 enum class VmaAllocationRequestType
-
6159 {
-
6160  Normal,
-
6161  // Used by "Linear" algorithm.
-
6162  UpperAddress,
-
6163  EndOf1st,
-
6164  EndOf2nd,
-
6165 };
-
6166 
-
6167 /*
-
6168 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
-
6169 
-
6170 If canMakeOtherLost was false:
-
6171 - item points to a FREE suballocation.
-
6172 - itemsToMakeLostCount is 0.
-
6173 
-
6174 If canMakeOtherLost was true:
-
6175 - item points to the first of a sequence of suballocations, which are either FREE,
-
6176  or point to VmaAllocations that can become lost.
-
6177 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
-
6178  the requested allocation to succeed.
-
6179 */
-
6180 struct VmaAllocationRequest
-
6181 {
-
6182  VkDeviceSize offset;
-
6183  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
-
6184  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
-
6185  VmaSuballocationList::iterator item;
-
6186  size_t itemsToMakeLostCount;
-
6187  void* customData;
-
6188  VmaAllocationRequestType type;
-
6189 
-
6190  VkDeviceSize CalcCost() const
-
6191  {
-
6192  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
-
6193  }
-
6194 };
-
6195 
-
6196 /*
-
6197 Data structure used for bookkeeping of allocations and unused ranges of memory
-
6198 in a single VkDeviceMemory block.
-
6199 */
-
6200 class VmaBlockMetadata
-
6201 {
-
6202 public:
-
6203  VmaBlockMetadata(VmaAllocator hAllocator);
-
6204  virtual ~VmaBlockMetadata() { }
-
6205  virtual void Init(VkDeviceSize size) { m_Size = size; }
-
6206 
-
6207  // Validates all data structures inside this object. If not valid, returns false.
-
6208  virtual bool Validate() const = 0;
-
6209  VkDeviceSize GetSize() const { return m_Size; }
-
6210  virtual size_t GetAllocationCount() const = 0;
-
6211  virtual VkDeviceSize GetSumFreeSize() const = 0;
-
6212  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
-
6213  // Returns true if this block is empty - contains only a single free suballocation.
-
6214  virtual bool IsEmpty() const = 0;
-
6215 
-
6216  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
-
6217  // Shouldn't modify blockCount.
-
6218  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
-
6219 
-
6220 #if VMA_STATS_STRING_ENABLED
-
6221  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
-
6222 #endif
-
6223 
-
6224  // Tries to find a place for suballocation with given parameters inside this block.
-
6225  // If succeeded, fills pAllocationRequest and returns true.
-
6226  // If failed, returns false.
-
6227  virtual bool CreateAllocationRequest(
-
6228  uint32_t currentFrameIndex,
-
6229  uint32_t frameInUseCount,
-
6230  VkDeviceSize bufferImageGranularity,
-
6231  VkDeviceSize allocSize,
-
6232  VkDeviceSize allocAlignment,
-
6233  bool upperAddress,
-
6234  VmaSuballocationType allocType,
-
6235  bool canMakeOtherLost,
-
6236  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
-
6237  uint32_t strategy,
-
6238  VmaAllocationRequest* pAllocationRequest) = 0;
-
6239 
-
6240  virtual bool MakeRequestedAllocationsLost(
-
6241  uint32_t currentFrameIndex,
-
6242  uint32_t frameInUseCount,
-
6243  VmaAllocationRequest* pAllocationRequest) = 0;
+
6036 
+
6037  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
+
6038  VkDeviceSize GetAlignment() const { return m_Alignment; }
+
6039  VkDeviceSize GetSize() const { return m_Size; }
+
6040  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
+
6041  void* GetUserData() const { return m_pUserData; }
+
6042  void SetUserData(VmaAllocator hAllocator, void* pUserData);
+
6043  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
+
6044 
+
6045  VmaDeviceMemoryBlock* GetBlock() const
+
6046  {
+
6047  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
+
6048  return m_BlockAllocation.m_Block;
+
6049  }
+
6050  VkDeviceSize GetOffset() const;
+
6051  VkDeviceMemory GetMemory() const;
+
6052  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
+
6053  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
+
6054  void* GetMappedData() const;
+
6055  bool CanBecomeLost() const;
+
6056 
+
6057  uint32_t GetLastUseFrameIndex() const
+
6058  {
+
6059  return m_LastUseFrameIndex.load();
+
6060  }
+
6061  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
+
6062  {
+
6063  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
+
6064  }
+
6065  /*
+
6066  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
+
6067  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
+
6068  - Else, returns false.
+
6069 
+
6070  If hAllocation is already lost, assert - you should not call it then.
+
6071  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
+
6072  */
+
6073  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
+
6074 
+
6075  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
+
6076  {
+
6077  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
+
6078  outInfo.blockCount = 1;
+
6079  outInfo.allocationCount = 1;
+
6080  outInfo.unusedRangeCount = 0;
+
6081  outInfo.usedBytes = m_Size;
+
6082  outInfo.unusedBytes = 0;
+
6083  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
+
6084  outInfo.unusedRangeSizeMin = UINT64_MAX;
+
6085  outInfo.unusedRangeSizeMax = 0;
+
6086  }
+
6087 
+
6088  void BlockAllocMap();
+
6089  void BlockAllocUnmap();
+
6090  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
+
6091  void DedicatedAllocUnmap(VmaAllocator hAllocator);
+
6092 
+
6093 #if VMA_STATS_STRING_ENABLED
+
6094  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
+
6095  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
+
6096 
+
6097  void InitBufferImageUsage(uint32_t bufferImageUsage)
+
6098  {
+
6099  VMA_ASSERT(m_BufferImageUsage == 0);
+
6100  m_BufferImageUsage = bufferImageUsage;
+
6101  }
+
6102 
+
6103  void PrintParameters(class VmaJsonWriter& json) const;
+
6104 #endif
+
6105 
+
6106 private:
+
6107  VkDeviceSize m_Alignment;
+
6108  VkDeviceSize m_Size;
+
6109  void* m_pUserData;
+
6110  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
+
6111  uint32_t m_MemoryTypeIndex;
+
6112  uint8_t m_Type; // ALLOCATION_TYPE
+
6113  uint8_t m_SuballocationType; // VmaSuballocationType
+
6114  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
+
6115  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
+
6116  uint8_t m_MapCount;
+
6117  uint8_t m_Flags; // enum FLAGS
+
6118 
+
6119  // Allocation out of VmaDeviceMemoryBlock.
+
6120  struct BlockAllocation
+
6121  {
+
6122  VmaDeviceMemoryBlock* m_Block;
+
6123  VkDeviceSize m_Offset;
+
6124  bool m_CanBecomeLost;
+
6125  };
+
6126 
+
6127  // Allocation for an object that has its own private VkDeviceMemory.
+
6128  struct DedicatedAllocation
+
6129  {
+
6130  VkDeviceMemory m_hMemory;
+
6131  void* m_pMappedData; // Not null means memory is mapped.
+
6132  };
+
6133 
+
6134  union
+
6135  {
+
6136  // Allocation out of VmaDeviceMemoryBlock.
+
6137  BlockAllocation m_BlockAllocation;
+
6138  // Allocation for an object that has its own private VkDeviceMemory.
+
6139  DedicatedAllocation m_DedicatedAllocation;
+
6140  };
+
6141 
+
6142 #if VMA_STATS_STRING_ENABLED
+
6143  uint32_t m_CreationFrameIndex;
+
6144  uint32_t m_BufferImageUsage; // 0 if unknown.
+
6145 #endif
+
6146 
+
6147  void FreeUserDataString(VmaAllocator hAllocator);
+
6148 };
+
6149 
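// A standalone sketch of the m_MapCount packing described in the comments
// above: bit 0x80 marks an allocation created with
// VMA_ALLOCATION_CREATE_MAPPED_BIT, and the low 7 bits (mask 0x7F) count
// vmaMapMemory()/vmaUnmapMemory() pairs. The helper names are hypothetical,
// not part of VMA.
#include <cassert>
#include <cstdint>

static const uint8_t PERSISTENT_MAP = 0x80;

static bool IsPersistentlyMapped(uint8_t mapCount) { return (mapCount & PERSISTENT_MAP) != 0; }
static uint8_t MapRefCount(uint8_t mapCount) { return (uint8_t)(mapCount & 0x7F); }

int main()
{
    uint8_t mapCount = PERSISTENT_MAP; // created persistently mapped, refcount 0
    ++mapCount;                        // one explicit vmaMapMemory()
    assert(IsPersistentlyMapped(mapCount) && MapRefCount(mapCount) == 1);
    --mapCount;                        // matching vmaUnmapMemory()
    // Mirrors the destructor's check: the explicit refcount must return to 0.
    assert(MapRefCount(mapCount) == 0);
    return 0;
}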
+
6150 /*
+
6151 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
+
6152 an allocated memory block, or free.
+
6153 */
+
6154 struct VmaSuballocation
+
6155 {
+
6156  VkDeviceSize offset;
+
6157  VkDeviceSize size;
+
6158  VmaAllocation hAllocation;
+
6159  VmaSuballocationType type;
+
6160 };
+
6161 
+
6162 // Comparator for offsets.
+
6163 struct VmaSuballocationOffsetLess
+
6164 {
+
6165  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
+
6166  {
+
6167  return lhs.offset < rhs.offset;
+
6168  }
+
6169 };
+
6170 struct VmaSuballocationOffsetGreater
+
6171 {
+
6172  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
+
6173  {
+
6174  return lhs.offset > rhs.offset;
+
6175  }
+
6176 };
+
6177 
+
6178 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
+
6179 
+
6180 // Cost of one additional allocation lost, as equivalent in bytes.
+
6181 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
+
6182 
+
6183 enum class VmaAllocationRequestType
+
6184 {
+
6185  Normal,
+
6186  // Used by "Linear" algorithm.
+
6187  UpperAddress,
+
6188  EndOf1st,
+
6189  EndOf2nd,
+
6190 };
+
6191 
+
6192 /*
+
6193 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
+
6194 
+
6195 If canMakeOtherLost was false:
+
6196 - item points to a FREE suballocation.
+
6197 - itemsToMakeLostCount is 0.
+
6198 
+
6199 If canMakeOtherLost was true:
+
6200 - item points to the first of a sequence of suballocations, which are either FREE,
+
6201  or point to VmaAllocations that can become lost.
+
6202 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
+
6203  the requested allocation to succeed.
+
6204 */
+
6205 struct VmaAllocationRequest
+
6206 {
+
6207  VkDeviceSize offset;
+
6208  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
+
6209  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
+
6210  VmaSuballocationList::iterator item;
+
6211  size_t itemsToMakeLostCount;
+
6212  void* customData;
+
6213  VmaAllocationRequestType type;
+
6214 
+
6215  VkDeviceSize CalcCost() const
+
6216  {
+
6217  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
+
6218  }
+
6219 };
+
6220 
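// A small worked example of CalcCost() above, with made-up numbers: each
// allocation that would be made lost is priced at VMA_LOST_ALLOCATION_COST
// (1 MiB equivalent), so a request overlapping 4096 bytes of items and
// requiring 2 allocations to be lost costs 4096 + 2 * 1048576 = 2101248,
// letting cheaper candidate requests win the comparison.
#include <cstdint>
#include <cstdio>

int main()
{
    const uint64_t lostAllocationCost = 1048576; // same value as VMA_LOST_ALLOCATION_COST
    const uint64_t sumItemSize = 4096;           // bytes of to-be-lost items overlapping the request
    const uint64_t itemsToMakeLostCount = 2;
    const uint64_t cost = sumItemSize + itemsToMakeLostCount * lostAllocationCost;
    std::printf("cost = %llu\n", (unsigned long long)cost); // cost = 2101248
    return 0;
}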
+
6221 /*
+
6222 Data structure used for bookkeeping of allocations and unused ranges of memory
+
6223 in a single VkDeviceMemory block.
+
6224 */
+
6225 class VmaBlockMetadata
+
6226 {
+
6227 public:
+
6228  VmaBlockMetadata(VmaAllocator hAllocator);
+
6229  virtual ~VmaBlockMetadata() { }
+
6230  virtual void Init(VkDeviceSize size) { m_Size = size; }
+
6231 
+
6232  // Validates all data structures inside this object. If not valid, returns false.
+
6233  virtual bool Validate() const = 0;
+
6234  VkDeviceSize GetSize() const { return m_Size; }
+
6235  virtual size_t GetAllocationCount() const = 0;
+
6236  virtual VkDeviceSize GetSumFreeSize() const = 0;
+
6237  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
+
6238  // Returns true if this block is empty - contains only a single free suballocation.
+
6239  virtual bool IsEmpty() const = 0;
+
6240 
+
6241  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
+
6242  // Shouldn't modify blockCount.
+
6243  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
6244 
-
6245  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
-
6246 
-
6247  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
+
6245 #if VMA_STATS_STRING_ENABLED
+
6246  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
+
6247 #endif
6248 
-
6249  // Makes actual allocation based on request. Request must already be checked and valid.
-
6250  virtual void Alloc(
-
6251  const VmaAllocationRequest& request,
-
6252  VmaSuballocationType type,
-
6253  VkDeviceSize allocSize,
-
6254  VmaAllocation hAllocation) = 0;
-
6255 
-
6256  // Frees suballocation assigned to given memory region.
-
6257  virtual void Free(const VmaAllocation allocation) = 0;
-
6258  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
-
6259 
-
6260 protected:
-
6261  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
-
6262 
-
6263 #if VMA_STATS_STRING_ENABLED
-
6264  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
-
6265  VkDeviceSize unusedBytes,
-
6266  size_t allocationCount,
-
6267  size_t unusedRangeCount) const;
-
6268  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
-
6269  VkDeviceSize offset,
-
6270  VmaAllocation hAllocation) const;
-
6271  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
-
6272  VkDeviceSize offset,
-
6273  VkDeviceSize size) const;
-
6274  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
-
6275 #endif
-
6276 
-
6277 private:
-
6278  VkDeviceSize m_Size;
-
6279  const VkAllocationCallbacks* m_pAllocationCallbacks;
-
6280 };
-
6281 
-
6282 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
-
6283  VMA_ASSERT(0 && "Validation failed: " #cond); \
-
6284  return false; \
-
6285  } } while(false)
-
6286 
-
6287 class VmaBlockMetadata_Generic : public VmaBlockMetadata
-
6288 {
-
6289  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
-
6290 public:
-
6291  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
-
6292  virtual ~VmaBlockMetadata_Generic();
-
6293  virtual void Init(VkDeviceSize size);
-
6294 
-
6295  virtual bool Validate() const;
-
6296  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
-
6297  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
-
6298  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
-
6299  virtual bool IsEmpty() const;
-
6300 
-
6301  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
-
6302  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
-
6303 
-
6304 #if VMA_STATS_STRING_ENABLED
-
6305  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
-
6306 #endif
-
6307 
-
6308  virtual bool CreateAllocationRequest(
-
6309  uint32_t currentFrameIndex,
-
6310  uint32_t frameInUseCount,
-
6311  VkDeviceSize bufferImageGranularity,
-
6312  VkDeviceSize allocSize,
-
6313  VkDeviceSize allocAlignment,
-
6314  bool upperAddress,
-
6315  VmaSuballocationType allocType,
-
6316  bool canMakeOtherLost,
-
6317  uint32_t strategy,
-
6318  VmaAllocationRequest* pAllocationRequest);
+
6249  // Tries to find a place for suballocation with given parameters inside this block.
+
6250  // If succeeded, fills pAllocationRequest and returns true.
+
6251  // If failed, returns false.
+
6252  virtual bool CreateAllocationRequest(
+
6253  uint32_t currentFrameIndex,
+
6254  uint32_t frameInUseCount,
+
6255  VkDeviceSize bufferImageGranularity,
+
6256  VkDeviceSize allocSize,
+
6257  VkDeviceSize allocAlignment,
+
6258  bool upperAddress,
+
6259  VmaSuballocationType allocType,
+
6260  bool canMakeOtherLost,
+
6261  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
+
6262  uint32_t strategy,
+
6263  VmaAllocationRequest* pAllocationRequest) = 0;
+
6264 
+
6265  virtual bool MakeRequestedAllocationsLost(
+
6266  uint32_t currentFrameIndex,
+
6267  uint32_t frameInUseCount,
+
6268  VmaAllocationRequest* pAllocationRequest) = 0;
+
6269 
+
6270  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
+
6271 
+
6272  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
+
6273 
+
6274  // Makes actual allocation based on request. Request must already be checked and valid.
+
6275  virtual void Alloc(
+
6276  const VmaAllocationRequest& request,
+
6277  VmaSuballocationType type,
+
6278  VkDeviceSize allocSize,
+
6279  VmaAllocation hAllocation) = 0;
+
6280 
+
6281  // Frees suballocation assigned to given memory region.
+
6282  virtual void Free(const VmaAllocation allocation) = 0;
+
6283  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
+
6284 
+
6285 protected:
+
6286  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
+
6287 
+
6288 #if VMA_STATS_STRING_ENABLED
+
6289  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
+
6290  VkDeviceSize unusedBytes,
+
6291  size_t allocationCount,
+
6292  size_t unusedRangeCount) const;
+
6293  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
+
6294  VkDeviceSize offset,
+
6295  VmaAllocation hAllocation) const;
+
6296  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
+
6297  VkDeviceSize offset,
+
6298  VkDeviceSize size) const;
+
6299  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
+
6300 #endif
+
6301 
+
6302 private:
+
6303  VkDeviceSize m_Size;
+
6304  const VkAllocationCallbacks* m_pAllocationCallbacks;
+
6305 };
+
6306 
+
6307 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
+
6308  VMA_ASSERT(0 && "Validation failed: " #cond); \
+
6309  return false; \
+
6310  } } while(false)
+
6311 
+
6312 class VmaBlockMetadata_Generic : public VmaBlockMetadata
+
6313 {
+
6314  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
+
6315 public:
+
6316  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
+
6317  virtual ~VmaBlockMetadata_Generic();
+
6318  virtual void Init(VkDeviceSize size);
6319 
-
6320  virtual bool MakeRequestedAllocationsLost(
-
6321  uint32_t currentFrameIndex,
-
6322  uint32_t frameInUseCount,
-
6323  VmaAllocationRequest* pAllocationRequest);
-
6324 
-
6325  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
-
6326 
-
6327  virtual VkResult CheckCorruption(const void* pBlockData);
+
6320  virtual bool Validate() const;
+
6321  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
+
6322  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
+
6323  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
+
6324  virtual bool IsEmpty() const;
+
6325 
+
6326  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
+
6327  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
6328 
-
6329  virtual void Alloc(
-
6330  const VmaAllocationRequest& request,
-
6331  VmaSuballocationType type,
-
6332  VkDeviceSize allocSize,
-
6333  VmaAllocation hAllocation);
-
6334 
-
6335  virtual void Free(const VmaAllocation allocation);
-
6336  virtual void FreeAtOffset(VkDeviceSize offset);
-
6337 
-
6339  // For defragmentation
-
6340 
-
6341  bool IsBufferImageGranularityConflictPossible(
-
6342  VkDeviceSize bufferImageGranularity,
-
6343  VmaSuballocationType& inOutPrevSuballocType) const;
+
6329 #if VMA_STATS_STRING_ENABLED
+
6330  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
+
6331 #endif
+
6332 
+
6333  virtual bool CreateAllocationRequest(
+
6334  uint32_t currentFrameIndex,
+
6335  uint32_t frameInUseCount,
+
6336  VkDeviceSize bufferImageGranularity,
+
6337  VkDeviceSize allocSize,
+
6338  VkDeviceSize allocAlignment,
+
6339  bool upperAddress,
+
6340  VmaSuballocationType allocType,
+
6341  bool canMakeOtherLost,
+
6342  uint32_t strategy,
+
6343  VmaAllocationRequest* pAllocationRequest);
6344 
-
6345 private:
-
6346  friend class VmaDefragmentationAlgorithm_Generic;
-
6347  friend class VmaDefragmentationAlgorithm_Fast;
-
6348 
-
6349  uint32_t m_FreeCount;
-
6350  VkDeviceSize m_SumFreeSize;
-
6351  VmaSuballocationList m_Suballocations;
-
6352  // Suballocations that are free and have size greater than certain threshold.
-
6353  // Sorted by size, ascending.
-
6354  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
-
6355 
-
6356  bool ValidateFreeSuballocationList() const;
-
6357 
-
6358  // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
-
6359  // If yes, fills pOffset and returns true. If no, returns false.
-
6360  bool CheckAllocation(
-
6361  uint32_t currentFrameIndex,
-
6362  uint32_t frameInUseCount,
-
6363  VkDeviceSize bufferImageGranularity,
-
6364  VkDeviceSize allocSize,
-
6365  VkDeviceSize allocAlignment,
-
6366  VmaSuballocationType allocType,
-
6367  VmaSuballocationList::const_iterator suballocItem,
-
6368  bool canMakeOtherLost,
-
6369  VkDeviceSize* pOffset,
-
6370  size_t* itemsToMakeLostCount,
-
6371  VkDeviceSize* pSumFreeSize,
-
6372  VkDeviceSize* pSumItemSize) const;
-
6373  // Given a free suballocation, merges it with the following one, which must also be free.
-
6374  void MergeFreeWithNext(VmaSuballocationList::iterator item);
-
6375  // Releases given suballocation, making it free.
-
6376  // Merges it with adjacent free suballocations if applicable.
-
6377  // Returns iterator to new free suballocation at this place.
-
6378  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
-
6379  // Given a free suballocation, inserts it into the sorted list of
-
6380  // m_FreeSuballocationsBySize if it's suitable.
-
6381  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
-
6382  // Given a free suballocation, removes it from the sorted list of
-
6383  // m_FreeSuballocationsBySize if it's suitable.
-
6384  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
-
6385 };
-
6386 
-
6387 /*
-
6388 Allocations and their references in internal data structure look like this:
-
6389 
-
6390 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
-
6391 
-
6392  0 +-------+
-
6393  | |
-
6394  | |
-
6395  | |
-
6396  +-------+
-
6397  | Alloc | 1st[m_1stNullItemsBeginCount]
-
6398  +-------+
-
6399  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
-
6400  +-------+
-
6401  | ... |
-
6402  +-------+
-
6403  | Alloc | 1st[1st.size() - 1]
-
6404  +-------+
-
6405  | |
-
6406  | |
-
6407  | |
-
6408 GetSize() +-------+
-
6409 
-
6410 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
-
6411 
-
6412  0 +-------+
-
6413  | Alloc | 2nd[0]
-
6414  +-------+
-
6415  | Alloc | 2nd[1]
-
6416  +-------+
-
6417  | ... |
-
6418  +-------+
-
6419  | Alloc | 2nd[2nd.size() - 1]
-
6420  +-------+
-
6421  | |
-
6422  | |
-
6423  | |
-
6424  +-------+
-
6425  | Alloc | 1st[m_1stNullItemsBeginCount]
-
6426  +-------+
-
6427  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
-
6428  +-------+
-
6429  | ... |
-
6430  +-------+
-
6431  | Alloc | 1st[1st.size() - 1]
-
6432  +-------+
-
6433  | |
-
6434 GetSize() +-------+
-
6435 
-
6436 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
-
6437 
-
6438  0 +-------+
-
6439  | |
-
6440  | |
-
6441  | |
-
6442  +-------+
-
6443  | Alloc | 1st[m_1stNullItemsBeginCount]
-
6444  +-------+
-
6445  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
-
6446  +-------+
-
6447  | ... |
-
6448  +-------+
-
6449  | Alloc | 1st[1st.size() - 1]
-
6450  +-------+
-
6451  | |
-
6452  | |
-
6453  | |
-
6454  +-------+
-
6455  | Alloc | 2nd[2nd.size() - 1]
-
6456  +-------+
-
6457  | ... |
-
6458  +-------+
-
6459  | Alloc | 2nd[1]
-
6460  +-------+
-
6461  | Alloc | 2nd[0]
-
6462 GetSize() +-------+
-
6463 
-
6464 */
-
6465 class VmaBlockMetadata_Linear : public VmaBlockMetadata
-
6466 {
-
6467  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
-
6468 public:
-
6469  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
-
6470  virtual ~VmaBlockMetadata_Linear();
-
6471  virtual void Init(VkDeviceSize size);
-
6472 
-
6473  virtual bool Validate() const;
-
6474  virtual size_t GetAllocationCount() const;
-
6475  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
-
6476  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
-
6477  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
-
6478 
-
6479  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
-
6480  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
-
6481 
-
6482 #if VMA_STATS_STRING_ENABLED
-
6483  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
-
6484 #endif
-
6485 
-
6486  virtual bool CreateAllocationRequest(
-
6487  uint32_t currentFrameIndex,
-
6488  uint32_t frameInUseCount,
-
6489  VkDeviceSize bufferImageGranularity,
-
6490  VkDeviceSize allocSize,
-
6491  VkDeviceSize allocAlignment,
-
6492  bool upperAddress,
-
6493  VmaSuballocationType allocType,
-
6494  bool canMakeOtherLost,
-
6495  uint32_t strategy,
-
6496  VmaAllocationRequest* pAllocationRequest);
+
6345  virtual bool MakeRequestedAllocationsLost(
+
6346  uint32_t currentFrameIndex,
+
6347  uint32_t frameInUseCount,
+
6348  VmaAllocationRequest* pAllocationRequest);
+
6349 
+
6350  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
+
6351 
+
6352  virtual VkResult CheckCorruption(const void* pBlockData);
+
6353 
+
6354  virtual void Alloc(
+
6355  const VmaAllocationRequest& request,
+
6356  VmaSuballocationType type,
+
6357  VkDeviceSize allocSize,
+
6358  VmaAllocation hAllocation);
+
6359 
+
6360  virtual void Free(const VmaAllocation allocation);
+
6361  virtual void FreeAtOffset(VkDeviceSize offset);
+
6362 
+
6364  // For defragmentation
+
6365 
+
6366  bool IsBufferImageGranularityConflictPossible(
+
6367  VkDeviceSize bufferImageGranularity,
+
6368  VmaSuballocationType& inOutPrevSuballocType) const;
+
6369 
+
6370 private:
+
6371  friend class VmaDefragmentationAlgorithm_Generic;
+
6372  friend class VmaDefragmentationAlgorithm_Fast;
+
6373 
+
6374  uint32_t m_FreeCount;
+
6375  VkDeviceSize m_SumFreeSize;
+
6376  VmaSuballocationList m_Suballocations;
+
6377  // Suballocations that are free and have size greater than certain threshold.
+
6378  // Sorted by size, ascending.
+
6379  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
+
6380 
+
6381  bool ValidateFreeSuballocationList() const;
+
6382 
+
6383  // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
+
6384  // If yes, fills pOffset and returns true. If no, returns false.
+
6385  bool CheckAllocation(
+
6386  uint32_t currentFrameIndex,
+
6387  uint32_t frameInUseCount,
+
6388  VkDeviceSize bufferImageGranularity,
+
6389  VkDeviceSize allocSize,
+
6390  VkDeviceSize allocAlignment,
+
6391  VmaSuballocationType allocType,
+
6392  VmaSuballocationList::const_iterator suballocItem,
+
6393  bool canMakeOtherLost,
+
6394  VkDeviceSize* pOffset,
+
6395  size_t* itemsToMakeLostCount,
+
6396  VkDeviceSize* pSumFreeSize,
+
6397  VkDeviceSize* pSumItemSize) const;
+
6398  // Given a free suballocation, merges it with the following one, which must also be free.
+
6399  void MergeFreeWithNext(VmaSuballocationList::iterator item);
+
6400  // Releases given suballocation, making it free.
+
6401  // Merges it with adjacent free suballocations if applicable.
+
6402  // Returns iterator to new free suballocation at this place.
+
6403  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
+
6404  // Given a free suballocation, inserts it into the sorted list of
+
6405  // m_FreeSuballocationsBySize if it's suitable.
+
6406  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
+
6407  // Given a free suballocation, removes it from the sorted list of
+
6408  // m_FreeSuballocationsBySize if it's suitable.
+
6409  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
+
6410 };
+
6411 
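// A standalone sketch of the idea behind m_FreeSuballocationsBySize above:
// keeping free ranges sorted by size (ascending) turns a best-fit search into
// a single lower_bound instead of a linear scan. FreeRange is a hypothetical
// stand-in for the VmaSuballocationList::iterator entries the real class stores.
#include <algorithm>
#include <cstdio>
#include <vector>

struct FreeRange { unsigned long long offset, size; };

// Returns the smallest free range that can hold 'size', or nullptr if none fits.
static const FreeRange* FindBestFit(const std::vector<FreeRange>& bySize, unsigned long long size)
{
    auto it = std::lower_bound(bySize.begin(), bySize.end(), size,
        [](const FreeRange& r, unsigned long long s) { return r.size < s; });
    return it != bySize.end() ? &*it : nullptr;
}

int main()
{
    // Invariant stated in the comments above: sorted by size, ascending.
    std::vector<FreeRange> bySize = { { 256, 64 }, { 0, 128 }, { 512, 1024 } };
    if(const FreeRange* r = FindBestFit(bySize, 100))
        std::printf("offset=%llu size=%llu\n", r->offset, r->size); // offset=0 size=128
    return 0;
}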
+
6412 /*
+
6413 Allocations and their references in internal data structure look like this:
+
6414 
+
6415 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
+
6416 
+
6417  0 +-------+
+
6418  | |
+
6419  | |
+
6420  | |
+
6421  +-------+
+
6422  | Alloc | 1st[m_1stNullItemsBeginCount]
+
6423  +-------+
+
6424  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
+
6425  +-------+
+
6426  | ... |
+
6427  +-------+
+
6428  | Alloc | 1st[1st.size() - 1]
+
6429  +-------+
+
6430  | |
+
6431  | |
+
6432  | |
+
6433 GetSize() +-------+
+
6434 
+
6435 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
+
6436 
+
6437  0 +-------+
+
6438  | Alloc | 2nd[0]
+
6439  +-------+
+
6440  | Alloc | 2nd[1]
+
6441  +-------+
+
6442  | ... |
+
6443  +-------+
+
6444  | Alloc | 2nd[2nd.size() - 1]
+
6445  +-------+
+
6446  | |
+
6447  | |
+
6448  | |
+
6449  +-------+
+
6450  | Alloc | 1st[m_1stNullItemsBeginCount]
+
6451  +-------+
+
6452  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
+
6453  +-------+
+
6454  | ... |
+
6455  +-------+
+
6456  | Alloc | 1st[1st.size() - 1]
+
6457  +-------+
+
6458  | |
+
6459 GetSize() +-------+
+
6460 
+
6461 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
+
6462 
+
6463  0 +-------+
+
6464  | |
+
6465  | |
+
6466  | |
+
6467  +-------+
+
6468  | Alloc | 1st[m_1stNullItemsBeginCount]
+
6469  +-------+
+
6470  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
+
6471  +-------+
+
6472  | ... |
+
6473  +-------+
+
6474  | Alloc | 1st[1st.size() - 1]
+
6475  +-------+
+
6476  | |
+
6477  | |
+
6478  | |
+
6479  +-------+
+
6480  | Alloc | 2nd[2nd.size() - 1]
+
6481  +-------+
+
6482  | ... |
+
6483  +-------+
+
6484  | Alloc | 2nd[1]
+
6485  +-------+
+
6486  | Alloc | 2nd[0]
+
6487 GetSize() +-------+
+
6488 
+
6489 */
+
6490 class VmaBlockMetadata_Linear : public VmaBlockMetadata
+
6491 {
+
6492  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
+
6493 public:
+
6494  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
+
6495  virtual ~VmaBlockMetadata_Linear();
+
6496  virtual void Init(VkDeviceSize size);
6497 
-
6498  virtual bool MakeRequestedAllocationsLost(
-
6499  uint32_t currentFrameIndex,
-
6500  uint32_t frameInUseCount,
-
6501  VmaAllocationRequest* pAllocationRequest);
-
6502 
-
6503  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
-
6504 
-
6505  virtual VkResult CheckCorruption(const void* pBlockData);
+
6498  virtual bool Validate() const;
+
6499  virtual size_t GetAllocationCount() const;
+
6500  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
+
6501  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
+
6502  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
+
6503 
+
6504  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
+
6505  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
6506 
-
6507  virtual void Alloc(
-
6508  const VmaAllocationRequest& request,
-
6509  VmaSuballocationType type,
-
6510  VkDeviceSize allocSize,
-
6511  VmaAllocation hAllocation);
-
6512 
-
6513  virtual void Free(const VmaAllocation allocation);
-
6514  virtual void FreeAtOffset(VkDeviceSize offset);
-
6515 
-
6516 private:
-
6517  /*
-
6518  There are two suballocation vectors, used in a ping-pong fashion.
-
6519  The one with index m_1stVectorIndex is called 1st.
-
6520  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
-
6521  2nd can be non-empty only when 1st is not empty.
-
6522  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
-
6523  */
-
6524  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
-
6525 
-
6526  enum SECOND_VECTOR_MODE
-
6527  {
-
6528  SECOND_VECTOR_EMPTY,
-
6529  /*
-
6530  Suballocations in 2nd vector are created later than the ones in 1st, but they
-
6531  all have smaller offset.
-
6532  */
-
6533  SECOND_VECTOR_RING_BUFFER,
-
6534  /*
-
6535  Suballocations in 2nd vector are the upper side of a double stack.
-
6536  They all have offsets higher than those in 1st vector.
-
6537  Top of this stack means smaller offsets, but higher indices in this vector.
-
6538  */
-
6539  SECOND_VECTOR_DOUBLE_STACK,
-
6540  };
-
6541 
-
6542  VkDeviceSize m_SumFreeSize;
-
6543  SuballocationVectorType m_Suballocations0, m_Suballocations1;
-
6544  uint32_t m_1stVectorIndex;
-
6545  SECOND_VECTOR_MODE m_2ndVectorMode;
-
6546 
-
6547  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
-
6548  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
-
6549  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
-
6550  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
-
6551 
-
6552  // Number of items in 1st vector with hAllocation = null at the beginning.
-
6553  size_t m_1stNullItemsBeginCount;
-
6554  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
-
6555  size_t m_1stNullItemsMiddleCount;
-
6556  // Number of items in 2nd vector with hAllocation = null.
-
6557  size_t m_2ndNullItemsCount;
-
6558 
-
6559  bool ShouldCompact1st() const;
-
6560  void CleanupAfterFree();
-
6561 
-
6562  bool CreateAllocationRequest_LowerAddress(
-
6563  uint32_t currentFrameIndex,
-
6564  uint32_t frameInUseCount,
-
6565  VkDeviceSize bufferImageGranularity,
-
6566  VkDeviceSize allocSize,
-
6567  VkDeviceSize allocAlignment,
-
6568  VmaSuballocationType allocType,
-
6569  bool canMakeOtherLost,
-
6570  uint32_t strategy,
-
6571  VmaAllocationRequest* pAllocationRequest);
-
6572  bool CreateAllocationRequest_UpperAddress(
-
6573  uint32_t currentFrameIndex,
-
6574  uint32_t frameInUseCount,
-
6575  VkDeviceSize bufferImageGranularity,
-
6576  VkDeviceSize allocSize,
-
6577  VkDeviceSize allocAlignment,
-
6578  VmaSuballocationType allocType,
-
6579  bool canMakeOtherLost,
-
6580  uint32_t strategy,
-
6581  VmaAllocationRequest* pAllocationRequest);
-
6582 };
+
6507 #if VMA_STATS_STRING_ENABLED
+
6508  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
+
6509 #endif
+
6510 
+
6511  virtual bool CreateAllocationRequest(
+
6512  uint32_t currentFrameIndex,
+
6513  uint32_t frameInUseCount,
+
6514  VkDeviceSize bufferImageGranularity,
+
6515  VkDeviceSize allocSize,
+
6516  VkDeviceSize allocAlignment,
+
6517  bool upperAddress,
+
6518  VmaSuballocationType allocType,
+
6519  bool canMakeOtherLost,
+
6520  uint32_t strategy,
+
6521  VmaAllocationRequest* pAllocationRequest);
+
6522 
+
6523  virtual bool MakeRequestedAllocationsLost(
+
6524  uint32_t currentFrameIndex,
+
6525  uint32_t frameInUseCount,
+
6526  VmaAllocationRequest* pAllocationRequest);
+
6527 
+
6528  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
+
6529 
+
6530  virtual VkResult CheckCorruption(const void* pBlockData);
+
6531 
+
6532  virtual void Alloc(
+
6533  const VmaAllocationRequest& request,
+
6534  VmaSuballocationType type,
+
6535  VkDeviceSize allocSize,
+
6536  VmaAllocation hAllocation);
+
6537 
+
6538  virtual void Free(const VmaAllocation allocation);
+
6539  virtual void FreeAtOffset(VkDeviceSize offset);
+
6540 
+
6541 private:
+
6542  /*
+
6543  There are two suballocation vectors, used in a ping-pong fashion.
+
6544  The one with index m_1stVectorIndex is called 1st.
+
6545  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
+
6546  2nd can be non-empty only when 1st is not empty.
+
6547  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
+
6548  */
+
6549  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
+
6550 
+
6551  enum SECOND_VECTOR_MODE
+
6552  {
+
6553  SECOND_VECTOR_EMPTY,
+
6554  /*
+
6555  Suballocations in 2nd vector are created later than the ones in 1st, but they
+
6556  all have smaller offset.
+
6557  */
+
6558  SECOND_VECTOR_RING_BUFFER,
+
6559  /*
+
6560  Suballocations in 2nd vector are the upper side of a double stack.
+
6561  They all have offsets higher than those in 1st vector.
+
6562  Top of this stack means smaller offsets, but higher indices in this vector.
+
6563  */
+
6564  SECOND_VECTOR_DOUBLE_STACK,
+
6565  };
+
6566 
+
6567  VkDeviceSize m_SumFreeSize;
+
6568  SuballocationVectorType m_Suballocations0, m_Suballocations1;
+
6569  uint32_t m_1stVectorIndex;
+
6570  SECOND_VECTOR_MODE m_2ndVectorMode;
+
6571 
+
6572  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
+
6573  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
+
6574  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
+
6575  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
+
6576 
+
6577  // Number of items in 1st vector with hAllocation = null at the beginning.
+
6578  size_t m_1stNullItemsBeginCount;
+
6579  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
+
6580  size_t m_1stNullItemsMiddleCount;
+
6581  // Number of items in 2nd vector with hAllocation = null.
+
6582  size_t m_2ndNullItemsCount;
6583 
-
6584 /*
-
6585 - GetSize() is the original size of the allocated memory block.
-
6586 - m_UsableSize is this size aligned down to a power of two.
-
6587  All allocations and calculations happen relative to m_UsableSize.
-
6588 - GetUnusableSize() is the difference between them.
-
6589  It is reported as a separate, unused range, not available for allocations.
-
6590 
-
6591 Node at level 0 has size = m_UsableSize.
-
6592 Each successive level contains nodes half the size of those on the previous level.
-
6593 m_LevelCount is the maximum number of levels to use in the current object.
-
6594 */
-
6595 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
-
6596 {
-
6597  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
-
6598 public:
-
6599  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
-
6600  virtual ~VmaBlockMetadata_Buddy();
-
6601  virtual void Init(VkDeviceSize size);
-
6602 
-
6603  virtual bool Validate() const;
-
6604  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
-
6605  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
-
6606  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
-
6607  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
+
6584  bool ShouldCompact1st() const;
+
6585  void CleanupAfterFree();
+
6586 
+
6587  bool CreateAllocationRequest_LowerAddress(
+
6588  uint32_t currentFrameIndex,
+
6589  uint32_t frameInUseCount,
+
6590  VkDeviceSize bufferImageGranularity,
+
6591  VkDeviceSize allocSize,
+
6592  VkDeviceSize allocAlignment,
+
6593  VmaSuballocationType allocType,
+
6594  bool canMakeOtherLost,
+
6595  uint32_t strategy,
+
6596  VmaAllocationRequest* pAllocationRequest);
+
6597  bool CreateAllocationRequest_UpperAddress(
+
6598  uint32_t currentFrameIndex,
+
6599  uint32_t frameInUseCount,
+
6600  VkDeviceSize bufferImageGranularity,
+
6601  VkDeviceSize allocSize,
+
6602  VkDeviceSize allocAlignment,
+
6603  VmaSuballocationType allocType,
+
6604  bool canMakeOtherLost,
+
6605  uint32_t strategy,
+
6606  VmaAllocationRequest* pAllocationRequest);
+
6607 };
6608 
-
6609  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
-
6610  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
-
6611 
-
6612 #if VMA_STATS_STRING_ENABLED
-
6613  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
-
6614 #endif
-
6615 
-
6616  virtual bool CreateAllocationRequest(
-
6617  uint32_t currentFrameIndex,
-
6618  uint32_t frameInUseCount,
-
6619  VkDeviceSize bufferImageGranularity,
-
6620  VkDeviceSize allocSize,
-
6621  VkDeviceSize allocAlignment,
-
6622  bool upperAddress,
-
6623  VmaSuballocationType allocType,
-
6624  bool canMakeOtherLost,
-
6625  uint32_t strategy,
-
6626  VmaAllocationRequest* pAllocationRequest);
+
6609 /*
+
6610 - GetSize() is the original size of allocated memory block.
+
6611 - m_UsableSize is this size aligned down to a power of two.
+
6612  All allocations and calculations happen relative to m_UsableSize.
+
6613 - GetUnusableSize() is the difference between them.
+
6614  It is reported as a separate, unused range, not available for allocations.
+
6615 
+
6616 Node at level 0 has size = m_UsableSize.
+
6617 Each successive level contains nodes half the size of those on the previous level.
+
6618 m_LevelCount is the maximum number of levels to use in the current object.
+
6619 */
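For a concrete instance of these definitions, consider a 100 MiB block (a minimal sketch; PrevPow2 is a local helper written for this example, not a library function):

    #include <cassert>
    #include <cstdint>

    static uint64_t PrevPow2(uint64_t v) // round down to a power of two
    {
        uint64_t p = 1;
        while(p * 2 <= v) p *= 2;
        return p;
    }

    int main()
    {
        const uint64_t blockSize  = 100ull * 1024 * 1024;    // GetSize():        100 MiB
        const uint64_t usableSize = PrevPow2(blockSize);     // m_UsableSize:      64 MiB
        const uint64_t unusable   = blockSize - usableSize;  // GetUnusableSize(): 36 MiB
        assert(unusable == 36ull * 1024 * 1024);

        // Node sizes halve at each level, i.e. LevelToNodeSize(level) = usableSize >> level.
        assert((usableSize >> 0) == 64ull * 1024 * 1024);    // level 0: whole usable block
        assert((usableSize >> 1) == 32ull * 1024 * 1024);    // level 1
        assert((usableSize >> 2) == 16ull * 1024 * 1024);    // level 2
        return 0;
    }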
+
6620 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
+
6621 {
+
6622  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
+
6623 public:
+
6624  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
+
6625  virtual ~VmaBlockMetadata_Buddy();
+
6626  virtual void Init(VkDeviceSize size);
6627 
-
6628  virtual bool MakeRequestedAllocationsLost(
-
6629  uint32_t currentFrameIndex,
-
6630  uint32_t frameInUseCount,
-
6631  VmaAllocationRequest* pAllocationRequest);
-
6632 
-
6633  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
-
6634 
-
6635  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
+
6628  virtual bool Validate() const;
+
6629  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
+
6630  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
+
6631  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
+
6632  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
+
6633 
+
6634  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
+
6635  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
6636 
-
6637  virtual void Alloc(
-
6638  const VmaAllocationRequest& request,
-
6639  VmaSuballocationType type,
-
6640  VkDeviceSize allocSize,
-
6641  VmaAllocation hAllocation);
-
6642 
-
6643  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
-
6644  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
-
6645 
-
6646 private:
-
6647  static const VkDeviceSize MIN_NODE_SIZE = 32;
-
6648  static const size_t MAX_LEVELS = 30;
-
6649 
-
6650  struct ValidationContext
-
6651  {
-
6652  size_t calculatedAllocationCount;
-
6653  size_t calculatedFreeCount;
-
6654  VkDeviceSize calculatedSumFreeSize;
-
6655 
-
6656  ValidationContext() :
-
6657  calculatedAllocationCount(0),
-
6658  calculatedFreeCount(0),
-
6659  calculatedSumFreeSize(0) { }
-
6660  };
+
6637 #if VMA_STATS_STRING_ENABLED
+
6638  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
+
6639 #endif
+
6640 
+
6641  virtual bool CreateAllocationRequest(
+
6642  uint32_t currentFrameIndex,
+
6643  uint32_t frameInUseCount,
+
6644  VkDeviceSize bufferImageGranularity,
+
6645  VkDeviceSize allocSize,
+
6646  VkDeviceSize allocAlignment,
+
6647  bool upperAddress,
+
6648  VmaSuballocationType allocType,
+
6649  bool canMakeOtherLost,
+
6650  uint32_t strategy,
+
6651  VmaAllocationRequest* pAllocationRequest);
+
6652 
+
6653  virtual bool MakeRequestedAllocationsLost(
+
6654  uint32_t currentFrameIndex,
+
6655  uint32_t frameInUseCount,
+
6656  VmaAllocationRequest* pAllocationRequest);
+
6657 
+
6658  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
+
6659 
+
6660  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
6661 
-
6662  struct Node
-
6663  {
-
6664  VkDeviceSize offset;
-
6665  enum TYPE
-
6666  {
-
6667  TYPE_FREE,
-
6668  TYPE_ALLOCATION,
-
6669  TYPE_SPLIT,
-
6670  TYPE_COUNT
-
6671  } type;
-
6672  Node* parent;
-
6673  Node* buddy;
+
6662  virtual void Alloc(
+
6663  const VmaAllocationRequest& request,
+
6664  VmaSuballocationType type,
+
6665  VkDeviceSize allocSize,
+
6666  VmaAllocation hAllocation);
+
6667 
+
6668  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
+
6669  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
+
6670 
+
6671 private:
+
6672  static const VkDeviceSize MIN_NODE_SIZE = 32;
+
6673  static const size_t MAX_LEVELS = 30;
6674 
-
6675  union
-
6676  {
-
6677  struct
-
6678  {
-
6679  Node* prev;
-
6680  Node* next;
-
6681  } free;
-
6682  struct
-
6683  {
-
6684  VmaAllocation alloc;
-
6685  } allocation;
-
6686  struct
-
6687  {
-
6688  Node* leftChild;
-
6689  } split;
-
6690  };
-
6691  };
-
6692 
-
6693  // Size of the memory block aligned down to a power of two.
-
6694  VkDeviceSize m_UsableSize;
-
6695  uint32_t m_LevelCount;
-
6696 
-
6697  Node* m_Root;
-
6698  struct {
-
6699  Node* front;
-
6700  Node* back;
-
6701  } m_FreeList[MAX_LEVELS];
-
6702  // Number of nodes in the tree with type == TYPE_ALLOCATION.
-
6703  size_t m_AllocationCount;
-
6704  // Number of nodes in the tree with type == TYPE_FREE.
-
6705  size_t m_FreeCount;
-
6706  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
-
6707  VkDeviceSize m_SumFreeSize;
-
6708 
-
6709  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
-
6710  void DeleteNode(Node* node);
-
6711  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
-
6712  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
-
6713  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
-
6714  // Alloc passed just for validation. Can be null.
-
6715  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
-
6716  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
-
6717  // Adds node to the front of FreeList at given level.
-
6718  // node->type must be FREE.
-
6719  // node->free.prev, next can be undefined.
-
6720  void AddToFreeListFront(uint32_t level, Node* node);
-
6721  // Removes node from FreeList at given level.
-
6722  // node->type must be FREE.
-
6723  // node->free.prev, next stay untouched.
-
6724  void RemoveFromFreeList(uint32_t level, Node* node);
-
6725 
-
6726 #if VMA_STATS_STRING_ENABLED
-
6727  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
-
6728 #endif
-
6729 };
-
6730 
-
6731 /*
-
6732 Represents a single block of device memory (`VkDeviceMemory`) with all the
-
6733 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
-
6734 
-
6735 Thread-safety: This class must be externally synchronized.
-
6736 */
-
6737 class VmaDeviceMemoryBlock
-
6738 {
-
6739  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
-
6740 public:
-
6741  VmaBlockMetadata* m_pMetadata;
-
6742 
-
6743  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
-
6744 
-
6745  ~VmaDeviceMemoryBlock()
-
6746  {
-
6747  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
-
6748  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
-
6749  }
+
6675  struct ValidationContext
+
6676  {
+
6677  size_t calculatedAllocationCount;
+
6678  size_t calculatedFreeCount;
+
6679  VkDeviceSize calculatedSumFreeSize;
+
6680 
+
6681  ValidationContext() :
+
6682  calculatedAllocationCount(0),
+
6683  calculatedFreeCount(0),
+
6684  calculatedSumFreeSize(0) { }
+
6685  };
+
6686 
+
6687  struct Node
+
6688  {
+
6689  VkDeviceSize offset;
+
6690  enum TYPE
+
6691  {
+
6692  TYPE_FREE,
+
6693  TYPE_ALLOCATION,
+
6694  TYPE_SPLIT,
+
6695  TYPE_COUNT
+
6696  } type;
+
6697  Node* parent;
+
6698  Node* buddy;
+
6699 
+
6700  union
+
6701  {
+
6702  struct
+
6703  {
+
6704  Node* prev;
+
6705  Node* next;
+
6706  } free;
+
6707  struct
+
6708  {
+
6709  VmaAllocation alloc;
+
6710  } allocation;
+
6711  struct
+
6712  {
+
6713  Node* leftChild;
+
6714  } split;
+
6715  };
+
6716  };
+
6717 
+
6718  // Size of the memory block aligned down to a power of two.
+
6719  VkDeviceSize m_UsableSize;
+
6720  uint32_t m_LevelCount;
+
6721 
+
6722  Node* m_Root;
+
6723  struct {
+
6724  Node* front;
+
6725  Node* back;
+
6726  } m_FreeList[MAX_LEVELS];
+
6727  // Number of nodes in the tree with type == TYPE_ALLOCATION.
+
6728  size_t m_AllocationCount;
+
6729  // Number of nodes in the tree with type == TYPE_FREE.
+
6730  size_t m_FreeCount;
+
6731  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
+
6732  VkDeviceSize m_SumFreeSize;
+
6733 
+
6734  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
+
6735  void DeleteNode(Node* node);
+
6736  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
+
6737  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
+
6738  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
+
6739  // Alloc passed just for validation. Can be null.
+
6740  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
+
6741  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
+
6742  // Adds node to the front of FreeList at given level.
+
6743  // node->type must be FREE.
+
6744  // node->free.prev, next can be undefined.
+
6745  void AddToFreeListFront(uint32_t level, Node* node);
+
6746  // Removes node from FreeList at given level.
+
6747  // node->type must be FREE.
+
6748  // node->free.prev, next stay untouched.
+
6749  void RemoveFromFreeList(uint32_t level, Node* node);
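The comments above describe standard intrusive doubly-linked-list operations on the per-level free lists. A simplified standalone sketch with stand-in types (no Node union or type checks):

    struct FreeNode { FreeNode* prev; FreeNode* next; };
    struct FreeList { FreeNode* front = nullptr; FreeNode* back = nullptr; };

    void AddToFreeListFrontSketch(FreeList& list, FreeNode* node)
    {
        node->prev = nullptr;              // prev/next may be undefined on entry
        node->next = list.front;
        if(list.front != nullptr) list.front->prev = node;
        else                      list.back = node; // list was empty
        list.front = node;
    }

    void RemoveFromFreeListSketch(FreeList& list, FreeNode* node)
    {
        if(node->prev != nullptr) node->prev->next = node->next;
        else                      list.front = node->next;
        if(node->next != nullptr) node->next->prev = node->prev;
        else                      list.back = node->prev;
        // node->prev / node->next deliberately left untouched, matching
        // the "stay untouched" note above.
    }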
6750 
-
6751  // Always call after construction.
-
6752  void Init(
-
6753  VmaAllocator hAllocator,
-
6754  VmaPool hParentPool,
-
6755  uint32_t newMemoryTypeIndex,
-
6756  VkDeviceMemory newMemory,
-
6757  VkDeviceSize newSize,
-
6758  uint32_t id,
-
6759  uint32_t algorithm);
-
6760  // Always call before destruction.
-
6761  void Destroy(VmaAllocator allocator);
-
6762 
-
6763  VmaPool GetParentPool() const { return m_hParentPool; }
-
6764  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
-
6765  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
-
6766  uint32_t GetId() const { return m_Id; }
-
6767  void* GetMappedData() const { return m_pMappedData; }
-
6768 
-
6769  // Validates all data structures inside this object. If not valid, returns false.
-
6770  bool Validate() const;
-
6771 
-
6772  VkResult CheckCorruption(VmaAllocator hAllocator);
-
6773 
-
6774  // ppData can be null.
-
6775  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
-
6776  void Unmap(VmaAllocator hAllocator, uint32_t count);
-
6777 
-
6778  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
-
6779  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
-
6780 
-
6781  VkResult BindBufferMemory(
-
6782  const VmaAllocator hAllocator,
-
6783  const VmaAllocation hAllocation,
-
6784  VkDeviceSize allocationLocalOffset,
-
6785  VkBuffer hBuffer,
-
6786  const void* pNext);
-
6787  VkResult BindImageMemory(
-
6788  const VmaAllocator hAllocator,
-
6789  const VmaAllocation hAllocation,
-
6790  VkDeviceSize allocationLocalOffset,
-
6791  VkImage hImage,
-
6792  const void* pNext);
+
6751 #if VMA_STATS_STRING_ENABLED
+
6752  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
+
6753 #endif
+
6754 };
+
6755 
+
6756 /*
+
6757 Represents a single block of device memory (`VkDeviceMemory`) with all the
+
6758 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
+
6759 
+
6760 Thread-safety: This class must be externally synchronized.
+
6761 */
+
6762 class VmaDeviceMemoryBlock
+
6763 {
+
6764  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
+
6765 public:
+
6766  VmaBlockMetadata* m_pMetadata;
+
6767 
+
6768  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
+
6769 
+
6770  ~VmaDeviceMemoryBlock()
+
6771  {
+
6772  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
+
6773  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
+
6774  }
+
6775 
+
6776  // Always call after construction.
+
6777  void Init(
+
6778  VmaAllocator hAllocator,
+
6779  VmaPool hParentPool,
+
6780  uint32_t newMemoryTypeIndex,
+
6781  VkDeviceMemory newMemory,
+
6782  VkDeviceSize newSize,
+
6783  uint32_t id,
+
6784  uint32_t algorithm);
+
6785  // Always call before destruction.
+
6786  void Destroy(VmaAllocator allocator);
+
6787 
+
6788  VmaPool GetParentPool() const { return m_hParentPool; }
+
6789  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
+
6790  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
+
6791  uint32_t GetId() const { return m_Id; }
+
6792  void* GetMappedData() const { return m_pMappedData; }
6793 
-
6794 private:
-
6795  VmaPool m_hParentPool; // VK_NULL_HANDLE if this block does not belong to a custom pool.
-
6796  uint32_t m_MemoryTypeIndex;
-
6797  uint32_t m_Id;
-
6798  VkDeviceMemory m_hMemory;
-
6799 
-
6800  /*
-
6801  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
-
6802  Also protects m_MapCount, m_pMappedData.
-
6803  Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex.
-
6804  */
-
6805  VMA_MUTEX m_Mutex;
-
6806  uint32_t m_MapCount;
-
6807  void* m_pMappedData;
-
6808 };
-
6809 
-
6810 struct VmaPointerLess
-
6811 {
-
6812  bool operator()(const void* lhs, const void* rhs) const
-
6813  {
-
6814  return lhs < rhs;
-
6815  }
-
6816 };
-
6817 
-
6818 struct VmaDefragmentationMove
-
6819 {
-
6820  size_t srcBlockIndex;
-
6821  size_t dstBlockIndex;
-
6822  VkDeviceSize srcOffset;
-
6823  VkDeviceSize dstOffset;
-
6824  VkDeviceSize size;
-
6825  VmaAllocation hAllocation;
-
6826  VmaDeviceMemoryBlock* pSrcBlock;
-
6827  VmaDeviceMemoryBlock* pDstBlock;
-
6828 };
-
6829 
-
6830 class VmaDefragmentationAlgorithm;
-
6831 
-
6832 /*
-
6833 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
-
6834 Vulkan memory type.
-
6835 
-
6836 Synchronized internally with a mutex.
-
6837 */
-
6838 struct VmaBlockVector
-
6839 {
-
6840  VMA_CLASS_NO_COPY(VmaBlockVector)
-
6841 public:
-
6842  VmaBlockVector(
-
6843  VmaAllocator hAllocator,
-
6844  VmaPool hParentPool,
-
6845  uint32_t memoryTypeIndex,
-
6846  VkDeviceSize preferredBlockSize,
-
6847  size_t minBlockCount,
-
6848  size_t maxBlockCount,
-
6849  VkDeviceSize bufferImageGranularity,
-
6850  uint32_t frameInUseCount,
-
6851  bool explicitBlockSize,
-
6852  uint32_t algorithm);
-
6853  ~VmaBlockVector();
+
6794  // Validates all data structures inside this object. If not valid, returns false.
+
6795  bool Validate() const;
+
6796 
+
6797  VkResult CheckCorruption(VmaAllocator hAllocator);
+
6798 
+
6799  // ppData can be null.
+
6800  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
+
6801  void Unmap(VmaAllocator hAllocator, uint32_t count);
+
6802 
+
6803  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
+
6804  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
+
6805 
+
6806  VkResult BindBufferMemory(
+
6807  const VmaAllocator hAllocator,
+
6808  const VmaAllocation hAllocation,
+
6809  VkDeviceSize allocationLocalOffset,
+
6810  VkBuffer hBuffer,
+
6811  const void* pNext);
+
6812  VkResult BindImageMemory(
+
6813  const VmaAllocator hAllocator,
+
6814  const VmaAllocation hAllocation,
+
6815  VkDeviceSize allocationLocalOffset,
+
6816  VkImage hImage,
+
6817  const void* pNext);
+
6818 
+
6819 private:
+
6820  VmaPool m_hParentPool; // VK_NULL_HANDLE if this block does not belong to a custom pool.
+
6821  uint32_t m_MemoryTypeIndex;
+
6822  uint32_t m_Id;
+
6823  VkDeviceMemory m_hMemory;
+
6824 
+
6825  /*
+
6826  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
+
6827  Also protects m_MapCount, m_pMappedData.
+
6828  Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex.
+
6829  */
+
6830  VMA_MUTEX m_Mutex;
+
6831  uint32_t m_MapCount;
+
6832  void* m_pMappedData;
+
6833 };
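Given m_MapCount and m_pMappedData, mapping is plausibly reference-counted: only the first Map() and the last Unmap() touch the Vulkan API. A simplified single-threaded sketch of that idea, not the class's actual implementation (the real code also holds m_Mutex and dispatches through the allocator's function pointers):

    #include <vulkan/vulkan.h>

    VkResult MapRefCounted(VkDevice device, VkDeviceMemory memory,
                           uint32_t& mapCount, void*& mappedData, void** ppData)
    {
        if(mapCount == 0) // first mapping actually calls Vulkan
        {
            const VkResult res = vkMapMemory(device, memory, 0, VK_WHOLE_SIZE, 0, &mappedData);
            if(res != VK_SUCCESS) return res;
        }
        ++mapCount;
        if(ppData != nullptr) *ppData = mappedData;
        return VK_SUCCESS;
    }

    void UnmapRefCounted(VkDevice device, VkDeviceMemory memory,
                         uint32_t& mapCount, void*& mappedData)
    {
        if(--mapCount == 0) // last unmap actually calls Vulkan
        {
            vkUnmapMemory(device, memory);
            mappedData = nullptr;
        }
    }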
+
6834 
+
6835 struct VmaPointerLess
+
6836 {
+
6837  bool operator()(const void* lhs, const void* rhs) const
+
6838  {
+
6839  return lhs < rhs;
+
6840  }
+
6841 };
+
6842 
+
6843 struct VmaDefragmentationMove
+
6844 {
+
6845  size_t srcBlockIndex;
+
6846  size_t dstBlockIndex;
+
6847  VkDeviceSize srcOffset;
+
6848  VkDeviceSize dstOffset;
+
6849  VkDeviceSize size;
+
6850  VmaAllocation hAllocation;
+
6851  VmaDeviceMemoryBlock* pSrcBlock;
+
6852  VmaDeviceMemoryBlock* pDstBlock;
+
6853 };
6854 
-
6855  VkResult CreateMinBlocks();
+
6855 class VmaDefragmentationAlgorithm;
6856 
-
6857  VmaAllocator GetAllocator() const { return m_hAllocator; }
-
6858  VmaPool GetParentPool() const { return m_hParentPool; }
-
6859  bool IsCustomPool() const { return m_hParentPool != VMA_NULL; }
-
6860  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
-
6861  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
-
6862  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
-
6863  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
-
6864  uint32_t GetAlgorithm() const { return m_Algorithm; }
-
6865 
-
6866  void GetPoolStats(VmaPoolStats* pStats);
-
6867 
-
6868  bool IsEmpty();
-
6869  bool IsCorruptionDetectionEnabled() const;
-
6870 
-
6871  VkResult Allocate(
-
6872  uint32_t currentFrameIndex,
-
6873  VkDeviceSize size,
-
6874  VkDeviceSize alignment,
-
6875  const VmaAllocationCreateInfo& createInfo,
-
6876  VmaSuballocationType suballocType,
-
6877  size_t allocationCount,
-
6878  VmaAllocation* pAllocations);
+
6857 /*
+
6858 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
+
6859 Vulkan memory type.
+
6860 
+
6861 Synchronized internally with a mutex.
+
6862 */
+
6863 struct VmaBlockVector
+
6864 {
+
6865  VMA_CLASS_NO_COPY(VmaBlockVector)
+
6866 public:
+
6867  VmaBlockVector(
+
6868  VmaAllocator hAllocator,
+
6869  VmaPool hParentPool,
+
6870  uint32_t memoryTypeIndex,
+
6871  VkDeviceSize preferredBlockSize,
+
6872  size_t minBlockCount,
+
6873  size_t maxBlockCount,
+
6874  VkDeviceSize bufferImageGranularity,
+
6875  uint32_t frameInUseCount,
+
6876  bool explicitBlockSize,
+
6877  uint32_t algorithm);
+
6878  ~VmaBlockVector();
6879 
-
6880  void Free(const VmaAllocation hAllocation);
+
6880  VkResult CreateMinBlocks();
6881 
-
6882  // Adds statistics of this BlockVector to pStats.
-
6883  void AddStats(VmaStats* pStats);
-
6884 
-
6885 #if VMA_STATS_STRING_ENABLED
-
6886  void PrintDetailedMap(class VmaJsonWriter& json);
-
6887 #endif
-
6888 
-
6889  void MakePoolAllocationsLost(
-
6890  uint32_t currentFrameIndex,
-
6891  size_t* pLostAllocationCount);
-
6892  VkResult CheckCorruption();
-
6893 
-
6894  // Saves results in pCtx->res.
-
6895  void Defragment(
-
6896  class VmaBlockVectorDefragmentationContext* pCtx,
- -
6898  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
-
6899  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
-
6900  VkCommandBuffer commandBuffer);
-
6901  void DefragmentationEnd(
-
6902  class VmaBlockVectorDefragmentationContext* pCtx,
-
6903  uint32_t flags,
-
6904  VmaDefragmentationStats* pStats);
-
6905 
-
6906  uint32_t ProcessDefragmentations(
-
6907  class VmaBlockVectorDefragmentationContext *pCtx,
-
6908  VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves);
+
6882  VmaAllocator GetAllocator() const { return m_hAllocator; }
+
6883  VmaPool GetParentPool() const { return m_hParentPool; }
+
6884  bool IsCustomPool() const { return m_hParentPool != VMA_NULL; }
+
6885  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
+
6886  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
+
6887  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
+
6888  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
+
6889  uint32_t GetAlgorithm() const { return m_Algorithm; }
+
6890 
+
6891  void GetPoolStats(VmaPoolStats* pStats);
+
6892 
+
6893  bool IsEmpty();
+
6894  bool IsCorruptionDetectionEnabled() const;
+
6895 
+
6896  VkResult Allocate(
+
6897  uint32_t currentFrameIndex,
+
6898  VkDeviceSize size,
+
6899  VkDeviceSize alignment,
+
6900  const VmaAllocationCreateInfo& createInfo,
+
6901  VmaSuballocationType suballocType,
+
6902  size_t allocationCount,
+
6903  VmaAllocation* pAllocations);
+
6904 
+
6905  void Free(const VmaAllocation hAllocation);
+
6906 
+
6907  // Adds statistics of this BlockVector to pStats.
+
6908  void AddStats(VmaStats* pStats);
6909 
-
6910  void CommitDefragmentations(
-
6911  class VmaBlockVectorDefragmentationContext *pCtx,
-
6912  VmaDefragmentationStats* pStats);
+
6910 #if VMA_STATS_STRING_ENABLED
+
6911  void PrintDetailedMap(class VmaJsonWriter& json);
+
6912 #endif
6913 
-
6915  // To be used only while the m_Mutex is locked. Used during defragmentation.
-
6916 
-
6917  size_t GetBlockCount() const { return m_Blocks.size(); }
-
6918  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
-
6919  size_t CalcAllocationCount() const;
-
6920  bool IsBufferImageGranularityConflictPossible() const;
-
6921 
-
6922 private:
-
6923  friend class VmaDefragmentationAlgorithm_Generic;
-
6924 
-
6925  const VmaAllocator m_hAllocator;
-
6926  const VmaPool m_hParentPool;
-
6927  const uint32_t m_MemoryTypeIndex;
-
6928  const VkDeviceSize m_PreferredBlockSize;
-
6929  const size_t m_MinBlockCount;
-
6930  const size_t m_MaxBlockCount;
-
6931  const VkDeviceSize m_BufferImageGranularity;
-
6932  const uint32_t m_FrameInUseCount;
-
6933  const bool m_ExplicitBlockSize;
-
6934  const uint32_t m_Algorithm;
-
6935  VMA_RW_MUTEX m_Mutex;
-
6936 
-
6937  /* There can be at most one memory block that is completely empty (except when minBlockCount > 0) -
-
6938  a hysteresis to avoid the pessimistic case of alternating creation and destruction of a VkDeviceMemory. */
-
6939  bool m_HasEmptyBlock;
-
6940  // Incrementally sorted by sumFreeSize, ascending.
-
6941  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
-
6942  uint32_t m_NextBlockId;
-
6943 
-
6944  VkDeviceSize CalcMaxBlockSize() const;
-
6945 
-
6946  // Finds and removes given block from vector.
-
6947  void Remove(VmaDeviceMemoryBlock* pBlock);
-
6948 
-
6949  // Performs a single step in sorting m_Blocks. They may not be fully sorted
-
6950  // after this call.
-
6951  void IncrementallySortBlocks();
-
6952 
-
6953  VkResult AllocatePage(
-
6954  uint32_t currentFrameIndex,
-
6955  VkDeviceSize size,
-
6956  VkDeviceSize alignment,
-
6957  const VmaAllocationCreateInfo& createInfo,
-
6958  VmaSuballocationType suballocType,
-
6959  VmaAllocation* pAllocation);
-
6960 
-
6961  // To be used only without CAN_MAKE_OTHER_LOST flag.
-
6962  VkResult AllocateFromBlock(
-
6963  VmaDeviceMemoryBlock* pBlock,
-
6964  uint32_t currentFrameIndex,
-
6965  VkDeviceSize size,
-
6966  VkDeviceSize alignment,
-
6967  VmaAllocationCreateFlags allocFlags,
-
6968  void* pUserData,
-
6969  VmaSuballocationType suballocType,
-
6970  uint32_t strategy,
-
6971  VmaAllocation* pAllocation);
-
6972 
-
6973  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
-
6974 
-
6975  // Saves result to pCtx->res.
-
6976  void ApplyDefragmentationMovesCpu(
-
6977  class VmaBlockVectorDefragmentationContext* pDefragCtx,
-
6978  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
-
6979  // Saves result to pCtx->res.
-
6980  void ApplyDefragmentationMovesGpu(
-
6981  class VmaBlockVectorDefragmentationContext* pDefragCtx,
-
6982  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
-
6983  VkCommandBuffer commandBuffer);
-
6984 
-
6985  /*
-
6986  Used during defragmentation. pDefragmentationStats is optional. It is an in/out
-
6987  parameter, updated with new data.
-
6988  */
-
6989  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
-
6990 
-
6991  void UpdateHasEmptyBlock();
-
6992 };
-
6993 
-
6994 struct VmaPool_T
-
6995 {
-
6996  VMA_CLASS_NO_COPY(VmaPool_T)
-
6997 public:
-
6998  VmaBlockVector m_BlockVector;
+
6914  void MakePoolAllocationsLost(
+
6915  uint32_t currentFrameIndex,
+
6916  size_t* pLostAllocationCount);
+
6917  VkResult CheckCorruption();
+
6918 
+
6919  // Saves results in pCtx->res.
+
6920  void Defragment(
+
6921  class VmaBlockVectorDefragmentationContext* pCtx,
+ +
6923  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
+
6924  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
+
6925  VkCommandBuffer commandBuffer);
+
6926  void DefragmentationEnd(
+
6927  class VmaBlockVectorDefragmentationContext* pCtx,
+
6928  uint32_t flags,
+
6929  VmaDefragmentationStats* pStats);
+
6930 
+
6931  uint32_t ProcessDefragmentations(
+
6932  class VmaBlockVectorDefragmentationContext *pCtx,
+
6933  VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves);
+
6934 
+
6935  void CommitDefragmentations(
+
6936  class VmaBlockVectorDefragmentationContext *pCtx,
+
6937  VmaDefragmentationStats* pStats);
+
6938 
+
6940  // To be used only while the m_Mutex is locked. Used during defragmentation.
+
6941 
+
6942  size_t GetBlockCount() const { return m_Blocks.size(); }
+
6943  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
+
6944  size_t CalcAllocationCount() const;
+
6945  bool IsBufferImageGranularityConflictPossible() const;
+
6946 
+
6947 private:
+
6948  friend class VmaDefragmentationAlgorithm_Generic;
+
6949 
+
6950  const VmaAllocator m_hAllocator;
+
6951  const VmaPool m_hParentPool;
+
6952  const uint32_t m_MemoryTypeIndex;
+
6953  const VkDeviceSize m_PreferredBlockSize;
+
6954  const size_t m_MinBlockCount;
+
6955  const size_t m_MaxBlockCount;
+
6956  const VkDeviceSize m_BufferImageGranularity;
+
6957  const uint32_t m_FrameInUseCount;
+
6958  const bool m_ExplicitBlockSize;
+
6959  const uint32_t m_Algorithm;
+
6960  VMA_RW_MUTEX m_Mutex;
+
6961 
+
6962  /* There can be at most one memory block that is completely empty (except when minBlockCount > 0) -
+
6963  a hysteresis to avoid the pessimistic case of alternating creation and destruction of a VkDeviceMemory. */
+
6964  bool m_HasEmptyBlock;
+
6965  // Incrementally sorted by sumFreeSize, ascending.
+
6966  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
+
6967  uint32_t m_NextBlockId;
+
6968 
+
6969  VkDeviceSize CalcMaxBlockSize() const;
+
6970 
+
6971  // Finds and removes given block from vector.
+
6972  void Remove(VmaDeviceMemoryBlock* pBlock);
+
6973 
+
6974  // Performs a single step in sorting m_Blocks. They may not be fully sorted
+
6975  // after this call.
+
6976  void IncrementallySortBlocks();
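One plausible reading of a "single step" is to fix at most one out-of-order adjacent pair per call, so that ascending order by sumFreeSize emerges over repeated calls. A sketch on plain values, under that assumption (illustrative, not the library's code):

    #include <cstddef>
    #include <utility>
    #include <vector>

    // One incremental sorting step over per-block sumFreeSize values.
    void IncrementalSortStep(std::vector<unsigned long long>& sumFreeSizes)
    {
        for(size_t i = 1; i < sumFreeSizes.size(); ++i)
        {
            if(sumFreeSizes[i - 1] > sumFreeSizes[i])
            {
                std::swap(sumFreeSizes[i - 1], sumFreeSizes[i]);
                return; // at most one swap per call
            }
        }
    }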
+
6977 
+
6978  VkResult AllocatePage(
+
6979  uint32_t currentFrameIndex,
+
6980  VkDeviceSize size,
+
6981  VkDeviceSize alignment,
+
6982  const VmaAllocationCreateInfo& createInfo,
+
6983  VmaSuballocationType suballocType,
+
6984  VmaAllocation* pAllocation);
+
6985 
+
6986  // To be used only without CAN_MAKE_OTHER_LOST flag.
+
6987  VkResult AllocateFromBlock(
+
6988  VmaDeviceMemoryBlock* pBlock,
+
6989  uint32_t currentFrameIndex,
+
6990  VkDeviceSize size,
+
6991  VkDeviceSize alignment,
+
6992  VmaAllocationCreateFlags allocFlags,
+
6993  void* pUserData,
+
6994  VmaSuballocationType suballocType,
+
6995  uint32_t strategy,
+
6996  VmaAllocation* pAllocation);
+
6997 
+
6998  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
6999 
-
7000  VmaPool_T(
-
7001  VmaAllocator hAllocator,
-
7002  const VmaPoolCreateInfo& createInfo,
-
7003  VkDeviceSize preferredBlockSize);
-
7004  ~VmaPool_T();
-
7005 
-
7006  uint32_t GetId() const { return m_Id; }
-
7007  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
-
7008 
-
7009  const char* GetName() const { return m_Name; }
-
7010  void SetName(const char* pName);
-
7011 
-
7012 #if VMA_STATS_STRING_ENABLED
-
7013  //void PrintDetailedMap(class VmaStringBuilder& sb);
-
7014 #endif
+
7000  // Saves result to pCtx->res.
+
7001  void ApplyDefragmentationMovesCpu(
+
7002  class VmaBlockVectorDefragmentationContext* pDefragCtx,
+
7003  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
+
7004  // Saves result to pCtx->res.
+
7005  void ApplyDefragmentationMovesGpu(
+
7006  class VmaBlockVectorDefragmentationContext* pDefragCtx,
+
7007  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
+
7008  VkCommandBuffer commandBuffer);
+
7009 
+
7010  /*
+
7011  Used during defragmentation. pDefragmentationStats is optional. It is an in/out
+
7012  parameter, updated with new data.
+
7013  */
+
7014  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
7015 
-
7016 private:
-
7017  uint32_t m_Id;
-
7018  char* m_Name;
-
7019 };
-
7020 
-
7021 /*
-
7022 Performs defragmentation:
-
7023 
-
7024 - Updates `pBlockVector->m_pMetadata`.
-
7025 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
-
7026 - Does not move actual data, only returns requested moves as `moves`.
-
7027 */
-
7028 class VmaDefragmentationAlgorithm
-
7029 {
-
7030  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
-
7031 public:
-
7032  VmaDefragmentationAlgorithm(
-
7033  VmaAllocator hAllocator,
-
7034  VmaBlockVector* pBlockVector,
-
7035  uint32_t currentFrameIndex) :
-
7036  m_hAllocator(hAllocator),
-
7037  m_pBlockVector(pBlockVector),
-
7038  m_CurrentFrameIndex(currentFrameIndex)
-
7039  {
-
7040  }
-
7041  virtual ~VmaDefragmentationAlgorithm()
-
7042  {
-
7043  }
-
7044 
-
7045  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
-
7046  virtual void AddAll() = 0;
-
7047 
-
7048  virtual VkResult Defragment(
-
7049  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
-
7050  VkDeviceSize maxBytesToMove,
-
7051  uint32_t maxAllocationsToMove,
-
7052  VmaDefragmentationFlags flags) = 0;
-
7053 
-
7054  virtual VkDeviceSize GetBytesMoved() const = 0;
-
7055  virtual uint32_t GetAllocationsMoved() const = 0;
-
7056 
-
7057 protected:
-
7058  VmaAllocator const m_hAllocator;
-
7059  VmaBlockVector* const m_pBlockVector;
-
7060  const uint32_t m_CurrentFrameIndex;
-
7061 
-
7062  struct AllocationInfo
-
7063  {
-
7064  VmaAllocation m_hAllocation;
-
7065  VkBool32* m_pChanged;
-
7066 
-
7067  AllocationInfo() :
-
7068  m_hAllocation(VK_NULL_HANDLE),
-
7069  m_pChanged(VMA_NULL)
-
7070  {
-
7071  }
-
7072  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
-
7073  m_hAllocation(hAlloc),
-
7074  m_pChanged(pChanged)
-
7075  {
-
7076  }
-
7077  };
-
7078 };
-
7079 
-
7080 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
-
7081 {
-
7082  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
-
7083 public:
-
7084  VmaDefragmentationAlgorithm_Generic(
-
7085  VmaAllocator hAllocator,
-
7086  VmaBlockVector* pBlockVector,
-
7087  uint32_t currentFrameIndex,
-
7088  bool overlappingMoveSupported);
-
7089  virtual ~VmaDefragmentationAlgorithm_Generic();
-
7090 
-
7091  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
-
7092  virtual void AddAll() { m_AllAllocations = true; }
-
7093 
-
7094  virtual VkResult Defragment(
-
7095  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
-
7096  VkDeviceSize maxBytesToMove,
-
7097  uint32_t maxAllocationsToMove,
-
7098  VmaDefragmentationFlags flags);
-
7099 
-
7100  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
-
7101  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
-
7102 
-
7103 private:
-
7104  uint32_t m_AllocationCount;
-
7105  bool m_AllAllocations;
-
7106 
-
7107  VkDeviceSize m_BytesMoved;
-
7108  uint32_t m_AllocationsMoved;
-
7109 
-
7110  struct AllocationInfoSizeGreater
-
7111  {
-
7112  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
-
7113  {
-
7114  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
-
7115  }
-
7116  };
-
7117 
-
7118  struct AllocationInfoOffsetGreater
-
7119  {
-
7120  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
-
7121  {
-
7122  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
-
7123  }
-
7124  };
-
7125 
-
7126  struct BlockInfo
-
7127  {
-
7128  size_t m_OriginalBlockIndex;
-
7129  VmaDeviceMemoryBlock* m_pBlock;
-
7130  bool m_HasNonMovableAllocations;
-
7131  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
-
7132 
-
7133  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
-
7134  m_OriginalBlockIndex(SIZE_MAX),
-
7135  m_pBlock(VMA_NULL),
-
7136  m_HasNonMovableAllocations(true),
-
7137  m_Allocations(pAllocationCallbacks)
-
7138  {
-
7139  }
-
7140 
-
7141  void CalcHasNonMovableAllocations()
-
7142  {
-
7143  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
-
7144  const size_t defragmentAllocCount = m_Allocations.size();
-
7145  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
-
7146  }
-
7147 
-
7148  void SortAllocationsBySizeDescending()
-
7149  {
-
7150  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
-
7151  }
-
7152 
-
7153  void SortAllocationsByOffsetDescending()
-
7154  {
-
7155  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
-
7156  }
-
7157  };
-
7158 
-
7159  struct BlockPointerLess
-
7160  {
-
7161  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
-
7162  {
-
7163  return pLhsBlockInfo->m_pBlock < pRhsBlock;
+
7016  void UpdateHasEmptyBlock();
+
7017 };
+
7018 
+
7019 struct VmaPool_T
+
7020 {
+
7021  VMA_CLASS_NO_COPY(VmaPool_T)
+
7022 public:
+
7023  VmaBlockVector m_BlockVector;
+
7024 
+
7025  VmaPool_T(
+
7026  VmaAllocator hAllocator,
+
7027  const VmaPoolCreateInfo& createInfo,
+
7028  VkDeviceSize preferredBlockSize);
+
7029  ~VmaPool_T();
+
7030 
+
7031  uint32_t GetId() const { return m_Id; }
+
7032  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
+
7033 
+
7034  const char* GetName() const { return m_Name; }
+
7035  void SetName(const char* pName);
+
7036 
+
7037 #if VMA_STATS_STRING_ENABLED
+
7038  //void PrintDetailedMap(class VmaStringBuilder& sb);
+
7039 #endif
+
7040 
+
7041 private:
+
7042  uint32_t m_Id;
+
7043  char* m_Name;
+
7044 };
+
7045 
+
7046 /*
+
7047 Performs defragmentation:
+
7048 
+
7049 - Updates `pBlockVector->m_pMetadata`.
+
7050 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
+
7051 - Does not move actual data, only returns requested moves as `moves`.
+
7052 */
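Because only moves are recorded, a caller drives the algorithm in two phases: first collect the requested moves (metadata only), then perform the actual copies. A hedged sketch of that contract, with Move and copyBytes as simplified stand-ins rather than the library's types:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct Move { size_t srcBlock, dstBlock; uint64_t srcOffset, dstOffset, size; };

    template<typename Algorithm, typename CopyFn>
    void RunDefragmentation(Algorithm& algorithm, CopyFn copyBytes,
                            uint64_t maxBytesToMove, uint32_t maxAllocationsToMove)
    {
        std::vector<Move> moves;
        // Phase 1: metadata is updated and moves are recorded; no data moves yet.
        algorithm.Defragment(moves, maxBytesToMove, maxAllocationsToMove);
        // Phase 2: the caller performs the copies (e.g. memcpy for host-visible
        // memory, or vkCmdCopyBuffer recorded into a command buffer).
        for(const Move& m : moves)
            copyBytes(m.srcBlock, m.srcOffset, m.dstBlock, m.dstOffset, m.size);
    }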
+
7053 class VmaDefragmentationAlgorithm
+
7054 {
+
7055  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
+
7056 public:
+
7057  VmaDefragmentationAlgorithm(
+
7058  VmaAllocator hAllocator,
+
7059  VmaBlockVector* pBlockVector,
+
7060  uint32_t currentFrameIndex) :
+
7061  m_hAllocator(hAllocator),
+
7062  m_pBlockVector(pBlockVector),
+
7063  m_CurrentFrameIndex(currentFrameIndex)
+
7064  {
+
7065  }
+
7066  virtual ~VmaDefragmentationAlgorithm()
+
7067  {
+
7068  }
+
7069 
+
7070  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
+
7071  virtual void AddAll() = 0;
+
7072 
+
7073  virtual VkResult Defragment(
+
7074  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
+
7075  VkDeviceSize maxBytesToMove,
+
7076  uint32_t maxAllocationsToMove,
+
7077  VmaDefragmentationFlags flags) = 0;
+
7078 
+
7079  virtual VkDeviceSize GetBytesMoved() const = 0;
+
7080  virtual uint32_t GetAllocationsMoved() const = 0;
+
7081 
+
7082 protected:
+
7083  VmaAllocator const m_hAllocator;
+
7084  VmaBlockVector* const m_pBlockVector;
+
7085  const uint32_t m_CurrentFrameIndex;
+
7086 
+
7087  struct AllocationInfo
+
7088  {
+
7089  VmaAllocation m_hAllocation;
+
7090  VkBool32* m_pChanged;
+
7091 
+
7092  AllocationInfo() :
+
7093  m_hAllocation(VK_NULL_HANDLE),
+
7094  m_pChanged(VMA_NULL)
+
7095  {
+
7096  }
+
7097  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
+
7098  m_hAllocation(hAlloc),
+
7099  m_pChanged(pChanged)
+
7100  {
+
7101  }
+
7102  };
+
7103 };
+
7104 
+
7105 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
+
7106 {
+
7107  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
+
7108 public:
+
7109  VmaDefragmentationAlgorithm_Generic(
+
7110  VmaAllocator hAllocator,
+
7111  VmaBlockVector* pBlockVector,
+
7112  uint32_t currentFrameIndex,
+
7113  bool overlappingMoveSupported);
+
7114  virtual ~VmaDefragmentationAlgorithm_Generic();
+
7115 
+
7116  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
+
7117  virtual void AddAll() { m_AllAllocations = true; }
+
7118 
+
7119  virtual VkResult Defragment(
+
7120  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
+
7121  VkDeviceSize maxBytesToMove,
+
7122  uint32_t maxAllocationsToMove,
+
7123  VmaDefragmentationFlags flags);
+
7124 
+
7125  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
+
7126  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
+
7127 
+
7128 private:
+
7129  uint32_t m_AllocationCount;
+
7130  bool m_AllAllocations;
+
7131 
+
7132  VkDeviceSize m_BytesMoved;
+
7133  uint32_t m_AllocationsMoved;
+
7134 
+
7135  struct AllocationInfoSizeGreater
+
7136  {
+
7137  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
+
7138  {
+
7139  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
+
7140  }
+
7141  };
+
7142 
+
7143  struct AllocationInfoOffsetGreater
+
7144  {
+
7145  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
+
7146  {
+
7147  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
+
7148  }
+
7149  };
+
7150 
+
7151  struct BlockInfo
+
7152  {
+
7153  size_t m_OriginalBlockIndex;
+
7154  VmaDeviceMemoryBlock* m_pBlock;
+
7155  bool m_HasNonMovableAllocations;
+
7156  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
+
7157 
+
7158  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
+
7159  m_OriginalBlockIndex(SIZE_MAX),
+
7160  m_pBlock(VMA_NULL),
+
7161  m_HasNonMovableAllocations(true),
+
7162  m_Allocations(pAllocationCallbacks)
+
7163  {
7164  }
-
7165  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
-
7166  {
-
7167  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
-
7168  }
-
7169  };
-
7170 
-
7171  // 1. Blocks with some non-movable allocations go first.
-
7172  // 2. Blocks with smaller sumFreeSize go first.
-
7173  struct BlockInfoCompareMoveDestination
-
7174  {
-
7175  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
-
7176  {
-
7177  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
-
7178  {
-
7179  return true;
-
7180  }
-
7181  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
-
7182  {
-
7183  return false;
-
7184  }
-
7185  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
-
7186  {
-
7187  return true;
-
7188  }
-
7189  return false;
-
7190  }
-
7191  };
-
7192 
-
7193  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
-
7194  BlockInfoVector m_Blocks;
+
7165 
+
7166  void CalcHasNonMovableAllocations()
+
7167  {
+
7168  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
+
7169  const size_t defragmentAllocCount = m_Allocations.size();
+
7170  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
+
7171  }
+
7172 
+
7173  void SortAllocationsBySizeDescending()
+
7174  {
+
7175  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
+
7176  }
+
7177 
+
7178  void SortAllocationsByOffsetDescending()
+
7179  {
+
7180  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
+
7181  }
+
7182  };
+
7183 
+
7184  struct BlockPointerLess
+
7185  {
+
7186  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
+
7187  {
+
7188  return pLhsBlockInfo->m_pBlock < pRhsBlock;
+
7189  }
+
7190  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
+
7191  {
+
7192  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
+
7193  }
+
7194  };
7195 
-
7196  VkResult DefragmentRound(
-
7197  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
-
7198  VkDeviceSize maxBytesToMove,
-
7199  uint32_t maxAllocationsToMove,
-
7200  bool freeOldAllocations);
-
7201 
-
7202  size_t CalcBlocksWithNonMovableCount() const;
-
7203 
-
7204  static bool MoveMakesSense(
-
7205  size_t dstBlockIndex, VkDeviceSize dstOffset,
-
7206  size_t srcBlockIndex, VkDeviceSize srcOffset);
-
7207 };
-
7208 
-
7209 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
-
7210 {
-
7211  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
-
7212 public:
-
7213  VmaDefragmentationAlgorithm_Fast(
-
7214  VmaAllocator hAllocator,
-
7215  VmaBlockVector* pBlockVector,
-
7216  uint32_t currentFrameIndex,
-
7217  bool overlappingMoveSupported);
-
7218  virtual ~VmaDefragmentationAlgorithm_Fast();
-
7219 
-
7220  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
-
7221  virtual void AddAll() { m_AllAllocations = true; }
-
7222 
-
7223  virtual VkResult Defragment(
-
7224  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
-
7225  VkDeviceSize maxBytesToMove,
-
7226  uint32_t maxAllocationsToMove,
-
7227  VmaDefragmentationFlags flags);
+
7196  // 1. Blocks with some non-movable allocations go first.
+
7197  // 2. Blocks with smaller sumFreeSize go first.
+
7198  struct BlockInfoCompareMoveDestination
+
7199  {
+
7200  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
+
7201  {
+
7202  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
+
7203  {
+
7204  return true;
+
7205  }
+
7206  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
+
7207  {
+
7208  return false;
+
7209  }
+
7210  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
+
7211  {
+
7212  return true;
+
7213  }
+
7214  return false;
+
7215  }
+
7216  };
+
7217 
+
7218  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
+
7219  BlockInfoVector m_Blocks;
+
7220 
+
7221  VkResult DefragmentRound(
+
7222  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
+
7223  VkDeviceSize maxBytesToMove,
+
7224  uint32_t maxAllocationsToMove,
+
7225  bool freeOldAllocations);
+
7226 
+
7227  size_t CalcBlocksWithNonMovableCount() const;
7228 
-
7229  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
-
7230  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
-
7231 
-
7232 private:
-
7233  struct BlockInfo
-
7234  {
-
7235  size_t origBlockIndex;
-
7236  };
-
7237 
-
7238  class FreeSpaceDatabase
-
7239  {
-
7240  public:
-
7241  FreeSpaceDatabase()
-
7242  {
-
7243  FreeSpace s = {};
-
7244  s.blockInfoIndex = SIZE_MAX;
-
7245  for(size_t i = 0; i < MAX_COUNT; ++i)
-
7246  {
-
7247  m_FreeSpaces[i] = s;
-
7248  }
-
7249  }
-
7250 
-
7251  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
-
7252  {
-
7253  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
-
7254  {
-
7255  return;
-
7256  }
-
7257 
-
7258  // Find first invalid or the smallest structure.
-
7259  size_t bestIndex = SIZE_MAX;
-
7260  for(size_t i = 0; i < MAX_COUNT; ++i)
-
7261  {
-
7262  // Empty structure.
-
7263  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
-
7264  {
-
7265  bestIndex = i;
-
7266  break;
-
7267  }
-
7268  if(m_FreeSpaces[i].size < size &&
-
7269  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
-
7270  {
-
7271  bestIndex = i;
-
7272  }
+
7229  static bool MoveMakesSense(
+
7230  size_t dstBlockIndex, VkDeviceSize dstOffset,
+
7231  size_t srcBlockIndex, VkDeviceSize srcOffset);
+
7232 };
+
7233 
+
7234 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
+
7235 {
+
7236  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
+
7237 public:
+
7238  VmaDefragmentationAlgorithm_Fast(
+
7239  VmaAllocator hAllocator,
+
7240  VmaBlockVector* pBlockVector,
+
7241  uint32_t currentFrameIndex,
+
7242  bool overlappingMoveSupported);
+
7243  virtual ~VmaDefragmentationAlgorithm_Fast();
+
7244 
+
7245  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
+
7246  virtual void AddAll() { m_AllAllocations = true; }
+
7247 
+
7248  virtual VkResult Defragment(
+
7249  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
+
7250  VkDeviceSize maxBytesToMove,
+
7251  uint32_t maxAllocationsToMove,
+
7252  VmaDefragmentationFlags flags);
+
7253 
+
7254  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
+
7255  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
+
7256 
+
7257 private:
+
7258  struct BlockInfo
+
7259  {
+
7260  size_t origBlockIndex;
+
7261  };
+
7262 
+
7263  class FreeSpaceDatabase
+
7264  {
+
7265  public:
+
7266  FreeSpaceDatabase()
+
7267  {
+
7268  FreeSpace s = {};
+
7269  s.blockInfoIndex = SIZE_MAX;
+
7270  for(size_t i = 0; i < MAX_COUNT; ++i)
+
7271  {
+
7272  m_FreeSpaces[i] = s;
7273  }
-
7274 
-
7275  if(bestIndex != SIZE_MAX)
-
7276  {
-
7277  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
-
7278  m_FreeSpaces[bestIndex].offset = offset;
-
7279  m_FreeSpaces[bestIndex].size = size;
-
7280  }
-
7281  }
+
7274  }
+
7275 
+
7276  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
+
7277  {
+
7278  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
+
7279  {
+
7280  return;
+
7281  }
7282 
-
7283  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
-
7284  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
-
7285  {
-
7286  size_t bestIndex = SIZE_MAX;
-
7287  VkDeviceSize bestFreeSpaceAfter = 0;
-
7288  for(size_t i = 0; i < MAX_COUNT; ++i)
-
7289  {
-
7290  // Structure is valid.
-
7291  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
-
7292  {
-
7293  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
-
7294  // Allocation fits into this structure.
-
7295  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
-
7296  {
-
7297  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
-
7298  (dstOffset + size);
-
7299  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
-
7300  {
-
7301  bestIndex = i;
-
7302  bestFreeSpaceAfter = freeSpaceAfter;
-
7303  }
-
7304  }
-
7305  }
-
7306  }
-
7307 
-
7308  if(bestIndex != SIZE_MAX)
-
7309  {
-
7310  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
-
7311  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
-
7312 
-
7313  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
-
7314  {
-
7315  // Leave this structure for remaining empty space.
-
7316  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
-
7317  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
-
7318  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
-
7319  }
-
7320  else
-
7321  {
-
7322  // This structure becomes invalid.
-
7323  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
-
7324  }
-
7325 
-
7326  return true;
-
7327  }
-
7328 
-
7329  return false;
-
7330  }
-
7331 
-
7332  private:
-
7333  static const size_t MAX_COUNT = 4;
-
7334 
-
7335  struct FreeSpace
-
7336  {
-
7337  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
-
7338  VkDeviceSize offset;
-
7339  VkDeviceSize size;
-
7340  } m_FreeSpaces[MAX_COUNT];
-
7341  };
-
7342 
-
7343  const bool m_OverlappingMoveSupported;
-
7344 
-
7345  uint32_t m_AllocationCount;
-
7346  bool m_AllAllocations;
-
7347 
-
7348  VkDeviceSize m_BytesMoved;
-
7349  uint32_t m_AllocationsMoved;
+
7283  // Find first invalid or the smallest structure.
+
7284  size_t bestIndex = SIZE_MAX;
+
7285  for(size_t i = 0; i < MAX_COUNT; ++i)
+
7286  {
+
7287  // Empty structure.
+
7288  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
+
7289  {
+
7290  bestIndex = i;
+
7291  break;
+
7292  }
+
7293  if(m_FreeSpaces[i].size < size &&
+
7294  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
+
7295  {
+
7296  bestIndex = i;
+
7297  }
+
7298  }
+
7299 
+
7300  if(bestIndex != SIZE_MAX)
+
7301  {
+
7302  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
+
7303  m_FreeSpaces[bestIndex].offset = offset;
+
7304  m_FreeSpaces[bestIndex].size = size;
+
7305  }
+
7306  }
+
7307 
+
7308  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
+
7309  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
+
7310  {
+
7311  size_t bestIndex = SIZE_MAX;
+
7312  VkDeviceSize bestFreeSpaceAfter = 0;
+
7313  for(size_t i = 0; i < MAX_COUNT; ++i)
+
7314  {
+
7315  // Structure is valid.
+
7316  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
+
7317  {
+
7318  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
+
7319  // Allocation fits into this structure.
+
7320  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
+
7321  {
+
7322  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
+
7323  (dstOffset + size);
+
7324  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
+
7325  {
+
7326  bestIndex = i;
+
7327  bestFreeSpaceAfter = freeSpaceAfter;
+
7328  }
+
7329  }
+
7330  }
+
7331  }
+
7332 
+
7333  if(bestIndex != SIZE_MAX)
+
7334  {
+
7335  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
+
7336  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
+
7337 
+
7338  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
+
7339  {
+
7340  // Leave this structure for remaining empty space.
+
7341  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
+
7342  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
+
7343  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
+
7344  }
+
7345  else
+
7346  {
+
7347  // This structure becomes invalid.
+
7348  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
+
7349  }
7350 
-
7351  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
-
7352 
-
7353  void PreprocessMetadata();
-
7354  void PostprocessMetadata();
-
7355  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
-
7356 };
-
7357 
-
7358 struct VmaBlockDefragmentationContext
-
7359 {
-
7360  enum BLOCK_FLAG
-
7361  {
-
7362  BLOCK_FLAG_USED = 0x00000001,
-
7363  };
-
7364  uint32_t flags;
-
7365  VkBuffer hBuffer;
-
7366 };
+
7351  return true;
+
7352  }
+
7353 
+
7354  return false;
+
7355  }
+
7356 
+
7357  private:
+
7358  static const size_t MAX_COUNT = 4;
+
7359 
+
7360  struct FreeSpace
+
7361  {
+
7362  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
+
7363  VkDeviceSize offset;
+
7364  VkDeviceSize size;
+
7365  } m_FreeSpaces[MAX_COUNT];
+
7366  };
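To illustrate Register() and Fetch() on the class above (a hypothetical standalone use; in the library the class is private to the algorithm, and the concrete numbers assume both sizes exceed VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER):

    FreeSpaceDatabase db;
    db.Register(/*blockInfoIndex*/ 0, /*offset*/ 256, /*size*/ 4096);

    size_t dstBlockInfoIndex = 0;
    VkDeviceSize dstOffset = 0;
    if(db.Fetch(/*alignment*/ 64, /*size*/ 1024, dstBlockInfoIndex, dstOffset))
    {
        // dstOffset == 256 (already 64-aligned). The remaining 3072 bytes
        // stay registered in the same slot for a later Fetch(), because
        // they are large enough to be worth keeping.
    }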
7367 
-
7368 class VmaBlockVectorDefragmentationContext
-
7369 {
-
7370  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
-
7371 public:
-
7372  VkResult res;
-
7373  bool mutexLocked;
-
7374  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
-
7375  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > defragmentationMoves;
-
7376  uint32_t defragmentationMovesProcessed;
-
7377  uint32_t defragmentationMovesCommitted;
-
7378  bool hasDefragmentationPlan;
-
7379 
-
7380  VmaBlockVectorDefragmentationContext(
-
7381  VmaAllocator hAllocator,
-
7382  VmaPool hCustomPool, // Optional.
-
7383  VmaBlockVector* pBlockVector,
-
7384  uint32_t currFrameIndex);
-
7385  ~VmaBlockVectorDefragmentationContext();
-
7386 
-
7387  VmaPool GetCustomPool() const { return m_hCustomPool; }
-
7388  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
-
7389  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
-
7390 
-
7391  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
-
7392  void AddAll() { m_AllAllocations = true; }
-
7393 
-
7394  void Begin(bool overlappingMoveSupported, VmaDefragmentationFlags flags);
-
7395 
-
7396 private:
-
7397  const VmaAllocator m_hAllocator;
-
7398  // Null if not from custom pool.
-
7399  const VmaPool m_hCustomPool;
-
7400  // Redundant, for convenience not to fetch from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
-
7401  VmaBlockVector* const m_pBlockVector;
-
7402  const uint32_t m_CurrFrameIndex;
-
7403  // Owner of this object.
-
7404  VmaDefragmentationAlgorithm* m_pAlgorithm;
-
7405 
-
7406  struct AllocInfo
-
7407  {
-
7408  VmaAllocation hAlloc;
-
7409  VkBool32* pChanged;
-
7410  };
-
7411  // Used between constructor and Begin.
-
7412  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
-
7413  bool m_AllAllocations;
-
7414 };
+
7368  const bool m_OverlappingMoveSupported;
+
7369 
+
7370  uint32_t m_AllocationCount;
+
7371  bool m_AllAllocations;
+
7372 
+
7373  VkDeviceSize m_BytesMoved;
+
7374  uint32_t m_AllocationsMoved;
+
7375 
+
7376  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
+
7377 
+
7378  void PreprocessMetadata();
+
7379  void PostprocessMetadata();
+
7380  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
+
7381 };
+
7382 
+
7383 struct VmaBlockDefragmentationContext
+
7384 {
+
7385  enum BLOCK_FLAG
+
7386  {
+
7387  BLOCK_FLAG_USED = 0x00000001,
+
7388  };
+
7389  uint32_t flags;
+
7390  VkBuffer hBuffer;
+
7391 };
+
7392 
+
7393 class VmaBlockVectorDefragmentationContext
+
7394 {
+
7395  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
+
7396 public:
+
7397  VkResult res;
+
7398  bool mutexLocked;
+
7399  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
+
7400  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > defragmentationMoves;
+
7401  uint32_t defragmentationMovesProcessed;
+
7402  uint32_t defragmentationMovesCommitted;
+
7403  bool hasDefragmentationPlan;
+
7404 
+
7405  VmaBlockVectorDefragmentationContext(
+
7406  VmaAllocator hAllocator,
+
7407  VmaPool hCustomPool, // Optional.
+
7408  VmaBlockVector* pBlockVector,
+
7409  uint32_t currFrameIndex);
+
7410  ~VmaBlockVectorDefragmentationContext();
+
7411 
+
7412  VmaPool GetCustomPool() const { return m_hCustomPool; }
+
7413  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
+
7414  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
7415 
-
7416 struct VmaDefragmentationContext_T
-
7417 {
-
7418 private:
-
7419  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
-
7420 public:
-
7421  VmaDefragmentationContext_T(
-
7422  VmaAllocator hAllocator,
-
7423  uint32_t currFrameIndex,
-
7424  uint32_t flags,
-
7425  VmaDefragmentationStats* pStats);
-
7426  ~VmaDefragmentationContext_T();
-
7427 
-
7428  void AddPools(uint32_t poolCount, const VmaPool* pPools);
-
7429  void AddAllocations(
-
7430  uint32_t allocationCount,
-
7431  const VmaAllocation* pAllocations,
-
7432  VkBool32* pAllocationsChanged);
-
7433 
-
7434  /*
-
7435  Returns:
-
7436  - `VK_SUCCESS` if succeeded and object can be destroyed immediately.
-
7437  - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
-
7438  - Negative value if error occurred and object can be destroyed immediately.
-
7439  */
-
7440  VkResult Defragment(
-
7441  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
-
7442  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
-
7443  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags);
-
7444 
-
7445  VkResult DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo);
-
7446  VkResult DefragmentPassEnd();
-
7447 
-
7448 private:
-
7449  const VmaAllocator m_hAllocator;
-
7450  const uint32_t m_CurrFrameIndex;
-
7451  const uint32_t m_Flags;
-
7452  VmaDefragmentationStats* const m_pStats;
-
7453 
-
7454  VkDeviceSize m_MaxCpuBytesToMove;
-
7455  uint32_t m_MaxCpuAllocationsToMove;
-
7456  VkDeviceSize m_MaxGpuBytesToMove;
-
7457  uint32_t m_MaxGpuAllocationsToMove;
+
7416  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
+
7417  void AddAll() { m_AllAllocations = true; }
+
7418 
+
7419  void Begin(bool overlappingMoveSupported, VmaDefragmentationFlags flags);
+
7420 
+
7421 private:
+
7422  const VmaAllocator m_hAllocator;
+
7423  // Null if not from custom pool.
+
7424  const VmaPool m_hCustomPool;
+
7425  // Redundant, for convenience not to fetch from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
+
7426  VmaBlockVector* const m_pBlockVector;
+
7427  const uint32_t m_CurrFrameIndex;
+
7428  // Owner of this object.
+
7429  VmaDefragmentationAlgorithm* m_pAlgorithm;
+
7430 
+
7431  struct AllocInfo
+
7432  {
+
7433  VmaAllocation hAlloc;
+
7434  VkBool32* pChanged;
+
7435  };
+
7436  // Used between constructor and Begin.
+
7437  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
+
7438  bool m_AllAllocations;
+
7439 };
+
7440 
+
7441 struct VmaDefragmentationContext_T
+
7442 {
+
7443 private:
+
7444  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
+
7445 public:
+
7446  VmaDefragmentationContext_T(
+
7447  VmaAllocator hAllocator,
+
7448  uint32_t currFrameIndex,
+
7449  uint32_t flags,
+
7450  VmaDefragmentationStats* pStats);
+
7451  ~VmaDefragmentationContext_T();
+
7452 
+
7453  void AddPools(uint32_t poolCount, const VmaPool* pPools);
+
7454  void AddAllocations(
+
7455  uint32_t allocationCount,
+
7456  const VmaAllocation* pAllocations,
+
7457  VkBool32* pAllocationsChanged);
7458 
-
7459  // Owner of these objects.
-
7460  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
-
7461  // Owner of these objects.
-
7462  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
-
7463 };
-
7464 
-
7465 #if VMA_RECORDING_ENABLED
-
7466 
-
7467 class VmaRecorder
-
7468 {
-
7469 public:
-
7470  VmaRecorder();
-
7471  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
-
7472  void WriteConfiguration(
-
7473  const VkPhysicalDeviceProperties& devProps,
-
7474  const VkPhysicalDeviceMemoryProperties& memProps,
-
7475  uint32_t vulkanApiVersion,
-
7476  bool dedicatedAllocationExtensionEnabled,
-
7477  bool bindMemory2ExtensionEnabled,
-
7478  bool memoryBudgetExtensionEnabled,
-
7479  bool deviceCoherentMemoryExtensionEnabled);
-
7480  ~VmaRecorder();
-
7481 
-
7482  void RecordCreateAllocator(uint32_t frameIndex);
-
7483  void RecordDestroyAllocator(uint32_t frameIndex);
-
7484  void RecordCreatePool(uint32_t frameIndex,
-
7485  const VmaPoolCreateInfo& createInfo,
-
7486  VmaPool pool);
-
7487  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
-
7488  void RecordAllocateMemory(uint32_t frameIndex,
-
7489  const VkMemoryRequirements& vkMemReq,
-
7490  const VmaAllocationCreateInfo& createInfo,
-
7491  VmaAllocation allocation);
-
7492  void RecordAllocateMemoryPages(uint32_t frameIndex,
-
7493  const VkMemoryRequirements& vkMemReq,
-
7494  const VmaAllocationCreateInfo& createInfo,
-
7495  uint64_t allocationCount,
-
7496  const VmaAllocation* pAllocations);
-
7497  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
-
7498  const VkMemoryRequirements& vkMemReq,
-
7499  bool requiresDedicatedAllocation,
-
7500  bool prefersDedicatedAllocation,
-
7501  const VmaAllocationCreateInfo& createInfo,
-
7502  VmaAllocation allocation);
-
7503  void RecordAllocateMemoryForImage(uint32_t frameIndex,
-
7504  const VkMemoryRequirements& vkMemReq,
-
7505  bool requiresDedicatedAllocation,
-
7506  bool prefersDedicatedAllocation,
-
7507  const VmaAllocationCreateInfo& createInfo,
-
7508  VmaAllocation allocation);
-
7509  void RecordFreeMemory(uint32_t frameIndex,
-
7510  VmaAllocation allocation);
-
7511  void RecordFreeMemoryPages(uint32_t frameIndex,
-
7512  uint64_t allocationCount,
-
7513  const VmaAllocation* pAllocations);
-
7514  void RecordSetAllocationUserData(uint32_t frameIndex,
-
7515  VmaAllocation allocation,
-
7516  const void* pUserData);
-
7517  void RecordCreateLostAllocation(uint32_t frameIndex,
-
7518  VmaAllocation allocation);
-
7519  void RecordMapMemory(uint32_t frameIndex,
-
7520  VmaAllocation allocation);
-
7521  void RecordUnmapMemory(uint32_t frameIndex,
-
7522  VmaAllocation allocation);
-
7523  void RecordFlushAllocation(uint32_t frameIndex,
-
7524  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
-
7525  void RecordInvalidateAllocation(uint32_t frameIndex,
-
7526  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
-
7527  void RecordCreateBuffer(uint32_t frameIndex,
-
7528  const VkBufferCreateInfo& bufCreateInfo,
-
7529  const VmaAllocationCreateInfo& allocCreateInfo,
-
7530  VmaAllocation allocation);
-
7531  void RecordCreateImage(uint32_t frameIndex,
-
7532  const VkImageCreateInfo& imageCreateInfo,
-
7533  const VmaAllocationCreateInfo& allocCreateInfo,
-
7534  VmaAllocation allocation);
-
7535  void RecordDestroyBuffer(uint32_t frameIndex,
-
7536  VmaAllocation allocation);
-
7537  void RecordDestroyImage(uint32_t frameIndex,
-
7538  VmaAllocation allocation);
-
7539  void RecordTouchAllocation(uint32_t frameIndex,
-
7540  VmaAllocation allocation);
-
7541  void RecordGetAllocationInfo(uint32_t frameIndex,
-
7542  VmaAllocation allocation);
-
7543  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
-
7544  VmaPool pool);
-
7545  void RecordDefragmentationBegin(uint32_t frameIndex,
-
7546  const VmaDefragmentationInfo2& info,
-
7547  VmaDefragmentationContext ctx);
-
7548  void RecordDefragmentationEnd(uint32_t frameIndex,
-
7549  VmaDefragmentationContext ctx);
-
7550  void RecordSetPoolName(uint32_t frameIndex,
-
7551  VmaPool pool,
-
7552  const char* name);
-
7553 
-
7554 private:
-
7555  struct CallParams
-
7556  {
-
7557  uint32_t threadId;
-
7558  double time;
-
7559  };
-
7560 
-
7561  class UserDataString
-
7562  {
-
7563  public:
-
7564  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
-
7565  const char* GetString() const { return m_Str; }
-
7566 
-
7567  private:
-
7568  char m_PtrStr[17];
-
7569  const char* m_Str;
-
7570  };
-
7571 
-
7572  bool m_UseMutex;
-
7573  VmaRecordFlags m_Flags;
-
7574  FILE* m_File;
-
7575  VMA_MUTEX m_FileMutex;
-
7576  std::chrono::time_point<std::chrono::high_resolution_clock> m_RecordingStartTime;
-
7577 
-
7578  void GetBasicParams(CallParams& outParams);
-
7579 
-
7580  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
-
7581  template<typename T>
-
7582  void PrintPointerList(uint64_t count, const T* pItems)
-
7583  {
-
7584  if(count)
-
7585  {
-
7586  fprintf(m_File, "%p", pItems[0]);
-
7587  for(uint64_t i = 1; i < count; ++i)
-
7588  {
-
7589  fprintf(m_File, " %p", pItems[i]);
-
7590  }
-
7591  }
-
7592  }
-
7593 
-
7594  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
-
7595  void Flush();
-
7596 };
-
7597 
-
7598 #endif // #if VMA_RECORDING_ENABLED
-
7599 
-
7600 /*
-
7601 Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
-
7602 */
-
7603 class VmaAllocationObjectAllocator
-
7604 {
-
7605  VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
-
7606 public:
-
7607  VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);
-
7608 
-
7609  template<typename... Types> VmaAllocation Allocate(Types... args);
-
7610  void Free(VmaAllocation hAlloc);
-
7611 
-
7612 private:
-
7613  VMA_MUTEX m_Mutex;
-
7614  VmaPoolAllocator<VmaAllocation_T> m_Allocator;
-
7615 };
-
7616 
-
7617 struct VmaCurrentBudgetData
-
7618 {
-
7619  VMA_ATOMIC_UINT64 m_BlockBytes[VK_MAX_MEMORY_HEAPS];
-
7620  VMA_ATOMIC_UINT64 m_AllocationBytes[VK_MAX_MEMORY_HEAPS];
-
7621 
-
7622 #if VMA_MEMORY_BUDGET
-
7623  VMA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch;
-
7624  VMA_RW_MUTEX m_BudgetMutex;
-
7625  uint64_t m_VulkanUsage[VK_MAX_MEMORY_HEAPS];
-
7626  uint64_t m_VulkanBudget[VK_MAX_MEMORY_HEAPS];
-
7627  uint64_t m_BlockBytesAtBudgetFetch[VK_MAX_MEMORY_HEAPS];
-
7628 #endif // #if VMA_MEMORY_BUDGET
-
7629 
-
7630  VmaCurrentBudgetData()
-
7631  {
-
7632  for(uint32_t heapIndex = 0; heapIndex < VK_MAX_MEMORY_HEAPS; ++heapIndex)
-
7633  {
-
7634  m_BlockBytes[heapIndex] = 0;
-
7635  m_AllocationBytes[heapIndex] = 0;
-
7636 #if VMA_MEMORY_BUDGET
-
7637  m_VulkanUsage[heapIndex] = 0;
-
7638  m_VulkanBudget[heapIndex] = 0;
-
7639  m_BlockBytesAtBudgetFetch[heapIndex] = 0;
-
7640 #endif
-
7641  }
-
7642 
-
7643 #if VMA_MEMORY_BUDGET
-
7644  m_OperationsSinceBudgetFetch = 0;
-
7645 #endif
-
7646  }
-
7647 
-
7648  void AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
-
7649  {
-
7650  m_AllocationBytes[heapIndex] += allocationSize;
-
7651 #if VMA_MEMORY_BUDGET
-
7652  ++m_OperationsSinceBudgetFetch;
-
7653 #endif
-
7654  }
-
7655 
-
7656  void RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
-
7657  {
-
7658  VMA_ASSERT(m_AllocationBytes[heapIndex] >= allocationSize); // DELME
-
7659  m_AllocationBytes[heapIndex] -= allocationSize;
-
7660 #if VMA_MEMORY_BUDGET
-
7661  ++m_OperationsSinceBudgetFetch;
-
7662 #endif
-
7663  }
-
7664 };
-
7665 
-
7666 // Main allocator object.
-
7667 struct VmaAllocator_T
-
7668 {
-
7669  VMA_CLASS_NO_COPY(VmaAllocator_T)
-
7670 public:
-
7671  bool m_UseMutex;
-
7672  uint32_t m_VulkanApiVersion;
-
7673  bool m_UseKhrDedicatedAllocation; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
-
7674  bool m_UseKhrBindMemory2; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
-
7675  bool m_UseExtMemoryBudget;
-
7676  bool m_UseAmdDeviceCoherentMemory;
-
7677  bool m_UseKhrBufferDeviceAddress;
-
7678  VkDevice m_hDevice;
-
7679  VkInstance m_hInstance;
-
7680  bool m_AllocationCallbacksSpecified;
-
7681  VkAllocationCallbacks m_AllocationCallbacks;
-
7682  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
-
7683  VmaAllocationObjectAllocator m_AllocationObjectAllocator;
-
7684 
-
7685  // Each bit (1 << i) is set if HeapSizeLimit is enabled for that heap, so cannot allocate more than the heap size.
-
7686  uint32_t m_HeapSizeLimitMask;
-
7687 
-
7688  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
-
7689  VkPhysicalDeviceMemoryProperties m_MemProps;
+
7459  /*
+
7460  Returns:
+
7461  - `VK_SUCCESS` if succeeded and object can be destroyed immediately.
+
7462  - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
+
7463  - Negative value if error occurred and object can be destroyed immediately.
+
7464  */
+
7465  VkResult Defragment(
+
7466  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
+
7467  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
+
7468  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags);
+
7469 
+
7470  VkResult DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo);
+
7471  VkResult DefragmentPassEnd();
+
7472 
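A hedged caller-side sketch of the return contract documented above, via the public vmaDefragmentationBegin/vmaDefragmentationEnd entry points; an initialized `allocator` and a filled VmaDefragmentationInfo2 `defragInfo` are assumed to exist:

VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
if(res == VK_NOT_READY)
{
    // Succeeded, but work is still pending (e.g. GPU copies recorded into a
    // command buffer): the context must stay alive until vmaDefragmentationEnd().
    // SubmitAndWait(); // hypothetical helper: submit the command buffer, wait
}
// VK_SUCCESS or a negative error: the object can be destroyed immediately.
vmaDefragmentationEnd(allocator, defragCtx);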
+
7473 private:
+
7474  const VmaAllocator m_hAllocator;
+
7475  const uint32_t m_CurrFrameIndex;
+
7476  const uint32_t m_Flags;
+
7477  VmaDefragmentationStats* const m_pStats;
+
7478 
+
7479  VkDeviceSize m_MaxCpuBytesToMove;
+
7480  uint32_t m_MaxCpuAllocationsToMove;
+
7481  VkDeviceSize m_MaxGpuBytesToMove;
+
7482  uint32_t m_MaxGpuAllocationsToMove;
+
7483 
+
7484  // Owner of these objects.
+
7485  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
+
7486  // Owner of these objects.
+
7487  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
+
7488 };
+
7489 
+
7490 #if VMA_RECORDING_ENABLED
+
7491 
+
7492 class VmaRecorder
+
7493 {
+
7494 public:
+
7495  VmaRecorder();
+
7496  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
+
7497  void WriteConfiguration(
+
7498  const VkPhysicalDeviceProperties& devProps,
+
7499  const VkPhysicalDeviceMemoryProperties& memProps,
+
7500  uint32_t vulkanApiVersion,
+
7501  bool dedicatedAllocationExtensionEnabled,
+
7502  bool bindMemory2ExtensionEnabled,
+
7503  bool memoryBudgetExtensionEnabled,
+
7504  bool deviceCoherentMemoryExtensionEnabled);
+
7505  ~VmaRecorder();
+
7506 
+
7507  void RecordCreateAllocator(uint32_t frameIndex);
+
7508  void RecordDestroyAllocator(uint32_t frameIndex);
+
7509  void RecordCreatePool(uint32_t frameIndex,
+
7510  const VmaPoolCreateInfo& createInfo,
+
7511  VmaPool pool);
+
7512  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
+
7513  void RecordAllocateMemory(uint32_t frameIndex,
+
7514  const VkMemoryRequirements& vkMemReq,
+
7515  const VmaAllocationCreateInfo& createInfo,
+
7516  VmaAllocation allocation);
+
7517  void RecordAllocateMemoryPages(uint32_t frameIndex,
+
7518  const VkMemoryRequirements& vkMemReq,
+
7519  const VmaAllocationCreateInfo& createInfo,
+
7520  uint64_t allocationCount,
+
7521  const VmaAllocation* pAllocations);
+
7522  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
+
7523  const VkMemoryRequirements& vkMemReq,
+
7524  bool requiresDedicatedAllocation,
+
7525  bool prefersDedicatedAllocation,
+
7526  const VmaAllocationCreateInfo& createInfo,
+
7527  VmaAllocation allocation);
+
7528  void RecordAllocateMemoryForImage(uint32_t frameIndex,
+
7529  const VkMemoryRequirements& vkMemReq,
+
7530  bool requiresDedicatedAllocation,
+
7531  bool prefersDedicatedAllocation,
+
7532  const VmaAllocationCreateInfo& createInfo,
+
7533  VmaAllocation allocation);
+
7534  void RecordFreeMemory(uint32_t frameIndex,
+
7535  VmaAllocation allocation);
+
7536  void RecordFreeMemoryPages(uint32_t frameIndex,
+
7537  uint64_t allocationCount,
+
7538  const VmaAllocation* pAllocations);
+
7539  void RecordSetAllocationUserData(uint32_t frameIndex,
+
7540  VmaAllocation allocation,
+
7541  const void* pUserData);
+
7542  void RecordCreateLostAllocation(uint32_t frameIndex,
+
7543  VmaAllocation allocation);
+
7544  void RecordMapMemory(uint32_t frameIndex,
+
7545  VmaAllocation allocation);
+
7546  void RecordUnmapMemory(uint32_t frameIndex,
+
7547  VmaAllocation allocation);
+
7548  void RecordFlushAllocation(uint32_t frameIndex,
+
7549  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
+
7550  void RecordInvalidateAllocation(uint32_t frameIndex,
+
7551  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
+
7552  void RecordCreateBuffer(uint32_t frameIndex,
+
7553  const VkBufferCreateInfo& bufCreateInfo,
+
7554  const VmaAllocationCreateInfo& allocCreateInfo,
+
7555  VmaAllocation allocation);
+
7556  void RecordCreateImage(uint32_t frameIndex,
+
7557  const VkImageCreateInfo& imageCreateInfo,
+
7558  const VmaAllocationCreateInfo& allocCreateInfo,
+
7559  VmaAllocation allocation);
+
7560  void RecordDestroyBuffer(uint32_t frameIndex,
+
7561  VmaAllocation allocation);
+
7562  void RecordDestroyImage(uint32_t frameIndex,
+
7563  VmaAllocation allocation);
+
7564  void RecordTouchAllocation(uint32_t frameIndex,
+
7565  VmaAllocation allocation);
+
7566  void RecordGetAllocationInfo(uint32_t frameIndex,
+
7567  VmaAllocation allocation);
+
7568  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
+
7569  VmaPool pool);
+
7570  void RecordDefragmentationBegin(uint32_t frameIndex,
+
7571  const VmaDefragmentationInfo2& info,
+
7572  VmaDefragmentationContext ctx);
+
7573  void RecordDefragmentationEnd(uint32_t frameIndex,
+
7574  VmaDefragmentationContext ctx);
+
7575  void RecordSetPoolName(uint32_t frameIndex,
+
7576  VmaPool pool,
+
7577  const char* name);
+
7578 
+
7579 private:
+
7580  struct CallParams
+
7581  {
+
7582  uint32_t threadId;
+
7583  double time;
+
7584  };
+
7585 
+
7586  class UserDataString
+
7587  {
+
7588  public:
+
7589  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
+
7590  const char* GetString() const { return m_Str; }
+
7591 
+
7592  private:
+
7593  char m_PtrStr[17];
+
7594  const char* m_Str;
+
7595  };
+
7596 
+
7597  bool m_UseMutex;
+
7598  VmaRecordFlags m_Flags;
+
7599  FILE* m_File;
+
7600  VMA_MUTEX m_FileMutex;
+
7601  std::chrono::time_point<std::chrono::high_resolution_clock> m_RecordingStartTime;
+
7602 
+
7603  void GetBasicParams(CallParams& outParams);
+
7604 
+
7605  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
+
7606  template<typename T>
+
7607  void PrintPointerList(uint64_t count, const T* pItems)
+
7608  {
+
7609  if(count)
+
7610  {
+
7611  fprintf(m_File, "%p", pItems[0]);
+
7612  for(uint64_t i = 1; i < count; ++i)
+
7613  {
+
7614  fprintf(m_File, " %p", pItems[i]);
+
7615  }
+
7616  }
+
7617  }
+
7618 
+
7619  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
+
7620  void Flush();
+
7621 };
+
7622 
+
7623 #endif // #if VMA_RECORDING_ENABLED
+
7624 
+
7625 /*
+
7626 Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
+
7627 */
+
7628 class VmaAllocationObjectAllocator
+
7629 {
+
7630  VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
+
7631 public:
+
7632  VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);
+
7633 
+
7634  template<typename... Types> VmaAllocation Allocate(Types... args);
+
7635  void Free(VmaAllocation hAlloc);
+
7636 
+
7637 private:
+
7638  VMA_MUTEX m_Mutex;
+
7639  VmaPoolAllocator<VmaAllocation_T> m_Allocator;
+
7640 };
+
7641 
+
7642 struct VmaCurrentBudgetData
+
7643 {
+
7644  VMA_ATOMIC_UINT64 m_BlockBytes[VK_MAX_MEMORY_HEAPS];
+
7645  VMA_ATOMIC_UINT64 m_AllocationBytes[VK_MAX_MEMORY_HEAPS];
+
7646 
+
7647 #if VMA_MEMORY_BUDGET
+
7648  VMA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch;
+
7649  VMA_RW_MUTEX m_BudgetMutex;
+
7650  uint64_t m_VulkanUsage[VK_MAX_MEMORY_HEAPS];
+
7651  uint64_t m_VulkanBudget[VK_MAX_MEMORY_HEAPS];
+
7652  uint64_t m_BlockBytesAtBudgetFetch[VK_MAX_MEMORY_HEAPS];
+
7653 #endif // #if VMA_MEMORY_BUDGET
+
7654 
+
7655  VmaCurrentBudgetData()
+
7656  {
+
7657  for(uint32_t heapIndex = 0; heapIndex < VK_MAX_MEMORY_HEAPS; ++heapIndex)
+
7658  {
+
7659  m_BlockBytes[heapIndex] = 0;
+
7660  m_AllocationBytes[heapIndex] = 0;
+
7661 #if VMA_MEMORY_BUDGET
+
7662  m_VulkanUsage[heapIndex] = 0;
+
7663  m_VulkanBudget[heapIndex] = 0;
+
7664  m_BlockBytesAtBudgetFetch[heapIndex] = 0;
+
7665 #endif
+
7666  }
+
7667 
+
7668 #if VMA_MEMORY_BUDGET
+
7669  m_OperationsSinceBudgetFetch = 0;
+
7670 #endif
+
7671  }
+
7672 
+
7673  void AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
+
7674  {
+
7675  m_AllocationBytes[heapIndex] += allocationSize;
+
7676 #if VMA_MEMORY_BUDGET
+
7677  ++m_OperationsSinceBudgetFetch;
+
7678 #endif
+
7679  }
+
7680 
+
7681  void RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
+
7682  {
+
7683  VMA_ASSERT(m_AllocationBytes[heapIndex] >= allocationSize); // DELME
+
7684  m_AllocationBytes[heapIndex] -= allocationSize;
+
7685 #if VMA_MEMORY_BUDGET
+
7686  ++m_OperationsSinceBudgetFetch;
+
7687 #endif
+
7688  }
+
7689 };
7690 
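VmaCurrentBudgetData above is plain bookkeeping: per-heap atomic byte counters plus an operation counter that lets callers decide when the VK_EXT_memory_budget numbers are stale. A stripped-down restatement of that pattern (sketch only, not VMA API):

#include <atomic>
#include <cstdint>

struct BudgetCounters
{
    std::atomic<uint64_t> allocationBytes[16]; // 16 == VK_MAX_MEMORY_HEAPS
    std::atomic<uint32_t> opsSinceFetch;

    BudgetCounters() : opsSinceFetch(0)
    {
        for(auto& b : allocationBytes)
            b.store(0, std::memory_order_relaxed);
    }

    void Add(uint32_t heap, uint64_t size)
    {
        allocationBytes[heap].fetch_add(size, std::memory_order_relaxed);
        opsSinceFetch.fetch_add(1, std::memory_order_relaxed); // hints when to re-query the budget
    }

    void Remove(uint32_t heap, uint64_t size)
    {
        allocationBytes[heap].fetch_sub(size, std::memory_order_relaxed);
        opsSinceFetch.fetch_add(1, std::memory_order_relaxed);
    }
};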
-
7691  // Default pools.
-
7692  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
-
7693 
-
7694  // Each vector is sorted by memory (handle value).
-
7695  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
-
7696  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
-
7697  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
-
7698 
-
7699  VmaCurrentBudgetData m_Budget;
-
7700 
-
7701  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
-
7702  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
-
7703  ~VmaAllocator_T();
-
7704 
-
7705  const VkAllocationCallbacks* GetAllocationCallbacks() const
-
7706  {
-
7707  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
-
7708  }
-
7709  const VmaVulkanFunctions& GetVulkanFunctions() const
-
7710  {
-
7711  return m_VulkanFunctions;
-
7712  }
-
7713 
-
7714  VkPhysicalDevice GetPhysicalDevice() const { return m_PhysicalDevice; }
+
7691 // Main allocator object.
+
7692 struct VmaAllocator_T
+
7693 {
+
7694  VMA_CLASS_NO_COPY(VmaAllocator_T)
+
7695 public:
+
7696  bool m_UseMutex;
+
7697  uint32_t m_VulkanApiVersion;
+
7698  bool m_UseKhrDedicatedAllocation; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
+
7699  bool m_UseKhrBindMemory2; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
+
7700  bool m_UseExtMemoryBudget;
+
7701  bool m_UseAmdDeviceCoherentMemory;
+
7702  bool m_UseKhrBufferDeviceAddress;
+
7703  VkDevice m_hDevice;
+
7704  VkInstance m_hInstance;
+
7705  bool m_AllocationCallbacksSpecified;
+
7706  VkAllocationCallbacks m_AllocationCallbacks;
+
7707  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
+
7708  VmaAllocationObjectAllocator m_AllocationObjectAllocator;
+
7709 
+
7710  // Each bit (1 << i) is set if HeapSizeLimit is enabled for that heap, so cannot allocate more than the heap size.
+
7711  uint32_t m_HeapSizeLimitMask;
+
7712 
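The m_HeapSizeLimitMask convention noted above is one bit per memory heap. A trivial illustration (helper name hypothetical):

// Returns true if heap `heapIndex` has a user-imposed size limit.
bool IsHeapSizeLimited(uint32_t heapSizeLimitMask, uint32_t heapIndex)
{
    return (heapSizeLimitMask & (1u << heapIndex)) != 0;
}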
+
7713  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
+
7714  VkPhysicalDeviceMemoryProperties m_MemProps;
7715 
-
7716  VkDeviceSize GetBufferImageGranularity() const
-
7717  {
-
7718  return VMA_MAX(
-
7719  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
-
7720  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
-
7721  }
-
7722 
-
7723  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
-
7724  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
+
7716  // Default pools.
+
7717  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
+
7718 
+
7719  // Each vector is sorted by memory (handle value).
+
7720  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
+
7721  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
+
7722  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
+
7723 
+
7724  VmaCurrentBudgetData m_Budget;
7725 
-
7726  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
-
7727  {
-
7728  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
-
7729  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
-
7730  }
-
7731  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
-
7732  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
-
7733  {
-
7734  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
-
7735  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
-
7736  }
-
7737  // Minimum alignment for all allocations in specific memory type.
-
7738  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
-
7739  {
-
7740  return IsMemoryTypeNonCoherent(memTypeIndex) ?
-
7741  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
-
7742  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
-
7743  }
-
7744 
-
7745  bool IsIntegratedGpu() const
-
7746  {
-
7747  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
-
7748  }
-
7749 
-
7750  uint32_t GetGlobalMemoryTypeBits() const { return m_GlobalMemoryTypeBits; }
-
7751 
-
7752 #if VMA_RECORDING_ENABLED
-
7753  VmaRecorder* GetRecorder() const { return m_pRecorder; }
-
7754 #endif
-
7755 
-
7756  void GetBufferMemoryRequirements(
-
7757  VkBuffer hBuffer,
-
7758  VkMemoryRequirements& memReq,
-
7759  bool& requiresDedicatedAllocation,
-
7760  bool& prefersDedicatedAllocation) const;
-
7761  void GetImageMemoryRequirements(
-
7762  VkImage hImage,
-
7763  VkMemoryRequirements& memReq,
-
7764  bool& requiresDedicatedAllocation,
-
7765  bool& prefersDedicatedAllocation) const;
-
7766 
-
7767  // Main allocation function.
-
7768  VkResult AllocateMemory(
-
7769  const VkMemoryRequirements& vkMemReq,
-
7770  bool requiresDedicatedAllocation,
-
7771  bool prefersDedicatedAllocation,
-
7772  VkBuffer dedicatedBuffer,
-
7773  VkBufferUsageFlags dedicatedBufferUsage, // UINT32_MAX when unknown.
-
7774  VkImage dedicatedImage,
-
7775  const VmaAllocationCreateInfo& createInfo,
-
7776  VmaSuballocationType suballocType,
-
7777  size_t allocationCount,
-
7778  VmaAllocation* pAllocations);
-
7779 
-
7780  // Main deallocation function.
-
7781  void FreeMemory(
-
7782  size_t allocationCount,
-
7783  const VmaAllocation* pAllocations);
-
7784 
-
7785  VkResult ResizeAllocation(
-
7786  const VmaAllocation alloc,
-
7787  VkDeviceSize newSize);
-
7788 
-
7789  void CalculateStats(VmaStats* pStats);
-
7790 
-
7791  void GetBudget(
-
7792  VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount);
-
7793 
-
7794 #if VMA_STATS_STRING_ENABLED
-
7795  void PrintDetailedMap(class VmaJsonWriter& json);
-
7796 #endif
-
7797 
-
7798  VkResult DefragmentationBegin(
-
7799  const VmaDefragmentationInfo2& info,
-
7800  VmaDefragmentationStats* pStats,
-
7801  VmaDefragmentationContext* pContext);
-
7802  VkResult DefragmentationEnd(
-
7803  VmaDefragmentationContext context);
+
7726  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
+
7727  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
+
7728  ~VmaAllocator_T();
+
7729 
+
7730  const VkAllocationCallbacks* GetAllocationCallbacks() const
+
7731  {
+
7732  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
+
7733  }
+
7734  const VmaVulkanFunctions& GetVulkanFunctions() const
+
7735  {
+
7736  return m_VulkanFunctions;
+
7737  }
+
7738 
+
7739  VkPhysicalDevice GetPhysicalDevice() const { return m_PhysicalDevice; }
+
7740 
+
7741  VkDeviceSize GetBufferImageGranularity() const
+
7742  {
+
7743  return VMA_MAX(
+
7744  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
+
7745  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
+
7746  }
+
7747 
+
7748  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
+
7749  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
+
7750 
+
7751  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
+
7752  {
+
7753  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
+
7754  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
+
7755  }
+
7756  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
+
7757  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
+
7758  {
+
7759  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
+
7760  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+
7761  }
+
7762  // Minimum alignment for all allocations in specific memory type.
+
7763  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
+
7764  {
+
7765  return IsMemoryTypeNonCoherent(memTypeIndex) ?
+
7766  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
+
7767  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
+
7768  }
+
7769 
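IsMemoryTypeNonCoherent() and GetMemoryTypeMinAlignment() above exist because ranges handed to vkFlushMappedMemoryRanges/vkInvalidateMappedMemoryRanges must be multiples of VkPhysicalDeviceLimits::nonCoherentAtomSize. A sketch of the rounding this implies (helper names hypothetical; clamping to the allocation end is omitted):

// Round a mapped-memory range outward to nonCoherentAtomSize boundaries.
VkDeviceSize AlignFlushOffset(VkDeviceSize offset, VkDeviceSize atomSize)
{
    return offset / atomSize * atomSize; // round start down
}

VkDeviceSize AlignFlushSize(VkDeviceSize offset, VkDeviceSize size, VkDeviceSize atomSize)
{
    const VkDeviceSize alignedEnd = (offset + size + atomSize - 1) / atomSize * atomSize; // round end up
    return alignedEnd - AlignFlushOffset(offset, atomSize);
}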
+
7770  bool IsIntegratedGpu() const
+
7771  {
+
7772  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
+
7773  }
+
7774 
+
7775  uint32_t GetGlobalMemoryTypeBits() const { return m_GlobalMemoryTypeBits; }
+
7776 
+
7777 #if VMA_RECORDING_ENABLED
+
7778  VmaRecorder* GetRecorder() const { return m_pRecorder; }
+
7779 #endif
+
7780 
+
7781  void GetBufferMemoryRequirements(
+
7782  VkBuffer hBuffer,
+
7783  VkMemoryRequirements& memReq,
+
7784  bool& requiresDedicatedAllocation,
+
7785  bool& prefersDedicatedAllocation) const;
+
7786  void GetImageMemoryRequirements(
+
7787  VkImage hImage,
+
7788  VkMemoryRequirements& memReq,
+
7789  bool& requiresDedicatedAllocation,
+
7790  bool& prefersDedicatedAllocation) const;
+
7791 
+
7792  // Main allocation function.
+
7793  VkResult AllocateMemory(
+
7794  const VkMemoryRequirements& vkMemReq,
+
7795  bool requiresDedicatedAllocation,
+
7796  bool prefersDedicatedAllocation,
+
7797  VkBuffer dedicatedBuffer,
+
7798  VkBufferUsageFlags dedicatedBufferUsage, // UINT32_MAX when unknown.
+
7799  VkImage dedicatedImage,
+
7800  const VmaAllocationCreateInfo& createInfo,
+
7801  VmaSuballocationType suballocType,
+
7802  size_t allocationCount,
+
7803  VmaAllocation* pAllocations);
7804 
-
7805  VkResult DefragmentationPassBegin(
-
7806  VmaDefragmentationPassInfo* pInfo,
-
7807  VmaDefragmentationContext context);
-
7808  VkResult DefragmentationPassEnd(
-
7809  VmaDefragmentationContext context);
-
7810 
-
7811  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
-
7812  bool TouchAllocation(VmaAllocation hAllocation);
+
7805  // Main deallocation function.
+
7806  void FreeMemory(
+
7807  size_t allocationCount,
+
7808  const VmaAllocation* pAllocations);
+
7809 
+
7810  VkResult ResizeAllocation(
+
7811  const VmaAllocation alloc,
+
7812  VkDeviceSize newSize);
7813 
-
7814  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
-
7815  void DestroyPool(VmaPool pool);
-
7816  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
-
7817 
-
7818  void SetCurrentFrameIndex(uint32_t frameIndex);
-
7819  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
-
7820 
-
7821  void MakePoolAllocationsLost(
-
7822  VmaPool hPool,
-
7823  size_t* pLostAllocationCount);
-
7824  VkResult CheckPoolCorruption(VmaPool hPool);
-
7825  VkResult CheckCorruption(uint32_t memoryTypeBits);
-
7826 
-
7827  void CreateLostAllocation(VmaAllocation* pAllocation);
-
7828 
-
7829  // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping.
-
7830  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
-
7831  // Call to Vulkan function vkFreeMemory with accompanying bookkeeping.
-
7832  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
-
7833  // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR.
-
7834  VkResult BindVulkanBuffer(
-
7835  VkDeviceMemory memory,
-
7836  VkDeviceSize memoryOffset,
-
7837  VkBuffer buffer,
-
7838  const void* pNext);
-
7839  // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR.
-
7840  VkResult BindVulkanImage(
-
7841  VkDeviceMemory memory,
-
7842  VkDeviceSize memoryOffset,
-
7843  VkImage image,
-
7844  const void* pNext);
+
7814  void CalculateStats(VmaStats* pStats);
+
7815 
+
7816  void GetBudget(
+
7817  VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount);
+
7818 
+
7819 #if VMA_STATS_STRING_ENABLED
+
7820  void PrintDetailedMap(class VmaJsonWriter& json);
+
7821 #endif
+
7822 
+
7823  VkResult DefragmentationBegin(
+
7824  const VmaDefragmentationInfo2& info,
+
7825  VmaDefragmentationStats* pStats,
+
7826  VmaDefragmentationContext* pContext);
+
7827  VkResult DefragmentationEnd(
+
7828  VmaDefragmentationContext context);
+
7829 
+
7830  VkResult DefragmentationPassBegin(
+
7831  VmaDefragmentationPassInfo* pInfo,
+
7832  VmaDefragmentationContext context);
+
7833  VkResult DefragmentationPassEnd(
+
7834  VmaDefragmentationContext context);
+
7835 
+
7836  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
+
7837  bool TouchAllocation(VmaAllocation hAllocation);
+
7838 
+
7839  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
+
7840  void DestroyPool(VmaPool pool);
+
7841  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
+
7842 
+
7843  void SetCurrentFrameIndex(uint32_t frameIndex);
+
7844  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
7845 
-
7846  VkResult Map(VmaAllocation hAllocation, void** ppData);
-
7847  void Unmap(VmaAllocation hAllocation);
-
7848 
-
7849  VkResult BindBufferMemory(
-
7850  VmaAllocation hAllocation,
-
7851  VkDeviceSize allocationLocalOffset,
-
7852  VkBuffer hBuffer,
-
7853  const void* pNext);
-
7854  VkResult BindImageMemory(
-
7855  VmaAllocation hAllocation,
-
7856  VkDeviceSize allocationLocalOffset,
-
7857  VkImage hImage,
-
7858  const void* pNext);
-
7859 
-
7860  VkResult FlushOrInvalidateAllocation(
-
7861  VmaAllocation hAllocation,
-
7862  VkDeviceSize offset, VkDeviceSize size,
-
7863  VMA_CACHE_OPERATION op);
-
7864  VkResult FlushOrInvalidateAllocations(
-
7865  uint32_t allocationCount,
-
7866  const VmaAllocation* allocations,
-
7867  const VkDeviceSize* offsets, const VkDeviceSize* sizes,
-
7868  VMA_CACHE_OPERATION op);
-
7869 
-
7870  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
-
7871 
-
7872  /*
-
7873  Returns bit mask of memory types that can support defragmentation on GPU as
-
7874  they support creation of required buffer for copy operations.
-
7875  */
-
7876  uint32_t GetGpuDefragmentationMemoryTypeBits();
-
7877 
-
7878 private:
-
7879  VkDeviceSize m_PreferredLargeHeapBlockSize;
-
7880 
-
7881  VkPhysicalDevice m_PhysicalDevice;
-
7882  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
-
7883  VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.
-
7884 
-
7885  VMA_RW_MUTEX m_PoolsMutex;
-
7886  // Protected by m_PoolsMutex. Sorted by pointer value.
-
7887  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
-
7888  uint32_t m_NextPoolId;
-
7889 
-
7890  VmaVulkanFunctions m_VulkanFunctions;
-
7891 
-
7892  // Global bit mask AND-ed with any memoryTypeBits to disallow certain memory types.
-
7893  uint32_t m_GlobalMemoryTypeBits;
+
7846  void MakePoolAllocationsLost(
+
7847  VmaPool hPool,
+
7848  size_t* pLostAllocationCount);
+
7849  VkResult CheckPoolCorruption(VmaPool hPool);
+
7850  VkResult CheckCorruption(uint32_t memoryTypeBits);
+
7851 
+
7852  void CreateLostAllocation(VmaAllocation* pAllocation);
+
7853 
+
7854  // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping.
+
7855  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
+
7856  // Call to Vulkan function vkFreeMemory with accompanying bookkeeping.
+
7857  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
+
7858  // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR.
+
7859  VkResult BindVulkanBuffer(
+
7860  VkDeviceMemory memory,
+
7861  VkDeviceSize memoryOffset,
+
7862  VkBuffer buffer,
+
7863  const void* pNext);
+
7864  // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR.
+
7865  VkResult BindVulkanImage(
+
7866  VkDeviceMemory memory,
+
7867  VkDeviceSize memoryOffset,
+
7868  VkImage image,
+
7869  const void* pNext);
+
7870 
+
7871  VkResult Map(VmaAllocation hAllocation, void** ppData);
+
7872  void Unmap(VmaAllocation hAllocation);
+
7873 
+
7874  VkResult BindBufferMemory(
+
7875  VmaAllocation hAllocation,
+
7876  VkDeviceSize allocationLocalOffset,
+
7877  VkBuffer hBuffer,
+
7878  const void* pNext);
+
7879  VkResult BindImageMemory(
+
7880  VmaAllocation hAllocation,
+
7881  VkDeviceSize allocationLocalOffset,
+
7882  VkImage hImage,
+
7883  const void* pNext);
+
7884 
+
7885  VkResult FlushOrInvalidateAllocation(
+
7886  VmaAllocation hAllocation,
+
7887  VkDeviceSize offset, VkDeviceSize size,
+
7888  VMA_CACHE_OPERATION op);
+
7889  VkResult FlushOrInvalidateAllocations(
+
7890  uint32_t allocationCount,
+
7891  const VmaAllocation* allocations,
+
7892  const VkDeviceSize* offsets, const VkDeviceSize* sizes,
+
7893  VMA_CACHE_OPERATION op);
7894 
-
7895 #if VMA_RECORDING_ENABLED
-
7896  VmaRecorder* m_pRecorder;
-
7897 #endif
-
7898 
-
7899  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
-
7900 
-
7901 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
-
7902  void ImportVulkanFunctions_Static();
-
7903 #endif
-
7904 
-
7905  void ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions);
-
7906 
-
7907 #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
-
7908  void ImportVulkanFunctions_Dynamic();
-
7909 #endif
-
7910 
-
7911  void ValidateVulkanFunctions();
-
7912 
-
7913  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
+
7895  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
+
7896 
+
7897  /*
+
7898  Returns bit mask of memory types that can support defragmentation on GPU as
+
7899  they support creation of required buffer for copy operations.
+
7900  */
+
7901  uint32_t GetGpuDefragmentationMemoryTypeBits();
+
7902 
+
7903 private:
+
7904  VkDeviceSize m_PreferredLargeHeapBlockSize;
+
7905 
+
7906  VkPhysicalDevice m_PhysicalDevice;
+
7907  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
+
7908  VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.
+
7909 
+
7910  VMA_RW_MUTEX m_PoolsMutex;
+
7911  // Protected by m_PoolsMutex. Sorted by pointer value.
+
7912  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
+
7913  uint32_t m_NextPoolId;
7914 
-
7915  VkResult AllocateMemoryOfType(
-
7916  VkDeviceSize size,
-
7917  VkDeviceSize alignment,
-
7918  bool dedicatedAllocation,
-
7919  VkBuffer dedicatedBuffer,
-
7920  VkBufferUsageFlags dedicatedBufferUsage,
-
7921  VkImage dedicatedImage,
-
7922  const VmaAllocationCreateInfo& createInfo,
-
7923  uint32_t memTypeIndex,
-
7924  VmaSuballocationType suballocType,
-
7925  size_t allocationCount,
-
7926  VmaAllocation* pAllocations);
-
7927 
-
7928  // Helper function only to be used inside AllocateDedicatedMemory.
-
7929  VkResult AllocateDedicatedMemoryPage(
-
7930  VkDeviceSize size,
-
7931  VmaSuballocationType suballocType,
-
7932  uint32_t memTypeIndex,
-
7933  const VkMemoryAllocateInfo& allocInfo,
-
7934  bool map,
-
7935  bool isUserDataString,
-
7936  void* pUserData,
-
7937  VmaAllocation* pAllocation);
-
7938 
-
7939  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
-
7940  VkResult AllocateDedicatedMemory(
+
7915  VmaVulkanFunctions m_VulkanFunctions;
+
7916 
+
7917  // Global bit mask AND-ed with any memoryTypeBits to disallow certain memory types.
+
7918  uint32_t m_GlobalMemoryTypeBits;
+
7919 
+
7920 #if VMA_RECORDING_ENABLED
+
7921  VmaRecorder* m_pRecorder;
+
7922 #endif
+
7923 
+
7924  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
+
7925 
+
7926 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
+
7927  void ImportVulkanFunctions_Static();
+
7928 #endif
+
7929 
+
7930  void ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions);
+
7931 
+
7932 #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
+
7933  void ImportVulkanFunctions_Dynamic();
+
7934 #endif
+
7935 
+
7936  void ValidateVulkanFunctions();
+
7937 
+
7938  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
+
7939 
+
7940  VkResult AllocateMemoryOfType(
7941  VkDeviceSize size,
-
7942  VmaSuballocationType suballocType,
-
7943  uint32_t memTypeIndex,
-
7944  bool withinBudget,
-
7945  bool map,
-
7946  bool isUserDataString,
-
7947  void* pUserData,
-
7948  VkBuffer dedicatedBuffer,
-
7949  VkBufferUsageFlags dedicatedBufferUsage,
-
7950  VkImage dedicatedImage,
-
7951  size_t allocationCount,
-
7952  VmaAllocation* pAllocations);
-
7953 
-
7954  void FreeDedicatedMemory(const VmaAllocation allocation);
-
7955 
-
7956  /*
-
7957  Calculates and returns bit mask of memory types that can support defragmentation
-
7958  on GPU as they support creation of required buffer for copy operations.
-
7959  */
-
7960  uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
-
7961 
-
7962  uint32_t CalculateGlobalMemoryTypeBits() const;
+
7942  VkDeviceSize alignment,
+
7943  bool dedicatedAllocation,
+
7944  VkBuffer dedicatedBuffer,
+
7945  VkBufferUsageFlags dedicatedBufferUsage,
+
7946  VkImage dedicatedImage,
+
7947  const VmaAllocationCreateInfo& createInfo,
+
7948  uint32_t memTypeIndex,
+
7949  VmaSuballocationType suballocType,
+
7950  size_t allocationCount,
+
7951  VmaAllocation* pAllocations);
+
7952 
+
7953  // Helper function only to be used inside AllocateDedicatedMemory.
+
7954  VkResult AllocateDedicatedMemoryPage(
+
7955  VkDeviceSize size,
+
7956  VmaSuballocationType suballocType,
+
7957  uint32_t memTypeIndex,
+
7958  const VkMemoryAllocateInfo& allocInfo,
+
7959  bool map,
+
7960  bool isUserDataString,
+
7961  void* pUserData,
+
7962  VmaAllocation* pAllocation);
7963 
-
7964  bool GetFlushOrInvalidateRange(
-
7965  VmaAllocation allocation,
-
7966  VkDeviceSize offset, VkDeviceSize size,
-
7967  VkMappedMemoryRange& outRange) const;
-
7968 
-
7969 #if VMA_MEMORY_BUDGET
-
7970  void UpdateVulkanBudget();
-
7971 #endif // #if VMA_MEMORY_BUDGET
-
7972 };
-
7973 
-
7975 // Memory allocation #2 after VmaAllocator_T definition
-
7976 
-
7977 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
-
7978 {
-
7979  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
-
7980 }
-
7981 
-
7982 static void VmaFree(VmaAllocator hAllocator, void* ptr)
-
7983 {
-
7984  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
-
7985 }
+
7964  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
+
7965  VkResult AllocateDedicatedMemory(
+
7966  VkDeviceSize size,
+
7967  VmaSuballocationType suballocType,
+
7968  uint32_t memTypeIndex,
+
7969  bool withinBudget,
+
7970  bool map,
+
7971  bool isUserDataString,
+
7972  void* pUserData,
+
7973  VkBuffer dedicatedBuffer,
+
7974  VkBufferUsageFlags dedicatedBufferUsage,
+
7975  VkImage dedicatedImage,
+
7976  size_t allocationCount,
+
7977  VmaAllocation* pAllocations);
+
7978 
+
7979  void FreeDedicatedMemory(const VmaAllocation allocation);
+
7980 
+
7981  /*
+
7982  Calculates and returns bit mask of memory types that can support defragmentation
+
7983  on GPU as they support creation of required buffer for copy operations.
+
7984  */
+
7985  uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
7986 
-
7987 template<typename T>
-
7988 static T* VmaAllocate(VmaAllocator hAllocator)
-
7989 {
-
7990  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
-
7991 }
-
7992 
-
7993 template<typename T>
-
7994 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
-
7995 {
-
7996  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
-
7997 }
+
7987  uint32_t CalculateGlobalMemoryTypeBits() const;
+
7988 
+
7989  bool GetFlushOrInvalidateRange(
+
7990  VmaAllocation allocation,
+
7991  VkDeviceSize offset, VkDeviceSize size,
+
7992  VkMappedMemoryRange& outRange) const;
+
7993 
+
7994 #if VMA_MEMORY_BUDGET
+
7995  void UpdateVulkanBudget();
+
7996 #endif // #if VMA_MEMORY_BUDGET
+
7997 };
7998 
-
7999 template<typename T>
-
8000 static void vma_delete(VmaAllocator hAllocator, T* ptr)
-
8001 {
-
8002  if(ptr != VMA_NULL)
-
8003  {
-
8004  ptr->~T();
-
8005  VmaFree(hAllocator, ptr);
-
8006  }
-
8007 }
-
8008 
-
8009 template<typename T>
-
8010 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
-
8011 {
-
8012  if(ptr != VMA_NULL)
-
8013  {
-
8014  for(size_t i = count; i--; )
-
8015  ptr[i].~T();
-
8016  VmaFree(hAllocator, ptr);
-
8017  }
-
8018 }
-
8019 
-
8021 // VmaStringBuilder
-
8022 
-
8023 #if VMA_STATS_STRING_ENABLED
-
8024 
-
8025 class VmaStringBuilder
+
8000 // Memory allocation #2 after VmaAllocator_T definition
+
8001 
+
8002 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
+
8003 {
+
8004  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
+
8005 }
+
8006 
+
8007 static void VmaFree(VmaAllocator hAllocator, void* ptr)
+
8008 {
+
8009  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
+
8010 }
+
8011 
+
8012 template<typename T>
+
8013 static T* VmaAllocate(VmaAllocator hAllocator)
+
8014 {
+
8015  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
+
8016 }
+
8017 
+
8018 template<typename T>
+
8019 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
+
8020 {
+
8021  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
+
8022 }
+
8023 
+
8024 template<typename T>
+
8025 static void vma_delete(VmaAllocator hAllocator, T* ptr)
8026 {
-
8027 public:
-
8028  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
-
8029  size_t GetLength() const { return m_Data.size(); }
-
8030  const char* GetData() const { return m_Data.data(); }
-
8031 
-
8032  void Add(char ch) { m_Data.push_back(ch); }
-
8033  void Add(const char* pStr);
-
8034  void AddNewLine() { Add('\n'); }
-
8035  void AddNumber(uint32_t num);
-
8036  void AddNumber(uint64_t num);
-
8037  void AddPointer(const void* ptr);
-
8038 
-
8039 private:
-
8040  VmaVector< char, VmaStlAllocator<char> > m_Data;
-
8041 };
-
8042 
-
8043 void VmaStringBuilder::Add(const char* pStr)
-
8044 {
-
8045  const size_t strLen = strlen(pStr);
-
8046  if(strLen > 0)
-
8047  {
-
8048  const size_t oldCount = m_Data.size();
-
8049  m_Data.resize(oldCount + strLen);
-
8050  memcpy(m_Data.data() + oldCount, pStr, strLen);
-
8051  }
-
8052 }
-
8053 
-
8054 void VmaStringBuilder::AddNumber(uint32_t num)
-
8055 {
-
8056  char buf[11];
-
8057  buf[10] = '\0';
-
8058  char *p = &buf[10];
-
8059  do
-
8060  {
-
8061  *--p = '0' + (num % 10);
-
8062  num /= 10;
-
8063  }
-
8064  while(num);
-
8065  Add(p);
-
8066 }
+
8027  if(ptr != VMA_NULL)
+
8028  {
+
8029  ptr->~T();
+
8030  VmaFree(hAllocator, ptr);
+
8031  }
+
8032 }
+
8033 
+
8034 template<typename T>
+
8035 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
+
8036 {
+
8037  if(ptr != VMA_NULL)
+
8038  {
+
8039  for(size_t i = count; i--; )
+
8040  ptr[i].~T();
+
8041  VmaFree(hAllocator, ptr);
+
8042  }
+
8043 }
+
8044 
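vma_delete above mirrors `operator delete` for memory obtained through the user-supplied VkAllocationCallbacks: the destructor runs explicitly, then the raw block goes back through VmaFree (vma_delete_array likewise destroys elements in reverse index order, matching C++ array semantics). A hedged usage sketch pairing it with VmaAllocate; `Widget` is a hypothetical type:

#include <new> // placement-new

struct Widget { /* ... */ };

void Example(VmaAllocator hAllocator)
{
    // Allocate raw storage via the allocator's callbacks, then construct in place.
    Widget* w = new(VmaAllocate<Widget>(hAllocator)) Widget();
    // ... use *w ...
    vma_delete(hAllocator, w); // runs ~Widget(), then frees via VmaFree
}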
+
8046 // VmaStringBuilder
+
8047 
+
8048 #if VMA_STATS_STRING_ENABLED
+
8049 
+
8050 class VmaStringBuilder
+
8051 {
+
8052 public:
+
8053  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
+
8054  size_t GetLength() const { return m_Data.size(); }
+
8055  const char* GetData() const { return m_Data.data(); }
+
8056 
+
8057  void Add(char ch) { m_Data.push_back(ch); }
+
8058  void Add(const char* pStr);
+
8059  void AddNewLine() { Add('\n'); }
+
8060  void AddNumber(uint32_t num);
+
8061  void AddNumber(uint64_t num);
+
8062  void AddPointer(const void* ptr);
+
8063 
+
8064 private:
+
8065  VmaVector< char, VmaStlAllocator<char> > m_Data;
+
8066 };
8067 
-
8068 void VmaStringBuilder::AddNumber(uint64_t num)
+
8068 void VmaStringBuilder::Add(const char* pStr)
8069 {
-
8070  char buf[21];
-
8071  buf[20] = '\0';
-
8072  char *p = &buf[20];
-
8073  do
-
8074  {
-
8075  *--p = '0' + (num % 10);
-
8076  num /= 10;
-
8077  }
-
8078  while(num);
-
8079  Add(p);
-
8080 }
-
8081 
-
8082 void VmaStringBuilder::AddPointer(const void* ptr)
-
8083 {
-
8084  char buf[21];
-
8085  VmaPtrToStr(buf, sizeof(buf), ptr);
-
8086  Add(buf);
-
8087 }
-
8088 
-
8089 #endif // #if VMA_STATS_STRING_ENABLED
-
8090 
-
8092 // VmaJsonWriter
-
8093 
-
8094 #if VMA_STATS_STRING_ENABLED
-
8095 
-
8096 class VmaJsonWriter
-
8097 {
-
8098  VMA_CLASS_NO_COPY(VmaJsonWriter)
-
8099 public:
-
8100  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
-
8101  ~VmaJsonWriter();
-
8102 
-
8103  void BeginObject(bool singleLine = false);
-
8104  void EndObject();
-
8105 
-
8106  void BeginArray(bool singleLine = false);
-
8107  void EndArray();
-
8108 
-
8109  void WriteString(const char* pStr);
-
8110  void BeginString(const char* pStr = VMA_NULL);
-
8111  void ContinueString(const char* pStr);
-
8112  void ContinueString(uint32_t n);
-
8113  void ContinueString(uint64_t n);
-
8114  void ContinueString_Pointer(const void* ptr);
-
8115  void EndString(const char* pStr = VMA_NULL);
-
8116 
-
8117  void WriteNumber(uint32_t n);
-
8118  void WriteNumber(uint64_t n);
-
8119  void WriteBool(bool b);
-
8120  void WriteNull();
-
8121 
-
8122 private:
-
8123  static const char* const INDENT;
-
8124 
-
8125  enum COLLECTION_TYPE
-
8126  {
-
8127  COLLECTION_TYPE_OBJECT,
-
8128  COLLECTION_TYPE_ARRAY,
-
8129  };
-
8130  struct StackItem
-
8131  {
-
8132  COLLECTION_TYPE type;
-
8133  uint32_t valueCount;
-
8134  bool singleLineMode;
-
8135  };
-
8136 
-
8137  VmaStringBuilder& m_SB;
-
8138  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
-
8139  bool m_InsideString;
-
8140 
-
8141  void BeginValue(bool isString);
-
8142  void WriteIndent(bool oneLess = false);
-
8143 };
-
8144 
-
8145 const char* const VmaJsonWriter::INDENT = " ";
+
8070  const size_t strLen = strlen(pStr);
+
8071  if(strLen > 0)
+
8072  {
+
8073  const size_t oldCount = m_Data.size();
+
8074  m_Data.resize(oldCount + strLen);
+
8075  memcpy(m_Data.data() + oldCount, pStr, strLen);
+
8076  }
+
8077 }
+
8078 
+
8079 void VmaStringBuilder::AddNumber(uint32_t num)
+
8080 {
+
8081  char buf[11];
+
8082  buf[10] = '\0';
+
8083  char *p = &buf[10];
+
8084  do
+
8085  {
+
8086  *--p = '0' + (num % 10);
+
8087  num /= 10;
+
8088  }
+
8089  while(num);
+
8090  Add(p);
+
8091 }
+
8092 
+
8093 void VmaStringBuilder::AddNumber(uint64_t num)
+
8094 {
+
8095  char buf[21];
+
8096  buf[20] = '\0';
+
8097  char *p = &buf[20];
+
8098  do
+
8099  {
+
8100  *--p = '0' + (num % 10);
+
8101  num /= 10;
+
8102  }
+
8103  while(num);
+
8104  Add(p);
+
8105 }
+
8106 
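Both AddNumber overloads above fill a stack buffer backwards with `num % 10` digits; the buffers are sized for the worst case plus a terminator (uint32_t: 10 digits, uint64_t: 20 digits). The same conversion in isolation (sketch, not VMA API):

#include <cstdint>

// Returns a pointer into `buf` at the first digit; valid while `buf` lives.
const char* U64ToDec(uint64_t num, char (&buf)[21])
{
    buf[20] = '\0';
    char* p = &buf[20];
    do
    {
        *--p = '0' + (num % 10);
        num /= 10;
    } while(num);
    return p;
}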
+
8107 void VmaStringBuilder::AddPointer(const void* ptr)
+
8108 {
+
8109  char buf[21];
+
8110  VmaPtrToStr(buf, sizeof(buf), ptr);
+
8111  Add(buf);
+
8112 }
+
8113 
+
8114 #endif // #if VMA_STATS_STRING_ENABLED
+
8115 
+
8117 // VmaJsonWriter
+
8118 
+
8119 #if VMA_STATS_STRING_ENABLED
+
8120 
+
8121 class VmaJsonWriter
+
8122 {
+
8123  VMA_CLASS_NO_COPY(VmaJsonWriter)
+
8124 public:
+
8125  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
+
8126  ~VmaJsonWriter();
+
8127 
+
8128  void BeginObject(bool singleLine = false);
+
8129  void EndObject();
+
8130 
+
8131  void BeginArray(bool singleLine = false);
+
8132  void EndArray();
+
8133 
+
8134  void WriteString(const char* pStr);
+
8135  void BeginString(const char* pStr = VMA_NULL);
+
8136  void ContinueString(const char* pStr);
+
8137  void ContinueString(uint32_t n);
+
8138  void ContinueString(uint64_t n);
+
8139  void ContinueString_Pointer(const void* ptr);
+
8140  void EndString(const char* pStr = VMA_NULL);
+
8141 
+
8142  void WriteNumber(uint32_t n);
+
8143  void WriteNumber(uint64_t n);
+
8144  void WriteBool(bool b);
+
8145  void WriteNull();
8146 
-
8147 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
-
8148  m_SB(sb),
-
8149  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
-
8150  m_InsideString(false)
-
8151 {
-
8152 }
-
8153 
-
8154 VmaJsonWriter::~VmaJsonWriter()
-
8155 {
-
8156  VMA_ASSERT(!m_InsideString);
-
8157  VMA_ASSERT(m_Stack.empty());
-
8158 }
-
8159 
-
8160 void VmaJsonWriter::BeginObject(bool singleLine)
-
8161 {
-
8162  VMA_ASSERT(!m_InsideString);
-
8163 
-
8164  BeginValue(false);
-
8165  m_SB.Add('{');
-
8166 
-
8167  StackItem item;
-
8168  item.type = COLLECTION_TYPE_OBJECT;
-
8169  item.valueCount = 0;
-
8170  item.singleLineMode = singleLine;
-
8171  m_Stack.push_back(item);
-
8172 }
-
8173 
-
8174 void VmaJsonWriter::EndObject()
-
8175 {
-
8176  VMA_ASSERT(!m_InsideString);
-
8177 
-
8178  WriteIndent(true);
-
8179  m_SB.Add('}');
-
8180 
-
8181  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
-
8182  m_Stack.pop_back();
+
8147 private:
+
8148  static const char* const INDENT;
+
8149 
+
8150  enum COLLECTION_TYPE
+
8151  {
+
8152  COLLECTION_TYPE_OBJECT,
+
8153  COLLECTION_TYPE_ARRAY,
+
8154  };
+
8155  struct StackItem
+
8156  {
+
8157  COLLECTION_TYPE type;
+
8158  uint32_t valueCount;
+
8159  bool singleLineMode;
+
8160  };
+
8161 
+
8162  VmaStringBuilder& m_SB;
+
8163  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
+
8164  bool m_InsideString;
+
8165 
+
8166  void BeginValue(bool isString);
+
8167  void WriteIndent(bool oneLess = false);
+
8168 };
+
8169 
+
8170 const char* const VmaJsonWriter::INDENT = " ";
+
8171 
+
8172 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
+
8173  m_SB(sb),
+
8174  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
+
8175  m_InsideString(false)
+
8176 {
+
8177 }
+
8178 
+
8179 VmaJsonWriter::~VmaJsonWriter()
+
8180 {
+
8181  VMA_ASSERT(!m_InsideString);
+
8182  VMA_ASSERT(m_Stack.empty());
8183 }
8184 
-
8185 void VmaJsonWriter::BeginArray(bool singleLine)
+
8185 void VmaJsonWriter::BeginObject(bool singleLine)
8186 {
8187  VMA_ASSERT(!m_InsideString);
8188 
8189  BeginValue(false);
-
8190  m_SB.Add('[');
+
8190  m_SB.Add('{');
8191 
8192  StackItem item;
-
8193  item.type = COLLECTION_TYPE_ARRAY;
+
8193  item.type = COLLECTION_TYPE_OBJECT;
8194  item.valueCount = 0;
8195  item.singleLineMode = singleLine;
8196  m_Stack.push_back(item);
8197 }
8198 
-
8199 void VmaJsonWriter::EndArray()
+
8199 void VmaJsonWriter::EndObject()
8200 {
8201  VMA_ASSERT(!m_InsideString);
8202 
8203  WriteIndent(true);
-
8204  m_SB.Add(']');
+
8204  m_SB.Add('}');
8205 
-
8206  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
+
8206  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
8207  m_Stack.pop_back();
8208 }
8209 
-
8210 void VmaJsonWriter::WriteString(const char* pStr)
+
8210 void VmaJsonWriter::BeginArray(bool singleLine)
8211 {
-
8212  BeginString(pStr);
-
8213  EndString();
-
8214 }
-
8215 
-
8216 void VmaJsonWriter::BeginString(const char* pStr)
-
8217 {
-
8218  VMA_ASSERT(!m_InsideString);
-
8219 
-
8220  BeginValue(true);
-
8221  m_SB.Add('"');
-
8222  m_InsideString = true;
-
8223  if(pStr != VMA_NULL && pStr[0] != '\0')
-
8224  {
-
8225  ContinueString(pStr);
-
8226  }
-
8227 }
-
8228 
-
8229 void VmaJsonWriter::ContinueString(const char* pStr)
-
8230 {
-
8231  VMA_ASSERT(m_InsideString);
-
8232 
-
8233  const size_t strLen = strlen(pStr);
-
8234  for(size_t i = 0; i < strLen; ++i)
-
8235  {
-
8236  char ch = pStr[i];
-
8237  if(ch == '\\')
-
8238  {
-
8239  m_SB.Add("\\\\");
-
8240  }
-
8241  else if(ch == '"')
-
8242  {
-
8243  m_SB.Add("\\\"");
-
8244  }
-
8245  else if(ch >= 32)
-
8246  {
-
8247  m_SB.Add(ch);
-
8248  }
-
8249  else switch(ch)
-
8250  {
-
8251  case '\b':
-
8252  m_SB.Add("\\b");
-
8253  break;
-
8254  case '\f':
-
8255  m_SB.Add("\\f");
-
8256  break;
-
8257  case '\n':
-
8258  m_SB.Add("\\n");
-
8259  break;
-
8260  case '\r':
-
8261  m_SB.Add("\\r");
-
8262  break;
-
8263  case '\t':
-
8264  m_SB.Add("\\t");
-
8265  break;
-
8266  default:
-
8267  VMA_ASSERT(0 && "Character not currently supported.");
-
8268  break;
+
8212  VMA_ASSERT(!m_InsideString);
+
8213 
+
8214  BeginValue(false);
+
8215  m_SB.Add('[');
+
8216 
+
8217  StackItem item;
+
8218  item.type = COLLECTION_TYPE_ARRAY;
+
8219  item.valueCount = 0;
+
8220  item.singleLineMode = singleLine;
+
8221  m_Stack.push_back(item);
+
8222 }
+
8223 
+
8224 void VmaJsonWriter::EndArray()
+
8225 {
+
8226  VMA_ASSERT(!m_InsideString);
+
8227 
+
8228  WriteIndent(true);
+
8229  m_SB.Add(']');
+
8230 
+
8231  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
+
8232  m_Stack.pop_back();
+
8233 }
+
8234 
+
8235 void VmaJsonWriter::WriteString(const char* pStr)
+
8236 {
+
8237  BeginString(pStr);
+
8238  EndString();
+
8239 }
+
8240 
+
8241 void VmaJsonWriter::BeginString(const char* pStr)
+
8242 {
+
8243  VMA_ASSERT(!m_InsideString);
+
8244 
+
8245  BeginValue(true);
+
8246  m_SB.Add('"');
+
8247  m_InsideString = true;
+
8248  if(pStr != VMA_NULL && pStr[0] != '\0')
+
8249  {
+
8250  ContinueString(pStr);
+
8251  }
+
8252 }
+
8253 
+
8254 void VmaJsonWriter::ContinueString(const char* pStr)
+
8255 {
+
8256  VMA_ASSERT(m_InsideString);
+
8257 
+
8258  const size_t strLen = strlen(pStr);
+
8259  for(size_t i = 0; i < strLen; ++i)
+
8260  {
+
8261  char ch = pStr[i];
+
8262  if(ch == '\\')
+
8263  {
+
8264  m_SB.Add("\\\\");
+
8265  }
+
8266  else if(ch == '"')
+
8267  {
+
8268  m_SB.Add("\\\"");
8269  }
-
8270  }
-
8271 }
-
8272 
-
8273 void VmaJsonWriter::ContinueString(uint32_t n)
-
8274 {
-
8275  VMA_ASSERT(m_InsideString);
-
8276  m_SB.AddNumber(n);
-
8277 }
-
8278 
-
8279 void VmaJsonWriter::ContinueString(uint64_t n)
-
8280 {
-
8281  VMA_ASSERT(m_InsideString);
-
8282  m_SB.AddNumber(n);
-
8283 }
-
8284 
-
8285 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
-
8286 {
-
8287  VMA_ASSERT(m_InsideString);
-
8288  m_SB.AddPointer(ptr);
-
8289 }
-
8290 
-
8291 void VmaJsonWriter::EndString(const char* pStr)
-
8292 {
-
8293  VMA_ASSERT(m_InsideString);
-
8294  if(pStr != VMA_NULL && pStr[0] != '\0')
-
8295  {
-
8296  ContinueString(pStr);
-
8297  }
-
8298  m_SB.Add('"');
-
8299  m_InsideString = false;
-
8300 }
-
8301 
-
8302 void VmaJsonWriter::WriteNumber(uint32_t n)
-
8303 {
-
8304  VMA_ASSERT(!m_InsideString);
-
8305  BeginValue(false);
-
8306  m_SB.AddNumber(n);
-
8307 }
-
8308 
-
8309 void VmaJsonWriter::WriteNumber(uint64_t n)
-
8310 {
-
8311  VMA_ASSERT(!m_InsideString);
-
8312  BeginValue(false);
-
8313  m_SB.AddNumber(n);
+
8270  else if(ch >= 32)
+
8271  {
+
8272  m_SB.Add(ch);
+
8273  }
+
8274  else switch(ch)
+
8275  {
+
8276  case '\b':
+
8277  m_SB.Add("\\b");
+
8278  break;
+
8279  case '\f':
+
8280  m_SB.Add("\\f");
+
8281  break;
+
8282  case '\n':
+
8283  m_SB.Add("\\n");
+
8284  break;
+
8285  case '\r':
+
8286  m_SB.Add("\\r");
+
8287  break;
+
8288  case '\t':
+
8289  m_SB.Add("\\t");
+
8290  break;
+
8291  default:
+
8292  VMA_ASSERT(0 && "Character not currently supported.");
+
8293  break;
+
8294  }
+
8295  }
+
8296 }
+
8297 
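A note on the escaping above: ContinueString() passes printable characters (>= 32) through unchanged, escapes backslash and double quote, and rewrites the common control characters as their two-character JSON escapes; anything else below 32 trips the VMA_ASSERT in the default branch. For example (illustrative only):

    // Input C string:         a"b\c<TAB>d
    // Appended JSON payload:  a\"b\\c\td
    json.BeginString();
    json.ContinueString("a\"b\\c\td");
    json.EndString();   // closing quote, m_InsideString = false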
+
8298 void VmaJsonWriter::ContinueString(uint32_t n)
+
8299 {
+
8300  VMA_ASSERT(m_InsideString);
+
8301  m_SB.AddNumber(n);
+
8302 }
+
8303 
+
8304 void VmaJsonWriter::ContinueString(uint64_t n)
+
8305 {
+
8306  VMA_ASSERT(m_InsideString);
+
8307  m_SB.AddNumber(n);
+
8308 }
+
8309 
+
8310 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
+
8311 {
+
8312  VMA_ASSERT(m_InsideString);
+
8313  m_SB.AddPointer(ptr);
8314 }
8315 
-
8316 void VmaJsonWriter::WriteBool(bool b)
+
8316 void VmaJsonWriter::EndString(const char* pStr)
8317 {
-
8318  VMA_ASSERT(!m_InsideString);
-
8319  BeginValue(false);
-
8320  m_SB.Add(b ? "true" : "false");
-
8321 }
-
8322 
-
8323 void VmaJsonWriter::WriteNull()
-
8324 {
-
8325  VMA_ASSERT(!m_InsideString);
-
8326  BeginValue(false);
-
8327  m_SB.Add("null");
-
8328 }
-
8329 
-
8330 void VmaJsonWriter::BeginValue(bool isString)
-
8331 {
-
8332  if(!m_Stack.empty())
-
8333  {
-
8334  StackItem& currItem = m_Stack.back();
-
8335  if(currItem.type == COLLECTION_TYPE_OBJECT &&
-
8336  currItem.valueCount % 2 == 0)
-
8337  {
-
8338  VMA_ASSERT(isString);
-
8339  }
+
8318  VMA_ASSERT(m_InsideString);
+
8319  if(pStr != VMA_NULL && pStr[0] != '\0')
+
8320  {
+
8321  ContinueString(pStr);
+
8322  }
+
8323  m_SB.Add('"');
+
8324  m_InsideString = false;
+
8325 }
+
8326 
+
8327 void VmaJsonWriter::WriteNumber(uint32_t n)
+
8328 {
+
8329  VMA_ASSERT(!m_InsideString);
+
8330  BeginValue(false);
+
8331  m_SB.AddNumber(n);
+
8332 }
+
8333 
+
8334 void VmaJsonWriter::WriteNumber(uint64_t n)
+
8335 {
+
8336  VMA_ASSERT(!m_InsideString);
+
8337  BeginValue(false);
+
8338  m_SB.AddNumber(n);
+
8339 }
8340 
-
8341  if(currItem.type == COLLECTION_TYPE_OBJECT &&
-
8342  currItem.valueCount % 2 != 0)
-
8343  {
-
8344  m_SB.Add(": ");
-
8345  }
-
8346  else if(currItem.valueCount > 0)
-
8347  {
-
8348  m_SB.Add(", ");
-
8349  WriteIndent();
-
8350  }
-
8351  else
-
8352  {
-
8353  WriteIndent();
-
8354  }
-
8355  ++currItem.valueCount;
-
8356  }
-
8357 }
-
8358 
-
8359 void VmaJsonWriter::WriteIndent(bool oneLess)
-
8360 {
-
8361  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
-
8362  {
-
8363  m_SB.AddNewLine();
-
8364 
-
8365  size_t count = m_Stack.size();
-
8366  if(count > 0 && oneLess)
-
8367  {
-
8368  --count;
-
8369  }
-
8370  for(size_t i = 0; i < count; ++i)
-
8371  {
-
8372  m_SB.Add(INDENT);
-
8373  }
-
8374  }
-
8375 }
-
8376 
-
8377 #endif // #if VMA_STATS_STRING_ENABLED
-
8378 
-
8380 
-
8381 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
-
8382 {
-
8383  if(IsUserDataString())
-
8384  {
-
8385  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
-
8386 
-
8387  FreeUserDataString(hAllocator);
-
8388 
-
8389  if(pUserData != VMA_NULL)
-
8390  {
-
8391  m_pUserData = VmaCreateStringCopy(hAllocator->GetAllocationCallbacks(), (const char*)pUserData);
-
8392  }
-
8393  }
-
8394  else
-
8395  {
-
8396  m_pUserData = pUserData;
-
8397  }
-
8398 }
-
8399 
-
8400 void VmaAllocation_T::ChangeBlockAllocation(
-
8401  VmaAllocator hAllocator,
-
8402  VmaDeviceMemoryBlock* block,
-
8403  VkDeviceSize offset)
-
8404 {
-
8405  VMA_ASSERT(block != VMA_NULL);
-
8406  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
-
8407 
-
8408  // Move mapping reference counter from old block to new block.
-
8409  if(block != m_BlockAllocation.m_Block)
-
8410  {
-
8411  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
-
8412  if(IsPersistentMap())
-
8413  ++mapRefCount;
-
8414  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
-
8415  block->Map(hAllocator, mapRefCount, VMA_NULL);
-
8416  }
-
8417 
-
8418  m_BlockAllocation.m_Block = block;
-
8419  m_BlockAllocation.m_Offset = offset;
-
8420 }
-
8421 
-
8422 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
-
8423 {
-
8424  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
-
8425  m_BlockAllocation.m_Offset = newOffset;
-
8426 }
-
8427 
-
8428 VkDeviceSize VmaAllocation_T::GetOffset() const
-
8429 {
-
8430  switch(m_Type)
-
8431  {
-
8432  case ALLOCATION_TYPE_BLOCK:
-
8433  return m_BlockAllocation.m_Offset;
-
8434  case ALLOCATION_TYPE_DEDICATED:
-
8435  return 0;
-
8436  default:
-
8437  VMA_ASSERT(0);
-
8438  return 0;
-
8439  }
-
8440 }
-
8441 
-
8442 VkDeviceMemory VmaAllocation_T::GetMemory() const
-
8443 {
-
8444  switch(m_Type)
-
8445  {
-
8446  case ALLOCATION_TYPE_BLOCK:
-
8447  return m_BlockAllocation.m_Block->GetDeviceMemory();
-
8448  case ALLOCATION_TYPE_DEDICATED:
-
8449  return m_DedicatedAllocation.m_hMemory;
-
8450  default:
-
8451  VMA_ASSERT(0);
-
8452  return VK_NULL_HANDLE;
-
8453  }
-
8454 }
-
8455 
-
8456 void* VmaAllocation_T::GetMappedData() const
-
8457 {
-
8458  switch(m_Type)
-
8459  {
-
8460  case ALLOCATION_TYPE_BLOCK:
-
8461  if(m_MapCount != 0)
-
8462  {
-
8463  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
-
8464  VMA_ASSERT(pBlockData != VMA_NULL);
-
8465  return (char*)pBlockData + m_BlockAllocation.m_Offset;
-
8466  }
-
8467  else
-
8468  {
-
8469  return VMA_NULL;
-
8470  }
-
8471  break;
-
8472  case ALLOCATION_TYPE_DEDICATED:
-
8473  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
-
8474  return m_DedicatedAllocation.m_pMappedData;
+
8341 void VmaJsonWriter::WriteBool(bool b)
+
8342 {
+
8343  VMA_ASSERT(!m_InsideString);
+
8344  BeginValue(false);
+
8345  m_SB.Add(b ? "true" : "false");
+
8346 }
+
8347 
+
8348 void VmaJsonWriter::WriteNull()
+
8349 {
+
8350  VMA_ASSERT(!m_InsideString);
+
8351  BeginValue(false);
+
8352  m_SB.Add("null");
+
8353 }
+
8354 
+
8355 void VmaJsonWriter::BeginValue(bool isString)
+
8356 {
+
8357  if(!m_Stack.empty())
+
8358  {
+
8359  StackItem& currItem = m_Stack.back();
+
8360  if(currItem.type == COLLECTION_TYPE_OBJECT &&
+
8361  currItem.valueCount % 2 == 0)
+
8362  {
+
8363  VMA_ASSERT(isString);
+
8364  }
+
8365 
+
8366  if(currItem.type == COLLECTION_TYPE_OBJECT &&
+
8367  currItem.valueCount % 2 != 0)
+
8368  {
+
8369  m_SB.Add(": ");
+
8370  }
+
8371  else if(currItem.valueCount > 0)
+
8372  {
+
8373  m_SB.Add(", ");
+
8374  WriteIndent();
+
8375  }
+
8376  else
+
8377  {
+
8378  WriteIndent();
+
8379  }
+
8380  ++currItem.valueCount;
+
8381  }
+
8382 }
+
8383 
+
8384 void VmaJsonWriter::WriteIndent(bool oneLess)
+
8385 {
+
8386  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
+
8387  {
+
8388  m_SB.AddNewLine();
+
8389 
+
8390  size_t count = m_Stack.size();
+
8391  if(count > 0 && oneLess)
+
8392  {
+
8393  --count;
+
8394  }
+
8395  for(size_t i = 0; i < count; ++i)
+
8396  {
+
8397  m_SB.Add(INDENT);
+
8398  }
+
8399  }
+
8400 }
+
8401 
+
8402 #endif // #if VMA_STATS_STRING_ENABLED
+
8403 
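Taken together, the VmaJsonWriter methods above form a small streaming JSON emitter: BeginValue() inserts the ", " / ": " separators and asserts that every first value of a pair inside an object is a string key, while WriteIndent() handles the per-level INDENT for collections not in singleLineMode. A minimal sketch of driving it (hypothetical caller; sb and the allocation callbacks are assumed to exist in scope):

    VmaJsonWriter json(pAllocationCallbacks, sb);
    json.BeginObject();            // {
    json.WriteString("Blocks");    //   "Blocks":  (key: must be a string)
    json.WriteNumber(42u);         //   42
    json.WriteString("Names");     //   "Names":
    json.BeginArray(true);         //   ["a", "b"] emitted on a single line
    json.WriteString("a");
    json.WriteString("b");
    json.EndArray();
    json.EndObject();              // }
    // sb now holds roughly: { "Blocks": 42, "Names": ["a", "b"] }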
+
8405 
+
8406 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
+
8407 {
+
8408  if(IsUserDataString())
+
8409  {
+
8410  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
+
8411 
+
8412  FreeUserDataString(hAllocator);
+
8413 
+
8414  if(pUserData != VMA_NULL)
+
8415  {
+
8416  m_pUserData = VmaCreateStringCopy(hAllocator->GetAllocationCallbacks(), (const char*)pUserData);
+
8417  }
+
8418  }
+
8419  else
+
8420  {
+
8421  m_pUserData = pUserData;
+
8422  }
+
8423 }
+
8424 
+
8425 void VmaAllocation_T::ChangeBlockAllocation(
+
8426  VmaAllocator hAllocator,
+
8427  VmaDeviceMemoryBlock* block,
+
8428  VkDeviceSize offset)
+
8429 {
+
8430  VMA_ASSERT(block != VMA_NULL);
+
8431  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
+
8432 
+
8433  // Move mapping reference counter from old block to new block.
+
8434  if(block != m_BlockAllocation.m_Block)
+
8435  {
+
8436  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
+
8437  if(IsPersistentMap())
+
8438  ++mapRefCount;
+
8439  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
+
8440  block->Map(hAllocator, mapRefCount, VMA_NULL);
+
8441  }
+
8442 
+
8443  m_BlockAllocation.m_Block = block;
+
8444  m_BlockAllocation.m_Offset = offset;
+
8445 }
+
8446 
+
8447 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
+
8448 {
+
8449  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
+
8450  m_BlockAllocation.m_Offset = newOffset;
+
8451 }
+
8452 
+
8453 VkDeviceSize VmaAllocation_T::GetOffset() const
+
8454 {
+
8455  switch(m_Type)
+
8456  {
+
8457  case ALLOCATION_TYPE_BLOCK:
+
8458  return m_BlockAllocation.m_Offset;
+
8459  case ALLOCATION_TYPE_DEDICATED:
+
8460  return 0;
+
8461  default:
+
8462  VMA_ASSERT(0);
+
8463  return 0;
+
8464  }
+
8465 }
+
8466 
+
8467 VkDeviceMemory VmaAllocation_T::GetMemory() const
+
8468 {
+
8469  switch(m_Type)
+
8470  {
+
8471  case ALLOCATION_TYPE_BLOCK:
+
8472  return m_BlockAllocation.m_Block->GetDeviceMemory();
+
8473  case ALLOCATION_TYPE_DEDICATED:
+
8474  return m_DedicatedAllocation.m_hMemory;
8475  default:
8476  VMA_ASSERT(0);
-
8477  return VMA_NULL;
+
8477  return VK_NULL_HANDLE;
8478  }
8479 }
8480 
-
8481 bool VmaAllocation_T::CanBecomeLost() const
+
8481 void* VmaAllocation_T::GetMappedData() const
8482 {
8483  switch(m_Type)
8484  {
8485  case ALLOCATION_TYPE_BLOCK:
-
8486  return m_BlockAllocation.m_CanBecomeLost;
-
8487  case ALLOCATION_TYPE_DEDICATED:
-
8488  return false;
-
8489  default:
-
8490  VMA_ASSERT(0);
-
8491  return false;
-
8492  }
-
8493 }
-
8494 
-
8495 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
-
8496 {
-
8497  VMA_ASSERT(CanBecomeLost());
-
8498 
-
8499  /*
-
8500  Warning: This is a carefully designed algorithm.
-
8501  Do not modify unless you really know what you're doing :)
-
8502  */
-
8503  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
-
8504  for(;;)
-
8505  {
-
8506  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
-
8507  {
-
8508  VMA_ASSERT(0);
-
8509  return false;
-
8510  }
-
8511  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
-
8512  {
-
8513  return false;
-
8514  }
-
8515  else // Last use time earlier than current time.
-
8516  {
-
8517  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
-
8518  {
-
8519  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
-
8520  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
-
8521  return true;
-
8522  }
-
8523  }
-
8524  }
-
8525 }
-
8526 
-
8527 #if VMA_STATS_STRING_ENABLED
-
8528 
-
8529 // Correspond to values of enum VmaSuballocationType.
-
8530 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
-
8531  "FREE",
-
8532  "UNKNOWN",
-
8533  "BUFFER",
-
8534  "IMAGE_UNKNOWN",
-
8535  "IMAGE_LINEAR",
-
8536  "IMAGE_OPTIMAL",
-
8537 };
-
8538 
-
8539 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
-
8540 {
-
8541  json.WriteString("Type");
-
8542  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
-
8543 
-
8544  json.WriteString("Size");
-
8545  json.WriteNumber(m_Size);
-
8546 
-
8547  if(m_pUserData != VMA_NULL)
-
8548  {
-
8549  json.WriteString("UserData");
-
8550  if(IsUserDataString())
-
8551  {
-
8552  json.WriteString((const char*)m_pUserData);
-
8553  }
-
8554  else
-
8555  {
-
8556  json.BeginString();
-
8557  json.ContinueString_Pointer(m_pUserData);
-
8558  json.EndString();
-
8559  }
-
8560  }
-
8561 
-
8562  json.WriteString("CreationFrameIndex");
-
8563  json.WriteNumber(m_CreationFrameIndex);
-
8564 
-
8565  json.WriteString("LastUseFrameIndex");
-
8566  json.WriteNumber(GetLastUseFrameIndex());
-
8567 
-
8568  if(m_BufferImageUsage != 0)
-
8569  {
-
8570  json.WriteString("Usage");
-
8571  json.WriteNumber(m_BufferImageUsage);
-
8572  }
-
8573 }
-
8574 
-
8575 #endif
-
8576 
-
8577 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
-
8578 {
-
8579  VMA_ASSERT(IsUserDataString());
-
8580  VmaFreeString(hAllocator->GetAllocationCallbacks(), (char*)m_pUserData);
-
8581  m_pUserData = VMA_NULL;
-
8582 }
-
8583 
-
8584 void VmaAllocation_T::BlockAllocMap()
-
8585 {
-
8586  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
-
8587 
-
8588  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
-
8589  {
-
8590  ++m_MapCount;
-
8591  }
-
8592  else
-
8593  {
-
8594  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
-
8595  }
-
8596 }
-
8597 
-
8598 void VmaAllocation_T::BlockAllocUnmap()
-
8599 {
-
8600  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
+
8486  if(m_MapCount != 0)
+
8487  {
+
8488  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
+
8489  VMA_ASSERT(pBlockData != VMA_NULL);
+
8490  return (char*)pBlockData + m_BlockAllocation.m_Offset;
+
8491  }
+
8492  else
+
8493  {
+
8494  return VMA_NULL;
+
8495  }
+
8496  break;
+
8497  case ALLOCATION_TYPE_DEDICATED:
+
8498  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
+
8499  return m_DedicatedAllocation.m_pMappedData;
+
8500  default:
+
8501  VMA_ASSERT(0);
+
8502  return VMA_NULL;
+
8503  }
+
8504 }
+
8505 
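The three accessors above dispatch on m_Type: block allocations return their containing block's handle plus a relative offset, while dedicated allocations own the VkDeviceMemory and always report offset 0. A hypothetical caller sketch (pSrc and copySize are placeholders):

    VkDeviceMemory mem  = hAlloc->GetMemory();      // block memory or dedicated handle
    VkDeviceSize   off  = hAlloc->GetOffset();      // 0 for dedicated allocations
    void*          data = hAlloc->GetMappedData();  // non-null only while mapped
    if(data != VMA_NULL)
    {
        memcpy(data, pSrc, (size_t)copySize);       // direct CPU write to the mapped range
    }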
+
8506 bool VmaAllocation_T::CanBecomeLost() const
+
8507 {
+
8508  switch(m_Type)
+
8509  {
+
8510  case ALLOCATION_TYPE_BLOCK:
+
8511  return m_BlockAllocation.m_CanBecomeLost;
+
8512  case ALLOCATION_TYPE_DEDICATED:
+
8513  return false;
+
8514  default:
+
8515  VMA_ASSERT(0);
+
8516  return false;
+
8517  }
+
8518 }
+
8519 
+
8520 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
+
8521 {
+
8522  VMA_ASSERT(CanBecomeLost());
+
8523 
+
8524  /*
+
8525  Warning: This is a carefully designed algorithm.
+
8526  Do not modify unless you really know what you're doing :)
+
8527  */
+
8528  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
+
8529  for(;;)
+
8530  {
+
8531  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
+
8532  {
+
8533  VMA_ASSERT(0);
+
8534  return false;
+
8535  }
+
8536  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
+
8537  {
+
8538  return false;
+
8539  }
+
8540  else // Last use time earlier than current time.
+
8541  {
+
8542  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
+
8543  {
+
8544  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
+
8545  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
+
8546  return true;
+
8547  }
+
8548  }
+
8549  }
+
8550 }
+
8551 
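MakeLost() above is a classic compare-exchange retry loop: the allocation may be reclaimed only when its last-use frame is more than frameInUseCount frames behind currentFrameIndex, and the transition to VMA_FRAME_INDEX_LOST must win an atomic race against concurrent users of the same counter. The same pattern in plain std::atomic terms (a sketch, not VMA's actual atomics):

    #include <atomic>
    #include <cstdint>

    bool TryMarkLost(std::atomic<uint32_t>& lastUseFrame,
                     uint32_t currentFrame, uint32_t frameInUseCount,
                     uint32_t lostSentinel)
    {
        uint32_t observed = lastUseFrame.load();
        for(;;)
        {
            if(observed == lostSentinel)
                return false;                              // already lost
            if(observed + frameInUseCount >= currentFrame)
                return false;                              // may still be in use
            if(lastUseFrame.compare_exchange_weak(observed, lostSentinel))
                return true;                               // we marked it lost
            // CAS failure refreshed 'observed'; re-evaluate and retry.
        }
    }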
+
8552 #if VMA_STATS_STRING_ENABLED
+
8553 
+
8554 // Correspond to values of enum VmaSuballocationType.
+
8555 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
+
8556  "FREE",
+
8557  "UNKNOWN",
+
8558  "BUFFER",
+
8559  "IMAGE_UNKNOWN",
+
8560  "IMAGE_LINEAR",
+
8561  "IMAGE_OPTIMAL",
+
8562 };
+
8563 
+
8564 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
+
8565 {
+
8566  json.WriteString("Type");
+
8567  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
+
8568 
+
8569  json.WriteString("Size");
+
8570  json.WriteNumber(m_Size);
+
8571 
+
8572  if(m_pUserData != VMA_NULL)
+
8573  {
+
8574  json.WriteString("UserData");
+
8575  if(IsUserDataString())
+
8576  {
+
8577  json.WriteString((const char*)m_pUserData);
+
8578  }
+
8579  else
+
8580  {
+
8581  json.BeginString();
+
8582  json.ContinueString_Pointer(m_pUserData);
+
8583  json.EndString();
+
8584  }
+
8585  }
+
8586 
+
8587  json.WriteString("CreationFrameIndex");
+
8588  json.WriteNumber(m_CreationFrameIndex);
+
8589 
+
8590  json.WriteString("LastUseFrameIndex");
+
8591  json.WriteNumber(GetLastUseFrameIndex());
+
8592 
+
8593  if(m_BufferImageUsage != 0)
+
8594  {
+
8595  json.WriteString("Usage");
+
8596  json.WriteNumber(m_BufferImageUsage);
+
8597  }
+
8598 }
+
8599 
+
8600 #endif
8601 
-
8602  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
-
8603  {
-
8604  --m_MapCount;
-
8605  }
-
8606  else
-
8607  {
-
8608  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
-
8609  }
-
8610 }
-
8611 
-
8612 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
-
8613 {
-
8614  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
-
8615 
-
8616  if(m_MapCount != 0)
-
8617  {
-
8618  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
-
8619  {
-
8620  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
-
8621  *ppData = m_DedicatedAllocation.m_pMappedData;
-
8622  ++m_MapCount;
-
8623  return VK_SUCCESS;
-
8624  }
-
8625  else
-
8626  {
-
8627  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
-
8628  return VK_ERROR_MEMORY_MAP_FAILED;
-
8629  }
+
8602 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
+
8603 {
+
8604  VMA_ASSERT(IsUserDataString());
+
8605  VmaFreeString(hAllocator->GetAllocationCallbacks(), (char*)m_pUserData);
+
8606  m_pUserData = VMA_NULL;
+
8607 }
+
8608 
+
8609 void VmaAllocation_T::BlockAllocMap()
+
8610 {
+
8611  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
+
8612 
+
8613  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
+
8614  {
+
8615  ++m_MapCount;
+
8616  }
+
8617  else
+
8618  {
+
8619  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
+
8620  }
+
8621 }
+
8622 
+
8623 void VmaAllocation_T::BlockAllocUnmap()
+
8624 {
+
8625  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
+
8626 
+
8627  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
+
8628  {
+
8629  --m_MapCount;
8630  }
8631  else
8632  {
-
8633  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
-
8634  hAllocator->m_hDevice,
-
8635  m_DedicatedAllocation.m_hMemory,
-
8636  0, // offset
-
8637  VK_WHOLE_SIZE,
-
8638  0, // flags
-
8639  ppData);
-
8640  if(result == VK_SUCCESS)
-
8641  {
-
8642  m_DedicatedAllocation.m_pMappedData = *ppData;
-
8643  m_MapCount = 1;
-
8644  }
-
8645  return result;
-
8646  }
-
8647 }
-
8648 
-
8649 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
-
8650 {
-
8651  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
-
8652 
-
8653  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
-
8654  {
-
8655  --m_MapCount;
-
8656  if(m_MapCount == 0)
-
8657  {
-
8658  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
-
8659  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
-
8660  hAllocator->m_hDevice,
-
8661  m_DedicatedAllocation.m_hMemory);
-
8662  }
-
8663  }
-
8664  else
-
8665  {
-
8666  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
-
8667  }
-
8668 }
-
8669 
-
8670 #if VMA_STATS_STRING_ENABLED
-
8671 
-
8672 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
-
8673 {
-
8674  json.BeginObject();
-
8675 
-
8676  json.WriteString("Blocks");
-
8677  json.WriteNumber(stat.blockCount);
-
8678 
-
8679  json.WriteString("Allocations");
-
8680  json.WriteNumber(stat.allocationCount);
-
8681 
-
8682  json.WriteString("UnusedRanges");
-
8683  json.WriteNumber(stat.unusedRangeCount);
-
8684 
-
8685  json.WriteString("UsedBytes");
-
8686  json.WriteNumber(stat.usedBytes);
-
8687 
-
8688  json.WriteString("UnusedBytes");
-
8689  json.WriteNumber(stat.unusedBytes);
-
8690 
-
8691  if(stat.allocationCount > 1)
-
8692  {
-
8693  json.WriteString("AllocationSize");
-
8694  json.BeginObject(true);
-
8695  json.WriteString("Min");
-
8696  json.WriteNumber(stat.allocationSizeMin);
-
8697  json.WriteString("Avg");
-
8698  json.WriteNumber(stat.allocationSizeAvg);
-
8699  json.WriteString("Max");
-
8700  json.WriteNumber(stat.allocationSizeMax);
-
8701  json.EndObject();
-
8702  }
+
8633  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
+
8634  }
+
8635 }
+
8636 
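BlockAllocMap()/BlockAllocUnmap() above treat m_MapCount as a small packed field: the low bits are a map reference count capped at 0x7F, and MAP_COUNT_FLAG_PERSISTENT_MAP is a separate flag bit recording that the allocation was created persistently mapped (VMA_ALLOCATION_CREATE_MAPPED_BIT), which is why it is masked out of the user-visible count. A sketch of the unpacking (the flag value is an assumption for illustration):

    // const uint32_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;               // assumed
    uint32_t refCount   = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;    // 0..0x7F
    bool     persistent = (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0;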
+
8637 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
+
8638 {
+
8639  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
+
8640 
+
8641  if(m_MapCount != 0)
+
8642  {
+
8643  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
+
8644  {
+
8645  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
+
8646  *ppData = m_DedicatedAllocation.m_pMappedData;
+
8647  ++m_MapCount;
+
8648  return VK_SUCCESS;
+
8649  }
+
8650  else
+
8651  {
+
8652  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
+
8653  return VK_ERROR_MEMORY_MAP_FAILED;
+
8654  }
+
8655  }
+
8656  else
+
8657  {
+
8658  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
+
8659  hAllocator->m_hDevice,
+
8660  m_DedicatedAllocation.m_hMemory,
+
8661  0, // offset
+
8662  VK_WHOLE_SIZE,
+
8663  0, // flags
+
8664  ppData);
+
8665  if(result == VK_SUCCESS)
+
8666  {
+
8667  m_DedicatedAllocation.m_pMappedData = *ppData;
+
8668  m_MapCount = 1;
+
8669  }
+
8670  return result;
+
8671  }
+
8672 }
+
8673 
+
8674 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
+
8675 {
+
8676  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
+
8677 
+
8678  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
+
8679  {
+
8680  --m_MapCount;
+
8681  if(m_MapCount == 0)
+
8682  {
+
8683  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
+
8684  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
+
8685  hAllocator->m_hDevice,
+
8686  m_DedicatedAllocation.m_hMemory);
+
8687  }
+
8688  }
+
8689  else
+
8690  {
+
8691  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
+
8692  }
+
8693 }
+
8694 
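For dedicated allocations, the first DedicatedAllocMap() issues one vkMapMemory() over the whole range (offset 0, VK_WHOLE_SIZE) and later calls just hand back the cached pointer with an incremented count; vkUnmapMemory() runs only when the count returns to zero. A hypothetical sequence (VkResult checks omitted):

    void* p1 = VMA_NULL;
    void* p2 = VMA_NULL;
    hAlloc->DedicatedAllocMap(hAllocator, &p1);  // vkMapMemory, m_MapCount = 1
    hAlloc->DedicatedAllocMap(hAllocator, &p2);  // p2 == p1,    m_MapCount = 2
    hAlloc->DedicatedAllocUnmap(hAllocator);     // m_MapCount = 1, still mapped
    hAlloc->DedicatedAllocUnmap(hAllocator);     // vkUnmapMemory, pointer invalid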
+
8695 #if VMA_STATS_STRING_ENABLED
+
8696 
+
8697 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
+
8698 {
+
8699  json.BeginObject();
+
8700 
+
8701  json.WriteString("Blocks");
+
8702  json.WriteNumber(stat.blockCount);
8703 
-
8704  if(stat.unusedRangeCount > 1)
-
8705  {
-
8706  json.WriteString("UnusedRangeSize");
-
8707  json.BeginObject(true);
-
8708  json.WriteString("Min");
-
8709  json.WriteNumber(stat.unusedRangeSizeMin);
-
8710  json.WriteString("Avg");
-
8711  json.WriteNumber(stat.unusedRangeSizeAvg);
-
8712  json.WriteString("Max");
-
8713  json.WriteNumber(stat.unusedRangeSizeMax);
-
8714  json.EndObject();
-
8715  }
-
8716 
-
8717  json.EndObject();
-
8718 }
-
8719 
-
8720 #endif // #if VMA_STATS_STRING_ENABLED
-
8721 
-
8722 struct VmaSuballocationItemSizeLess
-
8723 {
-
8724  bool operator()(
-
8725  const VmaSuballocationList::iterator lhs,
-
8726  const VmaSuballocationList::iterator rhs) const
-
8727  {
-
8728  return lhs->size < rhs->size;
-
8729  }
-
8730  bool operator()(
-
8731  const VmaSuballocationList::iterator lhs,
-
8732  VkDeviceSize rhsSize) const
-
8733  {
-
8734  return lhs->size < rhsSize;
-
8735  }
-
8736 };
-
8737 
-
8738 
-
8740 // class VmaBlockMetadata
+
8704  json.WriteString("Allocations");
+
8705  json.WriteNumber(stat.allocationCount);
+
8706 
+
8707  json.WriteString("UnusedRanges");
+
8708  json.WriteNumber(stat.unusedRangeCount);
+
8709 
+
8710  json.WriteString("UsedBytes");
+
8711  json.WriteNumber(stat.usedBytes);
+
8712 
+
8713  json.WriteString("UnusedBytes");
+
8714  json.WriteNumber(stat.unusedBytes);
+
8715 
+
8716  if(stat.allocationCount > 1)
+
8717  {
+
8718  json.WriteString("AllocationSize");
+
8719  json.BeginObject(true);
+
8720  json.WriteString("Min");
+
8721  json.WriteNumber(stat.allocationSizeMin);
+
8722  json.WriteString("Avg");
+
8723  json.WriteNumber(stat.allocationSizeAvg);
+
8724  json.WriteString("Max");
+
8725  json.WriteNumber(stat.allocationSizeMax);
+
8726  json.EndObject();
+
8727  }
+
8728 
+
8729  if(stat.unusedRangeCount > 1)
+
8730  {
+
8731  json.WriteString("UnusedRangeSize");
+
8732  json.BeginObject(true);
+
8733  json.WriteString("Min");
+
8734  json.WriteNumber(stat.unusedRangeSizeMin);
+
8735  json.WriteString("Avg");
+
8736  json.WriteNumber(stat.unusedRangeSizeAvg);
+
8737  json.WriteString("Max");
+
8738  json.WriteNumber(stat.unusedRangeSizeMax);
+
8739  json.EndObject();
+
8740  }
8741 
-
8742 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
-
8743  m_Size(0),
-
8744  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
-
8745 {
-
8746 }
-
8747 
-
8748 #if VMA_STATS_STRING_ENABLED
-
8749 
-
8750 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
-
8751  VkDeviceSize unusedBytes,
-
8752  size_t allocationCount,
-
8753  size_t unusedRangeCount) const
-
8754 {
-
8755  json.BeginObject();
-
8756 
-
8757  json.WriteString("TotalBytes");
-
8758  json.WriteNumber(GetSize());
-
8759 
-
8760  json.WriteString("UnusedBytes");
-
8761  json.WriteNumber(unusedBytes);
+
8742  json.EndObject();
+
8743 }
+
8744 
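VmaPrintStatInfo() above serializes one VmaStatInfo into an object of roughly this shape (values illustrative; the Min/Avg/Max sub-objects are emitted only when the corresponding count exceeds 1):

    {
      "Blocks": 2, "Allocations": 10, "UnusedRanges": 3,
      "UsedBytes": 1048576, "UnusedBytes": 65536,
      "AllocationSize":  { "Min": 256,  "Avg": 104858, "Max": 524288 },
      "UnusedRangeSize": { "Min": 4096, "Avg": 21845,  "Max": 32768 }
    }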
+
8745 #endif // #if VMA_STATS_STRING_ENABLED
+
8746 
+
8747 struct VmaSuballocationItemSizeLess
+
8748 {
+
8749  bool operator()(
+
8750  const VmaSuballocationList::iterator lhs,
+
8751  const VmaSuballocationList::iterator rhs) const
+
8752  {
+
8753  return lhs->size < rhs->size;
+
8754  }
+
8755  bool operator()(
+
8756  const VmaSuballocationList::iterator lhs,
+
8757  VkDeviceSize rhsSize) const
+
8758  {
+
8759  return lhs->size < rhsSize;
+
8760  }
+
8761 };
8762 
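VmaSuballocationItemSizeLess provides two call shapes on purpose: iterator-vs-iterator ordering for keeping m_FreeSuballocationsBySize sorted, and iterator-vs-VkDeviceSize for binary-searching that vector by a raw size key (see VmaBinaryFindFirstNotLess in CreateAllocationRequest further below). The std equivalent of that search would be roughly:

    // auto it = std::lower_bound(bySize.begin(), bySize.end(),
    //                            requiredSize, VmaSuballocationItemSizeLess());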
-
8763  json.WriteString("Allocations");
-
8764  json.WriteNumber((uint64_t)allocationCount);
-
8765 
-
8766  json.WriteString("UnusedRanges");
-
8767  json.WriteNumber((uint64_t)unusedRangeCount);
-
8768 
-
8769  json.WriteString("Suballocations");
-
8770  json.BeginArray();
+
8763 
+
8765 // class VmaBlockMetadata
+
8766 
+
8767 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
+
8768  m_Size(0),
+
8769  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
+
8770 {
8771 }
8772 
-
8773 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
-
8774  VkDeviceSize offset,
-
8775  VmaAllocation hAllocation) const
-
8776 {
-
8777  json.BeginObject(true);
-
8778 
-
8779  json.WriteString("Offset");
-
8780  json.WriteNumber(offset);
+
8773 #if VMA_STATS_STRING_ENABLED
+
8774 
+
8775 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
+
8776  VkDeviceSize unusedBytes,
+
8777  size_t allocationCount,
+
8778  size_t unusedRangeCount) const
+
8779 {
+
8780  json.BeginObject();
8781 
-
8782  hAllocation->PrintParameters(json);
-
8783 
-
8784  json.EndObject();
-
8785 }
-
8786 
-
8787 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
-
8788  VkDeviceSize offset,
-
8789  VkDeviceSize size) const
-
8790 {
-
8791  json.BeginObject(true);
-
8792 
-
8793  json.WriteString("Offset");
-
8794  json.WriteNumber(offset);
-
8795 
-
8796  json.WriteString("Type");
-
8797  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
-
8798 
-
8799  json.WriteString("Size");
-
8800  json.WriteNumber(size);
-
8801 
-
8802  json.EndObject();
-
8803 }
-
8804 
-
8805 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
-
8806 {
-
8807  json.EndArray();
-
8808  json.EndObject();
-
8809 }
-
8810 
-
8811 #endif // #if VMA_STATS_STRING_ENABLED
-
8812 
-
8814 // class VmaBlockMetadata_Generic
-
8815 
-
8816 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
-
8817  VmaBlockMetadata(hAllocator),
-
8818  m_FreeCount(0),
-
8819  m_SumFreeSize(0),
-
8820  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
-
8821  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
-
8822 {
-
8823 }
-
8824 
-
8825 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
-
8826 {
-
8827 }
-
8828 
-
8829 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
-
8830 {
-
8831  VmaBlockMetadata::Init(size);
-
8832 
-
8833  m_FreeCount = 1;
-
8834  m_SumFreeSize = size;
+
8782  json.WriteString("TotalBytes");
+
8783  json.WriteNumber(GetSize());
+
8784 
+
8785  json.WriteString("UnusedBytes");
+
8786  json.WriteNumber(unusedBytes);
+
8787 
+
8788  json.WriteString("Allocations");
+
8789  json.WriteNumber((uint64_t)allocationCount);
+
8790 
+
8791  json.WriteString("UnusedRanges");
+
8792  json.WriteNumber((uint64_t)unusedRangeCount);
+
8793 
+
8794  json.WriteString("Suballocations");
+
8795  json.BeginArray();
+
8796 }
+
8797 
+
8798 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
+
8799  VkDeviceSize offset,
+
8800  VmaAllocation hAllocation) const
+
8801 {
+
8802  json.BeginObject(true);
+
8803 
+
8804  json.WriteString("Offset");
+
8805  json.WriteNumber(offset);
+
8806 
+
8807  hAllocation->PrintParameters(json);
+
8808 
+
8809  json.EndObject();
+
8810 }
+
8811 
+
8812 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
+
8813  VkDeviceSize offset,
+
8814  VkDeviceSize size) const
+
8815 {
+
8816  json.BeginObject(true);
+
8817 
+
8818  json.WriteString("Offset");
+
8819  json.WriteNumber(offset);
+
8820 
+
8821  json.WriteString("Type");
+
8822  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
+
8823 
+
8824  json.WriteString("Size");
+
8825  json.WriteNumber(size);
+
8826 
+
8827  json.EndObject();
+
8828 }
+
8829 
+
8830 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
+
8831 {
+
8832  json.EndArray();
+
8833  json.EndObject();
+
8834 }
8835 
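PrintDetailedMap_Begin()/_End() above bracket a per-block report: Begin opens the block object, writes the byte and count totals, and opens a "Suballocations" array; each allocation or free range then contributes one single-line object via _Allocation/_UnusedRange; End closes the array and the object. Rough output shape (illustrative):

    { "TotalBytes": N, "UnusedBytes": M, "Allocations": A, "UnusedRanges": R,
      "Suballocations": [ { "Offset": 0, "Type": "FREE", "Size": S }, ... ] }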
-
8836  VmaSuballocation suballoc = {};
-
8837  suballoc.offset = 0;
-
8838  suballoc.size = size;
-
8839  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
-
8840  suballoc.hAllocation = VK_NULL_HANDLE;
-
8841 
-
8842  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
-
8843  m_Suballocations.push_back(suballoc);
-
8844  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
-
8845  --suballocItem;
-
8846  m_FreeSuballocationsBySize.push_back(suballocItem);
-
8847 }
-
8848 
-
8849 bool VmaBlockMetadata_Generic::Validate() const
-
8850 {
-
8851  VMA_VALIDATE(!m_Suballocations.empty());
-
8852 
-
8853  // Expected offset of new suballocation as calculated from previous ones.
-
8854  VkDeviceSize calculatedOffset = 0;
-
8855  // Expected number of free suballocations as calculated from traversing their list.
-
8856  uint32_t calculatedFreeCount = 0;
-
8857  // Expected sum size of free suballocations as calculated from traversing their list.
-
8858  VkDeviceSize calculatedSumFreeSize = 0;
-
8859  // Expected number of free suballocations that should be registered in
-
8860  // m_FreeSuballocationsBySize calculated from traversing their list.
-
8861  size_t freeSuballocationsToRegister = 0;
-
8862  // True if previous visited suballocation was free.
-
8863  bool prevFree = false;
-
8864 
-
8865  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
-
8866  suballocItem != m_Suballocations.cend();
-
8867  ++suballocItem)
-
8868  {
-
8869  const VmaSuballocation& subAlloc = *suballocItem;
-
8870 
-
8871  // Actual offset of this suballocation doesn't match expected one.
-
8872  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
+
8836 #endif // #if VMA_STATS_STRING_ENABLED
+
8837 
+
8839 // class VmaBlockMetadata_Generic
+
8840 
+
8841 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
+
8842  VmaBlockMetadata(hAllocator),
+
8843  m_FreeCount(0),
+
8844  m_SumFreeSize(0),
+
8845  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
+
8846  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
+
8847 {
+
8848 }
+
8849 
+
8850 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
+
8851 {
+
8852 }
+
8853 
+
8854 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
+
8855 {
+
8856  VmaBlockMetadata::Init(size);
+
8857 
+
8858  m_FreeCount = 1;
+
8859  m_SumFreeSize = size;
+
8860 
+
8861  VmaSuballocation suballoc = {};
+
8862  suballoc.offset = 0;
+
8863  suballoc.size = size;
+
8864  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
+
8865  suballoc.hAllocation = VK_NULL_HANDLE;
+
8866 
+
8867  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
+
8868  m_Suballocations.push_back(suballoc);
+
8869  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
+
8870  --suballocItem;
+
8871  m_FreeSuballocationsBySize.push_back(suballocItem);
+
8872 }
8873 
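Init() above seeds the metadata with a single block-spanning free range, so the invariants checked by Validate() hold from the start. A sketch of the resulting state:

    m_Suballocations           : [ { offset = 0, size = <block size>, type = FREE } ]
    m_FreeSuballocationsBySize : [ iterator to that one free suballocation ]
    m_FreeCount = 1;  m_SumFreeSize = <block size>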
-
8874  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
-
8875  // Two adjacent free suballocations are invalid. They should be merged.
-
8876  VMA_VALIDATE(!prevFree || !currFree);
-
8877 
-
8878  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
-
8879 
-
8880  if(currFree)
-
8881  {
-
8882  calculatedSumFreeSize += subAlloc.size;
-
8883  ++calculatedFreeCount;
-
8884  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
-
8885  {
-
8886  ++freeSuballocationsToRegister;
-
8887  }
-
8888 
-
8889  // Margin required between allocations - every free space must be at least that large.
-
8890  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
-
8891  }
-
8892  else
-
8893  {
-
8894  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
-
8895  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
-
8896 
-
8897  // Margin required between allocations - previous allocation must be free.
-
8898  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
-
8899  }
-
8900 
-
8901  calculatedOffset += subAlloc.size;
-
8902  prevFree = currFree;
-
8903  }
+
8874 bool VmaBlockMetadata_Generic::Validate() const
+
8875 {
+
8876  VMA_VALIDATE(!m_Suballocations.empty());
+
8877 
+
8878  // Expected offset of new suballocation as calculated from previous ones.
+
8879  VkDeviceSize calculatedOffset = 0;
+
8880  // Expected number of free suballocations as calculated from traversing their list.
+
8881  uint32_t calculatedFreeCount = 0;
+
8882  // Expected sum size of free suballocations as calculated from traversing their list.
+
8883  VkDeviceSize calculatedSumFreeSize = 0;
+
8884  // Expected number of free suballocations that should be registered in
+
8885  // m_FreeSuballocationsBySize calculated from traversing their list.
+
8886  size_t freeSuballocationsToRegister = 0;
+
8887  // True if previous visited suballocation was free.
+
8888  bool prevFree = false;
+
8889 
+
8890  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
+
8891  suballocItem != m_Suballocations.cend();
+
8892  ++suballocItem)
+
8893  {
+
8894  const VmaSuballocation& subAlloc = *suballocItem;
+
8895 
+
8896  // Actual offset of this suballocation doesn't match expected one.
+
8897  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
+
8898 
+
8899  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
+
8900  // Two adjacent free suballocations are invalid. They should be merged.
+
8901  VMA_VALIDATE(!prevFree || !currFree);
+
8902 
+
8903  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
8904 
-
8905  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
-
8906  // match expected one.
-
8907  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
-
8908 
-
8909  VkDeviceSize lastSize = 0;
-
8910  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
-
8911  {
-
8912  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
-
8913 
-
8914  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
-
8915  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
-
8916  // They must be sorted by size ascending.
-
8917  VMA_VALIDATE(suballocItem->size >= lastSize);
-
8918 
-
8919  lastSize = suballocItem->size;
-
8920  }
+
8905  if(currFree)
+
8906  {
+
8907  calculatedSumFreeSize += subAlloc.size;
+
8908  ++calculatedFreeCount;
+
8909  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
+
8910  {
+
8911  ++freeSuballocationsToRegister;
+
8912  }
+
8913 
+
8914  // Margin required between allocations - every free space must be at least that large.
+
8915  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
+
8916  }
+
8917  else
+
8918  {
+
8919  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
+
8920  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
8921 
-
8922  // Check if totals match calculated values.
-
8923  VMA_VALIDATE(ValidateFreeSuballocationList());
-
8924  VMA_VALIDATE(calculatedOffset == GetSize());
-
8925  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
-
8926  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
-
8927 
-
8928  return true;
-
8929 }
-
8930 
-
8931 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
-
8932 {
-
8933  if(!m_FreeSuballocationsBySize.empty())
-
8934  {
-
8935  return m_FreeSuballocationsBySize.back()->size;
-
8936  }
-
8937  else
-
8938  {
-
8939  return 0;
-
8940  }
-
8941 }
-
8942 
-
8943 bool VmaBlockMetadata_Generic::IsEmpty() const
-
8944 {
-
8945  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
-
8946 }
-
8947 
-
8948 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
-
8949 {
-
8950  outInfo.blockCount = 1;
-
8951 
-
8952  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
-
8953  outInfo.allocationCount = rangeCount - m_FreeCount;
-
8954  outInfo.unusedRangeCount = m_FreeCount;
-
8955 
-
8956  outInfo.unusedBytes = m_SumFreeSize;
-
8957  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
-
8958 
-
8959  outInfo.allocationSizeMin = UINT64_MAX;
-
8960  outInfo.allocationSizeMax = 0;
-
8961  outInfo.unusedRangeSizeMin = UINT64_MAX;
-
8962  outInfo.unusedRangeSizeMax = 0;
-
8963 
-
8964  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
-
8965  suballocItem != m_Suballocations.cend();
-
8966  ++suballocItem)
-
8967  {
-
8968  const VmaSuballocation& suballoc = *suballocItem;
-
8969  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
-
8970  {
-
8971  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
-
8972  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
-
8973  }
-
8974  else
-
8975  {
-
8976  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
-
8977  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
-
8978  }
-
8979  }
-
8980 }
-
8981 
-
8982 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
-
8983 {
-
8984  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
-
8985 
-
8986  inoutStats.size += GetSize();
-
8987  inoutStats.unusedSize += m_SumFreeSize;
-
8988  inoutStats.allocationCount += rangeCount - m_FreeCount;
-
8989  inoutStats.unusedRangeCount += m_FreeCount;
-
8990  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
-
8991 }
-
8992 
-
8993 #if VMA_STATS_STRING_ENABLED
-
8994 
-
8995 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
-
8996 {
-
8997  PrintDetailedMap_Begin(json,
-
8998  m_SumFreeSize, // unusedBytes
-
8999  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
-
9000  m_FreeCount); // unusedRangeCount
-
9001 
-
9002  size_t i = 0;
-
9003  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
-
9004  suballocItem != m_Suballocations.cend();
-
9005  ++suballocItem, ++i)
-
9006  {
-
9007  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
-
9008  {
-
9009  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
-
9010  }
-
9011  else
-
9012  {
-
9013  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
-
9014  }
-
9015  }
-
9016 
-
9017  PrintDetailedMap_End(json);
-
9018 }
+
8922  // Margin required between allocations - previous allocation must be free.
+
8923  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
+
8924  }
+
8925 
+
8926  calculatedOffset += subAlloc.size;
+
8927  prevFree = currFree;
+
8928  }
+
8929 
+
8930  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
+
8931  // match expected one.
+
8932  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
+
8933 
+
8934  VkDeviceSize lastSize = 0;
+
8935  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
+
8936  {
+
8937  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
+
8938 
+
8939  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
+
8940  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
+
8941  // They must be sorted by size ascending.
+
8942  VMA_VALIDATE(suballocItem->size >= lastSize);
+
8943 
+
8944  lastSize = suballocItem->size;
+
8945  }
+
8946 
+
8947  // Check if totals match calculated values.
+
8948  VMA_VALIDATE(ValidateFreeSuballocationList());
+
8949  VMA_VALIDATE(calculatedOffset == GetSize());
+
8950  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
+
8951  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
+
8952 
+
8953  return true;
+
8954 }
+
8955 
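In short, Validate() re-derives everything from a single walk of m_Suballocations and cross-checks four invariants: offsets are contiguous and sum to GetSize(); no two free ranges are adjacent (they would have been merged); every registrable free range (size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER) appears in m_FreeSuballocationsBySize, which must stay sorted by size ascending; and the cached m_FreeCount/m_SumFreeSize match the recomputed totals.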
+
8956 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
+
8957 {
+
8958  if(!m_FreeSuballocationsBySize.empty())
+
8959  {
+
8960  return m_FreeSuballocationsBySize.back()->size;
+
8961  }
+
8962  else
+
8963  {
+
8964  return 0;
+
8965  }
+
8966 }
+
8967 
+
8968 bool VmaBlockMetadata_Generic::IsEmpty() const
+
8969 {
+
8970  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
+
8971 }
+
8972 
+
8973 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
+
8974 {
+
8975  outInfo.blockCount = 1;
+
8976 
+
8977  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
+
8978  outInfo.allocationCount = rangeCount - m_FreeCount;
+
8979  outInfo.unusedRangeCount = m_FreeCount;
+
8980 
+
8981  outInfo.unusedBytes = m_SumFreeSize;
+
8982  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
+
8983 
+
8984  outInfo.allocationSizeMin = UINT64_MAX;
+
8985  outInfo.allocationSizeMax = 0;
+
8986  outInfo.unusedRangeSizeMin = UINT64_MAX;
+
8987  outInfo.unusedRangeSizeMax = 0;
+
8988 
+
8989  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
+
8990  suballocItem != m_Suballocations.cend();
+
8991  ++suballocItem)
+
8992  {
+
8993  const VmaSuballocation& suballoc = *suballocItem;
+
8994  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
+
8995  {
+
8996  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
+
8997  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
+
8998  }
+
8999  else
+
9000  {
+
9001  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
+
9002  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
+
9003  }
+
9004  }
+
9005 }
+
9006 
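Note the sentinel initialization above: allocationSizeMin/unusedRangeSizeMin start at UINT64_MAX and the maxima at 0, so a category with no entries is left with degenerate Min > Max values; consumers such as VmaPrintStatInfo() only print the Min/Avg/Max block when the corresponding count is greater than 1, which sidesteps that case.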
+
9007 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
+
9008 {
+
9009  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
+
9010 
+
9011  inoutStats.size += GetSize();
+
9012  inoutStats.unusedSize += m_SumFreeSize;
+
9013  inoutStats.allocationCount += rangeCount - m_FreeCount;
+
9014  inoutStats.unusedRangeCount += m_FreeCount;
+
9015  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
+
9016 }
+
9017 
+
9018 #if VMA_STATS_STRING_ENABLED
9019 
-
9020 #endif // #if VMA_STATS_STRING_ENABLED
-
9021 
-
9022 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
-
9023  uint32_t currentFrameIndex,
-
9024  uint32_t frameInUseCount,
-
9025  VkDeviceSize bufferImageGranularity,
-
9026  VkDeviceSize allocSize,
-
9027  VkDeviceSize allocAlignment,
-
9028  bool upperAddress,
-
9029  VmaSuballocationType allocType,
-
9030  bool canMakeOtherLost,
-
9031  uint32_t strategy,
-
9032  VmaAllocationRequest* pAllocationRequest)
-
9033 {
-
9034  VMA_ASSERT(allocSize > 0);
-
9035  VMA_ASSERT(!upperAddress);
-
9036  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
-
9037  VMA_ASSERT(pAllocationRequest != VMA_NULL);
-
9038  VMA_HEAVY_ASSERT(Validate());
-
9039 
-
9040  pAllocationRequest->type = VmaAllocationRequestType::Normal;
+
9020 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
+
9021 {
+
9022  PrintDetailedMap_Begin(json,
+
9023  m_SumFreeSize, // unusedBytes
+
9024  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
+
9025  m_FreeCount); // unusedRangeCount
+
9026 
+
9027  size_t i = 0;
+
9028  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
+
9029  suballocItem != m_Suballocations.cend();
+
9030  ++suballocItem, ++i)
+
9031  {
+
9032  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
+
9033  {
+
9034  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
+
9035  }
+
9036  else
+
9037  {
+
9038  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
+
9039  }
+
9040  }
9041 
-
9042  // There is not enough total free space in this block to fulfill the request: Early return.
-
9043  if(canMakeOtherLost == false &&
-
9044  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
-
9045  {
-
9046  return false;
-
9047  }
-
9048 
-
9049  // New algorithm, efficiently searching freeSuballocationsBySize.
-
9050  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
-
9051  if(freeSuballocCount > 0)
-
9052  {
- -
9054  {
-
9055  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
-
9056  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
-
9057  m_FreeSuballocationsBySize.data(),
-
9058  m_FreeSuballocationsBySize.data() + freeSuballocCount,
-
9059  allocSize + 2 * VMA_DEBUG_MARGIN,
-
9060  VmaSuballocationItemSizeLess());
-
9061  size_t index = it - m_FreeSuballocationsBySize.data();
-
9062  for(; index < freeSuballocCount; ++index)
-
9063  {
-
9064  if(CheckAllocation(
-
9065  currentFrameIndex,
-
9066  frameInUseCount,
-
9067  bufferImageGranularity,
-
9068  allocSize,
-
9069  allocAlignment,
-
9070  allocType,
-
9071  m_FreeSuballocationsBySize[index],
-
9072  false, // canMakeOtherLost
-
9073  &pAllocationRequest->offset,
-
9074  &pAllocationRequest->itemsToMakeLostCount,
-
9075  &pAllocationRequest->sumFreeSize,
-
9076  &pAllocationRequest->sumItemSize))
-
9077  {
-
9078  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
-
9079  return true;
-
9080  }
-
9081  }
-
9082  }
-
9083  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
-
9084  {
-
9085  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
-
9086  it != m_Suballocations.end();
-
9087  ++it)
+
9042  PrintDetailedMap_End(json);
+
9043 }
+
9044 
+
9045 #endif // #if VMA_STATS_STRING_ENABLED
+
9046 
+
9047 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
+
9048  uint32_t currentFrameIndex,
+
9049  uint32_t frameInUseCount,
+
9050  VkDeviceSize bufferImageGranularity,
+
9051  VkDeviceSize allocSize,
+
9052  VkDeviceSize allocAlignment,
+
9053  bool upperAddress,
+
9054  VmaSuballocationType allocType,
+
9055  bool canMakeOtherLost,
+
9056  uint32_t strategy,
+
9057  VmaAllocationRequest* pAllocationRequest)
+
9058 {
+
9059  VMA_ASSERT(allocSize > 0);
+
9060  VMA_ASSERT(!upperAddress);
+
9061  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
+
9062  VMA_ASSERT(pAllocationRequest != VMA_NULL);
+
9063  VMA_HEAVY_ASSERT(Validate());
+
9064 
+
9065  pAllocationRequest->type = VmaAllocationRequestType::Normal;
+
9066 
+
9067  // There is not enough total free space in this block to fullfill the request: Early return.
+
9068  if(canMakeOtherLost == false &&
+
9069  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
+
9070  {
+
9071  return false;
+
9072  }
+
9073 
+
9074  // New algorithm, efficiently searching freeSuballocationsBySize.
+
9075  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
+
9076  if(freeSuballocCount > 0)
+
9077  {
+ +
9079  {
+
9080  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
+
9081  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
+
9082  m_FreeSuballocationsBySize.data(),
+
9083  m_FreeSuballocationsBySize.data() + freeSuballocCount,
+
9084  allocSize + 2 * VMA_DEBUG_MARGIN,
+
9085  VmaSuballocationItemSizeLess());
+
9086  size_t index = it - m_FreeSuballocationsBySize.data();
+
9087  for(; index < freeSuballocCount; ++index)
9088  {
-
9089  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
+
9089  if(CheckAllocation(
9090  currentFrameIndex,
9091  frameInUseCount,
9092  bufferImageGranularity,
9093  allocSize,
9094  allocAlignment,
9095  allocType,
-
9096  it,
+
9096  m_FreeSuballocationsBySize[index],
9097  false, // canMakeOtherLost
9098  &pAllocationRequest->offset,
9099  &pAllocationRequest->itemsToMakeLostCount,
9100  &pAllocationRequest->sumFreeSize,
9101  &pAllocationRequest->sumItemSize))
9102  {
-
9103  pAllocationRequest->item = it;
+
9103  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
9104  return true;
9105  }
9106  }
9107  }
-
9108  else // WORST_FIT, FIRST_FIT
+
9108  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
9109  {
-
9110  // Search starting from biggest suballocations.
-
9111  for(size_t index = freeSuballocCount; index--; )
-
9112  {
-
9113  if(CheckAllocation(
-
9114  currentFrameIndex,
-
9115  frameInUseCount,
-
9116  bufferImageGranularity,
-
9117  allocSize,
-
9118  allocAlignment,
-
9119  allocType,
-
9120  m_FreeSuballocationsBySize[index],
-
9121  false, // canMakeOtherLost
-
9122  &pAllocationRequest->offset,
-
9123  &pAllocationRequest->itemsToMakeLostCount,
-
9124  &pAllocationRequest->sumFreeSize,
-
9125  &pAllocationRequest->sumItemSize))
-
9126  {
-
9127  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
-
9128  return true;
-
9129  }
-
9130  }
-
9131  }
-
9132  }
-
9133 
-
9134  if(canMakeOtherLost)
-
9135  {
-
9136  // Brute-force algorithm. TODO: Come up with something better.
-
9137 
-
9138  bool found = false;
-
9139  VmaAllocationRequest tmpAllocRequest = {};
-
9140  tmpAllocRequest.type = VmaAllocationRequestType::Normal;
-
9141  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
-
9142  suballocIt != m_Suballocations.end();
-
9143  ++suballocIt)
-
9144  {
-
9145  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
-
9146  suballocIt->hAllocation->CanBecomeLost())
-
9147  {
-
9148  if(CheckAllocation(
-
9149  currentFrameIndex,
-
9150  frameInUseCount,
-
9151  bufferImageGranularity,
-
9152  allocSize,
-
9153  allocAlignment,
-
9154  allocType,
-
9155  suballocIt,
-
9156  canMakeOtherLost,
-
9157  &tmpAllocRequest.offset,
-
9158  &tmpAllocRequest.itemsToMakeLostCount,
-
9159  &tmpAllocRequest.sumFreeSize,
-
9160  &tmpAllocRequest.sumItemSize))
-
9161  {
- -
9163  {
-
9164  *pAllocationRequest = tmpAllocRequest;
-
9165  pAllocationRequest->item = suballocIt;
-
9166  break;
-
9167  }
-
9168  if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
-
9169  {
-
9170  *pAllocationRequest = tmpAllocRequest;
-
9171  pAllocationRequest->item = suballocIt;
-
9172  found = true;
-
9173  }
-
9174  }
-
9175  }
-
9176  }
-
9177 
-
9178  return found;
-
9179  }
-
9180 
-
9181  return false;
-
9182 }
-
9183 
-
9184 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
-
9185  uint32_t currentFrameIndex,
-
9186  uint32_t frameInUseCount,
-
9187  VmaAllocationRequest* pAllocationRequest)
-
9188 {
-
9189  VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);
-
9190 
-
9191  while(pAllocationRequest->itemsToMakeLostCount > 0)
-
9192  {
-
9193  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
-
9194  {
-
9195  ++pAllocationRequest->item;
-
9196  }
-
9197  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
-
9198  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
-
9199  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
-
9200  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
-
9201  {
-
9202  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
-
9203  --pAllocationRequest->itemsToMakeLostCount;
-
9204  }
-
9205  else
-
9206  {
-
9207  return false;
-
9208  }
-
9209  }
-
9210 
-
9211  VMA_HEAVY_ASSERT(Validate());
-
9212  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
-
9213  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
-
9214 
-
9215  return true;
-
9216 }
-
9217 
-
9218 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
-
9219 {
-
9220  uint32_t lostAllocationCount = 0;
-
9221  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
-
9222  it != m_Suballocations.end();
-
9223  ++it)
-
9224  {
-
9225  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
-
9226  it->hAllocation->CanBecomeLost() &&
-
9227  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
-
9228  {
-
9229  it = FreeSuballocation(it);
-
9230  ++lostAllocationCount;
-
9231  }
-
9232  }
-
9233  return lostAllocationCount;
-
9234 }
+
9110  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
+
9111  it != m_Suballocations.end();
+
9112  ++it)
+
9113  {
+
9114  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
+
9115  currentFrameIndex,
+
9116  frameInUseCount,
+
9117  bufferImageGranularity,
+
9118  allocSize,
+
9119  allocAlignment,
+
9120  allocType,
+
9121  it,
+
9122  false, // canMakeOtherLost
+
9123  &pAllocationRequest->offset,
+
9124  &pAllocationRequest->itemsToMakeLostCount,
+
9125  &pAllocationRequest->sumFreeSize,
+
9126  &pAllocationRequest->sumItemSize))
+
9127  {
+
9128  pAllocationRequest->item = it;
+
9129  return true;
+
9130  }
+
9131  }
+
9132  }
+
9133  else // WORST_FIT, FIRST_FIT
+
9134  {
+
9135  // Search starting from biggest suballocations.
+
9136  for(size_t index = freeSuballocCount; index--; )
+
9137  {
+
9138  if(CheckAllocation(
+
9139  currentFrameIndex,
+
9140  frameInUseCount,
+
9141  bufferImageGranularity,
+
9142  allocSize,
+
9143  allocAlignment,
+
9144  allocType,
+
9145  m_FreeSuballocationsBySize[index],
+
9146  false, // canMakeOtherLost
+
9147  &pAllocationRequest->offset,
+
9148  &pAllocationRequest->itemsToMakeLostCount,
+
9149  &pAllocationRequest->sumFreeSize,
+
9150  &pAllocationRequest->sumItemSize))
+
9151  {
+
9152  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
+
9153  return true;
+
9154  }
+
9155  }
+
9156  }
+
9157  }
+
9158 
+
9159  if(canMakeOtherLost)
+
9160  {
+
9161  // Brute-force algorithm. TODO: Come up with something better.
+
9162 
+
9163  bool found = false;
+
9164  VmaAllocationRequest tmpAllocRequest = {};
+
9165  tmpAllocRequest.type = VmaAllocationRequestType::Normal;
+
9166  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
+
9167  suballocIt != m_Suballocations.end();
+
9168  ++suballocIt)
+
9169  {
+
9170  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
+
9171  suballocIt->hAllocation->CanBecomeLost())
+
9172  {
+
9173  if(CheckAllocation(
+
9174  currentFrameIndex,
+
9175  frameInUseCount,
+
9176  bufferImageGranularity,
+
9177  allocSize,
+
9178  allocAlignment,
+
9179  allocType,
+
9180  suballocIt,
+
9181  canMakeOtherLost,
+
9182  &tmpAllocRequest.offset,
+
9183  &tmpAllocRequest.itemsToMakeLostCount,
+
9184  &tmpAllocRequest.sumFreeSize,
+
9185  &tmpAllocRequest.sumItemSize))
+
9186  {
+
9187  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
+
9188  {
+
9189  *pAllocationRequest = tmpAllocRequest;
+
9190  pAllocationRequest->item = suballocIt;
+
9191  break;
+
9192  }
+
9193  if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
+
9194  {
+
9195  *pAllocationRequest = tmpAllocRequest;
+
9196  pAllocationRequest->item = suballocIt;
+
9197  found = true;
+
9198  }
+
9199  }
+
9200  }
+
9201  }
+
9202 
+
9203  return found;
+
9204  }
+
9205 
+
9206  return false;
+
9207 }
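// A note on the selection logic above: candidates are ranked by
// VmaAllocationRequest::CalcCost(), which combines the bytes of existing
// allocations that would have to be made lost with a per-allocation penalty.
// A minimal sketch of that ranking in isolation (names and the penalty
// constant are illustrative assumptions, not the header's exact code):
//
//     struct Candidate { VkDeviceSize bytesLost; size_t allocsLost; };
//
//     static VkDeviceSize CandidateCost(const Candidate& c)
//     {
//         const VkDeviceSize perAllocPenalty = 1048576; // assumed weight
//         return c.bytesLost + c.allocsLost * perAllocPenalty; // lower is better
//     }
//
// A position that fits purely in free space therefore costs 0, and under the
// FIRST_FIT strategy it is taken immediately via the break above.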
+
9208 
+
9209 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
+
9210  uint32_t currentFrameIndex,
+
9211  uint32_t frameInUseCount,
+
9212  VmaAllocationRequest* pAllocationRequest)
+
9213 {
+
9214  VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);
+
9215 
+
9216  while(pAllocationRequest->itemsToMakeLostCount > 0)
+
9217  {
+
9218  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
+
9219  {
+
9220  ++pAllocationRequest->item;
+
9221  }
+
9222  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
+
9223  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
+
9224  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
+
9225  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
+
9226  {
+
9227  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
+
9228  --pAllocationRequest->itemsToMakeLostCount;
+
9229  }
+
9230  else
+
9231  {
+
9232  return false;
+
9233  }
+
9234  }
9235 
-
9236 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
-
9237 {
-
9238  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
-
9239  it != m_Suballocations.end();
-
9240  ++it)
-
9241  {
-
9242  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
-
9243  {
-
9244  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
-
9245  {
-
9246  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
-
9247  return VK_ERROR_VALIDATION_FAILED_EXT;
-
9248  }
-
9249  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
-
9250  {
-
9251  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
-
9252  return VK_ERROR_VALIDATION_FAILED_EXT;
-
9253  }
-
9254  }
-
9255  }
-
9256 
-
9257  return VK_SUCCESS;
-
9258 }
-
9259 
-
9260 void VmaBlockMetadata_Generic::Alloc(
-
9261  const VmaAllocationRequest& request,
-
9262  VmaSuballocationType type,
-
9263  VkDeviceSize allocSize,
-
9264  VmaAllocation hAllocation)
-
9265 {
-
9266  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
-
9267  VMA_ASSERT(request.item != m_Suballocations.end());
-
9268  VmaSuballocation& suballoc = *request.item;
-
9269  // Given suballocation is a free block.
-
9270  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
-
9271  // Given offset is inside this suballocation.
-
9272  VMA_ASSERT(request.offset >= suballoc.offset);
-
9273  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
-
9274  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
-
9275  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
-
9276 
-
9277  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
-
9278  // it to become used.
-
9279  UnregisterFreeSuballocation(request.item);
-
9280 
-
9281  suballoc.offset = request.offset;
-
9282  suballoc.size = allocSize;
-
9283  suballoc.type = type;
-
9284  suballoc.hAllocation = hAllocation;
-
9285 
-
9286  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
-
9287  if(paddingEnd)
-
9288  {
-
9289  VmaSuballocation paddingSuballoc = {};
-
9290  paddingSuballoc.offset = request.offset + allocSize;
-
9291  paddingSuballoc.size = paddingEnd;
-
9292  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
-
9293  VmaSuballocationList::iterator next = request.item;
-
9294  ++next;
-
9295  const VmaSuballocationList::iterator paddingEndItem =
-
9296  m_Suballocations.insert(next, paddingSuballoc);
-
9297  RegisterFreeSuballocation(paddingEndItem);
-
9298  }
-
9299 
-
9300  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
-
9301  if(paddingBegin)
-
9302  {
-
9303  VmaSuballocation paddingSuballoc = {};
-
9304  paddingSuballoc.offset = request.offset - paddingBegin;
-
9305  paddingSuballoc.size = paddingBegin;
-
9306  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
-
9307  const VmaSuballocationList::iterator paddingBeginItem =
-
9308  m_Suballocations.insert(request.item, paddingSuballoc);
-
9309  RegisterFreeSuballocation(paddingBeginItem);
-
9310  }
-
9311 
-
9312  // Update totals.
-
9313  m_FreeCount = m_FreeCount - 1;
-
9314  if(paddingBegin > 0)
-
9315  {
-
9316  ++m_FreeCount;
-
9317  }
-
9318  if(paddingEnd > 0)
-
9319  {
-
9320  ++m_FreeCount;
-
9321  }
-
9322  m_SumFreeSize -= allocSize;
-
9323 }
+
9236  VMA_HEAVY_ASSERT(Validate());
+
9237  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
+
9238  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
+
9239 
+
9240  return true;
+
9241 }
+
9242 
+
9243 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
+
9244 {
+
9245  uint32_t lostAllocationCount = 0;
+
9246  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
+
9247  it != m_Suballocations.end();
+
9248  ++it)
+
9249  {
+
9250  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
+
9251  it->hAllocation->CanBecomeLost() &&
+
9252  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
+
9253  {
+
9254  it = FreeSuballocation(it);
+
9255  ++lostAllocationCount;
+
9256  }
+
9257  }
+
9258  return lostAllocationCount;
+
9259 }
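// How the lost-allocation path above is reached from the public API - a
// minimal sketch, assuming `allocator` was created with a nonzero
// VmaAllocatorCreateInfo::frameInUseCount and `frameIndex` advances per frame:
//
//     VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
//     bufCreateInfo.size = 65536;
//     bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
//
//     VmaAllocationCreateInfo allocCreateInfo = {};
//     allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//     allocCreateInfo.flags = VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT;
//
//     VkBuffer buf;
//     VmaAllocation alloc;
//     vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, VMA_NULL);
//
//     // Every frame:
//     vmaSetCurrentFrameIndex(allocator, frameIndex);
//     if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
//     {
//         // MakeLost() ran on this allocation - destroy and recreate the buffer.
//     }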
+
9260 
+
9261 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
+
9262 {
+
9263  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
+
9264  it != m_Suballocations.end();
+
9265  ++it)
+
9266  {
+
9267  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
+
9268  {
+
9269  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
+
9270  {
+
9271  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
+
9272  return VK_ERROR_VALIDATION_FAILED_EXT;
+
9273  }
+
9274  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
+
9275  {
+
9276  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
+
9277  return VK_ERROR_VALIDATION_FAILED_EXT;
+
9278  }
+
9279  }
+
9280  }
+
9281 
+
9282  return VK_SUCCESS;
+
9283 }
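// The margins validated by CheckCorruption() only exist when the library is
// compiled with a nonzero debug margin. A minimal sketch of the required
// configuration and of the public call that routes here:
//
//     // Before including the implementation:
//     #define VMA_DEBUG_MARGIN 16
//     #define VMA_DEBUG_DETECT_CORRUPTION 1
//     #define VMA_IMPLEMENTATION
//     #include "vk_mem_alloc.h"
//
//     // Then, e.g. once per frame in a debug build, over all memory types:
//     VkResult res = vmaCheckCorruption(allocator, UINT32_MAX);
//     // VK_ERROR_VALIDATION_FAILED_EXT means a margin was overwritten.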
+
9284 
+
9285 void VmaBlockMetadata_Generic::Alloc(
+
9286  const VmaAllocationRequest& request,
+
9287  VmaSuballocationType type,
+
9288  VkDeviceSize allocSize,
+
9289  VmaAllocation hAllocation)
+
9290 {
+
9291  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
+
9292  VMA_ASSERT(request.item != m_Suballocations.end());
+
9293  VmaSuballocation& suballoc = *request.item;
+
9294  // Given suballocation is a free block.
+
9295  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
+
9296  // Given offset is inside this suballocation.
+
9297  VMA_ASSERT(request.offset >= suballoc.offset);
+
9298  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
+
9299  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
+
9300  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
+
9301 
+
9302  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
+
9303  // it to become used.
+
9304  UnregisterFreeSuballocation(request.item);
+
9305 
+
9306  suballoc.offset = request.offset;
+
9307  suballoc.size = allocSize;
+
9308  suballoc.type = type;
+
9309  suballoc.hAllocation = hAllocation;
+
9310 
+
9311  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
+
9312  if(paddingEnd)
+
9313  {
+
9314  VmaSuballocation paddingSuballoc = {};
+
9315  paddingSuballoc.offset = request.offset + allocSize;
+
9316  paddingSuballoc.size = paddingEnd;
+
9317  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
+
9318  VmaSuballocationList::iterator next = request.item;
+
9319  ++next;
+
9320  const VmaSuballocationList::iterator paddingEndItem =
+
9321  m_Suballocations.insert(next, paddingSuballoc);
+
9322  RegisterFreeSuballocation(paddingEndItem);
+
9323  }
9324 
-
9325 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
-
9326 {
-
9327  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
-
9328  suballocItem != m_Suballocations.end();
-
9329  ++suballocItem)
-
9330  {
-
9331  VmaSuballocation& suballoc = *suballocItem;
-
9332  if(suballoc.hAllocation == allocation)
-
9333  {
-
9334  FreeSuballocation(suballocItem);
-
9335  VMA_HEAVY_ASSERT(Validate());
-
9336  return;
-
9337  }
-
9338  }
-
9339  VMA_ASSERT(0 && "Not found!");
-
9340 }
-
9341 
-
9342 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
-
9343 {
-
9344  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
-
9345  suballocItem != m_Suballocations.end();
-
9346  ++suballocItem)
-
9347  {
-
9348  VmaSuballocation& suballoc = *suballocItem;
-
9349  if(suballoc.offset == offset)
-
9350  {
-
9351  FreeSuballocation(suballocItem);
-
9352  return;
-
9353  }
-
9354  }
-
9355  VMA_ASSERT(0 && "Not found!");
-
9356 }
-
9357 
-
9358 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
-
9359 {
-
9360  VkDeviceSize lastSize = 0;
-
9361  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
-
9362  {
-
9363  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
-
9364 
-
9365  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
-
9366  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
-
9367  VMA_VALIDATE(it->size >= lastSize);
-
9368  lastSize = it->size;
-
9369  }
-
9370  return true;
-
9371 }
-
9372 
-
9373 bool VmaBlockMetadata_Generic::CheckAllocation(
-
9374  uint32_t currentFrameIndex,
-
9375  uint32_t frameInUseCount,
-
9376  VkDeviceSize bufferImageGranularity,
-
9377  VkDeviceSize allocSize,
-
9378  VkDeviceSize allocAlignment,
-
9379  VmaSuballocationType allocType,
-
9380  VmaSuballocationList::const_iterator suballocItem,
-
9381  bool canMakeOtherLost,
-
9382  VkDeviceSize* pOffset,
-
9383  size_t* itemsToMakeLostCount,
-
9384  VkDeviceSize* pSumFreeSize,
-
9385  VkDeviceSize* pSumItemSize) const
-
9386 {
-
9387  VMA_ASSERT(allocSize > 0);
-
9388  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
-
9389  VMA_ASSERT(suballocItem != m_Suballocations.cend());
-
9390  VMA_ASSERT(pOffset != VMA_NULL);
-
9391 
-
9392  *itemsToMakeLostCount = 0;
-
9393  *pSumFreeSize = 0;
-
9394  *pSumItemSize = 0;
-
9395 
-
9396  if(canMakeOtherLost)
-
9397  {
-
9398  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
-
9399  {
-
9400  *pSumFreeSize = suballocItem->size;
-
9401  }
-
9402  else
-
9403  {
-
9404  if(suballocItem->hAllocation->CanBecomeLost() &&
-
9405  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
-
9406  {
-
9407  ++*itemsToMakeLostCount;
-
9408  *pSumItemSize = suballocItem->size;
-
9409  }
-
9410  else
-
9411  {
-
9412  return false;
-
9413  }
-
9414  }
-
9415 
-
9416  // Remaining size is too small for this request: Early return.
-
9417  if(GetSize() - suballocItem->offset < allocSize)
-
9418  {
-
9419  return false;
-
9420  }
-
9421 
-
9422  // Start from offset equal to beginning of this suballocation.
-
9423  *pOffset = suballocItem->offset;
-
9424 
-
9425  // Apply VMA_DEBUG_MARGIN at the beginning.
-
9426  if(VMA_DEBUG_MARGIN > 0)
-
9427  {
-
9428  *pOffset += VMA_DEBUG_MARGIN;
-
9429  }
-
9430 
-
9431  // Apply alignment.
-
9432  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
-
9433 
-
9434  // Check previous suballocations for BufferImageGranularity conflicts.
-
9435  // Make bigger alignment if necessary.
-
9436  if(bufferImageGranularity > 1)
-
9437  {
-
9438  bool bufferImageGranularityConflict = false;
-
9439  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
-
9440  while(prevSuballocItem != m_Suballocations.cbegin())
-
9441  {
-
9442  --prevSuballocItem;
-
9443  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
-
9444  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
-
9445  {
-
9446  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
-
9447  {
-
9448  bufferImageGranularityConflict = true;
-
9449  break;
-
9450  }
-
9451  }
-
9452  else
-
9453  // Already on previous page.
-
9454  break;
-
9455  }
-
9456  if(bufferImageGranularityConflict)
-
9457  {
-
9458  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
-
9459  }
-
9460  }
-
9461 
-
9462  // Now that we have final *pOffset, check if we are past suballocItem.
-
9463  // If yes, return false - this function should be called for another suballocItem as starting point.
-
9464  if(*pOffset >= suballocItem->offset + suballocItem->size)
-
9465  {
-
9466  return false;
-
9467  }
-
9468 
-
9469  // Calculate padding at the beginning based on current offset.
-
9470  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
-
9471 
-
9472  // Calculate required margin at the end.
-
9473  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
-
9474 
-
9475  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
-
9476  // Another early return check.
-
9477  if(suballocItem->offset + totalSize > GetSize())
-
9478  {
-
9479  return false;
-
9480  }
-
9481 
-
9482  // Advance lastSuballocItem until desired size is reached.
-
9483  // Update itemsToMakeLostCount.
-
9484  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
-
9485  if(totalSize > suballocItem->size)
-
9486  {
-
9487  VkDeviceSize remainingSize = totalSize - suballocItem->size;
-
9488  while(remainingSize > 0)
-
9489  {
-
9490  ++lastSuballocItem;
-
9491  if(lastSuballocItem == m_Suballocations.cend())
-
9492  {
-
9493  return false;
-
9494  }
-
9495  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
-
9496  {
-
9497  *pSumFreeSize += lastSuballocItem->size;
-
9498  }
-
9499  else
-
9500  {
-
9501  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
-
9502  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
-
9503  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
-
9504  {
-
9505  ++*itemsToMakeLostCount;
-
9506  *pSumItemSize += lastSuballocItem->size;
-
9507  }
-
9508  else
-
9509  {
-
9510  return false;
-
9511  }
-
9512  }
-
9513  remainingSize = (lastSuballocItem->size < remainingSize) ?
-
9514  remainingSize - lastSuballocItem->size : 0;
-
9515  }
-
9516  }
-
9517 
-
9518  // Check next suballocations for BufferImageGranularity conflicts.
-
9519  // If conflict exists, we must mark more allocations lost or fail.
-
9520  if(bufferImageGranularity > 1)
-
9521  {
-
9522  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
-
9523  ++nextSuballocItem;
-
9524  while(nextSuballocItem != m_Suballocations.cend())
-
9525  {
-
9526  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
-
9527  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
-
9528  {
-
9529  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
-
9530  {
-
9531  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
-
9532  if(nextSuballoc.hAllocation->CanBecomeLost() &&
-
9533  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
-
9534  {
-
9535  ++*itemsToMakeLostCount;
-
9536  }
-
9537  else
-
9538  {
-
9539  return false;
-
9540  }
-
9541  }
-
9542  }
-
9543  else
-
9544  {
-
9545  // Already on next page.
-
9546  break;
-
9547  }
-
9548  ++nextSuballocItem;
-
9549  }
-
9550  }
-
9551  }
-
9552  else
-
9553  {
-
9554  const VmaSuballocation& suballoc = *suballocItem;
-
9555  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
-
9556 
-
9557  *pSumFreeSize = suballoc.size;
-
9558 
-
9559  // Size of this suballocation is too small for this request: Early return.
-
9560  if(suballoc.size < allocSize)
-
9561  {
-
9562  return false;
-
9563  }
-
9564 
-
9565  // Start from offset equal to beginning of this suballocation.
-
9566  *pOffset = suballoc.offset;
-
9567 
-
9568  // Apply VMA_DEBUG_MARGIN at the beginning.
-
9569  if(VMA_DEBUG_MARGIN > 0)
-
9570  {
-
9571  *pOffset += VMA_DEBUG_MARGIN;
-
9572  }
-
9573 
-
9574  // Apply alignment.
-
9575  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
-
9576 
-
9577  // Check previous suballocations for BufferImageGranularity conflicts.
-
9578  // Make bigger alignment if necessary.
-
9579  if(bufferImageGranularity > 1)
-
9580  {
-
9581  bool bufferImageGranularityConflict = false;
-
9582  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
-
9583  while(prevSuballocItem != m_Suballocations.cbegin())
-
9584  {
-
9585  --prevSuballocItem;
-
9586  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
-
9587  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
-
9588  {
-
9589  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
-
9590  {
-
9591  bufferImageGranularityConflict = true;
-
9592  break;
-
9593  }
-
9594  }
-
9595  else
-
9596  // Already on previous page.
-
9597  break;
-
9598  }
-
9599  if(bufferImageGranularityConflict)
-
9600  {
-
9601  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
-
9602  }
-
9603  }
-
9604 
-
9605  // Calculate padding at the beginning based on current offset.
-
9606  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
-
9607 
-
9608  // Calculate required margin at the end.
-
9609  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
-
9610 
-
9611  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
-
9612  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
-
9613  {
-
9614  return false;
-
9615  }
-
9616 
-
9617  // Check next suballocations for BufferImageGranularity conflicts.
-
9618  // If conflict exists, allocation cannot be made here.
-
9619  if(bufferImageGranularity > 1)
-
9620  {
-
9621  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
-
9622  ++nextSuballocItem;
-
9623  while(nextSuballocItem != m_Suballocations.cend())
-
9624  {
-
9625  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
-
9626  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
-
9627  {
-
9628  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
-
9629  {
-
9630  return false;
-
9631  }
-
9632  }
-
9633  else
-
9634  {
-
9635  // Already on next page.
-
9636  break;
-
9637  }
-
9638  ++nextSuballocItem;
-
9639  }
+
9325  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
+
9326  if(paddingBegin)
+
9327  {
+
9328  VmaSuballocation paddingSuballoc = {};
+
9329  paddingSuballoc.offset = request.offset - paddingBegin;
+
9330  paddingSuballoc.size = paddingBegin;
+
9331  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
+
9332  const VmaSuballocationList::iterator paddingBeginItem =
+
9333  m_Suballocations.insert(request.item, paddingSuballoc);
+
9334  RegisterFreeSuballocation(paddingBeginItem);
+
9335  }
+
9336 
+
9337  // Update totals.
+
9338  m_FreeCount = m_FreeCount - 1;
+
9339  if(paddingBegin > 0)
+
9340  {
+
9341  ++m_FreeCount;
+
9342  }
+
9343  if(paddingEnd > 0)
+
9344  {
+
9345  ++m_FreeCount;
+
9346  }
+
9347  m_SumFreeSize -= allocSize;
+
9348 }
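// Worked example of the split Alloc() performs. Assume a FREE suballocation
// covering [1000, 1500) and a 200-byte request whose offset was aligned up
// to 1024:
//
//     paddingBegin = 1024 - 1000     = 24   -> new FREE item [1000, 1024)
//     the allocation itself                 -> used item     [1024, 1224)
//     paddingEnd   = 500 - 24 - 200  = 276  -> new FREE item [1224, 1500)
//
// m_FreeCount loses the consumed item and gains one per nonzero padding, and
// m_SumFreeSize drops by exactly allocSize because both paddings stay free.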
+
9349 
+
9350 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
+
9351 {
+
9352  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
+
9353  suballocItem != m_Suballocations.end();
+
9354  ++suballocItem)
+
9355  {
+
9356  VmaSuballocation& suballoc = *suballocItem;
+
9357  if(suballoc.hAllocation == allocation)
+
9358  {
+
9359  FreeSuballocation(suballocItem);
+
9360  VMA_HEAVY_ASSERT(Validate());
+
9361  return;
+
9362  }
+
9363  }
+
9364  VMA_ASSERT(0 && "Not found!");
+
9365 }
+
9366 
+
9367 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
+
9368 {
+
9369  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
+
9370  suballocItem != m_Suballocations.end();
+
9371  ++suballocItem)
+
9372  {
+
9373  VmaSuballocation& suballoc = *suballocItem;
+
9374  if(suballoc.offset == offset)
+
9375  {
+
9376  FreeSuballocation(suballocItem);
+
9377  return;
+
9378  }
+
9379  }
+
9380  VMA_ASSERT(0 && "Not found!");
+
9381 }
+
9382 
+
9383 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
+
9384 {
+
9385  VkDeviceSize lastSize = 0;
+
9386  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
+
9387  {
+
9388  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
+
9389 
+
9390  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
+
9391  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
+
9392  VMA_VALIDATE(it->size >= lastSize);
+
9393  lastSize = it->size;
+
9394  }
+
9395  return true;
+
9396 }
+
9397 
+
9398 bool VmaBlockMetadata_Generic::CheckAllocation(
+
9399  uint32_t currentFrameIndex,
+
9400  uint32_t frameInUseCount,
+
9401  VkDeviceSize bufferImageGranularity,
+
9402  VkDeviceSize allocSize,
+
9403  VkDeviceSize allocAlignment,
+
9404  VmaSuballocationType allocType,
+
9405  VmaSuballocationList::const_iterator suballocItem,
+
9406  bool canMakeOtherLost,
+
9407  VkDeviceSize* pOffset,
+
9408  size_t* itemsToMakeLostCount,
+
9409  VkDeviceSize* pSumFreeSize,
+
9410  VkDeviceSize* pSumItemSize) const
+
9411 {
+
9412  VMA_ASSERT(allocSize > 0);
+
9413  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
+
9414  VMA_ASSERT(suballocItem != m_Suballocations.cend());
+
9415  VMA_ASSERT(pOffset != VMA_NULL);
+
9416 
+
9417  *itemsToMakeLostCount = 0;
+
9418  *pSumFreeSize = 0;
+
9419  *pSumItemSize = 0;
+
9420 
+
9421  if(canMakeOtherLost)
+
9422  {
+
9423  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
+
9424  {
+
9425  *pSumFreeSize = suballocItem->size;
+
9426  }
+
9427  else
+
9428  {
+
9429  if(suballocItem->hAllocation->CanBecomeLost() &&
+
9430  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
+
9431  {
+
9432  ++*itemsToMakeLostCount;
+
9433  *pSumItemSize = suballocItem->size;
+
9434  }
+
9435  else
+
9436  {
+
9437  return false;
+
9438  }
+
9439  }
+
9440 
+
9441  // Remaining size is too small for this request: Early return.
+
9442  if(GetSize() - suballocItem->offset < allocSize)
+
9443  {
+
9444  return false;
+
9445  }
+
9446 
+
9447  // Start from offset equal to beginning of this suballocation.
+
9448  *pOffset = suballocItem->offset;
+
9449 
+
9450  // Apply VMA_DEBUG_MARGIN at the beginning.
+
9451  if(VMA_DEBUG_MARGIN > 0)
+
9452  {
+
9453  *pOffset += VMA_DEBUG_MARGIN;
+
9454  }
+
9455 
+
9456  // Apply alignment.
+
9457  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
+
9458 
+
9459  // Check previous suballocations for BufferImageGranularity conflicts.
+
9460  // Make bigger alignment if necessary.
+
9461  if(bufferImageGranularity > 1)
+
9462  {
+
9463  bool bufferImageGranularityConflict = false;
+
9464  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
+
9465  while(prevSuballocItem != m_Suballocations.cbegin())
+
9466  {
+
9467  --prevSuballocItem;
+
9468  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
+
9469  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
+
9470  {
+
9471  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
+
9472  {
+
9473  bufferImageGranularityConflict = true;
+
9474  break;
+
9475  }
+
9476  }
+
9477  else
+
9478  // Already on previous page.
+
9479  break;
+
9480  }
+
9481  if(bufferImageGranularityConflict)
+
9482  {
+
9483  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
+
9484  }
+
9485  }
+
9486 
+
9487  // Now that we have final *pOffset, check if we are past suballocItem.
+
9488  // If yes, return false - this function should be called for another suballocItem as starting point.
+
9489  if(*pOffset >= suballocItem->offset + suballocItem->size)
+
9490  {
+
9491  return false;
+
9492  }
+
9493 
+
9494  // Calculate padding at the beginning based on current offset.
+
9495  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
+
9496 
+
9497  // Calculate required margin at the end.
+
9498  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
+
9499 
+
9500  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
+
9501  // Another early return check.
+
9502  if(suballocItem->offset + totalSize > GetSize())
+
9503  {
+
9504  return false;
+
9505  }
+
9506 
+
9507  // Advance lastSuballocItem until desired size is reached.
+
9508  // Update itemsToMakeLostCount.
+
9509  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
+
9510  if(totalSize > suballocItem->size)
+
9511  {
+
9512  VkDeviceSize remainingSize = totalSize - suballocItem->size;
+
9513  while(remainingSize > 0)
+
9514  {
+
9515  ++lastSuballocItem;
+
9516  if(lastSuballocItem == m_Suballocations.cend())
+
9517  {
+
9518  return false;
+
9519  }
+
9520  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
+
9521  {
+
9522  *pSumFreeSize += lastSuballocItem->size;
+
9523  }
+
9524  else
+
9525  {
+
9526  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
+
9527  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
+
9528  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
+
9529  {
+
9530  ++*itemsToMakeLostCount;
+
9531  *pSumItemSize += lastSuballocItem->size;
+
9532  }
+
9533  else
+
9534  {
+
9535  return false;
+
9536  }
+
9537  }
+
9538  remainingSize = (lastSuballocItem->size < remainingSize) ?
+
9539  remainingSize - lastSuballocItem->size : 0;
+
9540  }
+
9541  }
+
9542 
+
9543  // Check next suballocations for BufferImageGranularity conflicts.
+
9544  // If conflict exists, we must mark more allocations lost or fail.
+
9545  if(bufferImageGranularity > 1)
+
9546  {
+
9547  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
+
9548  ++nextSuballocItem;
+
9549  while(nextSuballocItem != m_Suballocations.cend())
+
9550  {
+
9551  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
+
9552  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
+
9553  {
+
9554  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
+
9555  {
+
9556  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
+
9557  if(nextSuballoc.hAllocation->CanBecomeLost() &&
+
9558  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
+
9559  {
+
9560  ++*itemsToMakeLostCount;
+
9561  }
+
9562  else
+
9563  {
+
9564  return false;
+
9565  }
+
9566  }
+
9567  }
+
9568  else
+
9569  {
+
9570  // Already on next page.
+
9571  break;
+
9572  }
+
9573  ++nextSuballocItem;
+
9574  }
+
9575  }
+
9576  }
+
9577  else
+
9578  {
+
9579  const VmaSuballocation& suballoc = *suballocItem;
+
9580  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
+
9581 
+
9582  *pSumFreeSize = suballoc.size;
+
9583 
+
9584  // Size of this suballocation is too small for this request: Early return.
+
9585  if(suballoc.size < allocSize)
+
9586  {
+
9587  return false;
+
9588  }
+
9589 
+
9590  // Start from offset equal to beginning of this suballocation.
+
9591  *pOffset = suballoc.offset;
+
9592 
+
9593  // Apply VMA_DEBUG_MARGIN at the beginning.
+
9594  if(VMA_DEBUG_MARGIN > 0)
+
9595  {
+
9596  *pOffset += VMA_DEBUG_MARGIN;
+
9597  }
+
9598 
+
9599  // Apply alignment.
+
9600  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
+
9601 
+
9602  // Check previous suballocations for BufferImageGranularity conflicts.
+
9603  // Make bigger alignment if necessary.
+
9604  if(bufferImageGranularity > 1)
+
9605  {
+
9606  bool bufferImageGranularityConflict = false;
+
9607  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
+
9608  while(prevSuballocItem != m_Suballocations.cbegin())
+
9609  {
+
9610  --prevSuballocItem;
+
9611  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
+
9612  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
+
9613  {
+
9614  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
+
9615  {
+
9616  bufferImageGranularityConflict = true;
+
9617  break;
+
9618  }
+
9619  }
+
9620  else
+
9621  // Already on previous page.
+
9622  break;
+
9623  }
+
9624  if(bufferImageGranularityConflict)
+
9625  {
+
9626  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
+
9627  }
+
9628  }
+
9629 
+
9630  // Calculate padding at the beginning based on current offset.
+
9631  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
+
9632 
+
9633  // Calculate required margin at the end.
+
9634  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
+
9635 
+
9636  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
+
9637  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
+
9638  {
+
9639  return false;
9640  }
-
9641  }
-
9642 
-
9643  // All tests passed: Success. pOffset is already filled.
-
9644  return true;
-
9645 }
-
9646 
-
9647 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
-
9648 {
-
9649  VMA_ASSERT(item != m_Suballocations.end());
-
9650  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
-
9651 
-
9652  VmaSuballocationList::iterator nextItem = item;
-
9653  ++nextItem;
-
9654  VMA_ASSERT(nextItem != m_Suballocations.end());
-
9655  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
-
9656 
-
9657  item->size += nextItem->size;
-
9658  --m_FreeCount;
-
9659  m_Suballocations.erase(nextItem);
-
9660 }
-
9661 
-
9662 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
-
9663 {
-
9664  // Change this suballocation to be marked as free.
-
9665  VmaSuballocation& suballoc = *suballocItem;
-
9666  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
-
9667  suballoc.hAllocation = VK_NULL_HANDLE;
-
9668 
-
9669  // Update totals.
-
9670  ++m_FreeCount;
-
9671  m_SumFreeSize += suballoc.size;
-
9672 
-
9673  // Merge with previous and/or next suballocation if it's also free.
-
9674  bool mergeWithNext = false;
-
9675  bool mergeWithPrev = false;
+
9641 
+
9642  // Check next suballocations for BufferImageGranularity conflicts.
+
9643  // If conflict exists, allocation cannot be made here.
+
9644  if(bufferImageGranularity > 1)
+
9645  {
+
9646  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
+
9647  ++nextSuballocItem;
+
9648  while(nextSuballocItem != m_Suballocations.cend())
+
9649  {
+
9650  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
+
9651  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
+
9652  {
+
9653  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
+
9654  {
+
9655  return false;
+
9656  }
+
9657  }
+
9658  else
+
9659  {
+
9660  // Already on next page.
+
9661  break;
+
9662  }
+
9663  ++nextSuballocItem;
+
9664  }
+
9665  }
+
9666  }
+
9667 
+
9668  // All tests passed: Success. pOffset is already filled.
+
9669  return true;
+
9670 }
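// The alignment step in CheckAllocation() uses VmaAlignUp(), which rounds up
// with integer division so it also handles non-power-of-two values. A
// standalone sketch, with the power-of-two bit-trick variant for comparison:
//
//     VkDeviceSize AlignUpDiv(VkDeviceSize val, VkDeviceSize align)
//     {
//         return (val + align - 1) / align * align;
//     }
//
//     VkDeviceSize AlignUpPow2(VkDeviceSize val, VkDeviceSize align) // align == 2^n
//     {
//         return (val + align - 1) & ~(align - 1);
//     }
//
//     // AlignUpDiv(1001, 256) == AlignUpPow2(1001, 256) == 1024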
+
9671 
+
9672 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
+
9673 {
+
9674  VMA_ASSERT(item != m_Suballocations.end());
+
9675  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
9676 
-
9677  VmaSuballocationList::iterator nextItem = suballocItem;
+
9677  VmaSuballocationList::iterator nextItem = item;
9678  ++nextItem;
-
9679  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
-
9680  {
-
9681  mergeWithNext = true;
-
9682  }
-
9683 
-
9684  VmaSuballocationList::iterator prevItem = suballocItem;
-
9685  if(suballocItem != m_Suballocations.begin())
-
9686  {
-
9687  --prevItem;
-
9688  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
-
9689  {
-
9690  mergeWithPrev = true;
-
9691  }
-
9692  }
-
9693 
-
9694  if(mergeWithNext)
-
9695  {
-
9696  UnregisterFreeSuballocation(nextItem);
-
9697  MergeFreeWithNext(suballocItem);
-
9698  }
-
9699 
-
9700  if(mergeWithPrev)
-
9701  {
-
9702  UnregisterFreeSuballocation(prevItem);
-
9703  MergeFreeWithNext(prevItem);
-
9704  RegisterFreeSuballocation(prevItem);
-
9705  return prevItem;
-
9706  }
-
9707  else
-
9708  {
-
9709  RegisterFreeSuballocation(suballocItem);
-
9710  return suballocItem;
-
9711  }
-
9712 }
-
9713 
-
9714 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
-
9715 {
-
9716  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
-
9717  VMA_ASSERT(item->size > 0);
+
9679  VMA_ASSERT(nextItem != m_Suballocations.end());
+
9680  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
+
9681 
+
9682  item->size += nextItem->size;
+
9683  --m_FreeCount;
+
9684  m_Suballocations.erase(nextItem);
+
9685 }
+
9686 
+
9687 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
+
9688 {
+
9689  // Change this suballocation to be marked as free.
+
9690  VmaSuballocation& suballoc = *suballocItem;
+
9691  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
+
9692  suballoc.hAllocation = VK_NULL_HANDLE;
+
9693 
+
9694  // Update totals.
+
9695  ++m_FreeCount;
+
9696  m_SumFreeSize += suballoc.size;
+
9697 
+
9698  // Merge with previous and/or next suballocation if it's also free.
+
9699  bool mergeWithNext = false;
+
9700  bool mergeWithPrev = false;
+
9701 
+
9702  VmaSuballocationList::iterator nextItem = suballocItem;
+
9703  ++nextItem;
+
9704  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
+
9705  {
+
9706  mergeWithNext = true;
+
9707  }
+
9708 
+
9709  VmaSuballocationList::iterator prevItem = suballocItem;
+
9710  if(suballocItem != m_Suballocations.begin())
+
9711  {
+
9712  --prevItem;
+
9713  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
+
9714  {
+
9715  mergeWithPrev = true;
+
9716  }
+
9717  }
9718 
-
9719  // You may want to enable this validation at the beginning or at the end of
-
9720  // this function, depending on what you want to check.
-
9721  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
-
9722 
-
9723  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
-
9724  {
-
9725  if(m_FreeSuballocationsBySize.empty())
-
9726  {
-
9727  m_FreeSuballocationsBySize.push_back(item);
-
9728  }
-
9729  else
-
9730  {
-
9731  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
-
9732  }
-
9733  }
-
9734 
-
9735  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
-
9736 }
-
9737 
+
9719  if(mergeWithNext)
+
9720  {
+
9721  UnregisterFreeSuballocation(nextItem);
+
9722  MergeFreeWithNext(suballocItem);
+
9723  }
+
9724 
+
9725  if(mergeWithPrev)
+
9726  {
+
9727  UnregisterFreeSuballocation(prevItem);
+
9728  MergeFreeWithNext(prevItem);
+
9729  RegisterFreeSuballocation(prevItem);
+
9730  return prevItem;
+
9731  }
+
9732  else
+
9733  {
+
9734  RegisterFreeSuballocation(suballocItem);
+
9735  return suballocItem;
+
9736  }
+
9737 }
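// Coalescing in FreeSuballocation() guarantees the list never holds two
// adjacent FREE items. Example timeline for three used neighbors [A][B][C]:
//
//     free B:  [A][free][C]       - no FREE neighbor, the item is registered as-is
//     free C:  [A][free......]    - merged into the FREE item on its left
//     free A:  [free..........]   - merged with the FREE item on its right
//
// The returned iterator always refers to the surviving, merged FREE item.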
9738 
-
9739 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
+
9739 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
9740 {
9741  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
9742  VMA_ASSERT(item->size > 0);
@@ -6689,7387 +6689,7387 @@ $(function() {
9747 
9748  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
9749  {
-
9750  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
-
9751  m_FreeSuballocationsBySize.data(),
-
9752  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
-
9753  item,
-
9754  VmaSuballocationItemSizeLess());
-
9755  for(size_t index = it - m_FreeSuballocationsBySize.data();
-
9756  index < m_FreeSuballocationsBySize.size();
-
9757  ++index)
-
9758  {
-
9759  if(m_FreeSuballocationsBySize[index] == item)
-
9760  {
-
9761  VmaVectorRemove(m_FreeSuballocationsBySize, index);
-
9762  return;
-
9763  }
-
9764  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
-
9765  }
-
9766  VMA_ASSERT(0 && "Not found.");
-
9767  }
+
9750  if(m_FreeSuballocationsBySize.empty())
+
9751  {
+
9752  m_FreeSuballocationsBySize.push_back(item);
+
9753  }
+
9754  else
+
9755  {
+
9756  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
+
9757  }
+
9758  }
+
9759 
+
9760  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
+
9761 }
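// m_FreeSuballocationsBySize stays sorted by size so that best-fit lookups
// and UnregisterFreeSuballocation() below can binary-search it. The sorted
// insert above is equivalent in spirit to this standard-library sketch:
//
//     auto pos = std::lower_bound(vec.begin(), vec.end(), item,
//         [](VmaSuballocationList::iterator lhs, VmaSuballocationList::iterator rhs)
//             { return lhs->size < rhs->size; });
//     vec.insert(pos, item);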
+
9762 
+
9763 
+
9764 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
+
9765 {
+
9766  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
+
9767  VMA_ASSERT(item->size > 0);
9768 
-
9769  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
-
9770 }
-
9771 
-
9772 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
-
9773  VkDeviceSize bufferImageGranularity,
-
9774  VmaSuballocationType& inOutPrevSuballocType) const
-
9775 {
-
9776  if(bufferImageGranularity == 1 || IsEmpty())
-
9777  {
-
9778  return false;
-
9779  }
-
9780 
-
9781  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
-
9782  bool typeConflictFound = false;
-
9783  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
-
9784  it != m_Suballocations.cend();
-
9785  ++it)
-
9786  {
-
9787  const VmaSuballocationType suballocType = it->type;
-
9788  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
-
9789  {
-
9790  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
-
9791  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
-
9792  {
-
9793  typeConflictFound = true;
-
9794  }
-
9795  inOutPrevSuballocType = suballocType;
-
9796  }
-
9797  }
-
9798 
-
9799  return typeConflictFound || minAlignment >= bufferImageGranularity;
-
9800 }
-
9801 
-
9802 ////////////////////////////////////////////////////////////////////////////////
-
9803 // class VmaBlockMetadata_Linear
-
9804 
-
9805 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
-
9806  VmaBlockMetadata(hAllocator),
-
9807  m_SumFreeSize(0),
-
9808  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
-
9809  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
-
9810  m_1stVectorIndex(0),
-
9811  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
-
9812  m_1stNullItemsBeginCount(0),
-
9813  m_1stNullItemsMiddleCount(0),
-
9814  m_2ndNullItemsCount(0)
-
9815 {
-
9816 }
-
9817 
-
9818 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
-
9819 {
-
9820 }
-
9821 
-
9822 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
-
9823 {
-
9824  VmaBlockMetadata::Init(size);
-
9825  m_SumFreeSize = size;
-
9826 }
-
9827 
-
9828 bool VmaBlockMetadata_Linear::Validate() const
-
9829 {
-
9830  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-
9831  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-
9832 
-
9833  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
-
9834  VMA_VALIDATE(!suballocations1st.empty() ||
-
9835  suballocations2nd.empty() ||
-
9836  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
-
9837 
-
9838  if(!suballocations1st.empty())
-
9839  {
-
9840  // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
-
9841  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
-
9842  // Null item at the end should be just pop_back().
-
9843  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
-
9844  }
-
9845  if(!suballocations2nd.empty())
-
9846  {
-
9847  // Null item at the end should be just pop_back().
-
9848  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
-
9849  }
-
9850 
-
9851  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
-
9852  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
-
9853 
-
9854  VkDeviceSize sumUsedSize = 0;
-
9855  const size_t suballoc1stCount = suballocations1st.size();
-
9856  VkDeviceSize offset = VMA_DEBUG_MARGIN;
+
9769  // You may want to enable this validation at the beginning or at the end of
+
9770  // this function, depending on what you want to check.
+
9771  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
+
9772 
+
9773  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
+
9774  {
+
9775  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
+
9776  m_FreeSuballocationsBySize.data(),
+
9777  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
+
9778  item,
+
9779  VmaSuballocationItemSizeLess());
+
9780  for(size_t index = it - m_FreeSuballocationsBySize.data();
+
9781  index < m_FreeSuballocationsBySize.size();
+
9782  ++index)
+
9783  {
+
9784  if(m_FreeSuballocationsBySize[index] == item)
+
9785  {
+
9786  VmaVectorRemove(m_FreeSuballocationsBySize, index);
+
9787  return;
+
9788  }
+
9789  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
+
9790  }
+
9791  VMA_ASSERT(0 && "Not found.");
+
9792  }
+
9793 
+
9794  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
+
9795 }
+
9796 
+
9797 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
+
9798  VkDeviceSize bufferImageGranularity,
+
9799  VmaSuballocationType& inOutPrevSuballocType) const
+
9800 {
+
9801  if(bufferImageGranularity == 1 || IsEmpty())
+
9802  {
+
9803  return false;
+
9804  }
+
9805 
+
9806  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
+
9807  bool typeConflictFound = false;
+
9808  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
+
9809  it != m_Suballocations.cend();
+
9810  ++it)
+
9811  {
+
9812  const VmaSuballocationType suballocType = it->type;
+
9813  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
+
9814  {
+
9815  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
+
9816  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
+
9817  {
+
9818  typeConflictFound = true;
+
9819  }
+
9820  inOutPrevSuballocType = suballocType;
+
9821  }
+
9822  }
+
9823 
+
9824  return typeConflictFound || minAlignment >= bufferImageGranularity;
+
9825 }
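// The granularity logic above treats the block as pages of size
// bufferImageGranularity (a power of two per the Vulkan spec) and only
// reports a possible conflict when a linear and a non-linear resource could
// end up on the same page. Sketch of the underlying same-page test:
//
//     bool OnSamePage(VkDeviceSize lastByteA, VkDeviceSize firstByteB,
//                     VkDeviceSize pageSize)
//     {
//         return (lastByteA & ~(pageSize - 1)) == (firstByteB & ~(pageSize - 1));
//     }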
+
9826 
+
9827 ////////////////////////////////////////////////////////////////////////////////
+
9828 // class VmaBlockMetadata_Linear
+
9829 
+
9830 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
+
9831  VmaBlockMetadata(hAllocator),
+
9832  m_SumFreeSize(0),
+
9833  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
+
9834  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
+
9835  m_1stVectorIndex(0),
+
9836  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
+
9837  m_1stNullItemsBeginCount(0),
+
9838  m_1stNullItemsMiddleCount(0),
+
9839  m_2ndNullItemsCount(0)
+
9840 {
+
9841 }
+
9842 
+
9843 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
+
9844 {
+
9845 }
+
9846 
+
9847 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
+
9848 {
+
9849  VmaBlockMetadata::Init(size);
+
9850  m_SumFreeSize = size;
+
9851 }
+
9852 
+
9853 bool VmaBlockMetadata_Linear::Validate() const
+
9854 {
+
9855  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
+
9856  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9857 
-
9858  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
-
9859  {
-
9860  const size_t suballoc2ndCount = suballocations2nd.size();
-
9861  size_t nullItem2ndCount = 0;
-
9862  for(size_t i = 0; i < suballoc2ndCount; ++i)
-
9863  {
-
9864  const VmaSuballocation& suballoc = suballocations2nd[i];
-
9865  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
-
9866 
-
9867  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
-
9868  VMA_VALIDATE(suballoc.offset >= offset);
-
9869 
-
9870  if(!currFree)
-
9871  {
-
9872  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
-
9873  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
-
9874  sumUsedSize += suballoc.size;
-
9875  }
-
9876  else
-
9877  {
-
9878  ++nullItem2ndCount;
-
9879  }
-
9880 
-
9881  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
-
9882  }
-
9883 
-
9884  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
-
9885  }
-
9886 
-
9887  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
-
9888  {
-
9889  const VmaSuballocation& suballoc = suballocations1st[i];
-
9890  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
-
9891  suballoc.hAllocation == VK_NULL_HANDLE);
-
9892  }
-
9893 
-
9894  size_t nullItem1stCount = m_1stNullItemsBeginCount;
-
9895 
-
9896  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
-
9897  {
-
9898  const VmaSuballocation& suballoc = suballocations1st[i];
-
9899  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
-
9900 
-
9901  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
-
9902  VMA_VALIDATE(suballoc.offset >= offset);
-
9903  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
-
9904 
-
9905  if(!currFree)
-
9906  {
-
9907  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
-
9908  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
-
9909  sumUsedSize += suballoc.size;
-
9910  }
-
9911  else
-
9912  {
-
9913  ++nullItem1stCount;
-
9914  }
-
9915 
-
9916  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
+
9858  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
+
9859  VMA_VALIDATE(!suballocations1st.empty() ||
+
9860  suballocations2nd.empty() ||
+
9861  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
+
9862 
+
9863  if(!suballocations1st.empty())
+
9864  {
+
9865  // Null item at the beginning should be accounted for in m_1stNullItemsBeginCount.
+
9866  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
+
9867  // Null item at the end should be just pop_back().
+
9868  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
+
9869  }
+
9870  if(!suballocations2nd.empty())
+
9871  {
+
9872  // Null item at the end should be just pop_back().
+
9873  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
+
9874  }
+
9875 
+
9876  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
+
9877  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
+
9878 
+
9879  VkDeviceSize sumUsedSize = 0;
+
9880  const size_t suballoc1stCount = suballocations1st.size();
+
9881  VkDeviceSize offset = VMA_DEBUG_MARGIN;
+
9882 
+
9883  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
+
9884  {
+
9885  const size_t suballoc2ndCount = suballocations2nd.size();
+
9886  size_t nullItem2ndCount = 0;
+
9887  for(size_t i = 0; i < suballoc2ndCount; ++i)
+
9888  {
+
9889  const VmaSuballocation& suballoc = suballocations2nd[i];
+
9890  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
+
9891 
+
9892  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
+
9893  VMA_VALIDATE(suballoc.offset >= offset);
+
9894 
+
9895  if(!currFree)
+
9896  {
+
9897  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
+
9898  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
+
9899  sumUsedSize += suballoc.size;
+
9900  }
+
9901  else
+
9902  {
+
9903  ++nullItem2ndCount;
+
9904  }
+
9905 
+
9906  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
+
9907  }
+
9908 
+
9909  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
+
9910  }
+
9911 
+
9912  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
+
9913  {
+
9914  const VmaSuballocation& suballoc = suballocations1st[i];
+
9915  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
+
9916  suballoc.hAllocation == VK_NULL_HANDLE);
9917  }
-
9918  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
-
9919 
-
9920  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
-
9921  {
-
9922  const size_t suballoc2ndCount = suballocations2nd.size();
-
9923  size_t nullItem2ndCount = 0;
-
9924  for(size_t i = suballoc2ndCount; i--; )
-
9925  {
-
9926  const VmaSuballocation& suballoc = suballocations2nd[i];
-
9927  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
-
9928 
-
9929  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
-
9930  VMA_VALIDATE(suballoc.offset >= offset);
-
9931 
-
9932  if(!currFree)
-
9933  {
-
9934  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
-
9935  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
-
9936  sumUsedSize += suballoc.size;
-
9937  }
-
9938  else
-
9939  {
-
9940  ++nullItem2ndCount;
-
9941  }
-
9942 
-
9943  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
-
9944  }
-
9945 
-
9946  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
-
9947  }
-
9948 
-
9949  VMA_VALIDATE(offset <= GetSize());
-
9950  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
-
9951 
-
9952  return true;
-
9953 }
-
9954 
-
9955 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
-
9956 {
-
9957  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
-
9958  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
-
9959 }
-
9960 
-
9961 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
-
9962 {
-
9963  const VkDeviceSize size = GetSize();
-
9964 
-
9965  /*
-
9966  We don't consider gaps inside allocation vectors with freed allocations because
-
9967  they are not suitable for reuse in linear allocator. We consider only space that
-
9968  is available for new allocations.
-
9969  */
-
9970  if(IsEmpty())
-
9971  {
-
9972  return size;
-
9973  }
-
9974 
-
9975  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
+
9918 
+
9919  size_t nullItem1stCount = m_1stNullItemsBeginCount;
+
9920 
+
9921  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
+
9922  {
+
9923  const VmaSuballocation& suballoc = suballocations1st[i];
+
9924  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
+
9925 
+
9926  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
+
9927  VMA_VALIDATE(suballoc.offset >= offset);
+
9928  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
+
9929 
+
9930  if(!currFree)
+
9931  {
+
9932  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
+
9933  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
+
9934  sumUsedSize += suballoc.size;
+
9935  }
+
9936  else
+
9937  {
+
9938  ++nullItem1stCount;
+
9939  }
+
9940 
+
9941  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
+
9942  }
+
9943  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
+
9944 
+
9945  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
+
9946  {
+
9947  const size_t suballoc2ndCount = suballocations2nd.size();
+
9948  size_t nullItem2ndCount = 0;
+
9949  for(size_t i = suballoc2ndCount; i--; )
+
9950  {
+
9951  const VmaSuballocation& suballoc = suballocations2nd[i];
+
9952  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
+
9953 
+
9954  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
+
9955  VMA_VALIDATE(suballoc.offset >= offset);
+
9956 
+
9957  if(!currFree)
+
9958  {
+
9959  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
+
9960  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
+
9961  sumUsedSize += suballoc.size;
+
9962  }
+
9963  else
+
9964  {
+
9965  ++nullItem2ndCount;
+
9966  }
+
9967 
+
9968  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
+
9969  }
+
9970 
+
9971  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
+
9972  }
+
9973 
+
9974  VMA_VALIDATE(offset <= GetSize());
+
9975  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
9976 
-
9977  switch(m_2ndVectorMode)
-
9978  {
-
9979  case SECOND_VECTOR_EMPTY:
-
9980  /*
-
9981  Available space is after end of 1st, as well as before beginning of 1st (which
-
9982  would make it a ring buffer).
-
9983  */
-
9984  {
-
9985  const size_t suballocations1stCount = suballocations1st.size();
-
9986  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
-
9987  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
-
9988  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
-
9989  return VMA_MAX(
-
9990  firstSuballoc.offset,
-
9991  size - (lastSuballoc.offset + lastSuballoc.size));
-
9992  }
-
9993  break;
-
9994 
-
9995  case SECOND_VECTOR_RING_BUFFER:
-
9996  /*
-
9997  Available space is only between end of 2nd and beginning of 1st.
-
9998  */
-
9999  {
-
10000  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-
10001  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
-
10002  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
-
10003  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
-
10004  }
-
10005  break;
-
10006 
-
10007  case SECOND_VECTOR_DOUBLE_STACK:
-
10008  /*
-
10009  Available space is only between end of 1st and top of 2nd.
-
10010  */
-
10011  {
-
10012  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-
10013  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
-
10014  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
-
10015  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
-
10016  }
-
10017  break;
-
10018 
-
10019  default:
-
10020  VMA_ASSERT(0);
-
10021  return 0;
-
10022  }
-
10023 }
-
10024 
-
10025 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
-
10026 {
-
10027  const VkDeviceSize size = GetSize();
-
10028  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-
10029  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-
10030  const size_t suballoc1stCount = suballocations1st.size();
-
10031  const size_t suballoc2ndCount = suballocations2nd.size();
-
10032 
-
10033  outInfo.blockCount = 1;
-
10034  outInfo.allocationCount = (uint32_t)GetAllocationCount();
-
10035  outInfo.unusedRangeCount = 0;
-
10036  outInfo.usedBytes = 0;
-
10037  outInfo.allocationSizeMin = UINT64_MAX;
-
10038  outInfo.allocationSizeMax = 0;
-
10039  outInfo.unusedRangeSizeMin = UINT64_MAX;
-
10040  outInfo.unusedRangeSizeMax = 0;
-
10041 
-
10042  VkDeviceSize lastOffset = 0;
+
9977  return true;
+
9978 }
+
9979 
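The VMA_VALIDATE checks above abort Validate() with false on the first broken invariant, so a corrupted metadata block is reported as a whole instead of asserting deep inside a later allocation. A minimal sketch of that pattern over a simplified suballocation list (hypothetical stand-in types and macro, not VMA's):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Stand-in for VMA_VALIDATE: assert in debug builds, then make the
    // enclosing validation function return false.
    #define MY_VALIDATE(cond) do { if(!(cond)) { assert(0 && #cond); return false; } } while(false)

    struct Item { size_t offset, size; bool isFree; };

    // Same style of invariant as Validate() above: offsets never move backward
    // and the counted free items match a cached counter.
    static bool ValidateItems(const std::vector<Item>& items, size_t cachedNullCount)
    {
        size_t offset = 0, nullCount = 0;
        for(const Item& it : items)
        {
            MY_VALIDATE(it.offset >= offset);
            if(it.isFree) { ++nullCount; }
            offset = it.offset + it.size;
        }
        MY_VALIDATE(nullCount == cachedNullCount);
        return true;
    }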
+
9980 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
+
9981 {
+
9982  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
+
9983  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
+
9984 }
+
9985 
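GetAllocationCount() above never walks the vectors: null entries are tracked in m_1stNullItemsBeginCount, m_1stNullItemsMiddleCount and m_2ndNullItemsCount as they appear, so the live count is plain arithmetic. A worked instance with made-up counts:

    #include <cassert>
    #include <cstddef>

    int main()
    {
        // Hypothetical state: 10 entries in 1st (2 leading + 1 middle null),
        // 4 entries in 2nd (1 null) -- same arithmetic as GetAllocationCount().
        const size_t size1st = 10, nulls1stBegin = 2, nulls1stMiddle = 1;
        const size_t size2nd = 4,  nulls2nd = 1;
        assert(size1st - (nulls1stBegin + nulls1stMiddle) + size2nd - nulls2nd == 10);
        return 0;
    }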
+
9986 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
+
9987 {
+
9988  const VkDeviceSize size = GetSize();
+
9989 
+
9990  /*
+
9991  We don't consider gaps inside allocation vectors with freed allocations because
+
9992  they are not suitable for reuse in linear allocator. We consider only space that
+
9993  is available for new allocations.
+
9994  */
+
9995  if(IsEmpty())
+
9996  {
+
9997  return size;
+
9998  }
+
9999 
+
10000  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
+
10001 
+
10002  switch(m_2ndVectorMode)
+
10003  {
+
10004  case SECOND_VECTOR_EMPTY:
+
10005  /*
+
10006  Available space is after end of 1st, as well as before beginning of 1st (which
+
10007  would make it a ring buffer).
+
10008  */
+
10009  {
+
10010  const size_t suballocations1stCount = suballocations1st.size();
+
10011  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
+
10012  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
+
10013  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
+
10014  return VMA_MAX(
+
10015  firstSuballoc.offset,
+
10016  size - (lastSuballoc.offset + lastSuballoc.size));
+
10017  }
+
10018  break;
+
10019 
+
10020  case SECOND_VECTOR_RING_BUFFER:
+
10021  /*
+
10022  Available space is only between end of 2nd and beginning of 1st.
+
10023  */
+
10024  {
+
10025  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+
10026  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
+
10027  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
+
10028  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
+
10029  }
+
10030  break;
+
10031 
+
10032  case SECOND_VECTOR_DOUBLE_STACK:
+
10033  /*
+
10034  Available space is only between end of 1st and top of 2nd.
+
10035  */
+
10036  {
+
10037  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+
10038  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
+
10039  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
+
10040  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
+
10041  }
+
10042  break;
10043 
-
10044  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
-
10045  {
-
10046  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
-
10047  size_t nextAlloc2ndIndex = 0;
-
10048  while(lastOffset < freeSpace2ndTo1stEnd)
-
10049  {
-
10050  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
-
10051  while(nextAlloc2ndIndex < suballoc2ndCount &&
-
10052  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
-
10053  {
-
10054  ++nextAlloc2ndIndex;
-
10055  }
-
10056 
-
10057  // Found non-null allocation.
-
10058  if(nextAlloc2ndIndex < suballoc2ndCount)
-
10059  {
-
10060  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
-
10061 
-
10062  // 1. Process free space before this allocation.
-
10063  if(lastOffset < suballoc.offset)
-
10064  {
-
10065  // There is free space from lastOffset to suballoc.offset.
-
10066  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
-
10067  ++outInfo.unusedRangeCount;
-
10068  outInfo.unusedBytes += unusedRangeSize;
-
10069  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
-
10070  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
-
10071  }
-
10072 
-
10073  // 2. Process this allocation.
-
10074  // There is allocation with suballoc.offset, suballoc.size.
-
10075  outInfo.usedBytes += suballoc.size;
-
10076  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
-
10077  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
-
10078 
-
10079  // 3. Prepare for next iteration.
-
10080  lastOffset = suballoc.offset + suballoc.size;
-
10081  ++nextAlloc2ndIndex;
-
10082  }
-
10083  // We are at the end.
-
10084  else
-
10085  {
-
10086  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
-
10087  if(lastOffset < freeSpace2ndTo1stEnd)
-
10088  {
-
10089  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
-
10090  ++outInfo.unusedRangeCount;
-
10091  outInfo.unusedBytes += unusedRangeSize;
-
10092  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
-
10093  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
-
10094  }
-
10095 
-
10096  // End of loop.
-
10097  lastOffset = freeSpace2ndTo1stEnd;
-
10098  }
-
10099  }
-
10100  }
-
10101 
-
10102  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
-
10103  const VkDeviceSize freeSpace1stTo2ndEnd =
-
10104  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
-
10105  while(lastOffset < freeSpace1stTo2ndEnd)
-
10106  {
-
10107  // Find next non-null allocation or move nextAlloc1stIndex to the end.
-
10108  while(nextAlloc1stIndex < suballoc1stCount &&
-
10109  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
-
10110  {
-
10111  ++nextAlloc1stIndex;
-
10112  }
-
10113 
-
10114  // Found non-null allocation.
-
10115  if(nextAlloc1stIndex < suballoc1stCount)
-
10116  {
-
10117  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
-
10118 
-
10119  // 1. Process free space before this allocation.
-
10120  if(lastOffset < suballoc.offset)
-
10121  {
-
10122  // There is free space from lastOffset to suballoc.offset.
-
10123  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
-
10124  ++outInfo.unusedRangeCount;
-
10125  outInfo.unusedBytes += unusedRangeSize;
-
10126  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
-
10127  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
-
10128  }
-
10129 
-
10130  // 2. Process this allocation.
-
10131  // There is allocation with suballoc.offset, suballoc.size.
-
10132  outInfo.usedBytes += suballoc.size;
-
10133  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
-
10134  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
-
10135 
-
10136  // 3. Prepare for next iteration.
-
10137  lastOffset = suballoc.offset + suballoc.size;
-
10138  ++nextAlloc1stIndex;
-
10139  }
-
10140  // We are at the end.
-
10141  else
-
10142  {
-
10143  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
-
10144  if(lastOffset < freeSpace1stTo2ndEnd)
-
10145  {
-
10146  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
-
10147  ++outInfo.unusedRangeCount;
-
10148  outInfo.unusedBytes += unusedRangeSize;
-
10149  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
-
10150  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
-
10151  }
-
10152 
-
10153  // End of loop.
-
10154  lastOffset = freeSpace1stTo2ndEnd;
-
10155  }
-
10156  }
-
10157 
-
10158  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
-
10159  {
-
10160  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
-
10161  while(lastOffset < size)
-
10162  {
-
10163  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
-
10164  while(nextAlloc2ndIndex != SIZE_MAX &&
-
10165  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
-
10166  {
-
10167  --nextAlloc2ndIndex;
-
10168  }
-
10169 
-
10170  // Found non-null allocation.
-
10171  if(nextAlloc2ndIndex != SIZE_MAX)
-
10172  {
-
10173  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
-
10174 
-
10175  // 1. Process free space before this allocation.
-
10176  if(lastOffset < suballoc.offset)
-
10177  {
-
10178  // There is free space from lastOffset to suballoc.offset.
-
10179  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
-
10180  ++outInfo.unusedRangeCount;
-
10181  outInfo.unusedBytes += unusedRangeSize;
-
10182  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
-
10183  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
-
10184  }
-
10185 
-
10186  // 2. Process this allocation.
-
10187  // There is allocation with suballoc.offset, suballoc.size.
-
10188  outInfo.usedBytes += suballoc.size;
-
10189  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
-
10190  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
-
10191 
-
10192  // 3. Prepare for next iteration.
-
10193  lastOffset = suballoc.offset + suballoc.size;
-
10194  --nextAlloc2ndIndex;
-
10195  }
-
10196  // We are at the end.
-
10197  else
-
10198  {
-
10199  // There is free space from lastOffset to size.
-
10200  if(lastOffset < size)
-
10201  {
-
10202  const VkDeviceSize unusedRangeSize = size - lastOffset;
-
10203  ++outInfo.unusedRangeCount;
-
10204  outInfo.unusedBytes += unusedRangeSize;
-
10205  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
-
10206  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
-
10207  }
-
10208 
-
10209  // End of loop.
-
10210  lastOffset = size;
-
10211  }
-
10212  }
-
10213  }
-
10214 
-
10215  outInfo.unusedBytes = size - outInfo.usedBytes;
-
10216 }
-
10217 
-
10218 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
-
10219 {
-
10220  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-
10221  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-
10222  const VkDeviceSize size = GetSize();
-
10223  const size_t suballoc1stCount = suballocations1st.size();
-
10224  const size_t suballoc2ndCount = suballocations2nd.size();
-
10225 
-
10226  inoutStats.size += size;
-
10227 
-
10228  VkDeviceSize lastOffset = 0;
-
10229 
-
10230  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
-
10231  {
-
10232  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
-
10233  size_t nextAlloc2ndIndex = 0; // index into suballocations2nd, so it starts at 0
-
10234  while(lastOffset < freeSpace2ndTo1stEnd)
-
10235  {
-
10236  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
-
10237  while(nextAlloc2ndIndex < suballoc2ndCount &&
-
10238  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
-
10239  {
-
10240  ++nextAlloc2ndIndex;
-
10241  }
+
10044  default:
+
10045  VMA_ASSERT(0);
+
10046  return 0;
+
10047  }
+
10048 }
+
10049 
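The switch above reduces GetUnusedRangeSizeMax() to at most two candidate gaps per mode, because the linear algorithm keeps allocations packed at the ends of the vectors. A self-contained illustration of the three cases with hypothetical offsets (plain integers instead of suballocation vectors):

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    int main()
    {
        const uint64_t blockSize = 1024;

        // SECOND_VECTOR_EMPTY: gap before the first suballocation of 1st,
        // or after its last one -- whichever is larger.
        const uint64_t first1stOffset = 64, last1stEnd = 700;
        assert(std::max(first1stOffset, blockSize - last1stEnd) == 324);

        // SECOND_VECTOR_RING_BUFFER: only the gap between end of 2nd and start of 1st.
        const uint64_t last2ndEnd = 32;
        assert(first1stOffset - last2ndEnd == 32);

        // SECOND_VECTOR_DOUBLE_STACK: only the gap between end of 1st and top of 2nd.
        const uint64_t top2ndOffset = 900;
        assert(top2ndOffset - last1stEnd == 200);
        return 0;
    }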
+
10050 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
+
10051 {
+
10052  const VkDeviceSize size = GetSize();
+
10053  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
+
10054  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+
10055  const size_t suballoc1stCount = suballocations1st.size();
+
10056  const size_t suballoc2ndCount = suballocations2nd.size();
+
10057 
+
10058  outInfo.blockCount = 1;
+
10059  outInfo.allocationCount = (uint32_t)GetAllocationCount();
+
10060  outInfo.unusedRangeCount = 0;
+
10061  outInfo.usedBytes = 0;
+
10062  outInfo.allocationSizeMin = UINT64_MAX;
+
10063  outInfo.allocationSizeMax = 0;
+
10064  outInfo.unusedRangeSizeMin = UINT64_MAX;
+
10065  outInfo.unusedRangeSizeMax = 0;
+
10066 
+
10067  VkDeviceSize lastOffset = 0;
+
10068 
+
10069  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
+
10070  {
+
10071  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
+
10072  size_t nextAlloc2ndIndex = 0;
+
10073  while(lastOffset < freeSpace2ndTo1stEnd)
+
10074  {
+
10075  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
+
10076  while(nextAlloc2ndIndex < suballoc2ndCount &&
+
10077  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
+
10078  {
+
10079  ++nextAlloc2ndIndex;
+
10080  }
+
10081 
+
10082  // Found non-null allocation.
+
10083  if(nextAlloc2ndIndex < suballoc2ndCount)
+
10084  {
+
10085  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
+
10086 
+
10087  // 1. Process free space before this allocation.
+
10088  if(lastOffset < suballoc.offset)
+
10089  {
+
10090  // There is free space from lastOffset to suballoc.offset.
+
10091  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
+
10092  ++outInfo.unusedRangeCount;
+
10093  outInfo.unusedBytes += unusedRangeSize;
+
10094  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
+
10095  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
+
10096  }
+
10097 
+
10098  // 2. Process this allocation.
+
10099  // There is allocation with suballoc.offset, suballoc.size.
+
10100  outInfo.usedBytes += suballoc.size;
+
10101  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
+
10102  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
+
10103 
+
10104  // 3. Prepare for next iteration.
+
10105  lastOffset = suballoc.offset + suballoc.size;
+
10106  ++nextAlloc2ndIndex;
+
10107  }
+
10108  // We are at the end.
+
10109  else
+
10110  {
+
10111  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
+
10112  if(lastOffset < freeSpace2ndTo1stEnd)
+
10113  {
+
10114  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
+
10115  ++outInfo.unusedRangeCount;
+
10116  outInfo.unusedBytes += unusedRangeSize;
+
10117  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
+
10118  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
+
10119  }
+
10120 
+
10121  // End of loop.
+
10122  lastOffset = freeSpace2ndTo1stEnd;
+
10123  }
+
10124  }
+
10125  }
+
10126 
+
10127  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
+
10128  const VkDeviceSize freeSpace1stTo2ndEnd =
+
10129  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
+
10130  while(lastOffset < freeSpace1stTo2ndEnd)
+
10131  {
+
10132  // Find next non-null allocation or move nextAlloc1stIndex to the end.
+
10133  while(nextAlloc1stIndex < suballoc1stCount &&
+
10134  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
+
10135  {
+
10136  ++nextAlloc1stIndex;
+
10137  }
+
10138 
+
10139  // Found non-null allocation.
+
10140  if(nextAlloc1stIndex < suballoc1stCount)
+
10141  {
+
10142  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
+
10143 
+
10144  // 1. Process free space before this allocation.
+
10145  if(lastOffset < suballoc.offset)
+
10146  {
+
10147  // There is free space from lastOffset to suballoc.offset.
+
10148  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
+
10149  ++outInfo.unusedRangeCount;
+
10150  outInfo.unusedBytes += unusedRangeSize;
+
10151  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
+
10152  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
+
10153  }
+
10154 
+
10155  // 2. Process this allocation.
+
10156  // There is allocation with suballoc.offset, suballoc.size.
+
10157  outInfo.usedBytes += suballoc.size;
+
10158  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
+
10159  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
+
10160 
+
10161  // 3. Prepare for next iteration.
+
10162  lastOffset = suballoc.offset + suballoc.size;
+
10163  ++nextAlloc1stIndex;
+
10164  }
+
10165  // We are at the end.
+
10166  else
+
10167  {
+
10168  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
+
10169  if(lastOffset < freeSpace1stTo2ndEnd)
+
10170  {
+
10171  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
+
10172  ++outInfo.unusedRangeCount;
+
10173  outInfo.unusedBytes += unusedRangeSize;
+
10174  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
+
10175  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
+
10176  }
+
10177 
+
10178  // End of loop.
+
10179  lastOffset = freeSpace1stTo2ndEnd;
+
10180  }
+
10181  }
+
10182 
+
10183  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
+
10184  {
+
10185  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
+
10186  while(lastOffset < size)
+
10187  {
+
10188  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
+
10189  while(nextAlloc2ndIndex != SIZE_MAX &&
+
10190  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
+
10191  {
+
10192  --nextAlloc2ndIndex;
+
10193  }
+
10194 
+
10195  // Found non-null allocation.
+
10196  if(nextAlloc2ndIndex != SIZE_MAX)
+
10197  {
+
10198  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
+
10199 
+
10200  // 1. Process free space before this allocation.
+
10201  if(lastOffset < suballoc.offset)
+
10202  {
+
10203  // There is free space from lastOffset to suballoc.offset.
+
10204  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
+
10205  ++outInfo.unusedRangeCount;
+
10206  outInfo.unusedBytes += unusedRangeSize;
+
10207  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
+
10208  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
+
10209  }
+
10210 
+
10211  // 2. Process this allocation.
+
10212  // There is allocation with suballoc.offset, suballoc.size.
+
10213  outInfo.usedBytes += suballoc.size;
+
10214  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
+
10215  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
+
10216 
+
10217  // 3. Prepare for next iteration.
+
10218  lastOffset = suballoc.offset + suballoc.size;
+
10219  --nextAlloc2ndIndex;
+
10220  }
+
10221  // We are at the end.
+
10222  else
+
10223  {
+
10224  // There is free space from lastOffset to size.
+
10225  if(lastOffset < size)
+
10226  {
+
10227  const VkDeviceSize unusedRangeSize = size - lastOffset;
+
10228  ++outInfo.unusedRangeCount;
+
10229  outInfo.unusedBytes += unusedRangeSize;
+
10230  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
+
10231  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
+
10232  }
+
10233 
+
10234  // End of loop.
+
10235  lastOffset = size;
+
10236  }
+
10237  }
+
10238  }
+
10239 
+
10240  outInfo.unusedBytes = size - outInfo.usedBytes;
+
10241 }
10242 
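The ...SizeMin/...SizeMax fields above follow the standard min/max fold: minima start at the UINT64_MAX sentinel and can only shrink, maxima start at 0 and can only grow, which is why the ...Max fields are folded with VMA_MAX while the ...Min fields use VMA_MIN. A small self-contained check of that fold, with std::min/std::max standing in for VMA_MIN/VMA_MAX:

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    int main()
    {
        const uint64_t sizes[] = { 256, 64, 512 };
        uint64_t sizeMin = UINT64_MAX;  // sentinel init, as in CalcAllocationStatInfo()
        uint64_t sizeMax = 0;
        for(uint64_t s : sizes)
        {
            sizeMin = std::min(sizeMin, s);
            sizeMax = std::max(sizeMax, s);
        }
        assert(sizeMin == 64 && sizeMax == 512);
        return 0;
    }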
-
10243  // Found non-null allocation.
-
10244  if(nextAlloc2ndIndex < suballoc2ndCount)
-
10245  {
-
10246  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
-
10247 
-
10248  // 1. Process free space before this allocation.
-
10249  if(lastOffset < suballoc.offset)
-
10250  {
-
10251  // There is free space from lastOffset to suballoc.offset.
-
10252  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
-
10253  inoutStats.unusedSize += unusedRangeSize;
-
10254  ++inoutStats.unusedRangeCount;
-
10255  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
-
10256  }
-
10257 
-
10258  // 2. Process this allocation.
-
10259  // There is allocation with suballoc.offset, suballoc.size.
-
10260  ++inoutStats.allocationCount;
-
10261 
-
10262  // 3. Prepare for next iteration.
-
10263  lastOffset = suballoc.offset + suballoc.size;
-
10264  ++nextAlloc2ndIndex;
-
10265  }
-
10266  // We are at the end.
-
10267  else
-
10268  {
-
10269  if(lastOffset < freeSpace2ndTo1stEnd)
-
10270  {
-
10271  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
-
10272  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
-
10273  inoutStats.unusedSize += unusedRangeSize;
-
10274  ++inoutStats.unusedRangeCount;
-
10275  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
-
10276  }
-
10277 
-
10278  // End of loop.
-
10279  lastOffset = freeSpace2ndTo1stEnd;
-
10280  }
-
10281  }
-
10282  }
-
10283 
-
10284  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
-
10285  const VkDeviceSize freeSpace1stTo2ndEnd =
-
10286  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
-
10287  while(lastOffset < freeSpace1stTo2ndEnd)
-
10288  {
-
10289  // Find next non-null allocation or move nextAlloc1stIndex to the end.
-
10290  while(nextAlloc1stIndex < suballoc1stCount &&
-
10291  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
-
10292  {
-
10293  ++nextAlloc1stIndex;
-
10294  }
-
10295 
-
10296  // Found non-null allocation.
-
10297  if(nextAlloc1stIndex < suballoc1stCount)
-
10298  {
-
10299  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
-
10300 
-
10301  // 1. Process free space before this allocation.
-
10302  if(lastOffset < suballoc.offset)
-
10303  {
-
10304  // There is free space from lastOffset to suballoc.offset.
-
10305  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
-
10306  inoutStats.unusedSize += unusedRangeSize;
-
10307  ++inoutStats.unusedRangeCount;
-
10308  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
-
10309  }
-
10310 
-
10311  // 2. Process this allocation.
-
10312  // There is allocation with suballoc.offset, suballoc.size.
-
10313  ++inoutStats.allocationCount;
-
10314 
-
10315  // 3. Prepare for next iteration.
-
10316  lastOffset = suballoc.offset + suballoc.size;
-
10317  ++nextAlloc1stIndex;
-
10318  }
-
10319  // We are at the end.
-
10320  else
-
10321  {
-
10322  if(lastOffset < freeSpace1stTo2ndEnd)
-
10323  {
-
10324  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
-
10325  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
-
10326  inoutStats.unusedSize += unusedRangeSize;
-
10327  ++inoutStats.unusedRangeCount;
-
10328  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
-
10329  }
-
10330 
-
10331  // End of loop.
-
10332  lastOffset = freeSpace1stTo2ndEnd;
-
10333  }
-
10334  }
-
10335 
-
10336  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
-
10337  {
-
10338  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
-
10339  while(lastOffset < size)
-
10340  {
-
10341  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
-
10342  while(nextAlloc2ndIndex != SIZE_MAX &&
-
10343  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
-
10344  {
-
10345  --nextAlloc2ndIndex;
-
10346  }
-
10347 
-
10348  // Found non-null allocation.
-
10349  if(nextAlloc2ndIndex != SIZE_MAX)
-
10350  {
-
10351  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
-
10352 
-
10353  // 1. Process free space before this allocation.
-
10354  if(lastOffset < suballoc.offset)
-
10355  {
-
10356  // There is free space from lastOffset to suballoc.offset.
-
10357  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
-
10358  inoutStats.unusedSize += unusedRangeSize;
-
10359  ++inoutStats.unusedRangeCount;
-
10360  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
-
10361  }
-
10362 
-
10363  // 2. Process this allocation.
-
10364  // There is allocation with suballoc.offset, suballoc.size.
-
10365  ++inoutStats.allocationCount;
-
10366 
-
10367  // 3. Prepare for next iteration.
-
10368  lastOffset = suballoc.offset + suballoc.size;
-
10369  --nextAlloc2ndIndex;
-
10370  }
-
10371  // We are at the end.
-
10372  else
-
10373  {
-
10374  if(lastOffset < size)
-
10375  {
-
10376  // There is free space from lastOffset to size.
-
10377  const VkDeviceSize unusedRangeSize = size - lastOffset;
-
10378  inoutStats.unusedSize += unusedRangeSize;
-
10379  ++inoutStats.unusedRangeCount;
-
10380  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
-
10381  }
-
10382 
-
10383  // End of loop.
-
10384  lastOffset = size;
-
10385  }
-
10386  }
-
10387  }
-
10388 }
-
10389 
-
10390 #if VMA_STATS_STRING_ENABLED
-
10391 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
-
10392 {
-
10393  const VkDeviceSize size = GetSize();
-
10394  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-
10395  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-
10396  const size_t suballoc1stCount = suballocations1st.size();
-
10397  const size_t suballoc2ndCount = suballocations2nd.size();
-
10398 
-
10399  // FIRST PASS
-
10400 
-
10401  size_t unusedRangeCount = 0;
-
10402  VkDeviceSize usedBytes = 0;
-
10403 
-
10404  VkDeviceSize lastOffset = 0;
-
10405 
-
10406  size_t alloc2ndCount = 0;
-
10407  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
-
10408  {
-
10409  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
-
10410  size_t nextAlloc2ndIndex = 0;
-
10411  while(lastOffset < freeSpace2ndTo1stEnd)
-
10412  {
-
10413  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
-
10414  while(nextAlloc2ndIndex < suballoc2ndCount &&
-
10415  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
-
10416  {
-
10417  ++nextAlloc2ndIndex;
-
10418  }
-
10419 
-
10420  // Found non-null allocation.
-
10421  if(nextAlloc2ndIndex < suballoc2ndCount)
-
10422  {
-
10423  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
-
10424 
-
10425  // 1. Process free space before this allocation.
-
10426  if(lastOffset < suballoc.offset)
-
10427  {
-
10428  // There is free space from lastOffset to suballoc.offset.
-
10429  ++unusedRangeCount;
-
10430  }
-
10431 
-
10432  // 2. Process this allocation.
-
10433  // There is allocation with suballoc.offset, suballoc.size.
-
10434  ++alloc2ndCount;
-
10435  usedBytes += suballoc.size;
-
10436 
-
10437  // 3. Prepare for next iteration.
-
10438  lastOffset = suballoc.offset + suballoc.size;
-
10439  ++nextAlloc2ndIndex;
-
10440  }
-
10441  // We are at the end.
-
10442  else
-
10443  {
-
10444  if(lastOffset < freeSpace2ndTo1stEnd)
-
10445  {
-
10446  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
-
10447  ++unusedRangeCount;
-
10448  }
-
10449 
-
10450  // End of loop.
-
10451  lastOffset = freeSpace2ndTo1stEnd;
-
10452  }
-
10453  }
-
10454  }
-
10455 
-
10456  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
-
10457  size_t alloc1stCount = 0;
-
10458  const VkDeviceSize freeSpace1stTo2ndEnd =
-
10459  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
-
10460  while(lastOffset < freeSpace1stTo2ndEnd)
-
10461  {
-
10462  // Find next non-null allocation or move nextAlloc1stIndex to the end.
-
10463  while(nextAlloc1stIndex < suballoc1stCount &&
-
10464  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
-
10465  {
-
10466  ++nextAlloc1stIndex;
-
10467  }
-
10468 
-
10469  // Found non-null allocation.
-
10470  if(nextAlloc1stIndex < suballoc1stCount)
-
10471  {
-
10472  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
-
10473 
-
10474  // 1. Process free space before this allocation.
-
10475  if(lastOffset < suballoc.offset)
-
10476  {
-
10477  // There is free space from lastOffset to suballoc.offset.
-
10478  ++unusedRangeCount;
-
10479  }
-
10480 
-
10481  // 2. Process this allocation.
-
10482  // There is allocation with suballoc.offset, suballoc.size.
-
10483  ++alloc1stCount;
-
10484  usedBytes += suballoc.size;
-
10485 
-
10486  // 3. Prepare for next iteration.
-
10487  lastOffset = suballoc.offset + suballoc.size;
-
10488  ++nextAlloc1stIndex;
-
10489  }
-
10490  // We are at the end.
-
10491  else
-
10492  {
-
10493  if(lastOffset < freeSpace1stTo2ndEnd)
-
10494  {
-
10495  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
-
10496  ++unusedRangeCount;
-
10497  }
-
10498 
-
10499  // End of loop.
-
10500  lastOffset = freeSpace1stTo2ndEnd;
-
10501  }
-
10502  }
-
10503 
-
10504  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
-
10505  {
-
10506  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
-
10507  while(lastOffset < size)
-
10508  {
-
10509  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
-
10510  while(nextAlloc2ndIndex != SIZE_MAX &&
-
10511  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
-
10512  {
-
10513  --nextAlloc2ndIndex;
-
10514  }
-
10515 
-
10516  // Found non-null allocation.
-
10517  if(nextAlloc2ndIndex != SIZE_MAX)
-
10518  {
-
10519  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
-
10520 
-
10521  // 1. Process free space before this allocation.
-
10522  if(lastOffset < suballoc.offset)
-
10523  {
-
10524  // There is free space from lastOffset to suballoc.offset.
-
10525  ++unusedRangeCount;
-
10526  }
-
10527 
-
10528  // 2. Process this allocation.
-
10529  // There is allocation with suballoc.offset, suballoc.size.
-
10530  ++alloc2ndCount;
-
10531  usedBytes += suballoc.size;
-
10532 
-
10533  // 3. Prepare for next iteration.
-
10534  lastOffset = suballoc.offset + suballoc.size;
-
10535  --nextAlloc2ndIndex;
-
10536  }
-
10537  // We are at the end.
-
10538  else
-
10539  {
-
10540  if(lastOffset < size)
-
10541  {
-
10542  // There is free space from lastOffset to size.
-
10543  ++unusedRangeCount;
-
10544  }
-
10545 
-
10546  // End of loop.
-
10547  lastOffset = size;
-
10548  }
-
10549  }
-
10550  }
-
10551 
-
10552  const VkDeviceSize unusedBytes = size - usedBytes;
-
10553  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
-
10554 
-
10555  // SECOND PASS
-
10556  lastOffset = 0;
-
10557 
-
10558  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
-
10559  {
-
10560  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
-
10561  size_t nextAlloc2ndIndex = 0;
-
10562  while(lastOffset < freeSpace2ndTo1stEnd)
-
10563  {
-
10564  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
-
10565  while(nextAlloc2ndIndex < suballoc2ndCount &&
-
10566  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
-
10567  {
-
10568  ++nextAlloc2ndIndex;
-
10569  }
+
10243 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
+
10244 {
+
10245  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
+
10246  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+
10247  const VkDeviceSize size = GetSize();
+
10248  const size_t suballoc1stCount = suballocations1st.size();
+
10249  const size_t suballoc2ndCount = suballocations2nd.size();
+
10250 
+
10251  inoutStats.size += size;
+
10252 
+
10253  VkDeviceSize lastOffset = 0;
+
10254 
+
10255  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
+
10256  {
+
10257  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
+
10258  size_t nextAlloc2ndIndex = 0; // index into suballocations2nd, so it starts at 0
+
10259  while(lastOffset < freeSpace2ndTo1stEnd)
+
10260  {
+
10261  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
+
10262  while(nextAlloc2ndIndex < suballoc2ndCount &&
+
10263  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
+
10264  {
+
10265  ++nextAlloc2ndIndex;
+
10266  }
+
10267 
+
10268  // Found non-null allocation.
+
10269  if(nextAlloc2ndIndex < suballoc2ndCount)
+
10270  {
+
10271  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
+
10272 
+
10273  // 1. Process free space before this allocation.
+
10274  if(lastOffset < suballoc.offset)
+
10275  {
+
10276  // There is free space from lastOffset to suballoc.offset.
+
10277  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
+
10278  inoutStats.unusedSize += unusedRangeSize;
+
10279  ++inoutStats.unusedRangeCount;
+
10280  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
+
10281  }
+
10282 
+
10283  // 2. Process this allocation.
+
10284  // There is allocation with suballoc.offset, suballoc.size.
+
10285  ++inoutStats.allocationCount;
+
10286 
+
10287  // 3. Prepare for next iteration.
+
10288  lastOffset = suballoc.offset + suballoc.size;
+
10289  ++nextAlloc2ndIndex;
+
10290  }
+
10291  // We are at the end.
+
10292  else
+
10293  {
+
10294  if(lastOffset < freeSpace2ndTo1stEnd)
+
10295  {
+
10296  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
+
10297  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
+
10298  inoutStats.unusedSize += unusedRangeSize;
+
10299  ++inoutStats.unusedRangeCount;
+
10300  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
+
10301  }
+
10302 
+
10303  // End of loop.
+
10304  lastOffset = freeSpace2ndTo1stEnd;
+
10305  }
+
10306  }
+
10307  }
+
10308 
+
10309  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
+
10310  const VkDeviceSize freeSpace1stTo2ndEnd =
+
10311  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
+
10312  while(lastOffset < freeSpace1stTo2ndEnd)
+
10313  {
+
10314  // Find next non-null allocation or move nextAlloc1stIndex to the end.
+
10315  while(nextAlloc1stIndex < suballoc1stCount &&
+
10316  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
+
10317  {
+
10318  ++nextAlloc1stIndex;
+
10319  }
+
10320 
+
10321  // Found non-null allocation.
+
10322  if(nextAlloc1stIndex < suballoc1stCount)
+
10323  {
+
10324  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
+
10325 
+
10326  // 1. Process free space before this allocation.
+
10327  if(lastOffset < suballoc.offset)
+
10328  {
+
10329  // There is free space from lastOffset to suballoc.offset.
+
10330  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
+
10331  inoutStats.unusedSize += unusedRangeSize;
+
10332  ++inoutStats.unusedRangeCount;
+
10333  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
+
10334  }
+
10335 
+
10336  // 2. Process this allocation.
+
10337  // There is allocation with suballoc.offset, suballoc.size.
+
10338  ++inoutStats.allocationCount;
+
10339 
+
10340  // 3. Prepare for next iteration.
+
10341  lastOffset = suballoc.offset + suballoc.size;
+
10342  ++nextAlloc1stIndex;
+
10343  }
+
10344  // We are at the end.
+
10345  else
+
10346  {
+
10347  if(lastOffset < freeSpace1stTo2ndEnd)
+
10348  {
+
10349  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
+
10350  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
+
10351  inoutStats.unusedSize += unusedRangeSize;
+
10352  ++inoutStats.unusedRangeCount;
+
10353  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
+
10354  }
+
10355 
+
10356  // End of loop.
+
10357  lastOffset = freeSpace1stTo2ndEnd;
+
10358  }
+
10359  }
+
10360 
+
10361  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
+
10362  {
+
10363  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
+
10364  while(lastOffset < size)
+
10365  {
+
10366  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
+
10367  while(nextAlloc2ndIndex != SIZE_MAX &&
+
10368  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
+
10369  {
+
10370  --nextAlloc2ndIndex;
+
10371  }
+
10372 
+
10373  // Found non-null allocation.
+
10374  if(nextAlloc2ndIndex != SIZE_MAX)
+
10375  {
+
10376  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
+
10377 
+
10378  // 1. Process free space before this allocation.
+
10379  if(lastOffset < suballoc.offset)
+
10380  {
+
10381  // There is free space from lastOffset to suballoc.offset.
+
10382  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
+
10383  inoutStats.unusedSize += unusedRangeSize;
+
10384  ++inoutStats.unusedRangeCount;
+
10385  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
+
10386  }
+
10387 
+
10388  // 2. Process this allocation.
+
10389  // There is allocation with suballoc.offset, suballoc.size.
+
10390  ++inoutStats.allocationCount;
+
10391 
+
10392  // 3. Prepare for next iteration.
+
10393  lastOffset = suballoc.offset + suballoc.size;
+
10394  --nextAlloc2ndIndex;
+
10395  }
+
10396  // We are at the end.
+
10397  else
+
10398  {
+
10399  if(lastOffset < size)
+
10400  {
+
10401  // There is free space from lastOffset to size.
+
10402  const VkDeviceSize unusedRangeSize = size - lastOffset;
+
10403  inoutStats.unusedSize += unusedRangeSize;
+
10404  ++inoutStats.unusedRangeCount;
+
10405  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
+
10406  }
+
10407 
+
10408  // End of loop.
+
10409  lastOffset = size;
+
10410  }
+
10411  }
+
10412  }
+
10413 }
+
10414 
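AddPoolStats() above repeats the traversal used by CalcAllocationStatInfo(): advance past null entries, account the gap in front of each live suballocation, account the suballocation itself, then the tail gap up to the region's end. A condensed sketch of that loop shape over a simplified vector (hypothetical types, not VMA's):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct Sub { uint64_t offset, size; bool live; };

    // Free ranges are the gaps between consecutive live entries and before
    // `end`; null entries are skipped, exactly like the loops above.
    static void Accumulate(const std::vector<Sub>& subs, uint64_t end,
                           uint64_t& unused, size_t& allocCount)
    {
        uint64_t lastOffset = 0;
        size_t i = 0;
        while(lastOffset < end)
        {
            while(i < subs.size() && !subs[i].live) { ++i; }  // skip nulls
            if(i < subs.size())
            {
                if(lastOffset < subs[i].offset) { unused += subs[i].offset - lastOffset; }
                ++allocCount;
                lastOffset = subs[i].offset + subs[i].size;
                ++i;
            }
            else
            {
                if(lastOffset < end) { unused += end - lastOffset; }
                lastOffset = end;  // end of loop
            }
        }
    }

    int main()
    {
        const std::vector<Sub> subs = { {0, 0, false}, {16, 32, true}, {64, 16, true} };
        uint64_t unused = 0; size_t count = 0;
        Accumulate(subs, 128, unused, count);
        assert(count == 2 && unused == 80);  // 16 + 16 + 48
        return 0;
    }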
+
10415 #if VMA_STATS_STRING_ENABLED
+
10416 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
+
10417 {
+
10418  const VkDeviceSize size = GetSize();
+
10419  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
+
10420  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+
10421  const size_t suballoc1stCount = suballocations1st.size();
+
10422  const size_t suballoc2ndCount = suballocations2nd.size();
+
10423 
+
10424  // FIRST PASS
+
10425 
+
10426  size_t unusedRangeCount = 0;
+
10427  VkDeviceSize usedBytes = 0;
+
10428 
+
10429  VkDeviceSize lastOffset = 0;
+
10430 
+
10431  size_t alloc2ndCount = 0;
+
10432  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
+
10433  {
+
10434  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
+
10435  size_t nextAlloc2ndIndex = 0;
+
10436  while(lastOffset < freeSpace2ndTo1stEnd)
+
10437  {
+
10438  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
+
10439  while(nextAlloc2ndIndex < suballoc2ndCount &&
+
10440  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
+
10441  {
+
10442  ++nextAlloc2ndIndex;
+
10443  }
+
10444 
+
10445  // Found non-null allocation.
+
10446  if(nextAlloc2ndIndex < suballoc2ndCount)
+
10447  {
+
10448  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
+
10449 
+
10450  // 1. Process free space before this allocation.
+
10451  if(lastOffset < suballoc.offset)
+
10452  {
+
10453  // There is free space from lastOffset to suballoc.offset.
+
10454  ++unusedRangeCount;
+
10455  }
+
10456 
+
10457  // 2. Process this allocation.
+
10458  // There is allocation with suballoc.offset, suballoc.size.
+
10459  ++alloc2ndCount;
+
10460  usedBytes += suballoc.size;
+
10461 
+
10462  // 3. Prepare for next iteration.
+
10463  lastOffset = suballoc.offset + suballoc.size;
+
10464  ++nextAlloc2ndIndex;
+
10465  }
+
10466  // We are at the end.
+
10467  else
+
10468  {
+
10469  if(lastOffset < freeSpace2ndTo1stEnd)
+
10470  {
+
10471  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
+
10472  ++unusedRangeCount;
+
10473  }
+
10474 
+
10475  // End of loop.
+
10476  lastOffset = freeSpace2ndTo1stEnd;
+
10477  }
+
10478  }
+
10479  }
+
10480 
+
10481  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
+
10482  size_t alloc1stCount = 0;
+
10483  const VkDeviceSize freeSpace1stTo2ndEnd =
+
10484  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
+
10485  while(lastOffset < freeSpace1stTo2ndEnd)
+
10486  {
+
10487  // Find next non-null allocation or move nextAlloc1stIndex to the end.
+
10488  while(nextAlloc1stIndex < suballoc1stCount &&
+
10489  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
+
10490  {
+
10491  ++nextAlloc1stIndex;
+
10492  }
+
10493 
+
10494  // Found non-null allocation.
+
10495  if(nextAlloc1stIndex < suballoc1stCount)
+
10496  {
+
10497  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
+
10498 
+
10499  // 1. Process free space before this allocation.
+
10500  if(lastOffset < suballoc.offset)
+
10501  {
+
10502  // There is free space from lastOffset to suballoc.offset.
+
10503  ++unusedRangeCount;
+
10504  }
+
10505 
+
10506  // 2. Process this allocation.
+
10507  // There is allocation with suballoc.offset, suballoc.size.
+
10508  ++alloc1stCount;
+
10509  usedBytes += suballoc.size;
+
10510 
+
10511  // 3. Prepare for next iteration.
+
10512  lastOffset = suballoc.offset + suballoc.size;
+
10513  ++nextAlloc1stIndex;
+
10514  }
+
10515  // We are at the end.
+
10516  else
+
10517  {
+
10518  if(lastOffset < freeSpace1stTo2ndEnd)
+
10519  {
+
10520  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
+
10521  ++unusedRangeCount;
+
10522  }
+
10523 
+
10524  // End of loop.
+
10525  lastOffset = freeSpace1stTo2ndEnd;
+
10526  }
+
10527  }
+
10528 
+
10529  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
+
10530  {
+
10531  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
+
10532  while(lastOffset < size)
+
10533  {
+
10534  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
+
10535  while(nextAlloc2ndIndex != SIZE_MAX &&
+
10536  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
+
10537  {
+
10538  --nextAlloc2ndIndex;
+
10539  }
+
10540 
+
10541  // Found non-null allocation.
+
10542  if(nextAlloc2ndIndex != SIZE_MAX)
+
10543  {
+
10544  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
+
10545 
+
10546  // 1. Process free space before this allocation.
+
10547  if(lastOffset < suballoc.offset)
+
10548  {
+
10549  // There is free space from lastOffset to suballoc.offset.
+
10550  ++unusedRangeCount;
+
10551  }
+
10552 
+
10553  // 2. Process this allocation.
+
10554  // There is allocation with suballoc.offset, suballoc.size.
+
10555  ++alloc2ndCount;
+
10556  usedBytes += suballoc.size;
+
10557 
+
10558  // 3. Prepare for next iteration.
+
10559  lastOffset = suballoc.offset + suballoc.size;
+
10560  --nextAlloc2ndIndex;
+
10561  }
+
10562  // We are at the end.
+
10563  else
+
10564  {
+
10565  if(lastOffset < size)
+
10566  {
+
10567  // There is free space from lastOffset to size.
+
10568  ++unusedRangeCount;
+
10569  }
10570 
-
10571  // Found non-null allocation.
-
10572  if(nextAlloc2ndIndex < suballoc2ndCount)
-
10573  {
-
10574  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
-
10575 
-
10576  // 1. Process free space before this allocation.
-
10577  if(lastOffset < suballoc.offset)
-
10578  {
-
10579  // There is free space from lastOffset to suballoc.offset.
-
10580  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
-
10581  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
-
10582  }
-
10583 
-
10584  // 2. Process this allocation.
-
10585  // There is allocation with suballoc.offset, suballoc.size.
-
10586  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
-
10587 
-
10588  // 3. Prepare for next iteration.
-
10589  lastOffset = suballoc.offset + suballoc.size;
-
10590  ++nextAlloc2ndIndex;
-
10591  }
-
10592  // We are at the end.
-
10593  else
-
10594  {
-
10595  if(lastOffset < freeSpace2ndTo1stEnd)
-
10596  {
-
10597  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
-
10598  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
-
10599  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
-
10600  }
-
10601 
-
10602  // End of loop.
-
10603  lastOffset = freeSpace2ndTo1stEnd;
-
10604  }
-
10605  }
-
10606  }
-
10607 
-
10608  nextAlloc1stIndex = m_1stNullItemsBeginCount;
-
10609  while(lastOffset < freeSpace1stTo2ndEnd)
-
10610  {
-
10611  // Find next non-null allocation or move nextAlloc1stIndex to the end.
-
10612  while(nextAlloc1stIndex < suballoc1stCount &&
-
10613  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
-
10614  {
-
10615  ++nextAlloc1stIndex;
-
10616  }
-
10617 
-
10618  // Found non-null allocation.
-
10619  if(nextAlloc1stIndex < suballoc1stCount)
-
10620  {
-
10621  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
-
10622 
-
10623  // 1. Process free space before this allocation.
-
10624  if(lastOffset < suballoc.offset)
-
10625  {
-
10626  // There is free space from lastOffset to suballoc.offset.
-
10627  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
-
10628  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
+
10571  // End of loop.
+
10572  lastOffset = size;
+
10573  }
+
10574  }
+
10575  }
+
10576 
+
10577  const VkDeviceSize unusedBytes = size - usedBytes;
+
10578  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
+
10579 
+
10580  // SECOND PASS
+
10581  lastOffset = 0;
+
10582 
+
10583  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
+
10584  {
+
10585  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
+
10586  size_t nextAlloc2ndIndex = 0;
+
10587  while(lastOffset < freeSpace2ndTo1stEnd)
+
10588  {
+
10589  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
+
10590  while(nextAlloc2ndIndex < suballoc2ndCount &&
+
10591  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
+
10592  {
+
10593  ++nextAlloc2ndIndex;
+
10594  }
+
10595 
+
10596  // Found non-null allocation.
+
10597  if(nextAlloc2ndIndex < suballoc2ndCount)
+
10598  {
+
10599  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
+
10600 
+
10601  // 1. Process free space before this allocation.
+
10602  if(lastOffset < suballoc.offset)
+
10603  {
+
10604  // There is free space from lastOffset to suballoc.offset.
+
10605  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
+
10606  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
+
10607  }
+
10608 
+
10609  // 2. Process this allocation.
+
10610  // There is allocation with suballoc.offset, suballoc.size.
+
10611  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
+
10612 
+
10613  // 3. Prepare for next iteration.
+
10614  lastOffset = suballoc.offset + suballoc.size;
+
10615  ++nextAlloc2ndIndex;
+
10616  }
+
10617  // We are at the end.
+
10618  else
+
10619  {
+
10620  if(lastOffset < freeSpace2ndTo1stEnd)
+
10621  {
+
10622  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
+
10623  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
+
10624  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
+
10625  }
+
10626 
+
10627  // End of loop.
+
10628  lastOffset = freeSpace2ndTo1stEnd;
10629  }
-
10630 
-
10631  // 2. Process this allocation.
-
10632  // There is allocation with suballoc.offset, suballoc.size.
-
10633  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
-
10634 
-
10635  // 3. Prepare for next iteration.
-
10636  lastOffset = suballoc.offset + suballoc.size;
-
10637  ++nextAlloc1stIndex;
-
10638  }
-
10639  // We are at the end.
-
10640  else
-
10641  {
-
10642  if(lastOffset < freeSpace1stTo2ndEnd)
-
10643  {
-
10644  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
-
10645  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
-
10646  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
-
10647  }
-
10648 
-
10649  // End of loop.
-
10650  lastOffset = freeSpace1stTo2ndEnd;
-
10651  }
-
10652  }
-
10653 
-
10654  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
-
10655  {
-
10656  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
-
10657  while(lastOffset < size)
-
10658  {
-
10659  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
-
10660  while(nextAlloc2ndIndex != SIZE_MAX &&
-
10661  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
-
10662  {
-
10663  --nextAlloc2ndIndex;
-
10664  }
-
10665 
-
10666  // Found non-null allocation.
-
10667  if(nextAlloc2ndIndex != SIZE_MAX)
+
10630  }
+
10631  }
+
10632 
+
10633  nextAlloc1stIndex = m_1stNullItemsBeginCount;
+
10634  while(lastOffset < freeSpace1stTo2ndEnd)
+
10635  {
+
10636  // Find next non-null allocation or move nextAlloc1stIndex to the end.
+
10637  while(nextAlloc1stIndex < suballoc1stCount &&
+
10638  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
+
10639  {
+
10640  ++nextAlloc1stIndex;
+
10641  }
+
10642 
+
10643  // Found non-null allocation.
+
10644  if(nextAlloc1stIndex < suballoc1stCount)
+
10645  {
+
10646  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
+
10647 
+
10648  // 1. Process free space before this allocation.
+
10649  if(lastOffset < suballoc.offset)
+
10650  {
+
10651  // There is free space from lastOffset to suballoc.offset.
+
10652  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
+
10653  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
+
10654  }
+
10655 
+
10656  // 2. Process this allocation.
+
10657  // There is allocation with suballoc.offset, suballoc.size.
+
10658  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
+
10659 
+
10660  // 3. Prepare for next iteration.
+
10661  lastOffset = suballoc.offset + suballoc.size;
+
10662  ++nextAlloc1stIndex;
+
10663  }
+
10664  // We are at the end.
+
10665  else
+
10666  {
+
10667  if(lastOffset < freeSpace1stTo2ndEnd)
10668  {
-
10669  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
-
10670 
-
10671  // 1. Process free space before this allocation.
-
10672  if(lastOffset < suballoc.offset)
-
10673  {
-
10674  // There is free space from lastOffset to suballoc.offset.
-
10675  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
-
10676  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
-
10677  }
-
10678 
-
10679  // 2. Process this allocation.
-
10680  // There is allocation with suballoc.offset, suballoc.size.
-
10681  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
-
10682 
-
10683  // 3. Prepare for next iteration.
-
10684  lastOffset = suballoc.offset + suballoc.size;
-
10685  --nextAlloc2ndIndex;
-
10686  }
-
10687  // We are at the end.
-
10688  else
-
10689  {
-
10690  if(lastOffset < size)
-
10691  {
-
10692  // There is free space from lastOffset to size.
-
10693  const VkDeviceSize unusedRangeSize = size - lastOffset;
-
10694  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
-
10695  }
-
10696 
-
10697  // End of loop.
-
10698  lastOffset = size;
-
10699  }
-
10700  }
-
10701  }
-
10702 
-
10703  PrintDetailedMap_End(json);
-
10704 }
-
10705 #endif // #if VMA_STATS_STRING_ENABLED
-
10706 
-
10707 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
-
10708  uint32_t currentFrameIndex,
-
10709  uint32_t frameInUseCount,
-
10710  VkDeviceSize bufferImageGranularity,
-
10711  VkDeviceSize allocSize,
-
10712  VkDeviceSize allocAlignment,
-
10713  bool upperAddress,
-
10714  VmaSuballocationType allocType,
-
10715  bool canMakeOtherLost,
-
10716  uint32_t strategy,
-
10717  VmaAllocationRequest* pAllocationRequest)
-
10718 {
-
10719  VMA_ASSERT(allocSize > 0);
-
10720  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
-
10721  VMA_ASSERT(pAllocationRequest != VMA_NULL);
-
10722  VMA_HEAVY_ASSERT(Validate());
-
10723  return upperAddress ?
-
10724  CreateAllocationRequest_UpperAddress(
-
10725  currentFrameIndex, frameInUseCount, bufferImageGranularity,
-
10726  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) :
-
10727  CreateAllocationRequest_LowerAddress(
-
10728  currentFrameIndex, frameInUseCount, bufferImageGranularity,
-
10729  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest);
-
10730 }
+
10669  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
+
10670  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
+
10671  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
+
10672  }
+
10673 
+
10674  // End of loop.
+
10675  lastOffset = freeSpace1stTo2ndEnd;
+
10676  }
+
10677  }
+
10678 
+
10679  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
+
10680  {
+
10681  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
+
10682  while(lastOffset < size)
+
10683  {
+
10684  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
+
10685  while(nextAlloc2ndIndex != SIZE_MAX &&
+
10686  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
+
10687  {
+
10688  --nextAlloc2ndIndex;
+
10689  }
+
10690 
+
10691  // Found non-null allocation.
+
10692  if(nextAlloc2ndIndex != SIZE_MAX)
+
10693  {
+
10694  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
+
10695 
+
10696  // 1. Process free space before this allocation.
+
10697  if(lastOffset < suballoc.offset)
+
10698  {
+
10699  // There is free space from lastOffset to suballoc.offset.
+
10700  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
+
10701  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
+
10702  }
+
10703 
+
10704  // 2. Process this allocation.
+
10705  // There is allocation with suballoc.offset, suballoc.size.
+
10706  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
+
10707 
+
10708  // 3. Prepare for next iteration.
+
10709  lastOffset = suballoc.offset + suballoc.size;
+
10710  --nextAlloc2ndIndex;
+
10711  }
+
10712  // We are at the end.
+
10713  else
+
10714  {
+
10715  if(lastOffset < size)
+
10716  {
+
10717  // There is free space from lastOffset to size.
+
10718  const VkDeviceSize unusedRangeSize = size - lastOffset;
+
10719  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
+
10720  }
+
10721 
+
10722  // End of loop.
+
10723  lastOffset = size;
+
10724  }
+
10725  }
+
10726  }
+
10727 
+
10728  PrintDetailedMap_End(json);
+
10729 }
+
10730 #endif // #if VMA_STATS_STRING_ENABLED
10731 
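PrintDetailedMap() walks the block twice by design: PrintDetailedMap_Begin() has to write the totals (unused bytes, allocation count, unused-range count) into the JSON header before any entry is emitted, so a counting pass precedes the printing pass. A minimal sketch of that two-pass shape with a plain printf writer (illustrative only, not VMA's VmaJsonWriter):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct Range { uint64_t offset, size; bool live; };

    static void PrintMap(const std::vector<Range>& ranges)
    {
        // FIRST PASS: totals only, so the header can be written up front.
        size_t allocCount = 0, unusedCount = 0;
        for(const Range& r : ranges) { (r.live ? allocCount : unusedCount)++; }

        std::printf("{ \"Allocations\": %zu, \"UnusedRanges\": %zu, \"Entries\": [",
            allocCount, unusedCount);

        // SECOND PASS: emit one entry per range.
        const char* sep = "";
        for(const Range& r : ranges)
        {
            std::printf("%s { \"Offset\": %llu, \"Size\": %llu, \"Type\": \"%s\" }",
                sep, (unsigned long long)r.offset, (unsigned long long)r.size,
                r.live ? "ALLOCATION" : "FREE");
            sep = ",";
        }
        std::printf(" ] }\n");
    }

    int main()
    {
        PrintMap({ {0, 64, true}, {64, 32, false}, {96, 128, true} });
        return 0;
    }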
-
10732 bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
+
10732 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
10733  uint32_t currentFrameIndex,
10734  uint32_t frameInUseCount,
10735  VkDeviceSize bufferImageGranularity,
10736  VkDeviceSize allocSize,
10737  VkDeviceSize allocAlignment,
-
10738  VmaSuballocationType allocType,
-
10739  bool canMakeOtherLost,
-
10740  uint32_t strategy,
-
10741  VmaAllocationRequest* pAllocationRequest)
-
10742 {
-
10743  const VkDeviceSize size = GetSize();
-
10744  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-
10745  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-
10746 
-
10747  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
-
10748  {
-
10749  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
-
10750  return false;
-
10751  }
-
10752 
-
10753  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
-
10754  if(allocSize > size)
-
10755  {
-
10756  return false;
-
10757  }
-
10758  VkDeviceSize resultBaseOffset = size - allocSize;
-
10759  if(!suballocations2nd.empty())
-
10760  {
-
10761  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
-
10762  resultBaseOffset = lastSuballoc.offset - allocSize;
-
10763  if(allocSize > lastSuballoc.offset)
-
10764  {
-
10765  return false;
-
10766  }
-
10767  }
-
10768 
-
10769  // Start from offset equal to end of free space.
-
10770  VkDeviceSize resultOffset = resultBaseOffset;
+
10738  bool upperAddress,
+
10739  VmaSuballocationType allocType,
+
10740  bool canMakeOtherLost,
+
10741  uint32_t strategy,
+
10742  VmaAllocationRequest* pAllocationRequest)
+
10743 {
+
10744  VMA_ASSERT(allocSize > 0);
+
10745  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
+
10746  VMA_ASSERT(pAllocationRequest != VMA_NULL);
+
10747  VMA_HEAVY_ASSERT(Validate());
+
10748  return upperAddress ?
+
10749  CreateAllocationRequest_UpperAddress(
+
10750  currentFrameIndex, frameInUseCount, bufferImageGranularity,
+
10751  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) :
+
10752  CreateAllocationRequest_LowerAddress(
+
10753  currentFrameIndex, frameInUseCount, bufferImageGranularity,
+
10754  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest);
+
10755 }
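CreateAllocationRequest above only dispatches on upperAddress, which ultimately comes from VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT. A hedged usage sketch of the double-stack mode this selects; allocator and memTypeIndex are assumed to exist already, and the sizes are illustrative:

// A custom pool using the linear algorithm. Allocations made with
// VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT stack downward from the end of the
// block; all other allocations grow upward from the beginning.
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
poolCreateInfo.blockSize = 64ull * 1024 * 1024;
poolCreateInfo.maxBlockCount = 1;
VmaPool pool;
vmaCreatePool(allocator, &poolCreateInfo, &pool);

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.pool = pool;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT; // upperAddress == true above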
+
10756 
+
10757 bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
+
10758  uint32_t currentFrameIndex,
+
10759  uint32_t frameInUseCount,
+
10760  VkDeviceSize bufferImageGranularity,
+
10761  VkDeviceSize allocSize,
+
10762  VkDeviceSize allocAlignment,
+
10763  VmaSuballocationType allocType,
+
10764  bool canMakeOtherLost,
+
10765  uint32_t strategy,
+
10766  VmaAllocationRequest* pAllocationRequest)
+
10767 {
+
10768  const VkDeviceSize size = GetSize();
+
10769  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
+
10770  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10771 
-
10772  // Apply VMA_DEBUG_MARGIN at the end.
-
10773  if(VMA_DEBUG_MARGIN > 0)
-
10774  {
-
10775  if(resultOffset < VMA_DEBUG_MARGIN)
-
10776  {
-
10777  return false;
-
10778  }
-
10779  resultOffset -= VMA_DEBUG_MARGIN;
-
10780  }
-
10781 
-
10782  // Apply alignment.
-
10783  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
-
10784 
-
10785  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
-
10786  // Make bigger alignment if necessary.
-
10787  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
-
10788  {
-
10789  bool bufferImageGranularityConflict = false;
-
10790  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
-
10791  {
-
10792  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
-
10793  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
-
10794  {
-
10795  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
-
10796  {
-
10797  bufferImageGranularityConflict = true;
-
10798  break;
-
10799  }
-
10800  }
-
10801  else
-
10802  // Already on previous page.
-
10803  break;
-
10804  }
-
10805  if(bufferImageGranularityConflict)
-
10806  {
-
10807  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
-
10808  }
-
10809  }
-
10810 
-
10811  // There is enough free space.
-
10812  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
-
10813  suballocations1st.back().offset + suballocations1st.back().size :
-
10814  0;
-
10815  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
-
10816  {
-
10817  // Check previous suballocations for BufferImageGranularity conflicts.
-
10818  // If conflict exists, allocation cannot be made here.
-
10819  if(bufferImageGranularity > 1)
-
10820  {
-
10821  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
-
10822  {
-
10823  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
-
10824  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
-
10825  {
-
10826  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
-
10827  {
-
10828  return false;
-
10829  }
-
10830  }
-
10831  else
-
10832  {
-
10833  // Already on next page.
-
10834  break;
-
10835  }
-
10836  }
-
10837  }
-
10838 
-
10839  // All tests passed: Success.
-
10840  pAllocationRequest->offset = resultOffset;
-
10841  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
-
10842  pAllocationRequest->sumItemSize = 0;
-
10843  // pAllocationRequest->item unused.
-
10844  pAllocationRequest->itemsToMakeLostCount = 0;
-
10845  pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
-
10846  return true;
-
10847  }
-
10848 
-
10849  return false;
-
10850 }
-
10851 
-
10852 bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
-
10853  uint32_t currentFrameIndex,
-
10854  uint32_t frameInUseCount,
-
10855  VkDeviceSize bufferImageGranularity,
-
10856  VkDeviceSize allocSize,
-
10857  VkDeviceSize allocAlignment,
-
10858  VmaSuballocationType allocType,
-
10859  bool canMakeOtherLost,
-
10860  uint32_t strategy,
-
10861  VmaAllocationRequest* pAllocationRequest)
-
10862 {
-
10863  const VkDeviceSize size = GetSize();
-
10864  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-
10865  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-
10866 
-
10867  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
-
10868  {
-
10869  // Try to allocate at the end of 1st vector.
-
10870 
-
10871  VkDeviceSize resultBaseOffset = 0;
-
10872  if(!suballocations1st.empty())
-
10873  {
-
10874  const VmaSuballocation& lastSuballoc = suballocations1st.back();
-
10875  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
-
10876  }
-
10877 
-
10878  // Start from offset equal to beginning of free space.
-
10879  VkDeviceSize resultOffset = resultBaseOffset;
-
10880 
-
10881  // Apply VMA_DEBUG_MARGIN at the beginning.
-
10882  if(VMA_DEBUG_MARGIN > 0)
-
10883  {
-
10884  resultOffset += VMA_DEBUG_MARGIN;
-
10885  }
-
10886 
-
10887  // Apply alignment.
-
10888  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
-
10889 
-
10890  // Check previous suballocations for BufferImageGranularity conflicts.
-
10891  // Make bigger alignment if necessary.
-
10892  if(bufferImageGranularity > 1 && !suballocations1st.empty())
-
10893  {
-
10894  bool bufferImageGranularityConflict = false;
-
10895  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
-
10896  {
-
10897  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
-
10898  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
-
10899  {
-
10900  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
-
10901  {
-
10902  bufferImageGranularityConflict = true;
-
10903  break;
-
10904  }
-
10905  }
-
10906  else
-
10907  // Already on previous page.
-
10908  break;
-
10909  }
-
10910  if(bufferImageGranularityConflict)
-
10911  {
-
10912  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
-
10913  }
-
10914  }
-
10915 
-
10916  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
-
10917  suballocations2nd.back().offset : size;
-
10918 
-
10919  // There is enough free space at the end after alignment.
-
10920  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
-
10921  {
-
10922  // Check next suballocations for BufferImageGranularity conflicts.
-
10923  // If conflict exists, allocation cannot be made here.
-
10924  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
-
10925  {
-
10926  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
-
10927  {
-
10928  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
-
10929  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
-
10930  {
-
10931  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
-
10932  {
-
10933  return false;
-
10934  }
-
10935  }
-
10936  else
-
10937  {
-
10938  // Already on previous page.
-
10939  break;
-
10940  }
-
10941  }
-
10942  }
+
10772  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
+
10773  {
+
10774  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
+
10775  return false;
+
10776  }
+
10777 
+
10778  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
+
10779  if(allocSize > size)
+
10780  {
+
10781  return false;
+
10782  }
+
10783  VkDeviceSize resultBaseOffset = size - allocSize;
+
10784  if(!suballocations2nd.empty())
+
10785  {
+
10786  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
+
10787  resultBaseOffset = lastSuballoc.offset - allocSize;
+
10788  if(allocSize > lastSuballoc.offset)
+
10789  {
+
10790  return false;
+
10791  }
+
10792  }
+
10793 
+
10794  // Start from offset equal to end of free space.
+
10795  VkDeviceSize resultOffset = resultBaseOffset;
+
10796 
+
10797  // Apply VMA_DEBUG_MARGIN at the end.
+
10798  if(VMA_DEBUG_MARGIN > 0)
+
10799  {
+
10800  if(resultOffset < VMA_DEBUG_MARGIN)
+
10801  {
+
10802  return false;
+
10803  }
+
10804  resultOffset -= VMA_DEBUG_MARGIN;
+
10805  }
+
10806 
+
10807  // Apply alignment.
+
10808  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
+
10809 
+
10810  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
+
10811  // Make bigger alignment if necessary.
+
10812  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
+
10813  {
+
10814  bool bufferImageGranularityConflict = false;
+
10815  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
+
10816  {
+
10817  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
+
10818  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
+
10819  {
+
10820  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
+
10821  {
+
10822  bufferImageGranularityConflict = true;
+
10823  break;
+
10824  }
+
10825  }
+
10826  else
+
10827  // Already on previous page.
+
10828  break;
+
10829  }
+
10830  if(bufferImageGranularityConflict)
+
10831  {
+
10832  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
+
10833  }
+
10834  }
+
10835 
+
10836  // There is enough free space.
+
10837  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
+
10838  suballocations1st.back().offset + suballocations1st.back().size :
+
10839  0;
+
10840  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
+
10841  {
+
10842  // Check previous suballocations for BufferImageGranularity conflicts.
+
10843  // If conflict exists, allocation cannot be made here.
+
10844  if(bufferImageGranularity > 1)
+
10845  {
+
10846  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
+
10847  {
+
10848  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
+
10849  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
+
10850  {
+
10851  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
+
10852  {
+
10853  return false;
+
10854  }
+
10855  }
+
10856  else
+
10857  {
+
10858  // Already on next page.
+
10859  break;
+
10860  }
+
10861  }
+
10862  }
+
10863 
+
10864  // All tests passed: Success.
+
10865  pAllocationRequest->offset = resultOffset;
+
10866  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
+
10867  pAllocationRequest->sumItemSize = 0;
+
10868  // pAllocationRequest->item unused.
+
10869  pAllocationRequest->itemsToMakeLostCount = 0;
+
10870  pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
+
10871  return true;
+
10872  }
+
10873 
+
10874  return false;
+
10875 }
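The upper-address path places each allocation by subtracting its size from the current top and aligning down. A standalone sketch of that arithmetic, assuming a power-of-two alignment (the library's VmaAlignDown helper also handles other values):

#include <cstdint>

// Round an offset down to a multiple of a power-of-two alignment.
static uint64_t AlignDown(uint64_t offset, uint64_t alignment)
{
    return offset & ~(alignment - 1);
}

// blockSize = 1024, allocSize = 100, alignment = 64:
// base = 1024 - 100 = 924, aligned down to 896,
// so the allocation occupies [896, 996) at the top of the block.
uint64_t resultOffset = AlignDown(1024 - 100, 64); // == 896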
+
10876 
+
10877 bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
+
10878  uint32_t currentFrameIndex,
+
10879  uint32_t frameInUseCount,
+
10880  VkDeviceSize bufferImageGranularity,
+
10881  VkDeviceSize allocSize,
+
10882  VkDeviceSize allocAlignment,
+
10883  VmaSuballocationType allocType,
+
10884  bool canMakeOtherLost,
+
10885  uint32_t strategy,
+
10886  VmaAllocationRequest* pAllocationRequest)
+
10887 {
+
10888  const VkDeviceSize size = GetSize();
+
10889  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
+
10890  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+
10891 
+
10892  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
+
10893  {
+
10894  // Try to allocate at the end of 1st vector.
+
10895 
+
10896  VkDeviceSize resultBaseOffset = 0;
+
10897  if(!suballocations1st.empty())
+
10898  {
+
10899  const VmaSuballocation& lastSuballoc = suballocations1st.back();
+
10900  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
+
10901  }
+
10902 
+
10903  // Start from offset equal to beginning of free space.
+
10904  VkDeviceSize resultOffset = resultBaseOffset;
+
10905 
+
10906  // Apply VMA_DEBUG_MARGIN at the beginning.
+
10907  if(VMA_DEBUG_MARGIN > 0)
+
10908  {
+
10909  resultOffset += VMA_DEBUG_MARGIN;
+
10910  }
+
10911 
+
10912  // Apply alignment.
+
10913  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
+
10914 
+
10915  // Check previous suballocations for BufferImageGranularity conflicts.
+
10916  // Make bigger alignment if necessary.
+
10917  if(bufferImageGranularity > 1 && !suballocations1st.empty())
+
10918  {
+
10919  bool bufferImageGranularityConflict = false;
+
10920  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
+
10921  {
+
10922  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
+
10923  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
+
10924  {
+
10925  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
+
10926  {
+
10927  bufferImageGranularityConflict = true;
+
10928  break;
+
10929  }
+
10930  }
+
10931  else
+
10932  // Already on previous page.
+
10933  break;
+
10934  }
+
10935  if(bufferImageGranularityConflict)
+
10936  {
+
10937  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
+
10938  }
+
10939  }
+
10940 
+
10941  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
+
10942  suballocations2nd.back().offset : size;
10943 
-
10944  // All tests passed: Success.
-
10945  pAllocationRequest->offset = resultOffset;
-
10946  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
-
10947  pAllocationRequest->sumItemSize = 0;
-
10948  // pAllocationRequest->item, customData unused.
-
10949  pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
-
10950  pAllocationRequest->itemsToMakeLostCount = 0;
-
10951  return true;
-
10952  }
-
10953  }
-
10954 
-
10955  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
-
10956  // beginning of 1st vector as the end of free space.
-
10957  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
-
10958  {
-
10959  VMA_ASSERT(!suballocations1st.empty());
-
10960 
-
10961  VkDeviceSize resultBaseOffset = 0;
-
10962  if(!suballocations2nd.empty())
-
10963  {
-
10964  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
-
10965  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
-
10966  }
-
10967 
-
10968  // Start from offset equal to beginning of free space.
-
10969  VkDeviceSize resultOffset = resultBaseOffset;
-
10970 
-
10971  // Apply VMA_DEBUG_MARGIN at the beginning.
-
10972  if(VMA_DEBUG_MARGIN > 0)
-
10973  {
-
10974  resultOffset += VMA_DEBUG_MARGIN;
-
10975  }
-
10976 
-
10977  // Apply alignment.
-
10978  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
+
10944  // There is enough free space at the end after alignment.
+
10945  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
+
10946  {
+
10947  // Check next suballocations for BufferImageGranularity conflicts.
+
10948  // If conflict exists, allocation cannot be made here.
+
10949  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
+
10950  {
+
10951  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
+
10952  {
+
10953  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
+
10954  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
+
10955  {
+
10956  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
+
10957  {
+
10958  return false;
+
10959  }
+
10960  }
+
10961  else
+
10962  {
+
10963  // Already on previous page.
+
10964  break;
+
10965  }
+
10966  }
+
10967  }
+
10968 
+
10969  // All tests passed: Success.
+
10970  pAllocationRequest->offset = resultOffset;
+
10971  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
+
10972  pAllocationRequest->sumItemSize = 0;
+
10973  // pAllocationRequest->item, customData unused.
+
10974  pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
+
10975  pAllocationRequest->itemsToMakeLostCount = 0;
+
10976  return true;
+
10977  }
+
10978  }
10979 
-
10980  // Check previous suballocations for BufferImageGranularity conflicts.
-
10981  // Make bigger alignment if necessary.
-
10982  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
-
10983  {
-
10984  bool bufferImageGranularityConflict = false;
-
10985  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
-
10986  {
-
10987  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
-
10988  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
-
10989  {
-
10990  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
-
10991  {
-
10992  bufferImageGranularityConflict = true;
-
10993  break;
-
10994  }
-
10995  }
-
10996  else
-
10997  // Already on previous page.
-
10998  break;
-
10999  }
-
11000  if(bufferImageGranularityConflict)
-
11001  {
-
11002  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
-
11003  }
-
11004  }
-
11005 
-
11006  pAllocationRequest->itemsToMakeLostCount = 0;
-
11007  pAllocationRequest->sumItemSize = 0;
-
11008  size_t index1st = m_1stNullItemsBeginCount;
-
11009 
-
11010  if(canMakeOtherLost)
-
11011  {
-
11012  while(index1st < suballocations1st.size() &&
-
11013  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
-
11014  {
-
11015  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
-
11016  const VmaSuballocation& suballoc = suballocations1st[index1st];
-
11017  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
-
11018  {
-
11019  // No problem.
+
10980  // Wrap-around to end of 2nd vector. Try to allocate there, treating the
+
10981  // beginning of 1st vector as the end of free space.
+
10982  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
+
10983  {
+
10984  VMA_ASSERT(!suballocations1st.empty());
+
10985 
+
10986  VkDeviceSize resultBaseOffset = 0;
+
10987  if(!suballocations2nd.empty())
+
10988  {
+
10989  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
+
10990  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
+
10991  }
+
10992 
+
10993  // Start from offset equal to beginning of free space.
+
10994  VkDeviceSize resultOffset = resultBaseOffset;
+
10995 
+
10996  // Apply VMA_DEBUG_MARGIN at the beginning.
+
10997  if(VMA_DEBUG_MARGIN > 0)
+
10998  {
+
10999  resultOffset += VMA_DEBUG_MARGIN;
+
11000  }
+
11001 
+
11002  // Apply alignment.
+
11003  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
+
11004 
+
11005  // Check previous suballocations for BufferImageGranularity conflicts.
+
11006  // Make bigger alignment if necessary.
+
11007  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
+
11008  {
+
11009  bool bufferImageGranularityConflict = false;
+
11010  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
+
11011  {
+
11012  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
+
11013  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
+
11014  {
+
11015  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
+
11016  {
+
11017  bufferImageGranularityConflict = true;
+
11018  break;
+
11019  }
11020  }
11021  else
-
11022  {
-
11023  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
-
11024  if(suballoc.hAllocation->CanBecomeLost() &&
-
11025  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
-
11026  {
-
11027  ++pAllocationRequest->itemsToMakeLostCount;
-
11028  pAllocationRequest->sumItemSize += suballoc.size;
-
11029  }
-
11030  else
-
11031  {
-
11032  return false;
-
11033  }
-
11034  }
-
11035  ++index1st;
-
11036  }
-
11037 
-
11038  // Check next suballocations for BufferImageGranularity conflicts.
-
11039  // If conflict exists, we must mark more allocations lost or fail.
-
11040  if(bufferImageGranularity > 1)
-
11041  {
-
11042  while(index1st < suballocations1st.size())
+
11022  // Already on previous page.
+
11023  break;
+
11024  }
+
11025  if(bufferImageGranularityConflict)
+
11026  {
+
11027  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
+
11028  }
+
11029  }
+
11030 
+
11031  pAllocationRequest->itemsToMakeLostCount = 0;
+
11032  pAllocationRequest->sumItemSize = 0;
+
11033  size_t index1st = m_1stNullItemsBeginCount;
+
11034 
+
11035  if(canMakeOtherLost)
+
11036  {
+
11037  while(index1st < suballocations1st.size() &&
+
11038  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
+
11039  {
+
11040  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
+
11041  const VmaSuballocation& suballoc = suballocations1st[index1st];
+
11042  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
11043  {
-
11044  const VmaSuballocation& suballoc = suballocations1st[index1st];
-
11045  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
-
11046  {
-
11047  if(suballoc.hAllocation != VK_NULL_HANDLE)
-
11048  {
-
11049  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
-
11050  if(suballoc.hAllocation->CanBecomeLost() &&
-
11051  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
-
11052  {
-
11053  ++pAllocationRequest->itemsToMakeLostCount;
-
11054  pAllocationRequest->sumItemSize += suballoc.size;
-
11055  }
-
11056  else
-
11057  {
-
11058  return false;
-
11059  }
-
11060  }
-
11061  }
-
11062  else
-
11063  {
-
11064  // Already on next page.
-
11065  break;
-
11066  }
-
11067  ++index1st;
-
11068  }
-
11069  }
-
11070 
-
11071  // Special case: There is not enough room at the end for this allocation, even after making all from the 1st lost.
-
11072  if(index1st == suballocations1st.size() &&
-
11073  resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
-
11074  {
-
11075  // TODO: Known limitation: this case is not implemented yet, so the allocation fails.
-
11076  VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
-
11077  }
-
11078  }
-
11079 
-
11080  // There is enough free space at the end after alignment.
-
11081  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
-
11082  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
-
11083  {
-
11084  // Check next suballocations for BufferImageGranularity conflicts.
-
11085  // If conflict exists, allocation cannot be made here.
-
11086  if(bufferImageGranularity > 1)
-
11087  {
-
11088  for(size_t nextSuballocIndex = index1st;
-
11089  nextSuballocIndex < suballocations1st.size();
-
11090  nextSuballocIndex++)
-
11091  {
-
11092  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
-
11093  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
-
11094  {
-
11095  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
-
11096  {
-
11097  return false;
-
11098  }
-
11099  }
-
11100  else
-
11101  {
-
11102  // Already on next page.
-
11103  break;
-
11104  }
-
11105  }
-
11106  }
-
11107 
-
11108  // All tests passed: Success.
-
11109  pAllocationRequest->offset = resultOffset;
-
11110  pAllocationRequest->sumFreeSize =
-
11111  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
-
11112  - resultBaseOffset
-
11113  - pAllocationRequest->sumItemSize;
-
11114  pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
-
11115  // pAllocationRequest->item, customData unused.
-
11116  return true;
-
11117  }
-
11118  }
-
11119 
-
11120  return false;
-
11121 }
-
11122 
-
11123 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
-
11124  uint32_t currentFrameIndex,
-
11125  uint32_t frameInUseCount,
-
11126  VmaAllocationRequest* pAllocationRequest)
-
11127 {
-
11128  if(pAllocationRequest->itemsToMakeLostCount == 0)
-
11129  {
-
11130  return true;
-
11131  }
+
11044  // No problem.
+
11045  }
+
11046  else
+
11047  {
+
11048  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
+
11049  if(suballoc.hAllocation->CanBecomeLost() &&
+
11050  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
+
11051  {
+
11052  ++pAllocationRequest->itemsToMakeLostCount;
+
11053  pAllocationRequest->sumItemSize += suballoc.size;
+
11054  }
+
11055  else
+
11056  {
+
11057  return false;
+
11058  }
+
11059  }
+
11060  ++index1st;
+
11061  }
+
11062 
+
11063  // Check next suballocations for BufferImageGranularity conflicts.
+
11064  // If conflict exists, we must mark more allocations lost or fail.
+
11065  if(bufferImageGranularity > 1)
+
11066  {
+
11067  while(index1st < suballocations1st.size())
+
11068  {
+
11069  const VmaSuballocation& suballoc = suballocations1st[index1st];
+
11070  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
+
11071  {
+
11072  if(suballoc.hAllocation != VK_NULL_HANDLE)
+
11073  {
+
11074  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
+
11075  if(suballoc.hAllocation->CanBecomeLost() &&
+
11076  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
+
11077  {
+
11078  ++pAllocationRequest->itemsToMakeLostCount;
+
11079  pAllocationRequest->sumItemSize += suballoc.size;
+
11080  }
+
11081  else
+
11082  {
+
11083  return false;
+
11084  }
+
11085  }
+
11086  }
+
11087  else
+
11088  {
+
11089  // Already on next page.
+
11090  break;
+
11091  }
+
11092  ++index1st;
+
11093  }
+
11094  }
+
11095 
+
11096  // Special case: There is not enough room at the end for this allocation, even after making all from the 1st lost.
+
11097  if(index1st == suballocations1st.size() &&
+
11098  resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
+
11099  {
+
11100  // TODO: This is a known bug that it's not yet implemented and the allocation is failing.
+
11101  VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
+
11102  }
+
11103  }
+
11104 
+
11105  // There is enough free space at the end after alignment.
+
11106  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
+
11107  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
+
11108  {
+
11109  // Check next suballocations for BufferImageGranularity conflicts.
+
11110  // If conflict exists, allocation cannot be made here.
+
11111  if(bufferImageGranularity > 1)
+
11112  {
+
11113  for(size_t nextSuballocIndex = index1st;
+
11114  nextSuballocIndex < suballocations1st.size();
+
11115  nextSuballocIndex++)
+
11116  {
+
11117  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
+
11118  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
+
11119  {
+
11120  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
+
11121  {
+
11122  return false;
+
11123  }
+
11124  }
+
11125  else
+
11126  {
+
11127  // Already on next page.
+
11128  break;
+
11129  }
+
11130  }
+
11131  }
11132 
-
11133  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
-
11134 
-
11135  // We always start from 1st.
-
11136  SuballocationVectorType* suballocations = &AccessSuballocations1st();
-
11137  size_t index = m_1stNullItemsBeginCount;
-
11138  size_t madeLostCount = 0;
-
11139  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
-
11140  {
-
11141  if(index == suballocations->size())
-
11142  {
-
11143  index = 0;
-
11144  // If we get to the end of 1st, we wrap around to beginning of 2nd of 1st.
-
11145  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
-
11146  {
-
11147  suballocations = &AccessSuballocations2nd();
-
11148  }
-
11149  // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
-
11150  // suballocations continues pointing at AccessSuballocations1st().
-
11151  VMA_ASSERT(!suballocations->empty());
-
11152  }
-
11153  VmaSuballocation& suballoc = (*suballocations)[index];
-
11154  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
-
11155  {
-
11156  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
-
11157  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
-
11158  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
-
11159  {
-
11160  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
-
11161  suballoc.hAllocation = VK_NULL_HANDLE;
-
11162  m_SumFreeSize += suballoc.size;
-
11163  if(suballocations == &AccessSuballocations1st())
-
11164  {
-
11165  ++m_1stNullItemsMiddleCount;
-
11166  }
-
11167  else
-
11168  {
-
11169  ++m_2ndNullItemsCount;
-
11170  }
-
11171  ++madeLostCount;
-
11172  }
-
11173  else
-
11174  {
-
11175  return false;
-
11176  }
+
11133  // All tests passed: Success.
+
11134  pAllocationRequest->offset = resultOffset;
+
11135  pAllocationRequest->sumFreeSize =
+
11136  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
+
11137  - resultBaseOffset
+
11138  - pAllocationRequest->sumItemSize;
+
11139  pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
+
11140  // pAllocationRequest->item, customData unused.
+
11141  return true;
+
11142  }
+
11143  }
+
11144 
+
11145  return false;
+
11146 }
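The lower-address path mirrors this arithmetic in the other direction: start just past the last used byte, add VMA_DEBUG_MARGIN, then align up and check that the request still fits below freeSpaceEnd. A worked sketch under the same power-of-two assumption:

#include <cstdint>

// Round an offset up to a multiple of a power-of-two alignment.
static uint64_t AlignUp(uint64_t offset, uint64_t alignment)
{
    return (offset + alignment - 1) & ~(alignment - 1);
}

// Last suballocation ends at 1000, VMA_DEBUG_MARGIN = 16, alignment = 256:
// 1000 + 16 = 1016, aligned up to 1024. The request then succeeds only if
// 1024 + allocSize + 16 <= freeSpaceEnd.
uint64_t resultOffset = AlignUp(1000 + 16, 256); // == 1024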
+
11147 
+
11148 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
+
11149  uint32_t currentFrameIndex,
+
11150  uint32_t frameInUseCount,
+
11151  VmaAllocationRequest* pAllocationRequest)
+
11152 {
+
11153  if(pAllocationRequest->itemsToMakeLostCount == 0)
+
11154  {
+
11155  return true;
+
11156  }
+
11157 
+
11158  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
+
11159 
+
11160  // We always start from 1st.
+
11161  SuballocationVectorType* suballocations = &AccessSuballocations1st();
+
11162  size_t index = m_1stNullItemsBeginCount;
+
11163  size_t madeLostCount = 0;
+
11164  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
+
11165  {
+
11166  if(index == suballocations->size())
+
11167  {
+
11168  index = 0;
+
11169  // If we get to the end of 1st, we wrap around to beginning of 2nd of 1st.
+
11170  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
+
11171  {
+
11172  suballocations = &AccessSuballocations2nd();
+
11173  }
+
11174  // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
+
11175  // suballocations continues pointing at AccessSuballocations1st().
+
11176  VMA_ASSERT(!suballocations->empty());
11177  }
-
11178  ++index;
-
11179  }
-
11180 
-
11181  CleanupAfterFree();
-
11182  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
-
11183 
-
11184  return true;
-
11185 }
-
11186 
-
11187 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
-
11188 {
-
11189  uint32_t lostAllocationCount = 0;
-
11190 
-
11191  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-
11192  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
-
11193  {
-
11194  VmaSuballocation& suballoc = suballocations1st[i];
-
11195  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
-
11196  suballoc.hAllocation->CanBecomeLost() &&
-
11197  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
-
11198  {
-
11199  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
-
11200  suballoc.hAllocation = VK_NULL_HANDLE;
-
11201  ++m_1stNullItemsMiddleCount;
-
11202  m_SumFreeSize += suballoc.size;
-
11203  ++lostAllocationCount;
-
11204  }
-
11205  }
-
11206 
-
11207  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-
11208  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
-
11209  {
-
11210  VmaSuballocation& suballoc = suballocations2nd[i];
-
11211  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
-
11212  suballoc.hAllocation->CanBecomeLost() &&
-
11213  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
-
11214  {
-
11215  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
-
11216  suballoc.hAllocation = VK_NULL_HANDLE;
-
11217  ++m_2ndNullItemsCount;
-
11218  m_SumFreeSize += suballoc.size;
-
11219  ++lostAllocationCount;
-
11220  }
-
11221  }
-
11222 
-
11223  if(lostAllocationCount)
-
11224  {
-
11225  CleanupAfterFree();
-
11226  }
-
11227 
-
11228  return lostAllocationCount;
-
11229 }
-
11230 
-
11231 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
-
11232 {
-
11233  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-
11234  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
-
11235  {
-
11236  const VmaSuballocation& suballoc = suballocations1st[i];
-
11237  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
-
11238  {
-
11239  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
-
11240  {
-
11241  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
-
11242  return VK_ERROR_VALIDATION_FAILED_EXT;
-
11243  }
-
11244  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
-
11245  {
-
11246  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
-
11247  return VK_ERROR_VALIDATION_FAILED_EXT;
-
11248  }
-
11249  }
-
11250  }
-
11251 
-
11252  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-
11253  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
-
11254  {
-
11255  const VmaSuballocation& suballoc = suballocations2nd[i];
-
11256  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
-
11257  {
-
11258  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
-
11259  {
-
11260  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
-
11261  return VK_ERROR_VALIDATION_FAILED_EXT;
-
11262  }
-
11263  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
-
11264  {
-
11265  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
-
11266  return VK_ERROR_VALIDATION_FAILED_EXT;
-
11267  }
-
11268  }
-
11269  }
-
11270 
-
11271  return VK_SUCCESS;
-
11272 }
-
11273 
-
11274 void VmaBlockMetadata_Linear::Alloc(
-
11275  const VmaAllocationRequest& request,
-
11276  VmaSuballocationType type,
-
11277  VkDeviceSize allocSize,
-
11278  VmaAllocation hAllocation)
-
11279 {
-
11280  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
-
11281 
-
11282  switch(request.type)
-
11283  {
-
11284  case VmaAllocationRequestType::UpperAddress:
-
11285  {
-
11286  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
-
11287  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
-
11288  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-
11289  suballocations2nd.push_back(newSuballoc);
-
11290  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
-
11291  }
-
11292  break;
-
11293  case VmaAllocationRequestType::EndOf1st:
-
11294  {
-
11295  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-
11296 
-
11297  VMA_ASSERT(suballocations1st.empty() ||
-
11298  request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
-
11299  // Check if it fits before the end of the block.
-
11300  VMA_ASSERT(request.offset + allocSize <= GetSize());
-
11301 
-
11302  suballocations1st.push_back(newSuballoc);
-
11303  }
-
11304  break;
-
11305  case VmaAllocationRequestType::EndOf2nd:
-
11306  {
-
11307  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-
11308  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
-
11309  VMA_ASSERT(!suballocations1st.empty() &&
-
11310  request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
-
11311  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-
11312 
-
11313  switch(m_2ndVectorMode)
-
11314  {
-
11315  case SECOND_VECTOR_EMPTY:
-
11316  // First allocation from second part ring buffer.
-
11317  VMA_ASSERT(suballocations2nd.empty());
-
11318  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
-
11319  break;
-
11320  case SECOND_VECTOR_RING_BUFFER:
-
11321  // 2-part ring buffer is already started.
-
11322  VMA_ASSERT(!suballocations2nd.empty());
-
11323  break;
-
11324  case SECOND_VECTOR_DOUBLE_STACK:
-
11325  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
-
11326  break;
-
11327  default:
-
11328  VMA_ASSERT(0);
-
11329  }
-
11330 
-
11331  suballocations2nd.push_back(newSuballoc);
-
11332  }
-
11333  break;
-
11334  default:
-
11335  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
-
11336  }
+
11178  VmaSuballocation& suballoc = (*suballocations)[index];
+
11179  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
+
11180  {
+
11181  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
+
11182  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
+
11183  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
+
11184  {
+
11185  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
+
11186  suballoc.hAllocation = VK_NULL_HANDLE;
+
11187  m_SumFreeSize += suballoc.size;
+
11188  if(suballocations == &AccessSuballocations1st())
+
11189  {
+
11190  ++m_1stNullItemsMiddleCount;
+
11191  }
+
11192  else
+
11193  {
+
11194  ++m_2ndNullItemsCount;
+
11195  }
+
11196  ++madeLostCount;
+
11197  }
+
11198  else
+
11199  {
+
11200  return false;
+
11201  }
+
11202  }
+
11203  ++index;
+
11204  }
+
11205 
+
11206  CleanupAfterFree();
+
11207  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
+
11208 
+
11209  return true;
+
11210 }
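MakeRequestedAllocationsLost only ever sees allocations that were created as lost-able. A hedged client-side sketch of opting into this mechanism; allocator and alloc are assumed to exist already:

// Request an allocation that the allocator may mark as lost under pressure.
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT;

// Each frame, before using the resource, bump its last-use frame index and
// check whether it is still valid; vmaTouchAllocation returns VK_FALSE if lost.
if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
{
    // The allocation was made lost. Recreate the resource.
}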
+
11211 
+
11212 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
+
11213 {
+
11214  uint32_t lostAllocationCount = 0;
+
11215 
+
11216  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
+
11217  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
+
11218  {
+
11219  VmaSuballocation& suballoc = suballocations1st[i];
+
11220  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
+
11221  suballoc.hAllocation->CanBecomeLost() &&
+
11222  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
+
11223  {
+
11224  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
+
11225  suballoc.hAllocation = VK_NULL_HANDLE;
+
11226  ++m_1stNullItemsMiddleCount;
+
11227  m_SumFreeSize += suballoc.size;
+
11228  ++lostAllocationCount;
+
11229  }
+
11230  }
+
11231 
+
11232  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+
11233  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
+
11234  {
+
11235  VmaSuballocation& suballoc = suballocations2nd[i];
+
11236  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
+
11237  suballoc.hAllocation->CanBecomeLost() &&
+
11238  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
+
11239  {
+
11240  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
+
11241  suballoc.hAllocation = VK_NULL_HANDLE;
+
11242  ++m_2ndNullItemsCount;
+
11243  m_SumFreeSize += suballoc.size;
+
11244  ++lostAllocationCount;
+
11245  }
+
11246  }
+
11247 
+
11248  if(lostAllocationCount)
+
11249  {
+
11250  CleanupAfterFree();
+
11251  }
+
11252 
+
11253  return lostAllocationCount;
+
11254 }
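Both MakeLost paths compare GetLastUseFrameIndex() + frameInUseCount against currentFrameIndex, so nothing can become lost unless the application advances the allocator's frame counter. A minimal sketch of that contract; device setup fields are omitted:

// frameInUseCount is fixed at allocator creation: allocations used within the
// last frameInUseCount frames are never eligible to become lost.
VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.frameInUseCount = 1;
// ... physicalDevice, device, instance filled in as usual ...

// Advance the frame counter once per frame, e.g. right after acquiring an image.
vmaSetCurrentFrameIndex(allocator, frameIndex);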
+
11255 
+
11256 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
+
11257 {
+
11258  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
+
11259  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
+
11260  {
+
11261  const VmaSuballocation& suballoc = suballocations1st[i];
+
11262  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
+
11263  {
+
11264  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
+
11265  {
+
11266  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
+
11267  return VK_ERROR_VALIDATION_FAILED_EXT;
+
11268  }
+
11269  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
+
11270  {
+
11271  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
+
11272  return VK_ERROR_VALIDATION_FAILED_EXT;
+
11273  }
+
11274  }
+
11275  }
+
11276 
+
11277  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+
11278  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
+
11279  {
+
11280  const VmaSuballocation& suballoc = suballocations2nd[i];
+
11281  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
+
11282  {
+
11283  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
+
11284  {
+
11285  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
+
11286  return VK_ERROR_VALIDATION_FAILED_EXT;
+
11287  }
+
11288  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
+
11289  {
+
11290  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
+
11291  return VK_ERROR_VALIDATION_FAILED_EXT;
+
11292  }
+
11293  }
+
11294  }
+
11295 
+
11296  return VK_SUCCESS;
+
11297 }
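CheckCorruption validates the magic values that were written into the VMA_DEBUG_MARGIN region around each allocation, so it can only report anything if those macros were enabled when the implementation was compiled. A sketch of the configuration plus the public entry point:

// Compile-time setup, before including the implementation:
#define VMA_DEBUG_MARGIN 16
#define VMA_DEBUG_DETECT_CORRUPTION 1
#define VMA_IMPLEMENTATION
#include "vk_mem_alloc.h"

// Runtime check over all memory types; returns VK_ERROR_VALIDATION_FAILED_EXT
// if a margin was overwritten, as asserted in the code above.
VkResult res = vmaCheckCorruption(allocator, UINT32_MAX);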
+
11298 
+
11299 void VmaBlockMetadata_Linear::Alloc(
+
11300  const VmaAllocationRequest& request,
+
11301  VmaSuballocationType type,
+
11302  VkDeviceSize allocSize,
+
11303  VmaAllocation hAllocation)
+
11304 {
+
11305  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
+
11306 
+
11307  switch(request.type)
+
11308  {
+
11309  case VmaAllocationRequestType::UpperAddress:
+
11310  {
+
11311  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
+
11312  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
+
11313  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+
11314  suballocations2nd.push_back(newSuballoc);
+
11315  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
+
11316  }
+
11317  break;
+
11318  case VmaAllocationRequestType::EndOf1st:
+
11319  {
+
11320  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
+
11321 
+
11322  VMA_ASSERT(suballocations1st.empty() ||
+
11323  request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
+
11324  // Check if it fits before the end of the block.
+
11325  VMA_ASSERT(request.offset + allocSize <= GetSize());
+
11326 
+
11327  suballocations1st.push_back(newSuballoc);
+
11328  }
+
11329  break;
+
11330  case VmaAllocationRequestType::EndOf2nd:
+
11331  {
+
11332  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
+
11333  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
+
11334  VMA_ASSERT(!suballocations1st.empty() &&
+
11335  request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
+
11336  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11337 
-
11338  m_SumFreeSize -= newSuballoc.size;
-
11339 }
-
11340 
-
11341 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
-
11342 {
-
11343  FreeAtOffset(allocation->GetOffset());
-
11344 }
-
11345 
-
11346 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
-
11347 {
-
11348  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-
11349  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-
11350 
-
11351  if(!suballocations1st.empty())
-
11352  {
-
11353  // Freeing the first allocation in 1st vector: mark it free and extend the empty range at the beginning.
-
11354  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
-
11355  if(firstSuballoc.offset == offset)
-
11356  {
-
11357  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
-
11358  firstSuballoc.hAllocation = VK_NULL_HANDLE;
-
11359  m_SumFreeSize += firstSuballoc.size;
-
11360  ++m_1stNullItemsBeginCount;
-
11361  CleanupAfterFree();
-
11362  return;
-
11363  }
-
11364  }
+
11338  switch(m_2ndVectorMode)
+
11339  {
+
11340  case SECOND_VECTOR_EMPTY:
+
11341  // First allocation from second part ring buffer.
+
11342  VMA_ASSERT(suballocations2nd.empty());
+
11343  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
+
11344  break;
+
11345  case SECOND_VECTOR_RING_BUFFER:
+
11346  // 2-part ring buffer is already started.
+
11347  VMA_ASSERT(!suballocations2nd.empty());
+
11348  break;
+
11349  case SECOND_VECTOR_DOUBLE_STACK:
+
11350  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
+
11351  break;
+
11352  default:
+
11353  VMA_ASSERT(0);
+
11354  }
+
11355 
+
11356  suballocations2nd.push_back(newSuballoc);
+
11357  }
+
11358  break;
+
11359  default:
+
11360  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
+
11361  }
+
11362 
+
11363  m_SumFreeSize -= newSuballoc.size;
+
11364 }
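The switch above is the single point where the three request types turn into vector operations. A comment-only summary of that mapping, for orientation:

// UpperAddress -> push_back to 2nd vector; mode becomes DOUBLE_STACK
//                 (allocations stack downward from the end of the block).
// EndOf1st     -> push_back to 1st vector
//                 (allocations grow upward from the beginning of the block).
// EndOf2nd     -> push_back to 2nd vector; mode becomes or stays RING_BUFFER
//                 (wrapped-around allocations sit below the head of the 1st vector).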
11365 
-
11366  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
-
11367  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
-
11368  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
-
11369  {
-
11370  VmaSuballocation& lastSuballoc = suballocations2nd.back();
-
11371  if(lastSuballoc.offset == offset)
-
11372  {
-
11373  m_SumFreeSize += lastSuballoc.size;
-
11374  suballocations2nd.pop_back();
-
11375  CleanupAfterFree();
-
11376  return;
-
11377  }
-
11378  }
-
11379  // Last allocation in 1st vector.
-
11380  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
-
11381  {
-
11382  VmaSuballocation& lastSuballoc = suballocations1st.back();
-
11383  if(lastSuballoc.offset == offset)
-
11384  {
-
11385  m_SumFreeSize += lastSuballoc.size;
-
11386  suballocations1st.pop_back();
-
11387  CleanupAfterFree();
-
11388  return;
-
11389  }
-
11390  }
-
11391 
-
11392  // Item from the middle of 1st vector.
-
11393  {
-
11394  VmaSuballocation refSuballoc;
-
11395  refSuballoc.offset = offset;
-
11396  // Rest of members stays uninitialized intentionally for better performance.
-
11397  SuballocationVectorType::iterator it = VmaBinaryFindSorted(
-
11398  suballocations1st.begin() + m_1stNullItemsBeginCount,
-
11399  suballocations1st.end(),
-
11400  refSuballoc,
-
11401  VmaSuballocationOffsetLess());
-
11402  if(it != suballocations1st.end())
-
11403  {
-
11404  it->type = VMA_SUBALLOCATION_TYPE_FREE;
-
11405  it->hAllocation = VK_NULL_HANDLE;
-
11406  ++m_1stNullItemsMiddleCount;
-
11407  m_SumFreeSize += it->size;
-
11408  CleanupAfterFree();
-
11409  return;
-
11410  }
-
11411  }
-
11412 
-
11413  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
-
11414  {
-
11415  // Item from the middle of 2nd vector.
-
11416  VmaSuballocation refSuballoc;
-
11417  refSuballoc.offset = offset;
-
11418  // Rest of members stays uninitialized intentionally for better performance.
-
11419  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
-
11420  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
-
11421  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
-
11422  if(it != suballocations2nd.end())
-
11423  {
-
11424  it->type = VMA_SUBALLOCATION_TYPE_FREE;
-
11425  it->hAllocation = VK_NULL_HANDLE;
-
11426  ++m_2ndNullItemsCount;
-
11427  m_SumFreeSize += it->size;
-
11428  CleanupAfterFree();
-
11429  return;
-
11430  }
-
11431  }
-
11432 
-
11433  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
-
11434 }
-
11435 
-
11436 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
-
11437 {
-
11438  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
-
11439  const size_t suballocCount = AccessSuballocations1st().size();
-
11440  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
-
11441 }
-
11442 
-
11443 void VmaBlockMetadata_Linear::CleanupAfterFree()
-
11444 {
-
11445  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-
11446  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-
11447 
-
11448  if(IsEmpty())
-
11449  {
-
11450  suballocations1st.clear();
-
11451  suballocations2nd.clear();
-
11452  m_1stNullItemsBeginCount = 0;
-
11453  m_1stNullItemsMiddleCount = 0;
-
11454  m_2ndNullItemsCount = 0;
-
11455  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
+
11366 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
+
11367 {
+
11368  FreeAtOffset(allocation->GetOffset());
+
11369 }
+
11370 
+
11371 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
+
11372 {
+
11373  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
+
11374  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+
11375 
+
11376  if(!suballocations1st.empty())
+
11377  {
+
11378  // Freeing the first allocation in 1st vector: mark it free and extend the empty range at the beginning.
+
11379  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
+
11380  if(firstSuballoc.offset == offset)
+
11381  {
+
11382  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
+
11383  firstSuballoc.hAllocation = VK_NULL_HANDLE;
+
11384  m_SumFreeSize += firstSuballoc.size;
+
11385  ++m_1stNullItemsBeginCount;
+
11386  CleanupAfterFree();
+
11387  return;
+
11388  }
+
11389  }
+
11390 
+
11391  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
+
11392  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
+
11393  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
+
11394  {
+
11395  VmaSuballocation& lastSuballoc = suballocations2nd.back();
+
11396  if(lastSuballoc.offset == offset)
+
11397  {
+
11398  m_SumFreeSize += lastSuballoc.size;
+
11399  suballocations2nd.pop_back();
+
11400  CleanupAfterFree();
+
11401  return;
+
11402  }
+
11403  }
+
11404  // Last allocation in 1st vector.
+
11405  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
+
11406  {
+
11407  VmaSuballocation& lastSuballoc = suballocations1st.back();
+
11408  if(lastSuballoc.offset == offset)
+
11409  {
+
11410  m_SumFreeSize += lastSuballoc.size;
+
11411  suballocations1st.pop_back();
+
11412  CleanupAfterFree();
+
11413  return;
+
11414  }
+
11415  }
+
11416 
+
11417  // Item from the middle of 1st vector.
+
11418  {
+
11419  VmaSuballocation refSuballoc;
+
11420  refSuballoc.offset = offset;
+
11421  // Rest of members stays uninitialized intentionally for better performance.
+
11422  SuballocationVectorType::iterator it = VmaBinaryFindSorted(
+
11423  suballocations1st.begin() + m_1stNullItemsBeginCount,
+
11424  suballocations1st.end(),
+
11425  refSuballoc,
+
11426  VmaSuballocationOffsetLess());
+
11427  if(it != suballocations1st.end())
+
11428  {
+
11429  it->type = VMA_SUBALLOCATION_TYPE_FREE;
+
11430  it->hAllocation = VK_NULL_HANDLE;
+
11431  ++m_1stNullItemsMiddleCount;
+
11432  m_SumFreeSize += it->size;
+
11433  CleanupAfterFree();
+
11434  return;
+
11435  }
+
11436  }
+
11437 
+
11438  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
+
11439  {
+
11440  // Item from the middle of 2nd vector.
+
11441  VmaSuballocation refSuballoc;
+
11442  refSuballoc.offset = offset;
+
11443  // Rest of members stays uninitialized intentionally for better performance.
+
11444  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
+
11445  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
+
11446  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
+
11447  if(it != suballocations2nd.end())
+
11448  {
+
11449  it->type = VMA_SUBALLOCATION_TYPE_FREE;
+
11450  it->hAllocation = VK_NULL_HANDLE;
+
11451  ++m_2ndNullItemsCount;
+
11452  m_SumFreeSize += it->size;
+
11453  CleanupAfterFree();
+
11454  return;
+
11455  }
11456  }
-
11457  else
-
11458  {
-
11459  const size_t suballoc1stCount = suballocations1st.size();
-
11460  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
-
11461  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
-
11462 
-
11463  // Find more null items at the beginning of 1st vector.
-
11464  while(m_1stNullItemsBeginCount < suballoc1stCount &&
-
11465  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
-
11466  {
-
11467  ++m_1stNullItemsBeginCount;
-
11468  --m_1stNullItemsMiddleCount;
-
11469  }
-
11470 
-
11471  // Find more null items at the end of 1st vector.
-
11472  while(m_1stNullItemsMiddleCount > 0 &&
-
11473  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
-
11474  {
-
11475  --m_1stNullItemsMiddleCount;
-
11476  suballocations1st.pop_back();
-
11477  }
-
11478 
-
11479  // Find more null items at the end of 2nd vector.
-
11480  while(m_2ndNullItemsCount > 0 &&
-
11481  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
-
11482  {
-
11483  --m_2ndNullItemsCount;
-
11484  suballocations2nd.pop_back();
-
11485  }
-
11486 
-
11487  // Find more null items at the beginning of 2nd vector.
-
11488  while(m_2ndNullItemsCount > 0 &&
-
11489  suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
-
11490  {
-
11491  --m_2ndNullItemsCount;
-
11492  VmaVectorRemove(suballocations2nd, 0);
-
11493  }
-
11494 
-
11495  if(ShouldCompact1st())
-
11496  {
-
11497  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
-
11498  size_t srcIndex = m_1stNullItemsBeginCount;
-
11499  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
-
11500  {
-
11501  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
-
11502  {
-
11503  ++srcIndex;
-
11504  }
-
11505  if(dstIndex != srcIndex)
-
11506  {
-
11507  suballocations1st[dstIndex] = suballocations1st[srcIndex];
-
11508  }
-
11509  ++srcIndex;
-
11510  }
-
11511  suballocations1st.resize(nonNullItemCount);
-
11512  m_1stNullItemsBeginCount = 0;
-
11513  m_1stNullItemsMiddleCount = 0;
-
11514  }
-
11515 
-
11516  // 2nd vector became empty.
-
11517  if(suballocations2nd.empty())
-
11518  {
-
11519  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
-
11520  }
-
11521 
-
11522  // 1st vector became empty.
-
11523  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
-
11524  {
-
11525  suballocations1st.clear();
-
11526  m_1stNullItemsBeginCount = 0;
-
11527 
-
11528  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
-
11529  {
-
11530  // Swap 1st with 2nd. Now 2nd is empty.
-
11531  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
-
11532  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
-
11533  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
-
11534  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
-
11535  {
-
11536  ++m_1stNullItemsBeginCount;
-
11537  --m_1stNullItemsMiddleCount;
-
11538  }
-
11539  m_2ndNullItemsCount = 0;
-
11540  m_1stVectorIndex ^= 1;
-
11541  }
-
11542  }
-
11543  }
-
11544 
-
11545  VMA_HEAVY_ASSERT(Validate());
-
11546 }
-
11547 
-
11548 
-
11550 // class VmaBlockMetadata_Buddy
-
11551 
-
11552 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
-
11553  VmaBlockMetadata(hAllocator),
-
11554  m_Root(VMA_NULL),
-
11555  m_AllocationCount(0),
-
11556  m_FreeCount(1),
-
11557  m_SumFreeSize(0)
-
11558 {
-
11559  memset(m_FreeList, 0, sizeof(m_FreeList));
-
11560 }
-
11561 
-
11562 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
-
11563 {
-
11564  DeleteNode(m_Root);
-
11565 }
-
11566 
-
11567 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
-
11568 {
-
11569  VmaBlockMetadata::Init(size);
-
11570 
-
11571  m_UsableSize = VmaPrevPow2(size);
-
11572  m_SumFreeSize = m_UsableSize;
+
11457 
+
11458  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
+
11459 }
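Note the two comparators above: in ring-buffer mode the 2nd vector is sorted by ascending offset, while in double-stack mode it grows downward from the end of the block, so its offsets descend and VmaSuballocationOffsetGreater is required. A standalone sketch of the descending case:

#include <algorithm>
#include <cstdint>
#include <vector>

// Binary search in a descending-sorted offset vector, analogous to
// VmaBinaryFindSorted with VmaSuballocationOffsetGreater.
std::vector<uint64_t> offsets = { 900, 700, 400, 100 }; // upper stack: top first
uint64_t wanted = 400;
auto it = std::lower_bound(offsets.begin(), offsets.end(), wanted,
    [](uint64_t a, uint64_t b) { return a > b; });
bool found = (it != offsets.end() && *it == wanted); // true, points at 400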
+
11460 
+
11461 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
+
11462 {
+
11463  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
+
11464  const size_t suballocCount = AccessSuballocations1st().size();
+
11465  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
+
11466 }
+
11467 
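
The heuristic above compacts the 1st vector only once it holds more than 32 suballocations and the freed (null) entries outnumber the live ones by at least 3:2. A minimal standalone sketch of the same test, with the ratio kept in integer arithmetic (hypothetical helper, not part of the library):

#include <cstddef>

// Mirrors the condition in ShouldCompact1st() above.
bool ShouldCompact(size_t nullItemCount, size_t suballocCount)
{
    const size_t liveCount = suballocCount - nullItemCount;
    // "nullItemCount >= 1.5 * liveCount" without floating point.
    return suballocCount > 32 && nullItemCount * 2 >= liveCount * 3;
}
// Example: 100 items, 60 freed -> 60*2 == 120 >= 40*3 == 120 -> compact.
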
+
11468 void VmaBlockMetadata_Linear::CleanupAfterFree()
+
11469 {
+
11470  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
+
11471  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+
11472 
+
11473  if(IsEmpty())
+
11474  {
+
11475  suballocations1st.clear();
+
11476  suballocations2nd.clear();
+
11477  m_1stNullItemsBeginCount = 0;
+
11478  m_1stNullItemsMiddleCount = 0;
+
11479  m_2ndNullItemsCount = 0;
+
11480  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
+
11481  }
+
11482  else
+
11483  {
+
11484  const size_t suballoc1stCount = suballocations1st.size();
+
11485  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
+
11486  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
+
11487 
+
11488  // Find more null items at the beginning of 1st vector.
+
11489  while(m_1stNullItemsBeginCount < suballoc1stCount &&
+
11490  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
+
11491  {
+
11492  ++m_1stNullItemsBeginCount;
+
11493  --m_1stNullItemsMiddleCount;
+
11494  }
+
11495 
+
11496  // Find more null items at the end of 1st vector.
+
11497  while(m_1stNullItemsMiddleCount > 0 &&
+
11498  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
+
11499  {
+
11500  --m_1stNullItemsMiddleCount;
+
11501  suballocations1st.pop_back();
+
11502  }
+
11503 
+
11504  // Find more null items at the end of 2nd vector.
+
11505  while(m_2ndNullItemsCount > 0 &&
+
11506  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
+
11507  {
+
11508  --m_2ndNullItemsCount;
+
11509  suballocations2nd.pop_back();
+
11510  }
+
11511 
+
11512  // Find more null items at the beginning of 2nd vector.
+
11513  while(m_2ndNullItemsCount > 0 &&
+
11514  suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
+
11515  {
+
11516  --m_2ndNullItemsCount;
+
11517  VmaVectorRemove(suballocations2nd, 0);
+
11518  }
+
11519 
+
11520  if(ShouldCompact1st())
+
11521  {
+
11522  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
+
11523  size_t srcIndex = m_1stNullItemsBeginCount;
+
11524  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
+
11525  {
+
11526  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
+
11527  {
+
11528  ++srcIndex;
+
11529  }
+
11530  if(dstIndex != srcIndex)
+
11531  {
+
11532  suballocations1st[dstIndex] = suballocations1st[srcIndex];
+
11533  }
+
11534  ++srcIndex;
+
11535  }
+
11536  suballocations1st.resize(nonNullItemCount);
+
11537  m_1stNullItemsBeginCount = 0;
+
11538  m_1stNullItemsMiddleCount = 0;
+
11539  }
+
11540 
+
11541  // 2nd vector became empty.
+
11542  if(suballocations2nd.empty())
+
11543  {
+
11544  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
+
11545  }
+
11546 
+
11547  // 1st vector became empty.
+
11548  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
+
11549  {
+
11550  suballocations1st.clear();
+
11551  m_1stNullItemsBeginCount = 0;
+
11552 
+
11553  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
+
11554  {
+
11555  // Swap 1st with 2nd. Now 2nd is empty.
+
11556  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
+
11557  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
+
11558  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
+
11559  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
+
11560  {
+
11561  ++m_1stNullItemsBeginCount;
+
11562  --m_1stNullItemsMiddleCount;
+
11563  }
+
11564  m_2ndNullItemsCount = 0;
+
11565  m_1stVectorIndex ^= 1;
+
11566  }
+
11567  }
+
11568  }
+
11569 
+
11570  VMA_HEAVY_ASSERT(Validate());
+
11571 }
+
11572 
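
CleanupAfterFree() above defers real work where it can: freed (null) entries are first trimmed from both ends of the two vectors with cheap pop_back calls and begin-counter bumps, and only the nulls left in the middle can trigger the ShouldCompact1st() pass. A minimal sketch of the tail trim on a plain vector, with an assumed item type:

#include <cstddef>
#include <vector>

// Hypothetical suballocation: a null handle marks a freed slot.
struct Item { void* hAllocation; };

// Pop freed items off the tail; O(1) per item, unlike erasing mid-vector.
void TrimFreedTail(std::vector<Item>& v, size_t& nullItemCount)
{
    while(nullItemCount > 0 && !v.empty() && v.back().hAllocation == nullptr)
    {
        --nullItemCount;
        v.pop_back();
    }
}
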
11573 
-
11574  // Calculate m_LevelCount.
-
11575  m_LevelCount = 1;
-
11576  while(m_LevelCount < MAX_LEVELS &&
-
11577  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
-
11578  {
-
11579  ++m_LevelCount;
-
11580  }
-
11581 
-
11582  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
-
11583  rootNode->offset = 0;
-
11584  rootNode->type = Node::TYPE_FREE;
-
11585  rootNode->parent = VMA_NULL;
-
11586  rootNode->buddy = VMA_NULL;
-
11587 
-
11588  m_Root = rootNode;
-
11589  AddToFreeListFront(0, rootNode);
+
11574 ////////////////////////////////////////////////////////////////////////////////
+
11575 // class VmaBlockMetadata_Buddy
+
11576 
+
11577 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
+
11578  VmaBlockMetadata(hAllocator),
+
11579  m_Root(VMA_NULL),
+
11580  m_AllocationCount(0),
+
11581  m_FreeCount(1),
+
11582  m_SumFreeSize(0)
+
11583 {
+
11584  memset(m_FreeList, 0, sizeof(m_FreeList));
+
11585 }
+
11586 
+
11587 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
+
11588 {
+
11589  DeleteNode(m_Root);
11590 }
11591 
-
11592 bool VmaBlockMetadata_Buddy::Validate() const
-
11593 {
-
11594  // Validate tree.
-
11595  ValidationContext ctx;
-
11596  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
-
11597  {
-
11598  VMA_VALIDATE(false && "ValidateNode failed.");
-
11599  }
-
11600  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
-
11601  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
-
11602 
-
11603  // Validate free node lists.
-
11604  for(uint32_t level = 0; level < m_LevelCount; ++level)
-
11605  {
-
11606  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
-
11607  m_FreeList[level].front->free.prev == VMA_NULL);
-
11608 
-
11609  for(Node* node = m_FreeList[level].front;
-
11610  node != VMA_NULL;
-
11611  node = node->free.next)
-
11612  {
-
11613  VMA_VALIDATE(node->type == Node::TYPE_FREE);
-
11614 
-
11615  if(node->free.next == VMA_NULL)
-
11616  {
-
11617  VMA_VALIDATE(m_FreeList[level].back == node);
-
11618  }
-
11619  else
-
11620  {
-
11621  VMA_VALIDATE(node->free.next->free.prev == node);
-
11622  }
-
11623  }
+
11592 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
+
11593 {
+
11594  VmaBlockMetadata::Init(size);
+
11595 
+
11596  m_UsableSize = VmaPrevPow2(size);
+
11597  m_SumFreeSize = m_UsableSize;
+
11598 
+
11599  // Calculate m_LevelCount.
+
11600  m_LevelCount = 1;
+
11601  while(m_LevelCount < MAX_LEVELS &&
+
11602  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
+
11603  {
+
11604  ++m_LevelCount;
+
11605  }
+
11606 
+
11607  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
+
11608  rootNode->offset = 0;
+
11609  rootNode->type = Node::TYPE_FREE;
+
11610  rootNode->parent = VMA_NULL;
+
11611  rootNode->buddy = VMA_NULL;
+
11612 
+
11613  m_Root = rootNode;
+
11614  AddToFreeListFront(0, rootNode);
+
11615 }
+
11616 
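
Note in Init() above that the buddy metadata manages only the largest power-of-two prefix of the block (m_UsableSize = VmaPrevPow2(size)); the remainder is reported later as an unusable range in the statistics. A sketch of the prev-pow-2 operation using C++20 <bit>, shown only for illustration since the library ships its own VmaPrevPow2 helper:

#include <bit>
#include <cstdint>

// Largest power of two not exceeding v, e.g. a 33 MiB block -> 32 MiB usable.
uint64_t PrevPow2(uint64_t v)
{
    return std::bit_floor(v); // assumes v > 0, which holds for block sizes
}
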
+
11617 bool VmaBlockMetadata_Buddy::Validate() const
+
11618 {
+
11619  // Validate tree.
+
11620  ValidationContext ctx;
+
11621  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
+
11622  {
+
11623  VMA_VALIDATE(false && "ValidateNode failed.");
11624  }
-
11625 
-
11626  // Validate that free lists at higher levels are empty.
-
11627  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
-
11628  {
-
11629  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
-
11630  }
-
11631 
-
11632  return true;
-
11633 }
-
11634 
-
11635 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
-
11636 {
-
11637  for(uint32_t level = 0; level < m_LevelCount; ++level)
-
11638  {
-
11639  if(m_FreeList[level].front != VMA_NULL)
-
11640  {
-
11641  return LevelToNodeSize(level);
-
11642  }
-
11643  }
-
11644  return 0;
-
11645 }
-
11646 
-
11647 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
-
11648 {
-
11649  const VkDeviceSize unusableSize = GetUnusableSize();
+
11625  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
+
11626  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
+
11627 
+
11628  // Validate free node lists.
+
11629  for(uint32_t level = 0; level < m_LevelCount; ++level)
+
11630  {
+
11631  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
+
11632  m_FreeList[level].front->free.prev == VMA_NULL);
+
11633 
+
11634  for(Node* node = m_FreeList[level].front;
+
11635  node != VMA_NULL;
+
11636  node = node->free.next)
+
11637  {
+
11638  VMA_VALIDATE(node->type == Node::TYPE_FREE);
+
11639 
+
11640  if(node->free.next == VMA_NULL)
+
11641  {
+
11642  VMA_VALIDATE(m_FreeList[level].back == node);
+
11643  }
+
11644  else
+
11645  {
+
11646  VMA_VALIDATE(node->free.next->free.prev == node);
+
11647  }
+
11648  }
+
11649  }
11650 
-
11651  outInfo.blockCount = 1;
-
11652 
-
11653  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
-
11654  outInfo.usedBytes = outInfo.unusedBytes = 0;
-
11655 
-
11656  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
-
11657  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
-
11658  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
+
11651  // Validate that free lists at higher levels are empty.
+
11652  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
+
11653  {
+
11654  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
+
11655  }
+
11656 
+
11657  return true;
+
11658 }
11659 
-
11660  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
-
11661 
-
11662  if(unusableSize > 0)
+
11660 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
+
11661 {
+
11662  for(uint32_t level = 0; level < m_LevelCount; ++level)
11663  {
-
11664  ++outInfo.unusedRangeCount;
-
11665  outInfo.unusedBytes += unusableSize;
-
11666  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
-
11667  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
+
11664  if(m_FreeList[level].front != VMA_NULL)
+
11665  {
+
11666  return LevelToNodeSize(level);
+
11667  }
11668  }
-
11669 }
-
11670 
-
11671 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
-
11672 {
-
11673  const VkDeviceSize unusableSize = GetUnusableSize();
-
11674 
-
11675  inoutStats.size += GetSize();
-
11676  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
-
11677  inoutStats.allocationCount += m_AllocationCount;
-
11678  inoutStats.unusedRangeCount += m_FreeCount;
-
11679  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
+
11669  return 0;
+
11670 }
+
11671 
+
11672 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
+
11673 {
+
11674  const VkDeviceSize unusableSize = GetUnusableSize();
+
11675 
+
11676  outInfo.blockCount = 1;
+
11677 
+
11678  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
+
11679  outInfo.usedBytes = outInfo.unusedBytes = 0;
11680 
-
11681  if(unusableSize > 0)
-
11682  {
-
11683  ++inoutStats.unusedRangeCount;
-
11684  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
-
11685  }
-
11686 }
-
11687 
-
11688 #if VMA_STATS_STRING_ENABLED
-
11689 
-
11690 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
-
11691 {
-
11692  // TODO optimize
-
11693  VmaStatInfo stat;
-
11694  CalcAllocationStatInfo(stat);
+
11681  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
+
11682  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
+
11683  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
+
11684 
+
11685  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
+
11686 
+
11687  if(unusableSize > 0)
+
11688  {
+
11689  ++outInfo.unusedRangeCount;
+
11690  outInfo.unusedBytes += unusableSize;
+
11691  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
+
11692  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
+
11693  }
+
11694 }
11695 
-
11696  PrintDetailedMap_Begin(
-
11697  json,
-
11698  stat.unusedBytes,
-
11699  stat.allocationCount,
-
11700  stat.unusedRangeCount);
-
11701 
-
11702  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
-
11703 
-
11704  const VkDeviceSize unusableSize = GetUnusableSize();
-
11705  if(unusableSize > 0)
-
11706  {
-
11707  PrintDetailedMap_UnusedRange(json,
-
11708  m_UsableSize, // offset
-
11709  unusableSize); // size
+
11696 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
+
11697 {
+
11698  const VkDeviceSize unusableSize = GetUnusableSize();
+
11699 
+
11700  inoutStats.size += GetSize();
+
11701  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
+
11702  inoutStats.allocationCount += m_AllocationCount;
+
11703  inoutStats.unusedRangeCount += m_FreeCount;
+
11704  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
+
11705 
+
11706  if(unusableSize > 0)
+
11707  {
+
11708  ++inoutStats.unusedRangeCount;
+
11709  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
11710  }
-
11711 
-
11712  PrintDetailedMap_End(json);
-
11713 }
+
11711 }
+
11712 
+
11713 #if VMA_STATS_STRING_ENABLED
11714 
-
11715 #endif // #if VMA_STATS_STRING_ENABLED
-
11716 
-
11717 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
-
11718  uint32_t currentFrameIndex,
-
11719  uint32_t frameInUseCount,
-
11720  VkDeviceSize bufferImageGranularity,
-
11721  VkDeviceSize allocSize,
-
11722  VkDeviceSize allocAlignment,
-
11723  bool upperAddress,
-
11724  VmaSuballocationType allocType,
-
11725  bool canMakeOtherLost,
-
11726  uint32_t strategy,
-
11727  VmaAllocationRequest* pAllocationRequest)
-
11728 {
-
11729  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
-
11730 
-
11731  // Simple way to respect bufferImageGranularity. May be optimized some day.
-
11732  // Whenever it might be an OPTIMAL image...
-
11733  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
-
11734  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
-
11735  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
-
11736  {
-
11737  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
-
11738  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
-
11739  }
-
11740 
-
11741  if(allocSize > m_UsableSize)
-
11742  {
-
11743  return false;
-
11744  }
-
11745 
-
11746  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
-
11747  for(uint32_t level = targetLevel + 1; level--; )
-
11748  {
-
11749  for(Node* freeNode = m_FreeList[level].front;
-
11750  freeNode != VMA_NULL;
-
11751  freeNode = freeNode->free.next)
-
11752  {
-
11753  if(freeNode->offset % allocAlignment == 0)
-
11754  {
-
11755  pAllocationRequest->type = VmaAllocationRequestType::Normal;
-
11756  pAllocationRequest->offset = freeNode->offset;
-
11757  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
-
11758  pAllocationRequest->sumItemSize = 0;
-
11759  pAllocationRequest->itemsToMakeLostCount = 0;
-
11760  pAllocationRequest->customData = (void*)(uintptr_t)level;
-
11761  return true;
-
11762  }
-
11763  }
+
11715 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
+
11716 {
+
11717  // TODO optimize
+
11718  VmaStatInfo stat;
+
11719  CalcAllocationStatInfo(stat);
+
11720 
+
11721  PrintDetailedMap_Begin(
+
11722  json,
+
11723  stat.unusedBytes,
+
11724  stat.allocationCount,
+
11725  stat.unusedRangeCount);
+
11726 
+
11727  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
+
11728 
+
11729  const VkDeviceSize unusableSize = GetUnusableSize();
+
11730  if(unusableSize > 0)
+
11731  {
+
11732  PrintDetailedMap_UnusedRange(json,
+
11733  m_UsableSize, // offset
+
11734  unusableSize); // size
+
11735  }
+
11736 
+
11737  PrintDetailedMap_End(json);
+
11738 }
+
11739 
+
11740 #endif // #if VMA_STATS_STRING_ENABLED
+
11741 
+
11742 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
+
11743  uint32_t currentFrameIndex,
+
11744  uint32_t frameInUseCount,
+
11745  VkDeviceSize bufferImageGranularity,
+
11746  VkDeviceSize allocSize,
+
11747  VkDeviceSize allocAlignment,
+
11748  bool upperAddress,
+
11749  VmaSuballocationType allocType,
+
11750  bool canMakeOtherLost,
+
11751  uint32_t strategy,
+
11752  VmaAllocationRequest* pAllocationRequest)
+
11753 {
+
11754  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
+
11755 
+
11756  // Simple way to respect bufferImageGranularity. May be optimized some day.
+
11757  // Whenever it might be an OPTIMAL image...
+
11758  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
+
11759  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
+
11760  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
+
11761  {
+
11762  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
+
11763  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
11764  }
11765 
-
11766  return false;
-
11767 }
-
11768 
-
11769 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
-
11770  uint32_t currentFrameIndex,
-
11771  uint32_t frameInUseCount,
-
11772  VmaAllocationRequest* pAllocationRequest)
-
11773 {
-
11774  /*
-
11775  Lost allocations are not supported in buddy allocator at the moment.
-
11776  Support might be added in the future.
-
11777  */
-
11778  return pAllocationRequest->itemsToMakeLostCount == 0;
-
11779 }
-
11780 
-
11781 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
-
11782 {
-
11783  /*
-
11784  Lost allocations are not supported in buddy allocator at the moment.
-
11785  Support might be added in the future.
-
11786  */
-
11787  return 0;
-
11788 }
-
11789 
-
11790 void VmaBlockMetadata_Buddy::Alloc(
-
11791  const VmaAllocationRequest& request,
-
11792  VmaSuballocationType type,
-
11793  VkDeviceSize allocSize,
-
11794  VmaAllocation hAllocation)
-
11795 {
-
11796  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
-
11797 
-
11798  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
-
11799  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
-
11800 
-
11801  Node* currNode = m_FreeList[currLevel].front;
-
11802  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
-
11803  while(currNode->offset != request.offset)
-
11804  {
-
11805  currNode = currNode->free.next;
-
11806  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
-
11807  }
-
11808 
-
11809  // Go down, splitting free nodes.
-
11810  while(currLevel < targetLevel)
-
11811  {
-
11812  // currNode is already first free node at currLevel.
-
11813  // Remove it from list of free nodes at this currLevel.
-
11814  RemoveFromFreeList(currLevel, currNode);
-
11815 
-
11816  const uint32_t childrenLevel = currLevel + 1;
-
11817 
-
11818  // Create two free sub-nodes.
-
11819  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
-
11820  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
-
11821 
-
11822  leftChild->offset = currNode->offset;
-
11823  leftChild->type = Node::TYPE_FREE;
-
11824  leftChild->parent = currNode;
-
11825  leftChild->buddy = rightChild;
-
11826 
-
11827  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
-
11828  rightChild->type = Node::TYPE_FREE;
-
11829  rightChild->parent = currNode;
-
11830  rightChild->buddy = leftChild;
-
11831 
-
11832  // Convert current currNode to split type.
-
11833  currNode->type = Node::TYPE_SPLIT;
-
11834  currNode->split.leftChild = leftChild;
-
11835 
-
11836  // Add child nodes to free list. Order is important!
-
11837  AddToFreeListFront(childrenLevel, rightChild);
-
11838  AddToFreeListFront(childrenLevel, leftChild);
-
11839 
-
11840  ++m_FreeCount;
-
11841  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
-
11842  ++currLevel;
-
11843  currNode = m_FreeList[currLevel].front;
-
11844 
-
11845  /*
-
11846  We can be sure that currNode, as left child of node previously split,
-
11847  also fulfills the alignment requirement.
-
11848  */
-
11849  }
-
11850 
-
11851  // Remove from free list.
-
11852  VMA_ASSERT(currLevel == targetLevel &&
-
11853  currNode != VMA_NULL &&
-
11854  currNode->type == Node::TYPE_FREE);
-
11855  RemoveFromFreeList(currLevel, currNode);
+
11766  if(allocSize > m_UsableSize)
+
11767  {
+
11768  return false;
+
11769  }
+
11770 
+
11771  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
+
11772  for(uint32_t level = targetLevel + 1; level--; )
+
11773  {
+
11774  for(Node* freeNode = m_FreeList[level].front;
+
11775  freeNode != VMA_NULL;
+
11776  freeNode = freeNode->free.next)
+
11777  {
+
11778  if(freeNode->offset % allocAlignment == 0)
+
11779  {
+
11780  pAllocationRequest->type = VmaAllocationRequestType::Normal;
+
11781  pAllocationRequest->offset = freeNode->offset;
+
11782  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
+
11783  pAllocationRequest->sumItemSize = 0;
+
11784  pAllocationRequest->itemsToMakeLostCount = 0;
+
11785  pAllocationRequest->customData = (void*)(uintptr_t)level;
+
11786  return true;
+
11787  }
+
11788  }
+
11789  }
+
11790 
+
11791  return false;
+
11792 }
+
11793 
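
Two details of CreateAllocationRequest() above are easy to miss: when the request might be an OPTIMAL-tiling image, both size and alignment are widened to bufferImageGranularity so a buddy node never mixes linear and optimal resources, and the level loop runs from targetLevel down to 0, preferring the smallest free node that fits before falling back to splitting a larger one. A short sketch of the widening step with assumed numbers:

#include <algorithm>
#include <cstdint>

int main()
{
    // Assumed request: 260 bytes, 16-byte alignment, granularity 1024.
    uint64_t allocSize = 260, allocAlignment = 16;
    const uint64_t bufferImageGranularity = 1024;

    // Same widening as above: the node carved out spans at least one
    // granularity page and starts on a granularity boundary.
    allocAlignment = std::max(allocAlignment, bufferImageGranularity); // 1024
    allocSize      = std::max(allocSize, bufferImageGranularity);      // 1024
    (void)allocSize; (void)allocAlignment;
    return 0;
}
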
+
11794 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
+
11795  uint32_t currentFrameIndex,
+
11796  uint32_t frameInUseCount,
+
11797  VmaAllocationRequest* pAllocationRequest)
+
11798 {
+
11799  /*
+
11800  Lost allocations are not supported in buddy allocator at the moment.
+
11801  Support might be added in the future.
+
11802  */
+
11803  return pAllocationRequest->itemsToMakeLostCount == 0;
+
11804 }
+
11805 
+
11806 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
+
11807 {
+
11808  /*
+
11809  Lost allocations are not supported in buddy allocator at the moment.
+
11810  Support might be added in the future.
+
11811  */
+
11812  return 0;
+
11813 }
+
11814 
+
11815 void VmaBlockMetadata_Buddy::Alloc(
+
11816  const VmaAllocationRequest& request,
+
11817  VmaSuballocationType type,
+
11818  VkDeviceSize allocSize,
+
11819  VmaAllocation hAllocation)
+
11820 {
+
11821  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
+
11822 
+
11823  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
+
11824  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
+
11825 
+
11826  Node* currNode = m_FreeList[currLevel].front;
+
11827  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
+
11828  while(currNode->offset != request.offset)
+
11829  {
+
11830  currNode = currNode->free.next;
+
11831  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
+
11832  }
+
11833 
+
11834  // Go down, splitting free nodes.
+
11835  while(currLevel < targetLevel)
+
11836  {
+
11837  // currNode is already first free node at currLevel.
+
11838  // Remove it from list of free nodes at this currLevel.
+
11839  RemoveFromFreeList(currLevel, currNode);
+
11840 
+
11841  const uint32_t childrenLevel = currLevel + 1;
+
11842 
+
11843  // Create two free sub-nodes.
+
11844  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
+
11845  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
+
11846 
+
11847  leftChild->offset = currNode->offset;
+
11848  leftChild->type = Node::TYPE_FREE;
+
11849  leftChild->parent = currNode;
+
11850  leftChild->buddy = rightChild;
+
11851 
+
11852  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
+
11853  rightChild->type = Node::TYPE_FREE;
+
11854  rightChild->parent = currNode;
+
11855  rightChild->buddy = leftChild;
11856 
-
11857  // Convert to allocation node.
-
11858  currNode->type = Node::TYPE_ALLOCATION;
-
11859  currNode->allocation.alloc = hAllocation;
+
11857  // Convert current currNode to split type.
+
11858  currNode->type = Node::TYPE_SPLIT;
+
11859  currNode->split.leftChild = leftChild;
11860 
-
11861  ++m_AllocationCount;
-
11862  --m_FreeCount;
-
11863  m_SumFreeSize -= allocSize;
-
11864 }
-
11865 
-
11866 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
-
11867 {
-
11868  if(node->type == Node::TYPE_SPLIT)
-
11869  {
-
11870  DeleteNode(node->split.leftChild->buddy);
-
11871  DeleteNode(node->split.leftChild);
-
11872  }
-
11873 
-
11874  vma_delete(GetAllocationCallbacks(), node);
-
11875 }
-
11876 
-
11877 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
-
11878 {
-
11879  VMA_VALIDATE(level < m_LevelCount);
-
11880  VMA_VALIDATE(curr->parent == parent);
-
11881  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
-
11882  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
-
11883  switch(curr->type)
-
11884  {
-
11885  case Node::TYPE_FREE:
-
11886  // curr->free.prev, next are validated separately.
-
11887  ctx.calculatedSumFreeSize += levelNodeSize;
-
11888  ++ctx.calculatedFreeCount;
-
11889  break;
-
11890  case Node::TYPE_ALLOCATION:
-
11891  ++ctx.calculatedAllocationCount;
-
11892  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
-
11893  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
-
11894  break;
-
11895  case Node::TYPE_SPLIT:
-
11896  {
-
11897  const uint32_t childrenLevel = level + 1;
-
11898  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
-
11899  const Node* const leftChild = curr->split.leftChild;
-
11900  VMA_VALIDATE(leftChild != VMA_NULL);
-
11901  VMA_VALIDATE(leftChild->offset == curr->offset);
-
11902  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
-
11903  {
-
11904  VMA_VALIDATE(false && "ValidateNode for left child failed.");
-
11905  }
-
11906  const Node* const rightChild = leftChild->buddy;
-
11907  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
-
11908  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
-
11909  {
-
11910  VMA_VALIDATE(false && "ValidateNode for right child failed.");
-
11911  }
-
11912  }
-
11913  break;
-
11914  default:
-
11915  return false;
-
11916  }
-
11917 
-
11918  return true;
-
11919 }
-
11920 
-
11921 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
-
11922 {
-
11923  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
-
11924  uint32_t level = 0;
-
11925  VkDeviceSize currLevelNodeSize = m_UsableSize;
-
11926  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
-
11927  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
-
11928  {
-
11929  ++level;
-
11930  currLevelNodeSize = nextLevelNodeSize;
-
11931  nextLevelNodeSize = currLevelNodeSize >> 1;
-
11932  }
-
11933  return level;
-
11934 }
-
11935 
-
11936 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
-
11937 {
-
11938  // Find node and level.
-
11939  Node* node = m_Root;
-
11940  VkDeviceSize nodeOffset = 0;
-
11941  uint32_t level = 0;
-
11942  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
-
11943  while(node->type == Node::TYPE_SPLIT)
-
11944  {
-
11945  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
-
11946  if(offset < nodeOffset + nextLevelSize)
-
11947  {
-
11948  node = node->split.leftChild;
-
11949  }
-
11950  else
-
11951  {
-
11952  node = node->split.leftChild->buddy;
-
11953  nodeOffset += nextLevelSize;
-
11954  }
-
11955  ++level;
-
11956  levelNodeSize = nextLevelSize;
+
11861  // Add child nodes to free list. Order is important!
+
11862  AddToFreeListFront(childrenLevel, rightChild);
+
11863  AddToFreeListFront(childrenLevel, leftChild);
+
11864 
+
11865  ++m_FreeCount;
+
11866  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
+
11867  ++currLevel;
+
11868  currNode = m_FreeList[currLevel].front;
+
11869 
+
11870  /*
+
11871  We can be sure that currNode, as left child of node previously split,
+
11872  also fulfills the alignment requirement.
+
11873  */
+
11874  }
+
11875 
+
11876  // Remove from free list.
+
11877  VMA_ASSERT(currLevel == targetLevel &&
+
11878  currNode != VMA_NULL &&
+
11879  currNode->type == Node::TYPE_FREE);
+
11880  RemoveFromFreeList(currLevel, currNode);
+
11881 
+
11882  // Convert to allocation node.
+
11883  currNode->type = Node::TYPE_ALLOCATION;
+
11884  currNode->allocation.alloc = hAllocation;
+
11885 
+
11886  ++m_AllocationCount;
+
11887  --m_FreeCount;
+
11888  m_SumFreeSize -= allocSize;
+
11889 }
+
11890 
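
The "Order is important!" comment in Alloc() above is load-bearing: pushing rightChild first and leftChild second leaves the left child at the free-list front, so the next loop iteration's currNode = m_FreeList[currLevel].front picks the node that starts at the parent's offset and therefore still passes the alignment test from CreateAllocationRequest(). A standalone model of that ordering with hypothetical types:

#include <cassert>
#include <cstdint>
#include <deque>

int main()
{
    struct Node { uint64_t offset, size; };
    std::deque<Node> freeList;                        // stand-in for m_FreeList[childrenLevel]
    const Node parent{0, 512};                        // free node being split
    freeList.push_front({parent.offset + 256, 256});  // right child first
    freeList.push_front({parent.offset, 256});        // left child second
    assert(freeList.front().offset == parent.offset); // front keeps the parent's offset
    return 0;
}
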
+
11891 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
+
11892 {
+
11893  if(node->type == Node::TYPE_SPLIT)
+
11894  {
+
11895  DeleteNode(node->split.leftChild->buddy);
+
11896  DeleteNode(node->split.leftChild);
+
11897  }
+
11898 
+
11899  vma_delete(GetAllocationCallbacks(), node);
+
11900 }
+
11901 
+
11902 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
+
11903 {
+
11904  VMA_VALIDATE(level < m_LevelCount);
+
11905  VMA_VALIDATE(curr->parent == parent);
+
11906  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
+
11907  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
+
11908  switch(curr->type)
+
11909  {
+
11910  case Node::TYPE_FREE:
+
11911  // curr->free.prev, next are validated separately.
+
11912  ctx.calculatedSumFreeSize += levelNodeSize;
+
11913  ++ctx.calculatedFreeCount;
+
11914  break;
+
11915  case Node::TYPE_ALLOCATION:
+
11916  ++ctx.calculatedAllocationCount;
+
11917  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
+
11918  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
+
11919  break;
+
11920  case Node::TYPE_SPLIT:
+
11921  {
+
11922  const uint32_t childrenLevel = level + 1;
+
11923  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
+
11924  const Node* const leftChild = curr->split.leftChild;
+
11925  VMA_VALIDATE(leftChild != VMA_NULL);
+
11926  VMA_VALIDATE(leftChild->offset == curr->offset);
+
11927  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
+
11928  {
+
11929  VMA_VALIDATE(false && "ValidateNode for left child failed.");
+
11930  }
+
11931  const Node* const rightChild = leftChild->buddy;
+
11932  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
+
11933  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
+
11934  {
+
11935  VMA_VALIDATE(false && "ValidateNode for right child failed.");
+
11936  }
+
11937  }
+
11938  break;
+
11939  default:
+
11940  return false;
+
11941  }
+
11942 
+
11943  return true;
+
11944 }
+
11945 
+
11946 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
+
11947 {
+
11948  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
+
11949  uint32_t level = 0;
+
11950  VkDeviceSize currLevelNodeSize = m_UsableSize;
+
11951  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
+
11952  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
+
11953  {
+
11954  ++level;
+
11955  currLevelNodeSize = nextLevelNodeSize;
+
11956  nextLevelNodeSize = currLevelNodeSize >> 1;
11957  }
-
11958 
-
11959  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
-
11960  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
-
11961 
-
11962  ++m_FreeCount;
-
11963  --m_AllocationCount;
-
11964  m_SumFreeSize += alloc->GetSize();
-
11965 
-
11966  node->type = Node::TYPE_FREE;
-
11967 
-
11968  // Join free nodes if possible.
-
11969  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
-
11970  {
-
11971  RemoveFromFreeList(level, node->buddy);
-
11972  Node* const parent = node->parent;
-
11973 
-
11974  vma_delete(GetAllocationCallbacks(), node->buddy);
-
11975  vma_delete(GetAllocationCallbacks(), node);
-
11976  parent->type = Node::TYPE_FREE;
-
11977 
-
11978  node = parent;
-
11979  --level;
-
11980  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
-
11981  --m_FreeCount;
+
11958  return level;
+
11959 }
+
11960 
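
The comment above refers to std::log2p1, the proposal that shipped in C++20 as std::bit_width. A sketch of the branch-free mapping it enables, assuming (as Init() guarantees via VmaPrevPow2) that usableSize is a power of two and that the caller already rejected allocSize > usableSize:

#include <algorithm>
#include <bit>
#include <cstdint>

uint32_t AllocSizeToLevelFast(uint64_t allocSize, uint64_t usableSize, uint32_t levelCount)
{
    const uint32_t log2Usable   = uint32_t(std::bit_width(usableSize)) - 1;  // floor(log2)
    const uint32_t ceilLog2Size = allocSize > 1
        ? uint32_t(std::bit_width(allocSize - 1)) : 0;                       // ceil(log2)
    // Deepest level whose node size still holds allocSize, capped like the loop above.
    return std::min(log2Usable - ceilLog2Size, levelCount - 1);
}
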
+
11961 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
+
11962 {
+
11963  // Find node and level.
+
11964  Node* node = m_Root;
+
11965  VkDeviceSize nodeOffset = 0;
+
11966  uint32_t level = 0;
+
11967  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
+
11968  while(node->type == Node::TYPE_SPLIT)
+
11969  {
+
11970  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
+
11971  if(offset < nodeOffset + nextLevelSize)
+
11972  {
+
11973  node = node->split.leftChild;
+
11974  }
+
11975  else
+
11976  {
+
11977  node = node->split.leftChild->buddy;
+
11978  nodeOffset += nextLevelSize;
+
11979  }
+
11980  ++level;
+
11981  levelNodeSize = nextLevelSize;
11982  }
11983 
-
11984  AddToFreeListFront(level, node);
-
11985 }
+
11984  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
+
11985  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
11986 
-
11987 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
-
11988 {
-
11989  switch(node->type)
-
11990  {
-
11991  case Node::TYPE_FREE:
-
11992  ++outInfo.unusedRangeCount;
-
11993  outInfo.unusedBytes += levelNodeSize;
-
11994  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
-
11995  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
-
11996  break;
-
11997  case Node::TYPE_ALLOCATION:
-
11998  {
-
11999  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
-
12000  ++outInfo.allocationCount;
-
12001  outInfo.usedBytes += allocSize;
-
12002  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
-
12003  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
-
12004 
-
12005  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
-
12006  if(unusedRangeSize > 0)
-
12007  {
-
12008  ++outInfo.unusedRangeCount;
-
12009  outInfo.unusedBytes += unusedRangeSize;
-
12010  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
-
12011  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
-
12012  }
-
12013  }
-
12014  break;
-
12015  case Node::TYPE_SPLIT:
-
12016  {
-
12017  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
-
12018  const Node* const leftChild = node->split.leftChild;
-
12019  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
-
12020  const Node* const rightChild = leftChild->buddy;
-
12021  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
-
12022  }
-
12023  break;
-
12024  default:
-
12025  VMA_ASSERT(0);
-
12026  }
-
12027 }
-
12028 
-
12029 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
-
12030 {
-
12031  VMA_ASSERT(node->type == Node::TYPE_FREE);
-
12032 
-
12033  // List is empty.
-
12034  Node* const frontNode = m_FreeList[level].front;
-
12035  if(frontNode == VMA_NULL)
-
12036  {
-
12037  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
-
12038  node->free.prev = node->free.next = VMA_NULL;
-
12039  m_FreeList[level].front = m_FreeList[level].back = node;
-
12040  }
-
12041  else
-
12042  {
-
12043  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
-
12044  node->free.prev = VMA_NULL;
-
12045  node->free.next = frontNode;
-
12046  frontNode->free.prev = node;
-
12047  m_FreeList[level].front = node;
-
12048  }
-
12049 }
-
12050 
-
12051 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
-
12052 {
-
12053  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
-
12054 
-
12055  // It is at the front.
-
12056  if(node->free.prev == VMA_NULL)
-
12057  {
-
12058  VMA_ASSERT(m_FreeList[level].front == node);
-
12059  m_FreeList[level].front = node->free.next;
-
12060  }
-
12061  else
-
12062  {
-
12063  Node* const prevFreeNode = node->free.prev;
-
12064  VMA_ASSERT(prevFreeNode->free.next == node);
-
12065  prevFreeNode->free.next = node->free.next;
-
12066  }
-
12067 
-
12068  // It is at the back.
-
12069  if(node->free.next == VMA_NULL)
-
12070  {
-
12071  VMA_ASSERT(m_FreeList[level].back == node);
-
12072  m_FreeList[level].back = node->free.prev;
+
11987  ++m_FreeCount;
+
11988  --m_AllocationCount;
+
11989  m_SumFreeSize += alloc->GetSize();
+
11990 
+
11991  node->type = Node::TYPE_FREE;
+
11992 
+
11993  // Join free nodes if possible.
+
11994  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
+
11995  {
+
11996  RemoveFromFreeList(level, node->buddy);
+
11997  Node* const parent = node->parent;
+
11998 
+
11999  vma_delete(GetAllocationCallbacks(), node->buddy);
+
12000  vma_delete(GetAllocationCallbacks(), node);
+
12001  parent->type = Node::TYPE_FREE;
+
12002 
+
12003  node = parent;
+
12004  --level;
+
12005  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
+
12006  --m_FreeCount;
+
12007  }
+
12008 
+
12009  AddToFreeListFront(level, node);
+
12010 }
+
12011 
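
FreeAtOffset() above descends by binary search on the offset: at each split node the freed allocation lies in either the left or the right half. A standalone trace with assumed sizes (1024-byte block, allocation at offset 768, a 128-byte leaf standing in for the TYPE_ALLOCATION node):

#include <cstdint>
#include <cstdio>

int main()
{
    uint64_t nodeOffset = 0, levelNodeSize = 1024;
    const uint64_t offset = 768;
    while(levelNodeSize > 128)                 // stand-in for "node is still TYPE_SPLIT"
    {
        const uint64_t nextLevelSize = levelNodeSize >> 1;
        if(offset >= nodeOffset + nextLevelSize)
            nodeOffset += nextLevelSize;       // descend into the right child
        levelNodeSize = nextLevelSize;         // the left child keeps nodeOffset
        std::printf("node [%llu, %llu)\n", (unsigned long long)nodeOffset,
                    (unsigned long long)(nodeOffset + levelNodeSize));
    }
    // Prints [512,1024), [768,1024), [768,896). The merge loop then walks back
    // up, collapsing each freed node with its buddy while the buddy is free.
    return 0;
}
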
+
12012 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
+
12013 {
+
12014  switch(node->type)
+
12015  {
+
12016  case Node::TYPE_FREE:
+
12017  ++outInfo.unusedRangeCount;
+
12018  outInfo.unusedBytes += levelNodeSize;
+
12019  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
+
12020  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
+
12021  break;
+
12022  case Node::TYPE_ALLOCATION:
+
12023  {
+
12024  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
+
12025  ++outInfo.allocationCount;
+
12026  outInfo.usedBytes += allocSize;
+
12027  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
+
12028  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
+
12029 
+
12030  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
+
12031  if(unusedRangeSize > 0)
+
12032  {
+
12033  ++outInfo.unusedRangeCount;
+
12034  outInfo.unusedBytes += unusedRangeSize;
+
12035  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
+
12036  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
+
12037  }
+
12038  }
+
12039  break;
+
12040  case Node::TYPE_SPLIT:
+
12041  {
+
12042  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
+
12043  const Node* const leftChild = node->split.leftChild;
+
12044  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
+
12045  const Node* const rightChild = leftChild->buddy;
+
12046  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
+
12047  }
+
12048  break;
+
12049  default:
+
12050  VMA_ASSERT(0);
+
12051  }
+
12052 }
+
12053 
+
12054 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
+
12055 {
+
12056  VMA_ASSERT(node->type == Node::TYPE_FREE);
+
12057 
+
12058  // List is empty.
+
12059  Node* const frontNode = m_FreeList[level].front;
+
12060  if(frontNode == VMA_NULL)
+
12061  {
+
12062  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
+
12063  node->free.prev = node->free.next = VMA_NULL;
+
12064  m_FreeList[level].front = m_FreeList[level].back = node;
+
12065  }
+
12066  else
+
12067  {
+
12068  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
+
12069  node->free.prev = VMA_NULL;
+
12070  node->free.next = frontNode;
+
12071  frontNode->free.prev = node;
+
12072  m_FreeList[level].front = node;
12073  }
-
12074  else
-
12075  {
-
12076  Node* const nextFreeNode = node->free.next;
-
12077  VMA_ASSERT(nextFreeNode->free.prev == node);
-
12078  nextFreeNode->free.prev = node->free.prev;
-
12079  }
-
12080 }
-
12081 
-
12082 #if VMA_STATS_STRING_ENABLED
-
12083 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
-
12084 {
-
12085  switch(node->type)
-
12086  {
-
12087  case Node::TYPE_FREE:
-
12088  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
-
12089  break;
-
12090  case Node::TYPE_ALLOCATION:
-
12091  {
-
12092  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
-
12093  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
-
12094  if(allocSize < levelNodeSize)
-
12095  {
-
12096  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
-
12097  }
-
12098  }
-
12099  break;
-
12100  case Node::TYPE_SPLIT:
-
12101  {
-
12102  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
-
12103  const Node* const leftChild = node->split.leftChild;
-
12104  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
-
12105  const Node* const rightChild = leftChild->buddy;
-
12106  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
-
12107  }
-
12108  break;
-
12109  default:
-
12110  VMA_ASSERT(0);
-
12111  }
-
12112 }
-
12113 #endif // #if VMA_STATS_STRING_ENABLED
-
12114 
-
12115 
-
12116 ////////////////////////////////////////////////////////////////////////////////
-
12117 // class VmaDeviceMemoryBlock
-
12118 
-
12119 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
-
12120  m_pMetadata(VMA_NULL),
-
12121  m_MemoryTypeIndex(UINT32_MAX),
-
12122  m_Id(0),
-
12123  m_hMemory(VK_NULL_HANDLE),
-
12124  m_MapCount(0),
-
12125  m_pMappedData(VMA_NULL)
-
12126 {
-
12127 }
-
12128 
-
12129 void VmaDeviceMemoryBlock::Init(
-
12130  VmaAllocator hAllocator,
-
12131  VmaPool hParentPool,
-
12132  uint32_t newMemoryTypeIndex,
-
12133  VkDeviceMemory newMemory,
-
12134  VkDeviceSize newSize,
-
12135  uint32_t id,
-
12136  uint32_t algorithm)
-
12137 {
-
12138  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
+
12074 }
+
12075 
+
12076 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
+
12077 {
+
12078  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
+
12079 
+
12080  // It is at the front.
+
12081  if(node->free.prev == VMA_NULL)
+
12082  {
+
12083  VMA_ASSERT(m_FreeList[level].front == node);
+
12084  m_FreeList[level].front = node->free.next;
+
12085  }
+
12086  else
+
12087  {
+
12088  Node* const prevFreeNode = node->free.prev;
+
12089  VMA_ASSERT(prevFreeNode->free.next == node);
+
12090  prevFreeNode->free.next = node->free.next;
+
12091  }
+
12092 
+
12093  // It is at the back.
+
12094  if(node->free.next == VMA_NULL)
+
12095  {
+
12096  VMA_ASSERT(m_FreeList[level].back == node);
+
12097  m_FreeList[level].back = node->free.prev;
+
12098  }
+
12099  else
+
12100  {
+
12101  Node* const nextFreeNode = node->free.next;
+
12102  VMA_ASSERT(nextFreeNode->free.prev == node);
+
12103  nextFreeNode->free.prev = node->free.prev;
+
12104  }
+
12105 }
+
12106 
+
12107 #if VMA_STATS_STRING_ENABLED
+
12108 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
+
12109 {
+
12110  switch(node->type)
+
12111  {
+
12112  case Node::TYPE_FREE:
+
12113  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
+
12114  break;
+
12115  case Node::TYPE_ALLOCATION:
+
12116  {
+
12117  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
+
12118  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
+
12119  if(allocSize < levelNodeSize)
+
12120  {
+
12121  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
+
12122  }
+
12123  }
+
12124  break;
+
12125  case Node::TYPE_SPLIT:
+
12126  {
+
12127  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
+
12128  const Node* const leftChild = node->split.leftChild;
+
12129  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
+
12130  const Node* const rightChild = leftChild->buddy;
+
12131  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
+
12132  }
+
12133  break;
+
12134  default:
+
12135  VMA_ASSERT(0);
+
12136  }
+
12137 }
+
12138 #endif // #if VMA_STATS_STRING_ENABLED
12139 
-
12140  m_hParentPool = hParentPool;
-
12141  m_MemoryTypeIndex = newMemoryTypeIndex;
-
12142  m_Id = id;
-
12143  m_hMemory = newMemory;
-
12144 
-
12145  switch(algorithm)
-
12146  {
-
12147  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
-
12148  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
-
12149  break;
-
12150  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
-
12151  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
-
12152  break;
-
12153  default:
-
12154  VMA_ASSERT(0);
-
12155  // Fall-through.
-
12156  case 0:
-
12157  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
-
12158  }
-
12159  m_pMetadata->Init(newSize);
-
12160 }
-
12161 
-
12162 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
-
12163 {
-
12164  // This is the most important assert in the entire library.
-
12165  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
-
12166  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
-
12167 
-
12168  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
-
12169  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
-
12170  m_hMemory = VK_NULL_HANDLE;
-
12171 
-
12172  vma_delete(allocator, m_pMetadata);
-
12173  m_pMetadata = VMA_NULL;
-
12174 }
-
12175 
-
12176 bool VmaDeviceMemoryBlock::Validate() const
-
12177 {
-
12178  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
-
12179  (m_pMetadata->GetSize() != 0));
-
12180 
-
12181  return m_pMetadata->Validate();
-
12182 }
-
12183 
-
12184 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
-
12185 {
-
12186  void* pData = nullptr;
-
12187  VkResult res = Map(hAllocator, 1, &pData);
-
12188  if(res != VK_SUCCESS)
-
12189  {
-
12190  return res;
-
12191  }
+
12140 
+
12141 ////////////////////////////////////////////////////////////////////////////////
+
12142 // class VmaDeviceMemoryBlock
+
12143 
+
12144 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
+
12145  m_pMetadata(VMA_NULL),
+
12146  m_MemoryTypeIndex(UINT32_MAX),
+
12147  m_Id(0),
+
12148  m_hMemory(VK_NULL_HANDLE),
+
12149  m_MapCount(0),
+
12150  m_pMappedData(VMA_NULL)
+
12151 {
+
12152 }
+
12153 
+
12154 void VmaDeviceMemoryBlock::Init(
+
12155  VmaAllocator hAllocator,
+
12156  VmaPool hParentPool,
+
12157  uint32_t newMemoryTypeIndex,
+
12158  VkDeviceMemory newMemory,
+
12159  VkDeviceSize newSize,
+
12160  uint32_t id,
+
12161  uint32_t algorithm)
+
12162 {
+
12163  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
+
12164 
+
12165  m_hParentPool = hParentPool;
+
12166  m_MemoryTypeIndex = newMemoryTypeIndex;
+
12167  m_Id = id;
+
12168  m_hMemory = newMemory;
+
12169 
+
12170  switch(algorithm)
+
12171  {
+
12172  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
+
12173  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
+
12174  break;
+
12175  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
+
12176  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
+
12177  break;
+
12178  default:
+
12179  VMA_ASSERT(0);
+
12180  // Fall-through.
+
12181  case 0:
+
12182  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
+
12183  }
+
12184  m_pMetadata->Init(newSize);
+
12185 }
+
12186 
+
12187 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
+
12188 {
+
12189  // This is the most important assert in the entire library.
+
12190  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
+
12191  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
12192 
-
12193  res = m_pMetadata->CheckCorruption(pData);
-
12194 
-
12195  Unmap(hAllocator, 1);
+
12193  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
+
12194  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
+
12195  m_hMemory = VK_NULL_HANDLE;
12196 
-
12197  return res;
-
12198 }
-
12199 
-
12200 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
-
12201 {
-
12202  if(count == 0)
-
12203  {
-
12204  return VK_SUCCESS;
-
12205  }
-
12206 
-
12207  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
-
12208  if(m_MapCount != 0)
-
12209  {
-
12210  m_MapCount += count;
-
12211  VMA_ASSERT(m_pMappedData != VMA_NULL);
-
12212  if(ppData != VMA_NULL)
-
12213  {
-
12214  *ppData = m_pMappedData;
-
12215  }
-
12216  return VK_SUCCESS;
-
12217  }
-
12218  else
-
12219  {
-
12220  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
-
12221  hAllocator->m_hDevice,
-
12222  m_hMemory,
-
12223  0, // offset
-
12224  VK_WHOLE_SIZE,
-
12225  0, // flags
-
12226  &m_pMappedData);
-
12227  if(result == VK_SUCCESS)
-
12228  {
-
12229  if(ppData != VMA_NULL)
-
12230  {
-
12231  *ppData = m_pMappedData;
-
12232  }
-
12233  m_MapCount = count;
-
12234  }
-
12235  return result;
-
12236  }
-
12237 }
-
12238 
-
12239 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
-
12240 {
-
12241  if(count == 0)
-
12242  {
-
12243  return;
-
12244  }
-
12245 
-
12246  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
-
12247  if(m_MapCount >= count)
-
12248  {
-
12249  m_MapCount -= count;
-
12250  if(m_MapCount == 0)
-
12251  {
-
12252  m_pMappedData = VMA_NULL;
-
12253  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
-
12254  }
-
12255  }
-
12256  else
-
12257  {
-
12258  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
-
12259  }
-
12260 }
-
12261 
-
12262 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
-
12263 {
-
12264  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
-
12265  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
-
12266 
-
12267  void* pData;
-
12268  VkResult res = Map(hAllocator, 1, &pData);
-
12269  if(res != VK_SUCCESS)
-
12270  {
-
12271  return res;
-
12272  }
-
12273 
-
12274  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
-
12275  VmaWriteMagicValue(pData, allocOffset + allocSize);
-
12276 
-
12277  Unmap(hAllocator, 1);
-
12278 
-
12279  return VK_SUCCESS;
-
12280 }
-
12281 
-
12282 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
-
12283 {
-
12284  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
-
12285  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
+
12197  vma_delete(allocator, m_pMetadata);
+
12198  m_pMetadata = VMA_NULL;
+
12199 }
+
12200 
+
12201 bool VmaDeviceMemoryBlock::Validate() const
+
12202 {
+
12203  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
+
12204  (m_pMetadata->GetSize() != 0));
+
12205 
+
12206  return m_pMetadata->Validate();
+
12207 }
+
12208 
+
12209 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
+
12210 {
+
12211  void* pData = nullptr;
+
12212  VkResult res = Map(hAllocator, 1, &pData);
+
12213  if(res != VK_SUCCESS)
+
12214  {
+
12215  return res;
+
12216  }
+
12217 
+
12218  res = m_pMetadata->CheckCorruption(pData);
+
12219 
+
12220  Unmap(hAllocator, 1);
+
12221 
+
12222  return res;
+
12223 }
+
12224 
+
12225 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
+
12226 {
+
12227  if(count == 0)
+
12228  {
+
12229  return VK_SUCCESS;
+
12230  }
+
12231 
+
12232  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
+
12233  if(m_MapCount != 0)
+
12234  {
+
12235  m_MapCount += count;
+
12236  VMA_ASSERT(m_pMappedData != VMA_NULL);
+
12237  if(ppData != VMA_NULL)
+
12238  {
+
12239  *ppData = m_pMappedData;
+
12240  }
+
12241  return VK_SUCCESS;
+
12242  }
+
12243  else
+
12244  {
+
12245  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
+
12246  hAllocator->m_hDevice,
+
12247  m_hMemory,
+
12248  0, // offset
+
12249  VK_WHOLE_SIZE,
+
12250  0, // flags
+
12251  &m_pMappedData);
+
12252  if(result == VK_SUCCESS)
+
12253  {
+
12254  if(ppData != VMA_NULL)
+
12255  {
+
12256  *ppData = m_pMappedData;
+
12257  }
+
12258  m_MapCount = count;
+
12259  }
+
12260  return result;
+
12261  }
+
12262 }
+
12263 
+
12264 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
+
12265 {
+
12266  if(count == 0)
+
12267  {
+
12268  return;
+
12269  }
+
12270 
+
12271  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
+
12272  if(m_MapCount >= count)
+
12273  {
+
12274  m_MapCount -= count;
+
12275  if(m_MapCount == 0)
+
12276  {
+
12277  m_pMappedData = VMA_NULL;
+
12278  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
+
12279  }
+
12280  }
+
12281  else
+
12282  {
+
12283  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
+
12284  }
+
12285 }
12286 
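
Map() and Unmap() above are reference-counted per block: vkMapMemory runs only on the 0 -> 1 transition and vkUnmapMemory only on 1 -> 0, so nested mappings of the same VkDeviceMemory stay cheap, and every Map(count) must be balanced by an Unmap(count). A standalone model of the counting, using a hypothetical type rather than the library's class:

#include <cassert>
#include <cstdint>

struct MappedBlock
{
    uint32_t mapCount = 0;

    void Map(uint32_t count)
    {
        if(count == 0) return;
        if(mapCount == 0) { /* the real code calls vkMapMemory here */ }
        mapCount += count;
    }
    void Unmap(uint32_t count)
    {
        if(count == 0) return;
        assert(mapCount >= count && "unbalanced Unmap");
        mapCount -= count;
        if(mapCount == 0) { /* the real code calls vkUnmapMemory here */ }
    }
};
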
-
12287  void* pData;
-
12288  VkResult res = Map(hAllocator, 1, &pData);
-
12289  if(res != VK_SUCCESS)
-
12290  {
-
12291  return res;
-
12292  }
-
12293 
-
12294  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
+
12287 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
+
12288 {
+
12289  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
+
12290  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
+
12291 
+
12292  void* pData;
+
12293  VkResult res = Map(hAllocator, 1, &pData);
+
12294  if(res != VK_SUCCESS)
12295  {
-
12296  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
+
12296  return res;
12297  }
-
12298  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
-
12299  {
-
12300  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
-
12301  }
-
12302 
-
12303  Unmap(hAllocator, 1);
-
12304 
-
12305  return VK_SUCCESS;
-
12306 }
-
12307 
-
12308 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
-
12309  const VmaAllocator hAllocator,
-
12310  const VmaAllocation hAllocation,
-
12311  VkDeviceSize allocationLocalOffset,
-
12312  VkBuffer hBuffer,
-
12313  const void* pNext)
-
12314 {
-
12315  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
-
12316  hAllocation->GetBlock() == this);
-
12317  VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
-
12318  "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
-
12319  const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
-
12320  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
-
12321  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
-
12322  return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext);
-
12323 }
-
12324 
-
12325 VkResult VmaDeviceMemoryBlock::BindImageMemory(
-
12326  const VmaAllocator hAllocator,
-
12327  const VmaAllocation hAllocation,
-
12328  VkDeviceSize allocationLocalOffset,
-
12329  VkImage hImage,
-
12330  const void* pNext)
-
12331 {
-
12332  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
-
12333  hAllocation->GetBlock() == this);
-
12334  VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
-
12335  "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
-
12336  const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
-
12337  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
-
12338  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
-
12339  return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext);
-
12340 }
-
12341 
-
12342 static void InitStatInfo(VmaStatInfo& outInfo)
-
12343 {
-
12344  memset(&outInfo, 0, sizeof(outInfo));
-
12345  outInfo.allocationSizeMin = UINT64_MAX;
-
12346  outInfo.unusedRangeSizeMin = UINT64_MAX;
-
12347 }
-
12348 
-
12349 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
-
12350 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
-
12351 {
-
12352  inoutInfo.blockCount += srcInfo.blockCount;
-
12353  inoutInfo.allocationCount += srcInfo.allocationCount;
-
12354  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
-
12355  inoutInfo.usedBytes += srcInfo.usedBytes;
-
12356  inoutInfo.unusedBytes += srcInfo.unusedBytes;
-
12357  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
-
12358  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
-
12359  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
-
12360  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
-
12361 }
-
12362 
-
12363 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
-
12364 {
-
12365  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
-
12366  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
-
12367  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
-
12368  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
-
12369 }
-
12370 
-
12371 VmaPool_T::VmaPool_T(
-
12372  VmaAllocator hAllocator,
-
12373  const VmaPoolCreateInfo& createInfo,
-
12374  VkDeviceSize preferredBlockSize) :
-
12375  m_BlockVector(
-
12376  hAllocator,
-
12377  this, // hParentPool
-
12378  createInfo.memoryTypeIndex,
-
12379  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
-
12380  createInfo.minBlockCount,
-
12381  createInfo.maxBlockCount,
-
12382  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
-
12383  createInfo.frameInUseCount,
-
12384  createInfo.blockSize != 0, // explicitBlockSize
-
12385  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
-
12386  m_Id(0),
-
12387  m_Name(VMA_NULL)
-
12388 {
-
12389 }
-
12390 
-
12391 VmaPool_T::~VmaPool_T()
-
12392 {
-
12393 }
-
12394 
-
12395 void VmaPool_T::SetName(const char* pName)
-
12396 {
-
12397  const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks();
-
12398  VmaFreeString(allocs, m_Name);
-
12399 
-
12400  if(pName != VMA_NULL)
-
12401  {
-
12402  m_Name = VmaCreateStringCopy(allocs, pName);
-
12403  }
-
12404  else
-
12405  {
-
12406  m_Name = VMA_NULL;
-
12407  }
-
12408 }
-
12409 
-
12410 #if VMA_STATS_STRING_ENABLED
-
12411 
-
12412 #endif // #if VMA_STATS_STRING_ENABLED
-
12413 
-
12414 VmaBlockVector::VmaBlockVector(
-
12415  VmaAllocator hAllocator,
-
12416  VmaPool hParentPool,
-
12417  uint32_t memoryTypeIndex,
-
12418  VkDeviceSize preferredBlockSize,
-
12419  size_t minBlockCount,
-
12420  size_t maxBlockCount,
-
12421  VkDeviceSize bufferImageGranularity,
-
12422  uint32_t frameInUseCount,
-
12423  bool explicitBlockSize,
-
12424  uint32_t algorithm) :
-
12425  m_hAllocator(hAllocator),
-
12426  m_hParentPool(hParentPool),
-
12427  m_MemoryTypeIndex(memoryTypeIndex),
-
12428  m_PreferredBlockSize(preferredBlockSize),
-
12429  m_MinBlockCount(minBlockCount),
-
12430  m_MaxBlockCount(maxBlockCount),
-
12431  m_BufferImageGranularity(bufferImageGranularity),
-
12432  m_FrameInUseCount(frameInUseCount),
-
12433  m_ExplicitBlockSize(explicitBlockSize),
-
12434  m_Algorithm(algorithm),
-
12435  m_HasEmptyBlock(false),
-
12436  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
-
12437  m_NextBlockId(0)
-
12438 {
-
12439 }
-
12440 
-
12441 VmaBlockVector::~VmaBlockVector()
-
12442 {
-
12443  for(size_t i = m_Blocks.size(); i--; )
-
12444  {
-
12445  m_Blocks[i]->Destroy(m_hAllocator);
-
12446  vma_delete(m_hAllocator, m_Blocks[i]);
-
12447  }
-
12448 }
-
12449 
-
12450 VkResult VmaBlockVector::CreateMinBlocks()
-
12451 {
-
12452  for(size_t i = 0; i < m_MinBlockCount; ++i)
-
12453  {
-
12454  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
-
12455  if(res != VK_SUCCESS)
-
12456  {
-
12457  return res;
-
12458  }
-
12459  }
-
12460  return VK_SUCCESS;
-
12461 }
-
12462 
-
12463 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
-
12464 {
-
12465  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
-
12466 
-
12467  const size_t blockCount = m_Blocks.size();
-
12468 
-
12469  pStats->size = 0;
-
12470  pStats->unusedSize = 0;
-
12471  pStats->allocationCount = 0;
-
12472  pStats->unusedRangeCount = 0;
-
12473  pStats->unusedRangeSizeMax = 0;
-
12474  pStats->blockCount = blockCount;
-
12475 
-
12476  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
-
12477  {
-
12478  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
-
12479  VMA_ASSERT(pBlock);
-
12480  VMA_HEAVY_ASSERT(pBlock->Validate());
-
12481  pBlock->m_pMetadata->AddPoolStats(*pStats);
-
12482  }
-
12483 }
-
12484 
-
12485 bool VmaBlockVector::IsEmpty()
-
12486 {
-
12487  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
-
12488  return m_Blocks.empty();
-
12489 }
-
12490 
-
12491 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
-
12492 {
-
12493  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
-
12494  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
-
12495  (VMA_DEBUG_MARGIN > 0) &&
-
12496  (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
-
12497  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
-
12498 }
-
12499 
-
12500 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
-
12501 
-
12502 VkResult VmaBlockVector::Allocate(
-
12503  uint32_t currentFrameIndex,
-
12504  VkDeviceSize size,
-
12505  VkDeviceSize alignment,
-
12506  const VmaAllocationCreateInfo& createInfo,
-
12507  VmaSuballocationType suballocType,
-
12508  size_t allocationCount,
-
12509  VmaAllocation* pAllocations)
-
12510 {
-
12511  size_t allocIndex;
-
12512  VkResult res = VK_SUCCESS;
-
12513 
-
12514  if(IsCorruptionDetectionEnabled())
-
12515  {
-
12516  size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
-
12517  alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
-
12518  }
-
12519 
-
12520  {
-
12521  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
-
12522  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
-
12523  {
-
12524  res = AllocatePage(
-
12525  currentFrameIndex,
-
12526  size,
-
12527  alignment,
-
12528  createInfo,
-
12529  suballocType,
-
12530  pAllocations + allocIndex);
-
12531  if(res != VK_SUCCESS)
-
12532  {
-
12533  break;
-
12534  }
-
12535  }
-
12536  }
-
12537 
-
12538  if(res != VK_SUCCESS)
-
12539  {
-
12540  // Free all already created allocations.
-
12541  while(allocIndex--)
-
12542  {
-
12543  Free(pAllocations[allocIndex]);
-
12544  }
-
12545  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
-
12546  }
-
12547 
-
12548  return res;
-
12549 }
-
12550 
-
12551 VkResult VmaBlockVector::AllocatePage(
-
12552  uint32_t currentFrameIndex,
-
12553  VkDeviceSize size,
-
12554  VkDeviceSize alignment,
-
12555  const VmaAllocationCreateInfo& createInfo,
-
12556  VmaSuballocationType suballocType,
-
12557  VmaAllocation* pAllocation)
-
12558 {
-
12559  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
-
12560  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
-
12561  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
-
12562  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
-
12563 
-
12564  VkDeviceSize freeMemory;
-
12565  {
-
12566  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
-
12567  VmaBudget heapBudget = {};
-
12568  m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
-
12569  freeMemory = (heapBudget.usage < heapBudget.budget) ? (heapBudget.budget - heapBudget.usage) : 0;
-
12570  }
-
12571 
-
12572  const bool canFallbackToDedicated = !IsCustomPool();
-
12573  const bool canCreateNewBlock =
-
12574  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
-
12575  (m_Blocks.size() < m_MaxBlockCount) &&
-
12576  (freeMemory >= size || !canFallbackToDedicated);
-
12577  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
-
12578 
-
12579  // If linearAlgorithm is used, canMakeOtherLost is available only when used as a ring buffer,
-
12580  // which in turn is available only when maxBlockCount = 1.
-
12581  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
-
12582  {
-
12583  canMakeOtherLost = false;
-
12584  }
-
12585 
-
12586  // Upper address can only be used with linear allocator and within single memory block.
-
12587  if(isUpperAddress &&
-
12588  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
-
12589  {
-
12590  return VK_ERROR_FEATURE_NOT_PRESENT;
-
12591  }
-
12592 
-
12593  // Validate strategy.
-
12594  switch(strategy)
-
12595  {
-
12596  case 0:
-
12597  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
-
12598  break;
-
12599  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
-
12600  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
-
12601  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
-
12602  break;
-
12603  default:
-
12604  return VK_ERROR_FEATURE_NOT_PRESENT;
-
12605  }
-
12606 
-
12607  // Early reject: requested allocation size is larger than the maximum block size for this block vector.
-
12608  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
-
12609  {
-
12610  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-
12611  }
-
12612 
-
12613  /*
-
12614  Under certain conditions, this whole section can be skipped for optimization, so
-
12615  we move on directly to trying to allocate with canMakeOtherLost. That's the case
-
12616  e.g. for custom pools with linear algorithm.
-
12617  */
-
12618  if(!canMakeOtherLost || canCreateNewBlock)
-
12619  {
-
12620  // 1. Search existing allocations. Try to allocate without making other allocations lost.
-
12621  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
-
12622  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
-
12623 
-
12624  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
-
12625  {
-
12626  // Use only last block.
-
12627  if(!m_Blocks.empty())
-
12628  {
-
12629  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
-
12630  VMA_ASSERT(pCurrBlock);
-
12631  VkResult res = AllocateFromBlock(
-
12632  pCurrBlock,
-
12633  currentFrameIndex,
-
12634  size,
-
12635  alignment,
-
12636  allocFlagsCopy,
-
12637  createInfo.pUserData,
-
12638  suballocType,
-
12639  strategy,
-
12640  pAllocation);
-
12641  if(res == VK_SUCCESS)
-
12642  {
-
12643  VMA_DEBUG_LOG(" Returned from last block #%u", pCurrBlock->GetId());
-
12644  return VK_SUCCESS;
-
12645  }
-
12646  }
-
12647  }
-
12648  else
-
12649  {
-
12650  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
-
12651  {
-
12652  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
-
12653  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
-
12654  {
-
12655  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
-
12656  VMA_ASSERT(pCurrBlock);
-
12657  VkResult res = AllocateFromBlock(
-
12658  pCurrBlock,
-
12659  currentFrameIndex,
-
12660  size,
-
12661  alignment,
-
12662  allocFlagsCopy,
-
12663  createInfo.pUserData,
-
12664  suballocType,
-
12665  strategy,
-
12666  pAllocation);
-
12667  if(res == VK_SUCCESS)
-
12668  {
-
12669  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
-
12670  return VK_SUCCESS;
-
12671  }
-
12672  }
-
12673  }
-
12674  else // WORST_FIT, FIRST_FIT
-
12675  {
-
12676  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
-
12677  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
-
12678  {
-
12679  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
-
12680  VMA_ASSERT(pCurrBlock);
-
12681  VkResult res = AllocateFromBlock(
-
12682  pCurrBlock,
-
12683  currentFrameIndex,
-
12684  size,
-
12685  alignment,
-
12686  allocFlagsCopy,
-
12687  createInfo.pUserData,
-
12688  suballocType,
-
12689  strategy,
-
12690  pAllocation);
-
12691  if(res == VK_SUCCESS)
-
12692  {
-
12693  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
-
12694  return VK_SUCCESS;
-
12695  }
-
12696  }
-
12697  }
-
12698  }
-
12699 
-
12700  // 2. Try to create new block.
-
12701  if(canCreateNewBlock)
-
12702  {
-
12703  // Calculate optimal size for new block.
-
12704  VkDeviceSize newBlockSize = m_PreferredBlockSize;
-
12705  uint32_t newBlockSizeShift = 0;
-
12706  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
-
12707 
-
12708  if(!m_ExplicitBlockSize)
-
12709  {
-
12710  // Allocate 1/8, 1/4, 1/2 as first blocks.
-
12711  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
-
12712  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
-
12713  {
-
12714  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
-
12715  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
-
12716  {
-
12717  newBlockSize = smallerNewBlockSize;
-
12718  ++newBlockSizeShift;
-
12719  }
-
12720  else
-
12721  {
-
12722  break;
-
12723  }
-
12724  }
-
12725  }
-
12726 
-
12727  size_t newBlockIndex = 0;
-
12728  VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
-
12729  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
-
12730  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
-
12731  if(!m_ExplicitBlockSize)
-
12732  {
-
12733  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
-
12734  {
-
12735  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
-
12736  if(smallerNewBlockSize >= size)
-
12737  {
-
12738  newBlockSize = smallerNewBlockSize;
-
12739  ++newBlockSizeShift;
-
12740  res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
-
12741  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
-
12742  }
-
12743  else
-
12744  {
-
12745  break;
-
12746  }
-
12747  }
-
12748  }
-
12749 
-
12750  if(res == VK_SUCCESS)
-
12751  {
-
12752  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
-
12753  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
-
12754 
-
12755  res = AllocateFromBlock(
-
12756  pBlock,
-
12757  currentFrameIndex,
-
12758  size,
-
12759  alignment,
-
12760  allocFlagsCopy,
-
12761  createInfo.pUserData,
-
12762  suballocType,
-
12763  strategy,
-
12764  pAllocation);
-
12765  if(res == VK_SUCCESS)
-
12766  {
-
12767  VMA_DEBUG_LOG(" Created new block #%u Size=%llu", pBlock->GetId(), newBlockSize);
-
12768  return VK_SUCCESS;
-
12769  }
-
12770  else
-
12771  {
-
12772  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
-
12773  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-
12774  }
-
12775  }
-
12776  }
-
12777  }
-
12778 
-
12779  // 3. Try to allocate from existing blocks with making other allocations lost.
-
12780  if(canMakeOtherLost)
-
12781  {
-
12782  uint32_t tryIndex = 0;
-
12783  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
-
12784  {
-
12785  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
-
12786  VmaAllocationRequest bestRequest = {};
-
12787  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
-
12788 
-
12789  // 1. Search existing allocations.
-
12790  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
-
12791  {
-
12792  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
-
12793  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
-
12794  {
-
12795  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
-
12796  VMA_ASSERT(pCurrBlock);
-
12797  VmaAllocationRequest currRequest = {};
-
12798  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
-
12799  currentFrameIndex,
-
12800  m_FrameInUseCount,
-
12801  m_BufferImageGranularity,
-
12802  size,
-
12803  alignment,
-
12804  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
-
12805  suballocType,
-
12806  canMakeOtherLost,
-
12807  strategy,
-
12808  &currRequest))
-
12809  {
-
12810  const VkDeviceSize currRequestCost = currRequest.CalcCost();
-
12811  if(pBestRequestBlock == VMA_NULL ||
-
12812  currRequestCost < bestRequestCost)
-
12813  {
-
12814  pBestRequestBlock = pCurrBlock;
-
12815  bestRequest = currRequest;
-
12816  bestRequestCost = currRequestCost;
-
12817 
-
12818  if(bestRequestCost == 0)
-
12819  {
-
12820  break;
-
12821  }
-
12822  }
-
12823  }
-
12824  }
-
12825  }
-
12826  else // WORST_FIT, FIRST_FIT
-
12827  {
-
12828  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
-
12829  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
-
12830  {
-
12831  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
-
12832  VMA_ASSERT(pCurrBlock);
-
12833  VmaAllocationRequest currRequest = {};
-
12834  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
-
12835  currentFrameIndex,
-
12836  m_FrameInUseCount,
-
12837  m_BufferImageGranularity,
-
12838  size,
-
12839  alignment,
-
12840  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
-
12841  suballocType,
-
12842  canMakeOtherLost,
-
12843  strategy,
-
12844  &currRequest))
-
12845  {
-
12846  const VkDeviceSize currRequestCost = currRequest.CalcCost();
-
12847  if(pBestRequestBlock == VMA_NULL ||
-
12848  currRequestCost < bestRequestCost ||
-
12849  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
-
12850  {
-
12851  pBestRequestBlock = pCurrBlock;
-
12852  bestRequest = currRequest;
-
12853  bestRequestCost = currRequestCost;
-
12854 
-
12855  if(bestRequestCost == 0 ||
-
12856  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
-
12857  {
-
12858  break;
-
12859  }
-
12860  }
-
12861  }
-
12862  }
-
12863  }
-
12864 
-
12865  if(pBestRequestBlock != VMA_NULL)
-
12866  {
-
12867  if(mapped)
-
12868  {
-
12869  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
-
12870  if(res != VK_SUCCESS)
-
12871  {
-
12872  return res;
-
12873  }
-
12874  }
-
12875 
-
12876  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
-
12877  currentFrameIndex,
-
12878  m_FrameInUseCount,
-
12879  &bestRequest))
-
12880  {
-
12881  // Allocate from this pBlock.
-
12882  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(currentFrameIndex, isUserDataString);
-
12883  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
-
12884  UpdateHasEmptyBlock();
-
12885  (*pAllocation)->InitBlockAllocation(
-
12886  pBestRequestBlock,
-
12887  bestRequest.offset,
-
12888  alignment,
-
12889  size,
-
12890  m_MemoryTypeIndex,
-
12891  suballocType,
-
12892  mapped,
-
12893  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
-
12894  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
-
12895  VMA_DEBUG_LOG(" Returned from existing block");
-
12896  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
-
12897  m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
-
12898  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
-
12899  {
-
12900  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
-
12901  }
-
12902  if(IsCorruptionDetectionEnabled())
-
12903  {
-
12904  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
-
12905  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
-
12906  }
-
12907  return VK_SUCCESS;
-
12908  }
-
12909  // else: Some allocations must have been touched while we are here. Next try.
-
12910  }
-
12911  else
-
12912  {
-
12913  // Could not find place in any of the blocks - break outer loop.
-
12914  break;
-
12915  }
-
12916  }
-
12917  /* Maximum number of tries exceeded - a very unlikely event when many other
-
12918  threads are simultaneously touching allocations, making it impossible to make
-
12919  them lost at the same time as we try to allocate. */
-
12920  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
-
12921  {
-
12922  return VK_ERROR_TOO_MANY_OBJECTS;
-
12923  }
-
12924  }
-
12925 
-
12926  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-
12927 }
-
12928 
-
12929 void VmaBlockVector::Free(
-
12930  const VmaAllocation hAllocation)
-
12931 {
-
12932  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
-
12933 
-
12934  bool budgetExceeded = false;
-
12935  {
-
12936  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
-
12937  VmaBudget heapBudget = {};
-
12938  m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
-
12939  budgetExceeded = heapBudget.usage >= heapBudget.budget;
-
12940  }
-
12941 
-
12942  // Scope for lock.
-
12943  {
-
12944  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
-
12945 
-
12946  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
-
12947 
-
12948  if(IsCorruptionDetectionEnabled())
-
12949  {
-
12950  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
-
12951  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
-
12952  }
+
12298 
+
12299  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
+
12300  VmaWriteMagicValue(pData, allocOffset + allocSize);
+
12301 
+
12302  Unmap(hAllocator, 1);
+
12303 
+
12304  return VK_SUCCESS;
+
12305 }
+
12306 
+
12307 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
+
12308 {
+
12309  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
+
12310  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
+
12311 
+
12312  void* pData;
+
12313  VkResult res = Map(hAllocator, 1, &pData);
+
12314  if(res != VK_SUCCESS)
+
12315  {
+
12316  return res;
+
12317  }
+
12318 
+
12319  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
+
12320  {
+
12321  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
+
12322  }
+
12323  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
+
12324  {
+
12325  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
+
12326  }
+
12327 
+
12328  Unmap(hAllocator, 1);
+
12329 
+
12330  return VK_SUCCESS;
+
12331 }
+
12332 
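
The magic-value helpers above back the corruption-detection feature: with a non-zero VMA_DEBUG_MARGIN and VMA_DEBUG_DETECT_CORRUPTION enabled, every allocation is surrounded by a margin filled with VMA_CORRUPTION_DETECTION_MAGIC_VALUE and validated on free. A minimal usage sketch, assuming only the documented configuration macros and the public vmaCheckCorruption() entry point (the wrapper name is illustrative):

    #include <cstdint>
    // Configure before compiling the implementation translation unit.
    #define VMA_DEBUG_MARGIN 16           // bytes reserved before and after each allocation
    #define VMA_DEBUG_DETECT_CORRUPTION 1 // fill margins with a magic value and validate it
    #define VMA_IMPLEMENTATION
    #include "vk_mem_alloc.h"

    // E.g. once per frame in a debug build: validate the margins of all allocations
    // in HOST_VISIBLE | HOST_COHERENT memory types (the only checkable ones).
    VkResult CheckForCorruption(VmaAllocator allocator)
    {
        return vmaCheckCorruption(allocator, UINT32_MAX);
    }
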
+
12333 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
+
12334  const VmaAllocator hAllocator,
+
12335  const VmaAllocation hAllocation,
+
12336  VkDeviceSize allocationLocalOffset,
+
12337  VkBuffer hBuffer,
+
12338  const void* pNext)
+
12339 {
+
12340  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
+
12341  hAllocation->GetBlock() == this);
+
12342  VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
+
12343  "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
+
12344  const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
+
12345  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
+
12346  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
+
12347  return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext);
+
12348 }
+
12349 
+
12350 VkResult VmaDeviceMemoryBlock::BindImageMemory(
+
12351  const VmaAllocator hAllocator,
+
12352  const VmaAllocation hAllocation,
+
12353  VkDeviceSize allocationLocalOffset,
+
12354  VkImage hImage,
+
12355  const void* pNext)
+
12356 {
+
12357  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
+
12358  hAllocation->GetBlock() == this);
+
12359  VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
+
12360  "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
+
12361  const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
+
12362  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
+
12363  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
+
12364  return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext);
+
12365 }
+
12366 
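
BindBufferMemory()/BindImageMemory() above are the block-level implementation behind the public vmaBindBufferMemory2()/vmaBindImageMemory2() functions; the per-block mutex is what serializes vkBind*/vkMap* calls on the shared VkDeviceMemory. A short sketch of the allocation-relative offset semantics that the asserts enforce (wrapper name is illustrative):

    #include "vk_mem_alloc.h"

    // localOffset is relative to the allocation; VMA adds the allocation's own
    // offset within the VkDeviceMemory block before binding, as shown above.
    VkResult BindAtLocalOffset(VmaAllocator allocator, VmaAllocation alloc,
        VkBuffer buffer, VkDeviceSize localOffset)
    {
        return vmaBindBufferMemory2(allocator, alloc, localOffset, buffer, nullptr);
    }
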
+
12367 static void InitStatInfo(VmaStatInfo& outInfo)
+
12368 {
+
12369  memset(&outInfo, 0, sizeof(outInfo));
+
12370  outInfo.allocationSizeMin = UINT64_MAX;
+
12371  outInfo.unusedRangeSizeMin = UINT64_MAX;
+
12372 }
+
12373 
+
12374 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
+
12375 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
+
12376 {
+
12377  inoutInfo.blockCount += srcInfo.blockCount;
+
12378  inoutInfo.allocationCount += srcInfo.allocationCount;
+
12379  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
+
12380  inoutInfo.usedBytes += srcInfo.usedBytes;
+
12381  inoutInfo.unusedBytes += srcInfo.unusedBytes;
+
12382  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
+
12383  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
+
12384  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
+
12385  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
+
12386 }
+
12387 
+
12388 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
+
12389 {
+
12390  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
+
12391  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
+
12392  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
+
12393  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
+
12394 }
+
12395 
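
These helpers define the statistics fold: counts and byte totals add, minima and maxima fold with VMA_MIN/VMA_MAX, and averages are recomputed once at the end as rounded quotients. Applications see the merged result through vmaCalculateStats(); a small sketch:

    #include <cstdio>
    #include "vk_mem_alloc.h"

    void PrintTotalStats(VmaAllocator allocator)
    {
        VmaStats stats;
        vmaCalculateStats(allocator, &stats); // folds per-block stats as above
        // allocationSizeAvg = round(usedBytes / allocationCount), per
        // VmaPostprocessCalcStatInfo().
        printf("allocations=%u usedBytes=%llu avgSize=%llu\n",
            stats.total.allocationCount,
            (unsigned long long)stats.total.usedBytes,
            (unsigned long long)stats.total.allocationSizeAvg);
    }
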
+
12396 VmaPool_T::VmaPool_T(
+
12397  VmaAllocator hAllocator,
+
12398  const VmaPoolCreateInfo& createInfo,
+
12399  VkDeviceSize preferredBlockSize) :
+
12400  m_BlockVector(
+
12401  hAllocator,
+
12402  this, // hParentPool
+
12403  createInfo.memoryTypeIndex,
+
12404  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
+
12405  createInfo.minBlockCount,
+
12406  createInfo.maxBlockCount,
+
12407  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
+
12408  createInfo.frameInUseCount,
+
12409  createInfo.blockSize != 0, // explicitBlockSize
+
12410  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
+
12411  m_Id(0),
+
12412  m_Name(VMA_NULL)
+
12413 {
+
12414 }
+
12415 
+
12416 VmaPool_T::~VmaPool_T()
+
12417 {
+
12418 }
+
12419 
+
12420 void VmaPool_T::SetName(const char* pName)
+
12421 {
+
12422  const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks();
+
12423  VmaFreeString(allocs, m_Name);
+
12424 
+
12425  if(pName != VMA_NULL)
+
12426  {
+
12427  m_Name = VmaCreateStringCopy(allocs, pName);
+
12428  }
+
12429  else
+
12430  {
+
12431  m_Name = VMA_NULL;
+
12432  }
+
12433 }
+
12434 
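
VmaPool_T::SetName() frees any previously stored string and keeps a copy, so a caller-owned buffer does not need to outlive the call. It backs the public pair vmaSetPoolName()/vmaGetPoolName(); a usage sketch:

    #include "vk_mem_alloc.h"

    void NamePool(VmaAllocator allocator, VmaPool pool)
    {
        vmaSetPoolName(allocator, pool, "texture pool"); // string is copied internally
        const char* name = nullptr;
        vmaGetPoolName(allocator, pool, &name);          // returns the stored copy
    }
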
+
12435 #if VMA_STATS_STRING_ENABLED
+
12436 
+
12437 #endif // #if VMA_STATS_STRING_ENABLED
+
12438 
+
12439 VmaBlockVector::VmaBlockVector(
+
12440  VmaAllocator hAllocator,
+
12441  VmaPool hParentPool,
+
12442  uint32_t memoryTypeIndex,
+
12443  VkDeviceSize preferredBlockSize,
+
12444  size_t minBlockCount,
+
12445  size_t maxBlockCount,
+
12446  VkDeviceSize bufferImageGranularity,
+
12447  uint32_t frameInUseCount,
+
12448  bool explicitBlockSize,
+
12449  uint32_t algorithm) :
+
12450  m_hAllocator(hAllocator),
+
12451  m_hParentPool(hParentPool),
+
12452  m_MemoryTypeIndex(memoryTypeIndex),
+
12453  m_PreferredBlockSize(preferredBlockSize),
+
12454  m_MinBlockCount(minBlockCount),
+
12455  m_MaxBlockCount(maxBlockCount),
+
12456  m_BufferImageGranularity(bufferImageGranularity),
+
12457  m_FrameInUseCount(frameInUseCount),
+
12458  m_ExplicitBlockSize(explicitBlockSize),
+
12459  m_Algorithm(algorithm),
+
12460  m_HasEmptyBlock(false),
+
12461  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
+
12462  m_NextBlockId(0)
+
12463 {
+
12464 }
+
12465 
+
12466 VmaBlockVector::~VmaBlockVector()
+
12467 {
+
12468  for(size_t i = m_Blocks.size(); i--; )
+
12469  {
+
12470  m_Blocks[i]->Destroy(m_hAllocator);
+
12471  vma_delete(m_hAllocator, m_Blocks[i]);
+
12472  }
+
12473 }
+
12474 
+
12475 VkResult VmaBlockVector::CreateMinBlocks()
+
12476 {
+
12477  for(size_t i = 0; i < m_MinBlockCount; ++i)
+
12478  {
+
12479  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
+
12480  if(res != VK_SUCCESS)
+
12481  {
+
12482  return res;
+
12483  }
+
12484  }
+
12485  return VK_SUCCESS;
+
12486 }
+
12487 
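
CreateMinBlocks() runs during pool creation: VmaPoolCreateInfo::minBlockCount blocks are allocated eagerly and, per the canDeleteBlock test in Free() below, never destroyed while the pool lives. A sketch of a custom pool preallocating two explicit 64 MiB blocks:

    #include "vk_mem_alloc.h"

    VkResult CreatePreallocatedPool(VmaAllocator allocator, uint32_t memoryTypeIndex,
        VmaPool* pPool)
    {
        VmaPoolCreateInfo poolInfo = {};
        poolInfo.memoryTypeIndex = memoryTypeIndex;
        poolInfo.blockSize = 64ull * 1024 * 1024; // explicitBlockSize path above
        poolInfo.minBlockCount = 2;               // created up front by CreateMinBlocks()
        poolInfo.maxBlockCount = 8;
        return vmaCreatePool(allocator, &poolInfo, pPool);
    }
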
+
12488 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
+
12489 {
+
12490  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
+
12491 
+
12492  const size_t blockCount = m_Blocks.size();
+
12493 
+
12494  pStats->size = 0;
+
12495  pStats->unusedSize = 0;
+
12496  pStats->allocationCount = 0;
+
12497  pStats->unusedRangeCount = 0;
+
12498  pStats->unusedRangeSizeMax = 0;
+
12499  pStats->blockCount = blockCount;
+
12500 
+
12501  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
+
12502  {
+
12503  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
+
12504  VMA_ASSERT(pBlock);
+
12505  VMA_HEAVY_ASSERT(pBlock->Validate());
+
12506  pBlock->m_pMetadata->AddPoolStats(*pStats);
+
12507  }
+
12508 }
+
12509 
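
GetPoolStats() zeroes the output and lets each block's metadata accumulate into it under a shared (read) lock; it services the public vmaGetPoolStats(). For example:

    #include "vk_mem_alloc.h"

    VkDeviceSize QueryPoolUnusedBytes(VmaAllocator allocator, VmaPool pool)
    {
        VmaPoolStats stats = {};
        vmaGetPoolStats(allocator, pool, &stats);
        return stats.unusedSize; // bytes held in blocks but not suballocated
    }
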
+
12510 bool VmaBlockVector::IsEmpty()
+
12511 {
+
12512  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
+
12513  return m_Blocks.empty();
+
12514 }
+
12515 
+
12516 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
+
12517 {
+
12518  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
+
12519  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
+
12520  (VMA_DEBUG_MARGIN > 0) &&
+
12521  (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
+
12522  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
+
12523 }
+
12524 
+
12525 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
+
12526 
+
12527 VkResult VmaBlockVector::Allocate(
+
12528  uint32_t currentFrameIndex,
+
12529  VkDeviceSize size,
+
12530  VkDeviceSize alignment,
+
12531  const VmaAllocationCreateInfo& createInfo,
+
12532  VmaSuballocationType suballocType,
+
12533  size_t allocationCount,
+
12534  VmaAllocation* pAllocations)
+
12535 {
+
12536  size_t allocIndex;
+
12537  VkResult res = VK_SUCCESS;
+
12538 
+
12539  if(IsCorruptionDetectionEnabled())
+
12540  {
+
12541  size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
+
12542  alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
+
12543  }
+
12544 
+
12545  {
+
12546  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
+
12547  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
+
12548  {
+
12549  res = AllocatePage(
+
12550  currentFrameIndex,
+
12551  size,
+
12552  alignment,
+
12553  createInfo,
+
12554  suballocType,
+
12555  pAllocations + allocIndex);
+
12556  if(res != VK_SUCCESS)
+
12557  {
+
12558  break;
+
12559  }
+
12560  }
+
12561  }
+
12562 
+
12563  if(res != VK_SUCCESS)
+
12564  {
+
12565  // Free all already created allocations.
+
12566  while(allocIndex--)
+
12567  {
+
12568  Free(pAllocations[allocIndex]);
+
12569  }
+
12570  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
+
12571  }
+
12572 
+
12573  return res;
+
12574 }
+
12575 
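
Allocate() gives vmaAllocateMemoryPages() its all-or-nothing semantics: pages are allocated under one write lock, and on the first failure every page created so far is freed and the output array is zeroed. A sketch of the corresponding public call (wrapper name is illustrative):

    #include <vector>
    #include "vk_mem_alloc.h"

    VkResult AllocatePages(VmaAllocator allocator, const VkMemoryRequirements& memReq,
        size_t pageCount, std::vector<VmaAllocation>& outAllocs)
    {
        VmaAllocationCreateInfo createInfo = {};
        createInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
        outAllocs.resize(pageCount); // value-initialized to null handles
        // Either all pageCount allocations succeed, or none remain on failure.
        return vmaAllocateMemoryPages(allocator, &memReq, &createInfo,
            pageCount, outAllocs.data(), nullptr);
    }
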
+
12576 VkResult VmaBlockVector::AllocatePage(
+
12577  uint32_t currentFrameIndex,
+
12578  VkDeviceSize size,
+
12579  VkDeviceSize alignment,
+
12580  const VmaAllocationCreateInfo& createInfo,
+
12581  VmaSuballocationType suballocType,
+
12582  VmaAllocation* pAllocation)
+
12583 {
+
12584  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
+
12585  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
+
12586  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
+
12587  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
+
12588 
+
12589  VkDeviceSize freeMemory;
+
12590  {
+
12591  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
+
12592  VmaBudget heapBudget = {};
+
12593  m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
+
12594  freeMemory = (heapBudget.usage < heapBudget.budget) ? (heapBudget.budget - heapBudget.usage) : 0;
+
12595  }
+
12596 
+
12597  const bool canFallbackToDedicated = !IsCustomPool();
+
12598  const bool canCreateNewBlock =
+
12599  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
+
12600  (m_Blocks.size() < m_MaxBlockCount) &&
+
12601  (freeMemory >= size || !canFallbackToDedicated);
+
12602  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
+
12603 
+
12604  // If linearAlgorithm is used, canMakeOtherLost is available only when used as a ring buffer,
+
12605  // which in turn is available only when maxBlockCount = 1.
+
12606  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
+
12607  {
+
12608  canMakeOtherLost = false;
+
12609  }
+
12610 
+
12611  // Upper address can only be used with linear allocator and within single memory block.
+
12612  if(isUpperAddress &&
+
12613  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
+
12614  {
+
12615  return VK_ERROR_FEATURE_NOT_PRESENT;
+
12616  }
+
12617 
+
12618  // Validate strategy.
+
12619  switch(strategy)
+
12620  {
+
12621  case 0:
+
12622  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
+
12623  break;
+
12624  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
+
12625  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
+
12626  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
+
12627  break;
+
12628  default:
+
12629  return VK_ERROR_FEATURE_NOT_PRESENT;
+
12630  }
+
12631 
+
12632  // Early reject: requested allocation size is larger than the maximum block size for this block vector.
+
12633  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
+
12634  {
+
12635  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
12636  }
+
12637 
+
12638  /*
+
12639  Under certain conditions, this whole section can be skipped for optimization, so
+
12640  we move on directly to trying to allocate with canMakeOtherLost. That's the case
+
12641  e.g. for custom pools with linear algorithm.
+
12642  */
+
12643  if(!canMakeOtherLost || canCreateNewBlock)
+
12644  {
+
12645  // 1. Search existing allocations. Try to allocate without making other allocations lost.
+
12646  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
+
12647  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
+
12648 
+
12649  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
+
12650  {
+
12651  // Use only last block.
+
12652  if(!m_Blocks.empty())
+
12653  {
+
12654  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
+
12655  VMA_ASSERT(pCurrBlock);
+
12656  VkResult res = AllocateFromBlock(
+
12657  pCurrBlock,
+
12658  currentFrameIndex,
+
12659  size,
+
12660  alignment,
+
12661  allocFlagsCopy,
+
12662  createInfo.pUserData,
+
12663  suballocType,
+
12664  strategy,
+
12665  pAllocation);
+
12666  if(res == VK_SUCCESS)
+
12667  {
+
12668  VMA_DEBUG_LOG(" Returned from last block #%u", pCurrBlock->GetId());
+
12669  return VK_SUCCESS;
+
12670  }
+
12671  }
+
12672  }
+
12673  else
+
12674  {
+
12675  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
+
12676  {
+
12677  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
+
12678  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
+
12679  {
+
12680  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
+
12681  VMA_ASSERT(pCurrBlock);
+
12682  VkResult res = AllocateFromBlock(
+
12683  pCurrBlock,
+
12684  currentFrameIndex,
+
12685  size,
+
12686  alignment,
+
12687  allocFlagsCopy,
+
12688  createInfo.pUserData,
+
12689  suballocType,
+
12690  strategy,
+
12691  pAllocation);
+
12692  if(res == VK_SUCCESS)
+
12693  {
+
12694  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
+
12695  return VK_SUCCESS;
+
12696  }
+
12697  }
+
12698  }
+
12699  else // WORST_FIT, FIRST_FIT
+
12700  {
+
12701  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
+
12702  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
+
12703  {
+
12704  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
+
12705  VMA_ASSERT(pCurrBlock);
+
12706  VkResult res = AllocateFromBlock(
+
12707  pCurrBlock,
+
12708  currentFrameIndex,
+
12709  size,
+
12710  alignment,
+
12711  allocFlagsCopy,
+
12712  createInfo.pUserData,
+
12713  suballocType,
+
12714  strategy,
+
12715  pAllocation);
+
12716  if(res == VK_SUCCESS)
+
12717  {
+
12718  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
+
12719  return VK_SUCCESS;
+
12720  }
+
12721  }
+
12722  }
+
12723  }
+
12724 
+
12725  // 2. Try to create new block.
+
12726  if(canCreateNewBlock)
+
12727  {
+
12728  // Calculate optimal size for new block.
+
12729  VkDeviceSize newBlockSize = m_PreferredBlockSize;
+
12730  uint32_t newBlockSizeShift = 0;
+
12731  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
+
12732 
+
12733  if(!m_ExplicitBlockSize)
+
12734  {
+
12735  // Allocate 1/8, 1/4, 1/2 as first blocks.
+
12736  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
+
12737  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
+
12738  {
+
12739  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
+
12740  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
+
12741  {
+
12742  newBlockSize = smallerNewBlockSize;
+
12743  ++newBlockSizeShift;
+
12744  }
+
12745  else
+
12746  {
+
12747  break;
+
12748  }
+
12749  }
+
12750  }
+
12751 
+
12752  size_t newBlockIndex = 0;
+
12753  VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
+
12754  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
12755  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
+
12756  if(!m_ExplicitBlockSize)
+
12757  {
+
12758  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
+
12759  {
+
12760  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
+
12761  if(smallerNewBlockSize >= size)
+
12762  {
+
12763  newBlockSize = smallerNewBlockSize;
+
12764  ++newBlockSizeShift;
+
12765  res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
+
12766  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
12767  }
+
12768  else
+
12769  {
+
12770  break;
+
12771  }
+
12772  }
+
12773  }
+
12774 
+
12775  if(res == VK_SUCCESS)
+
12776  {
+
12777  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
+
12778  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
+
12779 
+
12780  res = AllocateFromBlock(
+
12781  pBlock,
+
12782  currentFrameIndex,
+
12783  size,
+
12784  alignment,
+
12785  allocFlagsCopy,
+
12786  createInfo.pUserData,
+
12787  suballocType,
+
12788  strategy,
+
12789  pAllocation);
+
12790  if(res == VK_SUCCESS)
+
12791  {
+
12792  VMA_DEBUG_LOG(" Created new block #%u Size=%llu", pBlock->GetId(), newBlockSize);
+
12793  return VK_SUCCESS;
+
12794  }
+
12795  else
+
12796  {
+
12797  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
+
12798  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
12799  }
+
12800  }
+
12801  }
+
12802  }
+
12803 
+
12804  // 3. Try to allocate from existing blocks with making other allocations lost.
+
12805  if(canMakeOtherLost)
+
12806  {
+
12807  uint32_t tryIndex = 0;
+
12808  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
+
12809  {
+
12810  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
+
12811  VmaAllocationRequest bestRequest = {};
+
12812  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
+
12813 
+
12814  // 1. Search existing allocations.
+
12815  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
+
12816  {
+
12817  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
+
12818  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
+
12819  {
+
12820  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
+
12821  VMA_ASSERT(pCurrBlock);
+
12822  VmaAllocationRequest currRequest = {};
+
12823  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
+
12824  currentFrameIndex,
+
12825  m_FrameInUseCount,
+
12826  m_BufferImageGranularity,
+
12827  size,
+
12828  alignment,
+
12829  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
+
12830  suballocType,
+
12831  canMakeOtherLost,
+
12832  strategy,
+
12833  &currRequest))
+
12834  {
+
12835  const VkDeviceSize currRequestCost = currRequest.CalcCost();
+
12836  if(pBestRequestBlock == VMA_NULL ||
+
12837  currRequestCost < bestRequestCost)
+
12838  {
+
12839  pBestRequestBlock = pCurrBlock;
+
12840  bestRequest = currRequest;
+
12841  bestRequestCost = currRequestCost;
+
12842 
+
12843  if(bestRequestCost == 0)
+
12844  {
+
12845  break;
+
12846  }
+
12847  }
+
12848  }
+
12849  }
+
12850  }
+
12851  else // WORST_FIT, FIRST_FIT
+
12852  {
+
12853  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
+
12854  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
+
12855  {
+
12856  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
+
12857  VMA_ASSERT(pCurrBlock);
+
12858  VmaAllocationRequest currRequest = {};
+
12859  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
+
12860  currentFrameIndex,
+
12861  m_FrameInUseCount,
+
12862  m_BufferImageGranularity,
+
12863  size,
+
12864  alignment,
+
12865  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
+
12866  suballocType,
+
12867  canMakeOtherLost,
+
12868  strategy,
+
12869  &currRequest))
+
12870  {
+
12871  const VkDeviceSize currRequestCost = currRequest.CalcCost();
+
12872  if(pBestRequestBlock == VMA_NULL ||
+
12873  currRequestCost < bestRequestCost ||
+
12874  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
+
12875  {
+
12876  pBestRequestBlock = pCurrBlock;
+
12877  bestRequest = currRequest;
+
12878  bestRequestCost = currRequestCost;
+
12879 
+
12880  if(bestRequestCost == 0 ||
+
12881  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
+
12882  {
+
12883  break;
+
12884  }
+
12885  }
+
12886  }
+
12887  }
+
12888  }
+
12889 
+
12890  if(pBestRequestBlock != VMA_NULL)
+
12891  {
+
12892  if(mapped)
+
12893  {
+
12894  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
+
12895  if(res != VK_SUCCESS)
+
12896  {
+
12897  return res;
+
12898  }
+
12899  }
+
12900 
+
12901  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
+
12902  currentFrameIndex,
+
12903  m_FrameInUseCount,
+
12904  &bestRequest))
+
12905  {
+
12906  // Allocate from this pBlock.
+
12907  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(currentFrameIndex, isUserDataString);
+
12908  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
+
12909  UpdateHasEmptyBlock();
+
12910  (*pAllocation)->InitBlockAllocation(
+
12911  pBestRequestBlock,
+
12912  bestRequest.offset,
+
12913  alignment,
+
12914  size,
+
12915  m_MemoryTypeIndex,
+
12916  suballocType,
+
12917  mapped,
+
12918  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
+
12919  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
+
12920  VMA_DEBUG_LOG(" Returned from existing block");
+
12921  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
+
12922  m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
+
12923  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
+
12924  {
+
12925  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
+
12926  }
+
12927  if(IsCorruptionDetectionEnabled())
+
12928  {
+
12929  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
+
12930  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
+
12931  }
+
12932  return VK_SUCCESS;
+
12933  }
+
12934  // else: Some allocations must have been touched while we are here. Next try.
+
12935  }
+
12936  else
+
12937  {
+
12938  // Could not find place in any of the blocks - break outer loop.
+
12939  break;
+
12940  }
+
12941  }
+
12942  /* Maximum number of tries exceeded - a very unlikely event when many other
+
12943  threads are simultaneously touching allocations, making it impossible to make
+
12944  them lost at the same time as we try to allocate. */
+
12945  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
+
12946  {
+
12947  return VK_ERROR_TOO_MANY_OBJECTS;
+
12948  }
+
12949  }
+
12950 
+
12951  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
12952 }
12953 
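
The strategy switch at the top of AllocatePage() normalizes 0 to best fit and rejects unknown bits with VK_ERROR_FEATURE_NOT_PRESENT; later, best fit walks blocks forward (least free space first) while worst/first fit walk backward, and first fit additionally takes the first acceptable request. The flags are requested per allocation, e.g.:

    #include "vk_mem_alloc.h"

    VkResult AllocateFavoringSpeed(VmaAllocator allocator,
        const VkMemoryRequirements& memReq, VmaAllocation* pAlloc)
    {
        VmaAllocationCreateInfo createInfo = {};
        createInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
        // Take the first suitable place instead of searching for the best one.
        createInfo.flags = VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT;
        return vmaAllocateMemory(allocator, &memReq, &createInfo, pAlloc, nullptr);
    }
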
-
12954  if(hAllocation->IsPersistentMap())
-
12955  {
-
12956  pBlock->Unmap(m_hAllocator, 1);
-
12957  }
+
12954 void VmaBlockVector::Free(
+
12955  const VmaAllocation hAllocation)
+
12956 {
+
12957  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
12958 
-
12959  pBlock->m_pMetadata->Free(hAllocation);
-
12960  VMA_HEAVY_ASSERT(pBlock->Validate());
-
12961 
-
12962  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
-
12963 
-
12964  const bool canDeleteBlock = m_Blocks.size() > m_MinBlockCount;
-
12965  // pBlock became empty after this deallocation.
-
12966  if(pBlock->m_pMetadata->IsEmpty())
-
12967  {
-
12968  // Already has empty block. We don't want to have two, so delete this one.
-
12969  if((m_HasEmptyBlock || budgetExceeded) && canDeleteBlock)
-
12970  {
-
12971  pBlockToDelete = pBlock;
-
12972  Remove(pBlock);
-
12973  }
-
12974  // else: We now have an empty block - leave it.
-
12975  }
-
12976  // pBlock didn't become empty, but we have another empty block - find and free that one.
-
12977  // (This is optional, heuristics.)
-
12978  else if(m_HasEmptyBlock && canDeleteBlock)
-
12979  {
-
12980  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
-
12981  if(pLastBlock->m_pMetadata->IsEmpty())
-
12982  {
-
12983  pBlockToDelete = pLastBlock;
-
12984  m_Blocks.pop_back();
-
12985  }
-
12986  }
-
12987 
-
12988  UpdateHasEmptyBlock();
-
12989  IncrementallySortBlocks();
-
12990  }
-
12991 
-
12992  // Destruction of a free block. Deferred until this point, outside of mutex
-
12993  // lock, for performance reasons.
-
12994  if(pBlockToDelete != VMA_NULL)
-
12995  {
-
12996  VMA_DEBUG_LOG(" Deleted empty block");
-
12997  pBlockToDelete->Destroy(m_hAllocator);
-
12998  vma_delete(m_hAllocator, pBlockToDelete);
-
12999  }
-
13000 }
-
13001 
-
13002 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
-
13003 {
-
13004  VkDeviceSize result = 0;
-
13005  for(size_t i = m_Blocks.size(); i--; )
-
13006  {
-
13007  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
-
13008  if(result >= m_PreferredBlockSize)
-
13009  {
-
13010  break;
+
12959  bool budgetExceeded = false;
+
12960  {
+
12961  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
+
12962  VmaBudget heapBudget = {};
+
12963  m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
+
12964  budgetExceeded = heapBudget.usage >= heapBudget.budget;
+
12965  }
+
12966 
+
12967  // Scope for lock.
+
12968  {
+
12969  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
+
12970 
+
12971  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
+
12972 
+
12973  if(IsCorruptionDetectionEnabled())
+
12974  {
+
12975  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
+
12976  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
+
12977  }
+
12978 
+
12979  if(hAllocation->IsPersistentMap())
+
12980  {
+
12981  pBlock->Unmap(m_hAllocator, 1);
+
12982  }
+
12983 
+
12984  pBlock->m_pMetadata->Free(hAllocation);
+
12985  VMA_HEAVY_ASSERT(pBlock->Validate());
+
12986 
+
12987  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
+
12988 
+
12989  const bool canDeleteBlock = m_Blocks.size() > m_MinBlockCount;
+
12990  // pBlock became empty after this deallocation.
+
12991  if(pBlock->m_pMetadata->IsEmpty())
+
12992  {
+
12993  // Already has empty block. We don't want to have two, so delete this one.
+
12994  if((m_HasEmptyBlock || budgetExceeded) && canDeleteBlock)
+
12995  {
+
12996  pBlockToDelete = pBlock;
+
12997  Remove(pBlock);
+
12998  }
+
12999  // else: We now have an empty block - leave it.
+
13000  }
+
13001  // pBlock didn't become empty, but we have another empty block - find and free that one.
+
13002  // (This is optional, heuristics.)
+
13003  else if(m_HasEmptyBlock && canDeleteBlock)
+
13004  {
+
13005  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
+
13006  if(pLastBlock->m_pMetadata->IsEmpty())
+
13007  {
+
13008  pBlockToDelete = pLastBlock;
+
13009  m_Blocks.pop_back();
+
13010  }
13011  }
-
13012  }
-
13013  return result;
-
13014 }
-
13015 
-
13016 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
-
13017 {
-
13018  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
-
13019  {
-
13020  if(m_Blocks[blockIndex] == pBlock)
-
13021  {
-
13022  VmaVectorRemove(m_Blocks, blockIndex);
-
13023  return;
-
13024  }
-
13025  }
-
13026  VMA_ASSERT(0);
-
13027 }
-
13028 
-
13029 void VmaBlockVector::IncrementallySortBlocks()
-
13030 {
-
13031  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
-
13032  {
-
13033  // Bubble sort only until first swap.
-
13034  for(size_t i = 1; i < m_Blocks.size(); ++i)
-
13035  {
-
13036  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
-
13037  {
-
13038  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
-
13039  return;
-
13040  }
-
13041  }
-
13042  }
-
13043 }
-
13044 
-
13045 VkResult VmaBlockVector::AllocateFromBlock(
-
13046  VmaDeviceMemoryBlock* pBlock,
-
13047  uint32_t currentFrameIndex,
-
13048  VkDeviceSize size,
-
13049  VkDeviceSize alignment,
-
13050  VmaAllocationCreateFlags allocFlags,
-
13051  void* pUserData,
-
13052  VmaSuballocationType suballocType,
-
13053  uint32_t strategy,
-
13054  VmaAllocation* pAllocation)
+
13012 
+
13013  UpdateHasEmptyBlock();
+
13014  IncrementallySortBlocks();
+
13015  }
+
13016 
+
13017  // Destruction of a free block. Deferred until this point, outside of mutex
+
13018  // lock, for performance reasons.
+
13019  if(pBlockToDelete != VMA_NULL)
+
13020  {
+
13021  VMA_DEBUG_LOG(" Deleted empty block");
+
13022  pBlockToDelete->Destroy(m_hAllocator);
+
13023  vma_delete(m_hAllocator, pBlockToDelete);
+
13024  }
+
13025 }
+
13026 
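
The budgetExceeded test in Free() compares the same per-heap usage and budget numbers that applications can read through vmaGetBudget(); when usage reaches the budget, an emptied block is destroyed instead of being kept as a cache. A sketch, assuming heapIndex is below the device's heap count:

    #include "vk_mem_alloc.h"

    bool IsHeapOverBudget(VmaAllocator allocator, uint32_t heapIndex)
    {
        VmaBudget budgets[VK_MAX_MEMORY_HEAPS] = {};
        vmaGetBudget(allocator, budgets); // fills one entry per memory heap
        return budgets[heapIndex].usage >= budgets[heapIndex].budget;
    }
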
+
13027 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
+
13028 {
+
13029  VkDeviceSize result = 0;
+
13030  for(size_t i = m_Blocks.size(); i--; )
+
13031  {
+
13032  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
+
13033  if(result >= m_PreferredBlockSize)
+
13034  {
+
13035  break;
+
13036  }
+
13037  }
+
13038  return result;
+
13039 }
+
13040 
+
13041 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
+
13042 {
+
13043  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
+
13044  {
+
13045  if(m_Blocks[blockIndex] == pBlock)
+
13046  {
+
13047  VmaVectorRemove(m_Blocks, blockIndex);
+
13048  return;
+
13049  }
+
13050  }
+
13051  VMA_ASSERT(0);
+
13052 }
+
13053 
+
13054 void VmaBlockVector::IncrementallySortBlocks()
13055 {
-
13056  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
-
13057  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
-
13058  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
-
13059  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
-
13060 
-
13061  VmaAllocationRequest currRequest = {};
-
13062  if(pBlock->m_pMetadata->CreateAllocationRequest(
-
13063  currentFrameIndex,
-
13064  m_FrameInUseCount,
-
13065  m_BufferImageGranularity,
-
13066  size,
-
13067  alignment,
-
13068  isUpperAddress,
-
13069  suballocType,
-
13070  false, // canMakeOtherLost
-
13071  strategy,
-
13072  &currRequest))
-
13073  {
-
13074  // Allocate from pCurrBlock.
-
13075  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
-
13076 
-
13077  if(mapped)
-
13078  {
-
13079  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
-
13080  if(res != VK_SUCCESS)
-
13081  {
-
13082  return res;
-
13083  }
-
13084  }
-
13085 
-
13086  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(currentFrameIndex, isUserDataString);
-
13087  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
-
13088  UpdateHasEmptyBlock();
-
13089  (*pAllocation)->InitBlockAllocation(
-
13090  pBlock,
-
13091  currRequest.offset,
-
13092  alignment,
-
13093  size,
-
13094  m_MemoryTypeIndex,
-
13095  suballocType,
-
13096  mapped,
-
13097  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
-
13098  VMA_HEAVY_ASSERT(pBlock->Validate());
-
13099  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
-
13100  m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
-
13101  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
-
13102  {
-
13103  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
-
13104  }
-
13105  if(IsCorruptionDetectionEnabled())
-
13106  {
-
13107  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
-
13108  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
+
13056  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
+
13057  {
+
13058  // Bubble sort only until first swap.
+
13059  for(size_t i = 1; i < m_Blocks.size(); ++i)
+
13060  {
+
13061  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
+
13062  {
+
13063  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
+
13064  return;
+
13065  }
+
13066  }
+
13067  }
+
13068 }
+
13069 
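
IncrementallySortBlocks() amortizes ordering: each call performs at most one adjacent swap toward ascending free space, which suffices because the vector only drifts slightly per allocation or free. Reduced to a generic, self-contained sketch:

    #include <utility>
    #include <vector>

    // One bubble-sort step per call keeps a nearly sorted vector nearly sorted
    // in O(n) per update, avoiding a full O(n log n) sort on every change.
    template<typename T, typename Less>
    void IncrementalSortStep(std::vector<T>& v, Less less)
    {
        for(size_t i = 1; i < v.size(); ++i)
        {
            if(less(v[i], v[i - 1]))
            {
                std::swap(v[i - 1], v[i]);
                return; // mirrors the single-swap early-out above
            }
        }
    }
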
+
13070 VkResult VmaBlockVector::AllocateFromBlock(
+
13071  VmaDeviceMemoryBlock* pBlock,
+
13072  uint32_t currentFrameIndex,
+
13073  VkDeviceSize size,
+
13074  VkDeviceSize alignment,
+
13075  VmaAllocationCreateFlags allocFlags,
+
13076  void* pUserData,
+
13077  VmaSuballocationType suballocType,
+
13078  uint32_t strategy,
+
13079  VmaAllocation* pAllocation)
+
13080 {
+
13081  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
+
13082  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
+
13083  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
+
13084  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
+
13085 
+
13086  VmaAllocationRequest currRequest = {};
+
13087  if(pBlock->m_pMetadata->CreateAllocationRequest(
+
13088  currentFrameIndex,
+
13089  m_FrameInUseCount,
+
13090  m_BufferImageGranularity,
+
13091  size,
+
13092  alignment,
+
13093  isUpperAddress,
+
13094  suballocType,
+
13095  false, // canMakeOtherLost
+
13096  strategy,
+
13097  &currRequest))
+
13098  {
+
13099  // Allocate from pCurrBlock.
+
13100  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
+
13101 
+
13102  if(mapped)
+
13103  {
+
13104  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
+
13105  if(res != VK_SUCCESS)
+
13106  {
+
13107  return res;
+
13108  }
13109  }
-
13110  return VK_SUCCESS;
-
13111  }
-
13112  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-
13113 }
-
13114 
-
13115 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
-
13116 {
-
13117  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
-
13118  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
-
13119  allocInfo.allocationSize = blockSize;
-
13120 
-
13121 #if VMA_BUFFER_DEVICE_ADDRESS
-
13122  // Every standalone block can potentially contain a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT - always enable the feature.
-
13123  VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
-
13124  if(m_hAllocator->m_UseKhrBufferDeviceAddress)
-
13125  {
-
13126  allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
-
13127  VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
-
13128  }
-
13129 #endif // #if VMA_BUFFER_DEVICE_ADDRESS
-
13130 
-
13131  VkDeviceMemory mem = VK_NULL_HANDLE;
-
13132  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
-
13133  if(res < 0)
-
13134  {
-
13135  return res;
+
13110 
+
13111  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(currentFrameIndex, isUserDataString);
+
13112  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
+
13113  UpdateHasEmptyBlock();
+
13114  (*pAllocation)->InitBlockAllocation(
+
13115  pBlock,
+
13116  currRequest.offset,
+
13117  alignment,
+
13118  size,
+
13119  m_MemoryTypeIndex,
+
13120  suballocType,
+
13121  mapped,
+
13122  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
+
13123  VMA_HEAVY_ASSERT(pBlock->Validate());
+
13124  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
+
13125  m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
+
13126  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
+
13127  {
+
13128  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
+
13129  }
+
13130  if(IsCorruptionDetectionEnabled())
+
13131  {
+
13132  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
+
13133  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
+
13134  }
+
13135  return VK_SUCCESS;
13136  }
-
13137 
-
13138  // New VkDeviceMemory successfully created.
+
13137  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
13138 }
13139 
-
13140  // Create new Allocation for it.
-
13141  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
-
13142  pBlock->Init(
-
13143  m_hAllocator,
-
13144  m_hParentPool,
-
13145  m_MemoryTypeIndex,
-
13146  mem,
-
13147  allocInfo.allocationSize,
-
13148  m_NextBlockId++,
-
13149  m_Algorithm);
-
13150 
-
13151  m_Blocks.push_back(pBlock);
-
13152  if(pNewBlockIndex != VMA_NULL)
-
13153  {
-
13154  *pNewBlockIndex = m_Blocks.size() - 1;
-
13155  }
-
13156 
-
13157  return VK_SUCCESS;
-
13158 }
-
13159 
-
13160 void VmaBlockVector::ApplyDefragmentationMovesCpu(
-
13161  class VmaBlockVectorDefragmentationContext* pDefragCtx,
-
13162  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
-
13163 {
-
13164  const size_t blockCount = m_Blocks.size();
-
13165  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
-
13166 
-
13167  enum BLOCK_FLAG
-
13168  {
-
13169  BLOCK_FLAG_USED = 0x00000001,
-
13170  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
-
13171  };
-
13172 
-
13173  struct BlockInfo
-
13174  {
-
13175  uint32_t flags;
-
13176  void* pMappedData;
-
13177  };
-
13178  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
-
13179  blockInfo(blockCount, BlockInfo(), VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
-
13180  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
+
13140 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
+
13141 {
+
13142  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
+
13143  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
+
13144  allocInfo.allocationSize = blockSize;
+
13145 
+
13146 #if VMA_BUFFER_DEVICE_ADDRESS
+
13147  // Every standalone block can potentially contain a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT - always enable the feature.
+
13148  VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
+
13149  if(m_hAllocator->m_UseKhrBufferDeviceAddress)
+
13150  {
+
13151  allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
+
13152  VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
+
13153  }
+
13154 #endif // #if VMA_BUFFER_DEVICE_ADDRESS
+
13155 
+
13156  VkDeviceMemory mem = VK_NULL_HANDLE;
+
13157  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
+
13158  if(res < 0)
+
13159  {
+
13160  return res;
+
13161  }
+
13162 
+
13163  // New VkDeviceMemory successfully created.
+
13164 
+
13165  // Create new Allocation for it.
+
13166  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
+
13167  pBlock->Init(
+
13168  m_hAllocator,
+
13169  m_hParentPool,
+
13170  m_MemoryTypeIndex,
+
13171  mem,
+
13172  allocInfo.allocationSize,
+
13173  m_NextBlockId++,
+
13174  m_Algorithm);
+
13175 
+
13176  m_Blocks.push_back(pBlock);
+
13177  if(pNewBlockIndex != VMA_NULL)
+
13178  {
+
13179  *pNewBlockIndex = m_Blocks.size() - 1;
+
13180  }
13181 
-
13182  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
-
13183  const size_t moveCount = moves.size();
-
13184  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
-
13185  {
-
13186  const VmaDefragmentationMove& move = moves[moveIndex];
-
13187  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
-
13188  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
-
13189  }
-
13190 
-
13191  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
-
13192 
-
13193  // Go over all blocks. Get mapped pointer or map if necessary.
-
13194  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
-
13195  {
-
13196  BlockInfo& currBlockInfo = blockInfo[blockIndex];
-
13197  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
-
13198  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
-
13199  {
-
13200  currBlockInfo.pMappedData = pBlock->GetMappedData();
-
13201  // It is not originally mapped - map it.
-
13202  if(currBlockInfo.pMappedData == VMA_NULL)
-
13203  {
-
13204  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
-
13205  if(pDefragCtx->res == VK_SUCCESS)
-
13206  {
-
13207  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
-
13208  }
-
13209  }
-
13210  }
-
13211  }
-
13212 
-
13213  // Go over all moves. Do actual data transfer.
-
13214  if(pDefragCtx->res == VK_SUCCESS)
-
13215  {
-
13216  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
-
13217  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
-
13218 
-
13219  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
-
13220  {
-
13221  const VmaDefragmentationMove& move = moves[moveIndex];
-
13222 
-
13223  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
-
13224  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
-
13225 
-
13226  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
-
13227 
-
13228  // Invalidate source.
-
13229  if(isNonCoherent)
-
13230  {
-
13231  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
-
13232  memRange.memory = pSrcBlock->GetDeviceMemory();
-
13233  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
-
13234  memRange.size = VMA_MIN(
-
13235  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
-
13236  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
-
13237  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
-
13238  }
-
13239 
-
13240  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
-
13241  memmove(
-
13242  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
-
13243  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
-
13244  static_cast<size_t>(move.size));
-
13245 
-
13246  if(IsCorruptionDetectionEnabled())
-
13247  {
-
13248  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
-
13249  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
-
13250  }
-
13251 
-
13252  // Flush destination.
-
13253  if(isNonCoherent)
-
13254  {
-
13255  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
-
13256  memRange.memory = pDstBlock->GetDeviceMemory();
-
13257  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
-
13258  memRange.size = VMA_MIN(
-
13259  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
-
13260  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
-
13261  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
-
13262  }
-
13263  }
-
13264  }
-
13265 
-
13266  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
-
13267  // Regardless of pCtx->res == VK_SUCCESS.
-
13268  for(size_t blockIndex = blockCount; blockIndex--; )
-
13269  {
-
13270  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
-
13271  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
-
13272  {
-
13273  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
-
13274  pBlock->Unmap(m_hAllocator, 1);
-
13275  }
-
13276  }
-
13277 }
-
13278 
-
13279 void VmaBlockVector::ApplyDefragmentationMovesGpu(
-
13280  class VmaBlockVectorDefragmentationContext* pDefragCtx,
-
13281  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
-
13282  VkCommandBuffer commandBuffer)
-
13283 {
-
13284  const size_t blockCount = m_Blocks.size();
-
13285 
-
13286  pDefragCtx->blockContexts.resize(blockCount);
-
13287  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
-
13288 
-
13289  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
-
13290  const size_t moveCount = moves.size();
-
13291  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
-
13292  {
-
13293  const VmaDefragmentationMove& move = moves[moveIndex];
-
13294 
-
13295  //if(move.type == VMA_ALLOCATION_TYPE_UNKNOWN)
-
13296  {
-
13297  // Old school move still requires us to map the whole block.
-
13298  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
-
13299  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
+
13182  return VK_SUCCESS;
+
13183 }
+
13184 
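For context, the #if VMA_BUFFER_DEVICE_ADDRESS path in CreateBlock() above builds a standard Vulkan pNext chain via VmaPnextChainPushFront. A minimal plain-Vulkan sketch of the equivalent allocation, assuming a valid `device` and a previously chosen `memoryTypeIndex`:

    // Sketch only: what CreateBlock() sets up when buffer device address is enabled.
    VkMemoryAllocateFlagsInfo flagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO };
    flagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT;

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.pNext = &flagsInfo;             // flags struct pushed to the front of the chain
    allocInfo.memoryTypeIndex = memoryTypeIndex;
    allocInfo.allocationSize = 256ull << 20;  // illustrative block size

    VkDeviceMemory mem = VK_NULL_HANDLE;
    VkResult res = vkAllocateMemory(device, &allocInfo, nullptr, &mem);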
+
13185 void VmaBlockVector::ApplyDefragmentationMovesCpu(
+
13186  class VmaBlockVectorDefragmentationContext* pDefragCtx,
+
13187  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
+
13188 {
+
13189  const size_t blockCount = m_Blocks.size();
+
13190  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
+
13191 
+
13192  enum BLOCK_FLAG
+
13193  {
+
13194  BLOCK_FLAG_USED = 0x00000001,
+
13195  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
+
13196  };
+
13197 
+
13198  struct BlockInfo
+
13199  {
+
13200  uint32_t flags;
+
13201  void* pMappedData;
+
13202  };
+
13203  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
+
13204  blockInfo(blockCount, BlockInfo(), VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
+
13205  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
+
13206 
+
13207  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
+
13208  const size_t moveCount = moves.size();
+
13209  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
+
13210  {
+
13211  const VmaDefragmentationMove& move = moves[moveIndex];
+
13212  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
+
13213  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
+
13214  }
+
13215 
+
13216  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
+
13217 
+
13218  // Go over all blocks. Get mapped pointer or map if necessary.
+
13219  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
+
13220  {
+
13221  BlockInfo& currBlockInfo = blockInfo[blockIndex];
+
13222  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
+
13223  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
+
13224  {
+
13225  currBlockInfo.pMappedData = pBlock->GetMappedData();
+
13226  // It is not originally mapped - map it.
+
13227  if(currBlockInfo.pMappedData == VMA_NULL)
+
13228  {
+
13229  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
+
13230  if(pDefragCtx->res == VK_SUCCESS)
+
13231  {
+
13232  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
+
13233  }
+
13234  }
+
13235  }
+
13236  }
+
13237 
+
13238  // Go over all moves. Do actual data transfer.
+
13239  if(pDefragCtx->res == VK_SUCCESS)
+
13240  {
+
13241  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
+
13242  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
+
13243 
+
13244  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
+
13245  {
+
13246  const VmaDefragmentationMove& move = moves[moveIndex];
+
13247 
+
13248  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
+
13249  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
+
13250 
+
13251  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
+
13252 
+
13253  // Invalidate source.
+
13254  if(isNonCoherent)
+
13255  {
+
13256  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
+
13257  memRange.memory = pSrcBlock->GetDeviceMemory();
+
13258  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
+
13259  memRange.size = VMA_MIN(
+
13260  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
+
13261  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
+
13262  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
+
13263  }
+
13264 
+
13265  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
+
13266  memmove(
+
13267  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
+
13268  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
+
13269  static_cast<size_t>(move.size));
+
13270 
+
13271  if(IsCorruptionDetectionEnabled())
+
13272  {
+
13273  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
+
13274  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
+
13275  }
+
13276 
+
13277  // Flush destination.
+
13278  if(isNonCoherent)
+
13279  {
+
13280  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
+
13281  memRange.memory = pDstBlock->GetDeviceMemory();
+
13282  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
+
13283  memRange.size = VMA_MIN(
+
13284  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
+
13285  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
+
13286  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
+
13287  }
+
13288  }
+
13289  }
+
13290 
+
13291  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
+
13292  // Regardless of pCtx->res == VK_SUCCESS.
+
13293  for(size_t blockIndex = blockCount; blockIndex--; )
+
13294  {
+
13295  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
+
13296  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
+
13297  {
+
13298  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
+
13299  pBlock->Unmap(m_hAllocator, 1);
13300  }
13301  }
-
13302 
-
13303  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
-
13304 
-
13305  // Go over all blocks. Create and bind buffer for whole block if necessary.
-
13306  {
-
13307  VkBufferCreateInfo bufCreateInfo;
-
13308  VmaFillGpuDefragmentationBufferCreateInfo(bufCreateInfo);
-
13309 
-
13310  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
-
13311  {
-
13312  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
-
13313  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
-
13314  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
-
13315  {
-
13316  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
-
13317  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
-
13318  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
-
13319  if(pDefragCtx->res == VK_SUCCESS)
-
13320  {
-
13321  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
-
13322  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
-
13323  }
-
13324  }
+
13302 }
+
13303 
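The invalidate/flush ranges in ApplyDefragmentationMovesCpu() above are aligned to nonCoherentAtomSize and clamped to the block size, as Vulkan requires for non-coherent memory. A worked example of that arithmetic with illustrative numbers:

    // With nonCoherentAtomSize = 64, move.srcOffset = 100, move.size = 40:
    //   memRange.offset = VmaAlignDown(100, 64)           = 64
    //   memRange.size   = VmaAlignUp(40 + (100 - 64), 64) = VmaAlignUp(76, 64) = 128
    // The range [64, 192) fully covers the moved bytes [100, 140); VMA_MIN then
    // clamps the size so the range never extends past the end of the block.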
+
13304 void VmaBlockVector::ApplyDefragmentationMovesGpu(
+
13305  class VmaBlockVectorDefragmentationContext* pDefragCtx,
+
13306  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
+
13307  VkCommandBuffer commandBuffer)
+
13308 {
+
13309  const size_t blockCount = m_Blocks.size();
+
13310 
+
13311  pDefragCtx->blockContexts.resize(blockCount);
+
13312  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
+
13313 
+
13314  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
+
13315  const size_t moveCount = moves.size();
+
13316  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
+
13317  {
+
13318  const VmaDefragmentationMove& move = moves[moveIndex];
+
13319 
+
13320  //if(move.type == VMA_ALLOCATION_TYPE_UNKNOWN)
+
13321  {
+
13322  // Old school move still requires us to map the whole block.
+
13323  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
+
13324  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
13325  }
13326  }
13327 
-
13328  // Go over all moves. Post data transfer commands to command buffer.
-
13329  if(pDefragCtx->res == VK_SUCCESS)
-
13330  {
-
13331  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
-
13332  {
-
13333  const VmaDefragmentationMove& move = moves[moveIndex];
+
13328  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
+
13329 
+
13330  // Go over all blocks. Create and bind buffer for whole block if necessary.
+
13331  {
+
13332  VkBufferCreateInfo bufCreateInfo;
+
13333  VmaFillGpuDefragmentationBufferCreateInfo(bufCreateInfo);
13334 
-
13335  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
-
13336  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
-
13337 
-
13338  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
-
13339 
-
13340  VkBufferCopy region = {
-
13341  move.srcOffset,
-
13342  move.dstOffset,
-
13343  move.size };
-
13344  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
-
13345  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
-
13346  }
-
13347  }
-
13348 
-
13349  // Save buffers to defrag context for later destruction.
-
13350  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
-
13351  {
-
13352  pDefragCtx->res = VK_NOT_READY;
-
13353  }
-
13354 }
-
13355 
-
13356 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
-
13357 {
-
13358  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
-
13359  {
-
13360  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
-
13361  if(pBlock->m_pMetadata->IsEmpty())
-
13362  {
-
13363  if(m_Blocks.size() > m_MinBlockCount)
-
13364  {
-
13365  if(pDefragmentationStats != VMA_NULL)
-
13366  {
-
13367  ++pDefragmentationStats->deviceMemoryBlocksFreed;
-
13368  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
-
13369  }
-
13370 
-
13371  VmaVectorRemove(m_Blocks, blockIndex);
-
13372  pBlock->Destroy(m_hAllocator);
-
13373  vma_delete(m_hAllocator, pBlock);
-
13374  }
-
13375  else
-
13376  {
-
13377  break;
-
13378  }
-
13379  }
-
13380  }
-
13381  UpdateHasEmptyBlock();
-
13382 }
-
13383 
-
13384 void VmaBlockVector::UpdateHasEmptyBlock()
-
13385 {
-
13386  m_HasEmptyBlock = false;
-
13387  for(size_t index = 0, count = m_Blocks.size(); index < count; ++index)
-
13388  {
-
13389  VmaDeviceMemoryBlock* const pBlock = m_Blocks[index];
-
13390  if(pBlock->m_pMetadata->IsEmpty())
-
13391  {
-
13392  m_HasEmptyBlock = true;
-
13393  break;
-
13394  }
-
13395  }
-
13396 }
-
13397 
-
13398 #if VMA_STATS_STRING_ENABLED
-
13399 
-
13400 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
-
13401 {
-
13402  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
-
13403 
-
13404  json.BeginObject();
-
13405 
-
13406  if(IsCustomPool())
-
13407  {
-
13408  const char* poolName = m_hParentPool->GetName();
-
13409  if(poolName != VMA_NULL && poolName[0] != '\0')
-
13410  {
-
13411  json.WriteString("Name");
-
13412  json.WriteString(poolName);
-
13413  }
-
13414 
-
13415  json.WriteString("MemoryTypeIndex");
-
13416  json.WriteNumber(m_MemoryTypeIndex);
-
13417 
-
13418  json.WriteString("BlockSize");
-
13419  json.WriteNumber(m_PreferredBlockSize);
-
13420 
-
13421  json.WriteString("BlockCount");
-
13422  json.BeginObject(true);
-
13423  if(m_MinBlockCount > 0)
-
13424  {
-
13425  json.WriteString("Min");
-
13426  json.WriteNumber((uint64_t)m_MinBlockCount);
-
13427  }
-
13428  if(m_MaxBlockCount < SIZE_MAX)
-
13429  {
-
13430  json.WriteString("Max");
-
13431  json.WriteNumber((uint64_t)m_MaxBlockCount);
-
13432  }
-
13433  json.WriteString("Cur");
-
13434  json.WriteNumber((uint64_t)m_Blocks.size());
-
13435  json.EndObject();
-
13436 
-
13437  if(m_FrameInUseCount > 0)
-
13438  {
-
13439  json.WriteString("FrameInUseCount");
-
13440  json.WriteNumber(m_FrameInUseCount);
-
13441  }
+
13335  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
+
13336  {
+
13337  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
+
13338  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
+
13339  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
+
13340  {
+
13341  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
+
13342  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
+
13343  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
+
13344  if(pDefragCtx->res == VK_SUCCESS)
+
13345  {
+
13346  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
+
13347  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
+
13348  }
+
13349  }
+
13350  }
+
13351  }
+
13352 
+
13353  // Go over all moves. Post data transfer commands to command buffer.
+
13354  if(pDefragCtx->res == VK_SUCCESS)
+
13355  {
+
13356  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
+
13357  {
+
13358  const VmaDefragmentationMove& move = moves[moveIndex];
+
13359 
+
13360  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
+
13361  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
+
13362 
+
13363  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
+
13364 
+
13365  VkBufferCopy region = {
+
13366  move.srcOffset,
+
13367  move.dstOffset,
+
13368  move.size };
+
13369  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
+
13370  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
+
13371  }
+
13372  }
+
13373 
+
13374  // Save buffers to defrag context for later destruction.
+
13375  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
+
13376  {
+
13377  pDefragCtx->res = VK_NOT_READY;
+
13378  }
+
13379 }
+
13380 
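ApplyDefragmentationMovesGpu() only records vkCmdCopyBuffer commands; beginning, submitting, and synchronizing the command buffer stays on the caller's side. A sketch of that caller-side contract, assuming valid `device`, `cmdBuf`, `queue`, and `fence` handles:

    VkCommandBufferBeginInfo beginInfo = { VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO };
    beginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
    vkBeginCommandBuffer(cmdBuf, &beginInfo);

    // ... the defragmentation pass records its vkCmdCopyBuffer calls here ...

    vkEndCommandBuffer(cmdBuf);
    VkSubmitInfo submit = { VK_STRUCTURE_TYPE_SUBMIT_INFO };
    submit.commandBufferCount = 1;
    submit.pCommandBuffers = &cmdBuf;
    vkQueueSubmit(queue, 1, &submit, fence);
    vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);  // moved data valid after this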
+
13381 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
+
13382 {
+
13383  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
+
13384  {
+
13385  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
+
13386  if(pBlock->m_pMetadata->IsEmpty())
+
13387  {
+
13388  if(m_Blocks.size() > m_MinBlockCount)
+
13389  {
+
13390  if(pDefragmentationStats != VMA_NULL)
+
13391  {
+
13392  ++pDefragmentationStats->deviceMemoryBlocksFreed;
+
13393  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
+
13394  }
+
13395 
+
13396  VmaVectorRemove(m_Blocks, blockIndex);
+
13397  pBlock->Destroy(m_hAllocator);
+
13398  vma_delete(m_hAllocator, pBlock);
+
13399  }
+
13400  else
+
13401  {
+
13402  break;
+
13403  }
+
13404  }
+
13405  }
+
13406  UpdateHasEmptyBlock();
+
13407 }
+
13408 
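FreeEmptyBlocks() never shrinks the vector below m_MinBlockCount, which for custom pools comes from the public VmaPoolCreateInfo. A sketch of keeping one block alive across defragmentation, assuming `allocator` and a `memTypeIndex` chosen earlier (e.g. via vmaFindMemoryTypeIndex):

    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memTypeIndex;
    poolInfo.blockSize = 64ull * 1024 * 1024;  // illustrative
    poolInfo.minBlockCount = 1;                // keep one block even when empty
    poolInfo.maxBlockCount = 8;

    VmaPool pool = VK_NULL_HANDLE;
    VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);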
+
13409 void VmaBlockVector::UpdateHasEmptyBlock()
+
13410 {
+
13411  m_HasEmptyBlock = false;
+
13412  for(size_t index = 0, count = m_Blocks.size(); index < count; ++index)
+
13413  {
+
13414  VmaDeviceMemoryBlock* const pBlock = m_Blocks[index];
+
13415  if(pBlock->m_pMetadata->IsEmpty())
+
13416  {
+
13417  m_HasEmptyBlock = true;
+
13418  break;
+
13419  }
+
13420  }
+
13421 }
+
13422 
+
13423 #if VMA_STATS_STRING_ENABLED
+
13424 
+
13425 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
+
13426 {
+
13427  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
+
13428 
+
13429  json.BeginObject();
+
13430 
+
13431  if(IsCustomPool())
+
13432  {
+
13433  const char* poolName = m_hParentPool->GetName();
+
13434  if(poolName != VMA_NULL && poolName[0] != '\0')
+
13435  {
+
13436  json.WriteString("Name");
+
13437  json.WriteString(poolName);
+
13438  }
+
13439 
+
13440  json.WriteString("MemoryTypeIndex");
+
13441  json.WriteNumber(m_MemoryTypeIndex);
13442 
-
13443  if(m_Algorithm != 0)
-
13444  {
-
13445  json.WriteString("Algorithm");
-
13446  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
-
13447  }
-
13448  }
-
13449  else
-
13450  {
-
13451  json.WriteString("PreferredBlockSize");
-
13452  json.WriteNumber(m_PreferredBlockSize);
-
13453  }
-
13454 
-
13455  json.WriteString("Blocks");
-
13456  json.BeginObject();
-
13457  for(size_t i = 0; i < m_Blocks.size(); ++i)
-
13458  {
-
13459  json.BeginString();
-
13460  json.ContinueString(m_Blocks[i]->GetId());
-
13461  json.EndString();
-
13462 
-
13463  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
-
13464  }
-
13465  json.EndObject();
-
13466 
-
13467  json.EndObject();
-
13468 }
-
13469 
-
13470 #endif // #if VMA_STATS_STRING_ENABLED
-
13471 
-
13472 void VmaBlockVector::Defragment(
-
13473  class VmaBlockVectorDefragmentationContext* pCtx,
-
13474  VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags,
-
13475  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
-
13476  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
-
13477  VkCommandBuffer commandBuffer)
-
13478 {
-
13479  pCtx->res = VK_SUCCESS;
-
13480 
-
13481  const VkMemoryPropertyFlags memPropFlags =
-
13482  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
-
13483  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
-
13484 
-
13485  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
-
13486  isHostVisible;
-
13487  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
-
13488  !IsCorruptionDetectionEnabled() &&
-
13489  ((1u << m_MemoryTypeIndex) & m_hAllocator->GetGpuDefragmentationMemoryTypeBits()) != 0;
-
13490 
-
13491  // There are options to defragment this memory type.
-
13492  if(canDefragmentOnCpu || canDefragmentOnGpu)
-
13493  {
-
13494  bool defragmentOnGpu;
-
13495  // There is only one option to defragment this memory type.
-
13496  if(canDefragmentOnGpu != canDefragmentOnCpu)
-
13497  {
-
13498  defragmentOnGpu = canDefragmentOnGpu;
-
13499  }
-
13500  // Both options are available: use heuristics to choose the better one.
-
13501  else
-
13502  {
-
13503  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
-
13504  m_hAllocator->IsIntegratedGpu();
-
13505  }
-
13506 
-
13507  bool overlappingMoveSupported = !defragmentOnGpu;
-
13508 
-
13509  if(m_hAllocator->m_UseMutex)
-
13510  {
-
13511  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
-
13512  {
-
13513  if(!m_Mutex.TryLockWrite())
-
13514  {
-
13515  pCtx->res = VK_ERROR_INITIALIZATION_FAILED;
-
13516  return;
-
13517  }
-
13518  }
-
13519  else
-
13520  {
-
13521  m_Mutex.LockWrite();
-
13522  pCtx->mutexLocked = true;
-
13523  }
+
13443  json.WriteString("BlockSize");
+
13444  json.WriteNumber(m_PreferredBlockSize);
+
13445 
+
13446  json.WriteString("BlockCount");
+
13447  json.BeginObject(true);
+
13448  if(m_MinBlockCount > 0)
+
13449  {
+
13450  json.WriteString("Min");
+
13451  json.WriteNumber((uint64_t)m_MinBlockCount);
+
13452  }
+
13453  if(m_MaxBlockCount < SIZE_MAX)
+
13454  {
+
13455  json.WriteString("Max");
+
13456  json.WriteNumber((uint64_t)m_MaxBlockCount);
+
13457  }
+
13458  json.WriteString("Cur");
+
13459  json.WriteNumber((uint64_t)m_Blocks.size());
+
13460  json.EndObject();
+
13461 
+
13462  if(m_FrameInUseCount > 0)
+
13463  {
+
13464  json.WriteString("FrameInUseCount");
+
13465  json.WriteNumber(m_FrameInUseCount);
+
13466  }
+
13467 
+
13468  if(m_Algorithm != 0)
+
13469  {
+
13470  json.WriteString("Algorithm");
+
13471  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
+
13472  }
+
13473  }
+
13474  else
+
13475  {
+
13476  json.WriteString("PreferredBlockSize");
+
13477  json.WriteNumber(m_PreferredBlockSize);
+
13478  }
+
13479 
+
13480  json.WriteString("Blocks");
+
13481  json.BeginObject();
+
13482  for(size_t i = 0; i < m_Blocks.size(); ++i)
+
13483  {
+
13484  json.BeginString();
+
13485  json.ContinueString(m_Blocks[i]->GetId());
+
13486  json.EndString();
+
13487 
+
13488  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
+
13489  }
+
13490  json.EndObject();
+
13491 
+
13492  json.EndObject();
+
13493 }
+
13494 
+
13495 #endif // #if VMA_STATS_STRING_ENABLED
+
13496 
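PrintDetailedMap() above feeds the JSON dump exposed through the public statistics API; a typical caller-side sketch, assuming a valid `allocator`:

    char* statsString = nullptr;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE /* detailedMap */);
    // ... write statsString out for offline inspection ...
    vmaFreeStatsString(allocator, statsString);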
+
13497 void VmaBlockVector::Defragment(
+
13498  class VmaBlockVectorDefragmentationContext* pCtx,
+
13499  VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags,
+
13500  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
+
13501  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
+
13502  VkCommandBuffer commandBuffer)
+
13503 {
+
13504  pCtx->res = VK_SUCCESS;
+
13505 
+
13506  const VkMemoryPropertyFlags memPropFlags =
+
13507  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
+
13508  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
+
13509 
+
13510  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
+
13511  isHostVisible;
+
13512  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
+
13513  !IsCorruptionDetectionEnabled() &&
+
13514  ((1u << m_MemoryTypeIndex) & m_hAllocator->GetGpuDefragmentationMemoryTypeBits()) != 0;
+
13515 
+
13516  // There are options to defragment this memory type.
+
13517  if(canDefragmentOnCpu || canDefragmentOnGpu)
+
13518  {
+
13519  bool defragmentOnGpu;
+
13520  // There is only one option to defragment this memory type.
+
13521  if(canDefragmentOnGpu != canDefragmentOnCpu)
+
13522  {
+
13523  defragmentOnGpu = canDefragmentOnGpu;
13524  }
-
13525 
-
13526  pCtx->Begin(overlappingMoveSupported, flags);
-
13527 
-
13528  // Defragment.
-
13529 
-
13530  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
-
13531  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
-
13532  pCtx->res = pCtx->GetAlgorithm()->Defragment(pCtx->defragmentationMoves, maxBytesToMove, maxAllocationsToMove, flags);
+
13525  // Both options are available: use heuristics to choose the better one.
+
13526  else
+
13527  {
+
13528  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
+
13529  m_hAllocator->IsIntegratedGpu();
+
13530  }
+
13531 
+
13532  bool overlappingMoveSupported = !defragmentOnGpu;
13533 
-
13534  // Accumulate statistics.
-
13535  if(pStats != VMA_NULL)
-
13536  {
-
13537  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
-
13538  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
-
13539  pStats->bytesMoved += bytesMoved;
-
13540  pStats->allocationsMoved += allocationsMoved;
-
13541  VMA_ASSERT(bytesMoved <= maxBytesToMove);
-
13542  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
-
13543  if(defragmentOnGpu)
-
13544  {
-
13545  maxGpuBytesToMove -= bytesMoved;
-
13546  maxGpuAllocationsToMove -= allocationsMoved;
-
13547  }
-
13548  else
-
13549  {
-
13550  maxCpuBytesToMove -= bytesMoved;
-
13551  maxCpuAllocationsToMove -= allocationsMoved;
-
13552  }
-
13553  }
+
13534  if(m_hAllocator->m_UseMutex)
+
13535  {
+
13536  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
+
13537  {
+
13538  if(!m_Mutex.TryLockWrite())
+
13539  {
+
13540  pCtx->res = VK_ERROR_INITIALIZATION_FAILED;
+
13541  return;
+
13542  }
+
13543  }
+
13544  else
+
13545  {
+
13546  m_Mutex.LockWrite();
+
13547  pCtx->mutexLocked = true;
+
13548  }
+
13549  }
+
13550 
+
13551  pCtx->Begin(overlappingMoveSupported, flags);
+
13552 
+
13553  // Defragment.
13554 
-
13555  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
-
13556  {
-
13557  if(m_hAllocator->m_UseMutex)
-
13558  m_Mutex.UnlockWrite();
-
13559 
-
13560  if(pCtx->res >= VK_SUCCESS && !pCtx->defragmentationMoves.empty())
-
13561  pCtx->res = VK_NOT_READY;
-
13562 
-
13563  return;
-
13564  }
-
13565 
-
13566  if(pCtx->res >= VK_SUCCESS)
-
13567  {
+
13555  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
+
13556  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
+
13557  pCtx->res = pCtx->GetAlgorithm()->Defragment(pCtx->defragmentationMoves, maxBytesToMove, maxAllocationsToMove, flags);
+
13558 
+
13559  // Accumulate statistics.
+
13560  if(pStats != VMA_NULL)
+
13561  {
+
13562  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
+
13563  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
+
13564  pStats->bytesMoved += bytesMoved;
+
13565  pStats->allocationsMoved += allocationsMoved;
+
13566  VMA_ASSERT(bytesMoved <= maxBytesToMove);
+
13567  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
13568  if(defragmentOnGpu)
13569  {
-
13570  ApplyDefragmentationMovesGpu(pCtx, pCtx->defragmentationMoves, commandBuffer);
-
13571  }
-
13572  else
-
13573  {
-
13574  ApplyDefragmentationMovesCpu(pCtx, pCtx->defragmentationMoves);
-
13575  }
-
13576  }
-
13577  }
-
13578 }
+
13570  maxGpuBytesToMove -= bytesMoved;
+
13571  maxGpuAllocationsToMove -= allocationsMoved;
+
13572  }
+
13573  else
+
13574  {
+
13575  maxCpuBytesToMove -= bytesMoved;
+
13576  maxCpuAllocationsToMove -= allocationsMoved;
+
13577  }
+
13578  }
13579 
-
13580 void VmaBlockVector::DefragmentationEnd(
-
13581  class VmaBlockVectorDefragmentationContext* pCtx,
-
13582  uint32_t flags,
-
13583  VmaDefragmentationStats* pStats)
-
13584 {
-
13585  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL && m_hAllocator->m_UseMutex)
-
13586  {
-
13587  VMA_ASSERT(pCtx->mutexLocked == false);
-
13588 
-
13589  // Incremental defragmentation doesn't hold the lock, so when we enter here we don't actually have any
-
13590  // lock protecting us. Since we mutate state here, we have to take the lock now.
-
13591  m_Mutex.LockWrite();
-
13592  pCtx->mutexLocked = true;
-
13593  }
-
13594 
-
13595  // If the mutex isn't locked we didn't do any work and there is nothing to delete.
-
13596  if(pCtx->mutexLocked || !m_hAllocator->m_UseMutex)
-
13597  {
-
13598  // Destroy buffers.
-
13599  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--;)
-
13600  {
-
13601  VmaBlockDefragmentationContext &blockCtx = pCtx->blockContexts[blockIndex];
-
13602  if(blockCtx.hBuffer)
-
13603  {
-
13604  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
-
13605  }
-
13606  }
-
13607 
-
13608  if(pCtx->res >= VK_SUCCESS)
-
13609  {
-
13610  FreeEmptyBlocks(pStats);
-
13611  }
-
13612  }
+
13580  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
+
13581  {
+
13582  if(m_hAllocator->m_UseMutex)
+
13583  m_Mutex.UnlockWrite();
+
13584 
+
13585  if(pCtx->res >= VK_SUCCESS && !pCtx->defragmentationMoves.empty())
+
13586  pCtx->res = VK_NOT_READY;
+
13587 
+
13588  return;
+
13589  }
+
13590 
+
13591  if(pCtx->res >= VK_SUCCESS)
+
13592  {
+
13593  if(defragmentOnGpu)
+
13594  {
+
13595  ApplyDefragmentationMovesGpu(pCtx, pCtx->defragmentationMoves, commandBuffer);
+
13596  }
+
13597  else
+
13598  {
+
13599  ApplyDefragmentationMovesCpu(pCtx, pCtx->defragmentationMoves);
+
13600  }
+
13601  }
+
13602  }
+
13603 }
+
13604 
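Defragment() above is driven by the public vmaDefragmentationBegin/vmaDefragmentationEnd pair. A condensed CPU-only sketch, assuming `allocator` plus caller-side `allocations` and `allocCount`:

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocCount;
    defragInfo.pAllocations = allocations;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
    // commandBuffer stays VK_NULL_HANDLE, so the GPU path is skipped entirely.

    VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
    VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
    if(res >= VK_SUCCESS)
    {
        vmaDefragmentationEnd(allocator, defragCtx);
    }
    // Buffers/images bound to moved allocations must afterwards be recreated and rebound.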
+
13605 void VmaBlockVector::DefragmentationEnd(
+
13606  class VmaBlockVectorDefragmentationContext* pCtx,
+
13607  uint32_t flags,
+
13608  VmaDefragmentationStats* pStats)
+
13609 {
+
13610  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL && m_hAllocator->m_UseMutex)
+
13611  {
+
13612  VMA_ASSERT(pCtx->mutexLocked == false);
13613 
-
13614  if(pCtx->mutexLocked)
-
13615  {
-
13616  VMA_ASSERT(m_hAllocator->m_UseMutex);
-
13617  m_Mutex.UnlockWrite();
+
13614  // Incremental defragmentation doesn't hold the lock, so when we enter here we don't actually have any
+
13615  // lock protecting us. Since we mutate state here, we have to take the lock now.
+
13616  m_Mutex.LockWrite();
+
13617  pCtx->mutexLocked = true;
13618  }
-
13619 }
-
13620 
-
13621 uint32_t VmaBlockVector::ProcessDefragmentations(
-
13622  class VmaBlockVectorDefragmentationContext *pCtx,
-
13623  VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves)
-
13624 {
-
13625  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
-
13626 
-
13627  const uint32_t moveCount = std::min(uint32_t(pCtx->defragmentationMoves.size()) - pCtx->defragmentationMovesProcessed, maxMoves);
-
13628 
-
13629  for(uint32_t i = 0; i < moveCount; ++ i)
-
13630  {
-
13631  VmaDefragmentationMove& move = pCtx->defragmentationMoves[pCtx->defragmentationMovesProcessed + i];
+
13619 
+
13620  // If the mutex isn't locked we didn't do any work and there is nothing to delete.
+
13621  if(pCtx->mutexLocked || !m_hAllocator->m_UseMutex)
+
13622  {
+
13623  // Destroy buffers.
+
13624  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--;)
+
13625  {
+
13626  VmaBlockDefragmentationContext &blockCtx = pCtx->blockContexts[blockIndex];
+
13627  if(blockCtx.hBuffer)
+
13628  {
+
13629  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
+
13630  }
+
13631  }
13632 
-
13633  pMove->allocation = move.hAllocation;
-
13634  pMove->memory = move.pDstBlock->GetDeviceMemory();
-
13635  pMove->offset = move.dstOffset;
-
13636 
-
13637  ++ pMove;
-
13638  }
-
13639 
-
13640  pCtx->defragmentationMovesProcessed += moveCount;
-
13641 
-
13642  return moveCount;
-
13643 }
-
13644 
-
13645 void VmaBlockVector::CommitDefragmentations(
-
13646  class VmaBlockVectorDefragmentationContext *pCtx,
-
13647  VmaDefragmentationStats* pStats)
-
13648 {
-
13649  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
-
13650 
-
13651  for(uint32_t i = pCtx->defragmentationMovesCommitted; i < pCtx->defragmentationMovesProcessed; ++ i)
-
13652  {
-
13653  const VmaDefragmentationMove &move = pCtx->defragmentationMoves[i];
-
13654 
-
13655  move.pSrcBlock->m_pMetadata->FreeAtOffset(move.srcOffset);
-
13656  move.hAllocation->ChangeBlockAllocation(m_hAllocator, move.pDstBlock, move.dstOffset);
-
13657  }
-
13658 
-
13659  pCtx->defragmentationMovesCommitted = pCtx->defragmentationMovesProcessed;
-
13660  FreeEmptyBlocks(pStats);
-
13661 }
-
13662 
-
13663 size_t VmaBlockVector::CalcAllocationCount() const
-
13664 {
-
13665  size_t result = 0;
-
13666  for(size_t i = 0; i < m_Blocks.size(); ++i)
-
13667  {
-
13668  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
-
13669  }
-
13670  return result;
-
13671 }
-
13672 
-
13673 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
-
13674 {
-
13675  if(m_BufferImageGranularity == 1)
-
13676  {
-
13677  return false;
-
13678  }
-
13679  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
-
13680  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
-
13681  {
-
13682  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
-
13683  VMA_ASSERT(m_Algorithm == 0);
-
13684  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
-
13685  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
-
13686  {
-
13687  return true;
-
13688  }
-
13689  }
-
13690  return false;
-
13691 }
-
13692 
-
13693 void VmaBlockVector::MakePoolAllocationsLost(
-
13694  uint32_t currentFrameIndex,
-
13695  size_t* pLostAllocationCount)
-
13696 {
-
13697  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
-
13698  size_t lostAllocationCount = 0;
-
13699  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
-
13700  {
-
13701  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
-
13702  VMA_ASSERT(pBlock);
-
13703  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
-
13704  }
-
13705  if(pLostAllocationCount != VMA_NULL)
+
13633  if(pCtx->res >= VK_SUCCESS)
+
13634  {
+
13635  FreeEmptyBlocks(pStats);
+
13636  }
+
13637  }
+
13638 
+
13639  if(pCtx->mutexLocked)
+
13640  {
+
13641  VMA_ASSERT(m_hAllocator->m_UseMutex);
+
13642  m_Mutex.UnlockWrite();
+
13643  }
+
13644 }
+
13645 
+
13646 uint32_t VmaBlockVector::ProcessDefragmentations(
+
13647  class VmaBlockVectorDefragmentationContext *pCtx,
+
13648  VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves)
+
13649 {
+
13650  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
+
13651 
+
13652  const uint32_t moveCount = std::min(uint32_t(pCtx->defragmentationMoves.size()) - pCtx->defragmentationMovesProcessed, maxMoves);
+
13653 
+
13654  for(uint32_t i = 0; i < moveCount; ++ i)
+
13655  {
+
13656  VmaDefragmentationMove& move = pCtx->defragmentationMoves[pCtx->defragmentationMovesProcessed + i];
+
13657 
+
13658  pMove->allocation = move.hAllocation;
+
13659  pMove->memory = move.pDstBlock->GetDeviceMemory();
+
13660  pMove->offset = move.dstOffset;
+
13661 
+
13662  ++ pMove;
+
13663  }
+
13664 
+
13665  pCtx->defragmentationMovesProcessed += moveCount;
+
13666 
+
13667  return moveCount;
+
13668 }
+
13669 
+
13670 void VmaBlockVector::CommitDefragmentations(
+
13671  class VmaBlockVectorDefragmentationContext *pCtx,
+
13672  VmaDefragmentationStats* pStats)
+
13673 {
+
13674  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
+
13675 
+
13676  for(uint32_t i = pCtx->defragmentationMovesCommitted; i < pCtx->defragmentationMovesProcessed; ++ i)
+
13677  {
+
13678  const VmaDefragmentationMove &move = pCtx->defragmentationMoves[i];
+
13679 
+
13680  move.pSrcBlock->m_pMetadata->FreeAtOffset(move.srcOffset);
+
13681  move.hAllocation->ChangeBlockAllocation(m_hAllocator, move.pDstBlock, move.dstOffset);
+
13682  }
+
13683 
+
13684  pCtx->defragmentationMovesCommitted = pCtx->defragmentationMovesProcessed;
+
13685  FreeEmptyBlocks(pStats);
+
13686 }
+
13687 
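ProcessDefragmentations() and CommitDefragmentations() back the incremental pass API. A sketch of the pass loop, assuming `allocator` and a `defragCtx` begun with VMA_DEFRAGMENTATION_FLAG_INCREMENTAL (resource recreation elided; exact return-code handling as documented for these functions):

    for(;;)
    {
        VmaDefragmentationPassMoveInfo moves[64];
        VmaDefragmentationPassInfo passInfo = {};
        passInfo.moveCount = 64;   // capacity of `moves`
        passInfo.pMoves = moves;
        if(vmaBeginDefragmentationPass(allocator, defragCtx, &passInfo) != VK_NOT_READY)
        {
            break;  // nothing (more) to move
        }
        // For each of the passInfo.moveCount entries filled in by
        // ProcessDefragmentations(): recreate the resource, bind it to
        // moves[i].memory at moves[i].offset, and copy the contents over.
        vmaEndDefragmentationPass(allocator, defragCtx);  // commits this pass's moves
    }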
+
13688 size_t VmaBlockVector::CalcAllocationCount() const
+
13689 {
+
13690  size_t result = 0;
+
13691  for(size_t i = 0; i < m_Blocks.size(); ++i)
+
13692  {
+
13693  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
+
13694  }
+
13695  return result;
+
13696 }
+
13697 
+
13698 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
+
13699 {
+
13700  if(m_BufferImageGranularity == 1)
+
13701  {
+
13702  return false;
+
13703  }
+
13704  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
+
13705  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
13706  {
-
13707  *pLostAllocationCount = lostAllocationCount;
-
13708  }
-
13709 }
-
13710 
-
13711 VkResult VmaBlockVector::CheckCorruption()
-
13712 {
-
13713  if(!IsCorruptionDetectionEnabled())
-
13714  {
-
13715  return VK_ERROR_FEATURE_NOT_PRESENT;
-
13716  }
+
13707  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
+
13708  VMA_ASSERT(m_Algorithm == 0);
+
13709  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
+
13710  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
+
13711  {
+
13712  return true;
+
13713  }
+
13714  }
+
13715  return false;
+
13716 }
13717 
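The conflict tested by IsBufferImageGranularityConflictPossible() stems from the device's bufferImageGranularity limit: when it is greater than 1, linear (buffer) and non-linear (optimal-tiling image) suballocations in the same block need extra separation. Querying the limit, assuming a valid `physicalDevice`:

    VkPhysicalDeviceProperties props;
    vkGetPhysicalDeviceProperties(physicalDevice, &props);
    VkDeviceSize granularity = props.limits.bufferImageGranularity;  // commonly 1 or 1024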
-
13718  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
-
13719  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
-
13720  {
-
13721  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
-
13722  VMA_ASSERT(pBlock);
-
13723  VkResult res = pBlock->CheckCorruption(m_hAllocator);
-
13724  if(res != VK_SUCCESS)
-
13725  {
-
13726  return res;
-
13727  }
-
13728  }
-
13729  return VK_SUCCESS;
-
13730 }
-
13731 
-
13732 void VmaBlockVector::AddStats(VmaStats* pStats)
-
13733 {
-
13734  const uint32_t memTypeIndex = m_MemoryTypeIndex;
-
13735  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
-
13736 
-
13737  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
-
13738 
-
13739  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
-
13740  {
-
13741  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
-
13742  VMA_ASSERT(pBlock);
-
13743  VMA_HEAVY_ASSERT(pBlock->Validate());
-
13744  VmaStatInfo allocationStatInfo;
-
13745  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
-
13746  VmaAddStatInfo(pStats->total, allocationStatInfo);
-
13747  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
-
13748  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
-
13749  }
-
13750 }
-
13751 
-
13752 ////////////////////////////////////////////////////////////////////////////////
-
13753 // VmaDefragmentationAlgorithm_Generic members definition
-
13754 
-
13755 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
-
13756  VmaAllocator hAllocator,
-
13757  VmaBlockVector* pBlockVector,
-
13758  uint32_t currentFrameIndex,
-
13759  bool overlappingMoveSupported) :
-
13760  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
-
13761  m_AllocationCount(0),
-
13762  m_AllAllocations(false),
-
13763  m_BytesMoved(0),
-
13764  m_AllocationsMoved(0),
-
13765  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
-
13766 {
-
13767  // Create block info for each block.
-
13768  const size_t blockCount = m_pBlockVector->m_Blocks.size();
-
13769  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
-
13770  {
-
13771  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
-
13772  pBlockInfo->m_OriginalBlockIndex = blockIndex;
-
13773  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
-
13774  m_Blocks.push_back(pBlockInfo);
-
13775  }
+
13718 void VmaBlockVector::MakePoolAllocationsLost(
+
13719  uint32_t currentFrameIndex,
+
13720  size_t* pLostAllocationCount)
+
13721 {
+
13722  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
+
13723  size_t lostAllocationCount = 0;
+
13724  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
+
13725  {
+
13726  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
+
13727  VMA_ASSERT(pBlock);
+
13728  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
+
13729  }
+
13730  if(pLostAllocationCount != VMA_NULL)
+
13731  {
+
13732  *pLostAllocationCount = lostAllocationCount;
+
13733  }
+
13734 }
+
13735 
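MakePoolAllocationsLost() is surfaced as the public vmaMakePoolAllocationsLost, normally called once per frame after advancing the frame index. A sketch, assuming `allocator`, `pool`, and a running `frameIndex`:

    vmaSetCurrentFrameIndex(allocator, frameIndex);
    size_t lostCount = 0;
    vmaMakePoolAllocationsLost(allocator, pool, &lostCount);
    // lostCount reports how many allocations were marked lost by this call.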
+
13736 VkResult VmaBlockVector::CheckCorruption()
+
13737 {
+
13738  if(!IsCorruptionDetectionEnabled())
+
13739  {
+
13740  return VK_ERROR_FEATURE_NOT_PRESENT;
+
13741  }
+
13742 
+
13743  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
+
13744  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
+
13745  {
+
13746  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
+
13747  VMA_ASSERT(pBlock);
+
13748  VkResult res = pBlock->CheckCorruption(m_hAllocator);
+
13749  if(res != VK_SUCCESS)
+
13750  {
+
13751  return res;
+
13752  }
+
13753  }
+
13754  return VK_SUCCESS;
+
13755 }
+
13756 
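CheckCorruption() only does real work when margins and corruption detection were compiled in; otherwise it returns VK_ERROR_FEATURE_NOT_PRESENT as above. A sketch of the opt-in and the public entry point:

    // Before including the library implementation:
    //   #define VMA_DEBUG_MARGIN 16
    //   #define VMA_DEBUG_DETECT_CORRUPTION 1
    VkResult res = vmaCheckCorruption(allocator, UINT32_MAX /* all memory types */);
    // VK_SUCCESS: magic values validated; VK_ERROR_FEATURE_NOT_PRESENT:
    // detection is not enabled for any of the requested memory types.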
+
13757 void VmaBlockVector::AddStats(VmaStats* pStats)
+
13758 {
+
13759  const uint32_t memTypeIndex = m_MemoryTypeIndex;
+
13760  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
+
13761 
+
13762  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
+
13763 
+
13764  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
+
13765  {
+
13766  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
+
13767  VMA_ASSERT(pBlock);
+
13768  VMA_HEAVY_ASSERT(pBlock->Validate());
+
13769  VmaStatInfo allocationStatInfo;
+
13770  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
+
13771  VmaAddStatInfo(pStats->total, allocationStatInfo);
+
13772  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
+
13773  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
+
13774  }
+
13775 }
13776 
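AddStats() aggregates per-block VmaStatInfo into the totals returned by the public vmaCalculateStats; a sketch, assuming a valid `allocator`:

    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    // stats.total, stats.memoryType[i], and stats.memoryHeap[h] now hold the
    // VmaStatInfo values accumulated by AddStats() above.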
-
13777  // Sort them by m_pBlock pointer value.
-
13778  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
-
13779 }
-
13780 
-
13781 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
-
13782 {
-
13783  for(size_t i = m_Blocks.size(); i--; )
-
13784  {
-
13785  vma_delete(m_hAllocator, m_Blocks[i]);
-
13786  }
-
13787 }
-
13788 
-
13789 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
-
13790 {
-
13791  // Now as we are inside VmaBlockVector::m_Mutex, we can make a final check that this allocation was not lost.
-
13792  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
-
13793  {
-
13794  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
-
13795  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
-
13796  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
-
13797  {
-
13798  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
-
13799  (*it)->m_Allocations.push_back(allocInfo);
-
13800  }
-
13801  else
-
13802  {
-
13803  VMA_ASSERT(0);
-
13804  }
+
13777 ////////////////////////////////////////////////////////////////////////////////
+
13778 // VmaDefragmentationAlgorithm_Generic members definition
+
13779 
+
13780 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
+
13781  VmaAllocator hAllocator,
+
13782  VmaBlockVector* pBlockVector,
+
13783  uint32_t currentFrameIndex,
+
13784  bool overlappingMoveSupported) :
+
13785  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
+
13786  m_AllocationCount(0),
+
13787  m_AllAllocations(false),
+
13788  m_BytesMoved(0),
+
13789  m_AllocationsMoved(0),
+
13790  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
+
13791 {
+
13792  // Create block info for each block.
+
13793  const size_t blockCount = m_pBlockVector->m_Blocks.size();
+
13794  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
+
13795  {
+
13796  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
+
13797  pBlockInfo->m_OriginalBlockIndex = blockIndex;
+
13798  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
+
13799  m_Blocks.push_back(pBlockInfo);
+
13800  }
+
13801 
+
13802  // Sort them by m_pBlock pointer value.
+
13803  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
+
13804 }
13805 
-
13806  ++m_AllocationCount;
-
13807  }
-
13808 }
-
13809 
-
13810 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
-
13811  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
-
13812  VkDeviceSize maxBytesToMove,
-
13813  uint32_t maxAllocationsToMove,
-
13814  bool freeOldAllocations)
+
13806 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
+
13807 {
+
13808  for(size_t i = m_Blocks.size(); i--; )
+
13809  {
+
13810  vma_delete(m_hAllocator, m_Blocks[i]);
+
13811  }
+
13812 }
+
13813 
+
13814 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
13815 {
-
13816  if(m_Blocks.empty())
-
13817  {
-
13818  return VK_SUCCESS;
-
13819  }
-
13820 
-
13821  // This is a choice based on research.
-
13822  // Option 1:
-
13823  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
-
13824  // Option 2:
-
13825  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
-
13826  // Option 3:
-
13827  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
-
13828 
-
13829  size_t srcBlockMinIndex = 0;
-
13830  // When FAST_ALGORITHM is used, move allocations only from the last of the blocks that contain non-movable allocations.
-
13831  /*
-
13832  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
-
13833  {
-
13834  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
-
13835  if(blocksWithNonMovableCount > 0)
-
13836  {
-
13837  srcBlockMinIndex = blocksWithNonMovableCount - 1;
-
13838  }
-
13839  }
-
13840  */
-
13841 
-
13842  size_t srcBlockIndex = m_Blocks.size() - 1;
-
13843  size_t srcAllocIndex = SIZE_MAX;
-
13844  for(;;)
-
13845  {
-
13846  // 1. Find next allocation to move.
-
13847  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
-
13848  // 1.2. Then start from last to first m_Allocations.
-
13849  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
-
13850  {
-
13851  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
-
13852  {
-
13853  // Finished: no more allocations to process.
-
13854  if(srcBlockIndex == srcBlockMinIndex)
-
13855  {
-
13856  return VK_SUCCESS;
-
13857  }
-
13858  else
-
13859  {
-
13860  --srcBlockIndex;
-
13861  srcAllocIndex = SIZE_MAX;
-
13862  }
-
13863  }
-
13864  else
-
13865  {
-
13866  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
-
13867  }
-
13868  }
-
13869 
-
13870  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
-
13871  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
-
13872 
-
13873  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
-
13874  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
-
13875  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
-
13876  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
-
13877 
-
13878  // 2. Try to find new place for this allocation in preceding or current block.
-
13879  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
-
13880  {
-
13881  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
-
13882  VmaAllocationRequest dstAllocRequest;
-
13883  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
-
13884  m_CurrentFrameIndex,
-
13885  m_pBlockVector->GetFrameInUseCount(),
-
13886  m_pBlockVector->GetBufferImageGranularity(),
-
13887  size,
-
13888  alignment,
-
13889  false, // upperAddress
-
13890  suballocType,
-
13891  false, // canMakeOtherLost
-
13892  strategy,
-
13893  &dstAllocRequest) &&
-
13894  MoveMakesSense(
-
13895  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
-
13896  {
-
13897  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
-
13898 
-
13899  // Reached limit on number of allocations or bytes to move.
-
13900  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
-
13901  (m_BytesMoved + size > maxBytesToMove))
-
13902  {
-
13903  return VK_SUCCESS;
-
13904  }
-
13905 
-
13906  VmaDefragmentationMove move = {};
-
13907  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
-
13908  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
-
13909  move.srcOffset = srcOffset;
-
13910  move.dstOffset = dstAllocRequest.offset;
-
13911  move.size = size;
-
13912  move.hAllocation = allocInfo.m_hAllocation;
-
13913  move.pSrcBlock = pSrcBlockInfo->m_pBlock;
-
13914  move.pDstBlock = pDstBlockInfo->m_pBlock;
-
13915 
-
13916  moves.push_back(move);
-
13917 
-
13918  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
-
13919  dstAllocRequest,
-
13920  suballocType,
-
13921  size,
-
13922  allocInfo.m_hAllocation);
+
13816  // Now as we are inside VmaBlockVector::m_Mutex, we can make a final check that this allocation was not lost.
+
13817  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
+
13818  {
+
13819  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
+
13820  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
+
13821  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
+
13822  {
+
13823  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
+
13824  (*it)->m_Allocations.push_back(allocInfo);
+
13825  }
+
13826  else
+
13827  {
+
13828  VMA_ASSERT(0);
+
13829  }
+
13830 
+
13831  ++m_AllocationCount;
+
13832  }
+
13833 }
+
13834 
+
13835 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
+
13836  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
+
13837  VkDeviceSize maxBytesToMove,
+
13838  uint32_t maxAllocationsToMove,
+
13839  bool freeOldAllocations)
+
13840 {
+
13841  if(m_Blocks.empty())
+
13842  {
+
13843  return VK_SUCCESS;
+
13844  }
+
13845 
+
13846  // This is a choice based on research.
+
13847  // Option 1:
+
13848  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
+
13849  // Option 2:
+
13850  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
+
13851  // Option 3:
+
13852  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
+
13853 
+
13854  size_t srcBlockMinIndex = 0;
+
13855  // When FAST_ALGORITHM is used, move allocations only from the last of the blocks that contain non-movable allocations.
+
13856  /*
+
13857  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
+
13858  {
+
13859  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
+
13860  if(blocksWithNonMovableCount > 0)
+
13861  {
+
13862  srcBlockMinIndex = blocksWithNonMovableCount - 1;
+
13863  }
+
13864  }
+
13865  */
+
13866 
+
13867  size_t srcBlockIndex = m_Blocks.size() - 1;
+
13868  size_t srcAllocIndex = SIZE_MAX;
+
13869  for(;;)
+
13870  {
+
13871  // 1. Find next allocation to move.
+
13872  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
+
13873  // 1.2. Then start from last to first m_Allocations.
+
13874  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
+
13875  {
+
13876  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
+
13877  {
+
13878  // Finished: no more allocations to process.
+
13879  if(srcBlockIndex == srcBlockMinIndex)
+
13880  {
+
13881  return VK_SUCCESS;
+
13882  }
+
13883  else
+
13884  {
+
13885  --srcBlockIndex;
+
13886  srcAllocIndex = SIZE_MAX;
+
13887  }
+
13888  }
+
13889  else
+
13890  {
+
13891  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
+
13892  }
+
13893  }
+
13894 
+
13895  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
+
13896  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
+
13897 
+
13898  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
+
13899  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
+
13900  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
+
13901  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
+
13902 
+
13903  // 2. Try to find new place for this allocation in preceding or current block.
+
13904  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
+
13905  {
+
13906  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
+
13907  VmaAllocationRequest dstAllocRequest;
+
13908  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
+
13909  m_CurrentFrameIndex,
+
13910  m_pBlockVector->GetFrameInUseCount(),
+
13911  m_pBlockVector->GetBufferImageGranularity(),
+
13912  size,
+
13913  alignment,
+
13914  false, // upperAddress
+
13915  suballocType,
+
13916  false, // canMakeOtherLost
+
13917  strategy,
+
13918  &dstAllocRequest) &&
+
13919  MoveMakesSense(
+
13920  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
+
13921  {
+
13922  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
13923 
-
13924  if(freeOldAllocations)
-
13925  {
-
13926  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
-
13927  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
-
13928  }
-
13929 
-
13930  if(allocInfo.m_pChanged != VMA_NULL)
-
13931  {
-
13932  *allocInfo.m_pChanged = VK_TRUE;
-
13933  }
-
13934 
-
13935  ++m_AllocationsMoved;
-
13936  m_BytesMoved += size;
-
13937 
-
13938  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
-
13939 
-
13940  break;
-
13941  }
-
13942  }
-
13943 
-
13944  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for the next round.
-
13945 
-
13946  if(srcAllocIndex > 0)
-
13947  {
-
13948  --srcAllocIndex;
-
13949  }
-
13950  else
-
13951  {
-
13952  if(srcBlockIndex > 0)
-
13953  {
-
13954  --srcBlockIndex;
-
13955  srcAllocIndex = SIZE_MAX;
-
13956  }
-
13957  else
-
13958  {
-
13959  return VK_SUCCESS;
-
13960  }
-
13961  }
-
13962  }
-
13963 }
+
13924  // Reached limit on number of allocations or bytes to move.
+
13925  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
+
13926  (m_BytesMoved + size > maxBytesToMove))
+
13927  {
+
13928  return VK_SUCCESS;
+
13929  }
+
13930 
+
13931  VmaDefragmentationMove move = {};
+
13932  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
+
13933  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
+
13934  move.srcOffset = srcOffset;
+
13935  move.dstOffset = dstAllocRequest.offset;
+
13936  move.size = size;
+
13937  move.hAllocation = allocInfo.m_hAllocation;
+
13938  move.pSrcBlock = pSrcBlockInfo->m_pBlock;
+
13939  move.pDstBlock = pDstBlockInfo->m_pBlock;
+
13940 
+
13941  moves.push_back(move);
+
13942 
+
13943  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
+
13944  dstAllocRequest,
+
13945  suballocType,
+
13946  size,
+
13947  allocInfo.m_hAllocation);
+
13948 
+
13949  if(freeOldAllocations)
+
13950  {
+
13951  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
+
13952  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
+
13953  }
+
13954 
+
13955  if(allocInfo.m_pChanged != VMA_NULL)
+
13956  {
+
13957  *allocInfo.m_pChanged = VK_TRUE;
+
13958  }
+
13959 
+
13960  ++m_AllocationsMoved;
+
13961  m_BytesMoved += size;
+
13962 
+
13963  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
13964 
-
13965 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
-
13966 {
-
13967  size_t result = 0;
-
13968  for(size_t i = 0; i < m_Blocks.size(); ++i)
-
13969  {
-
13970  if(m_Blocks[i]->m_HasNonMovableAllocations)
-
13971  {
-
13972  ++result;
-
13973  }
-
13974  }
-
13975  return result;
-
13976 }
-
13977 
-
13978 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
-
13979  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
-
13980  VkDeviceSize maxBytesToMove,
-
13981  uint32_t maxAllocationsToMove,
-
13982  VmaDefragmentationFlags flags)
-
13983 {
-
13984  if(!m_AllAllocations && m_AllocationCount == 0)
-
13985  {
-
13986  return VK_SUCCESS;
+
13965  break;
+
13966  }
+
13967  }
+
13968 
+
13969  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for the next round.
+
13970 
+
13971  if(srcAllocIndex > 0)
+
13972  {
+
13973  --srcAllocIndex;
+
13974  }
+
13975  else
+
13976  {
+
13977  if(srcBlockIndex > 0)
+
13978  {
+
13979  --srcBlockIndex;
+
13980  srcAllocIndex = SIZE_MAX;
+
13981  }
+
13982  else
+
13983  {
+
13984  return VK_SUCCESS;
+
13985  }
+
13986  }
13987  }
-
13988 
-
13989  const size_t blockCount = m_Blocks.size();
-
13990  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
-
13991  {
-
13992  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
-
13993 
-
13994  if(m_AllAllocations)
-
13995  {
-
13996  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
-
13997  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
-
13998  it != pMetadata->m_Suballocations.end();
-
13999  ++it)
-
14000  {
-
14001  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
-
14002  {
-
14003  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
-
14004  pBlockInfo->m_Allocations.push_back(allocInfo);
-
14005  }
-
14006  }
-
14007  }
-
14008 
-
14009  pBlockInfo->CalcHasNonMovableAllocations();
-
14010 
-
14011  // This is a choice based on research.
-
14012  // Option 1:
-
14013  pBlockInfo->SortAllocationsByOffsetDescending();
-
14014  // Option 2:
-
14015  //pBlockInfo->SortAllocationsBySizeDescending();
-
14016  }
-
14017 
-
14018  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
-
14019  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
-
14020 
-
14021  // This is a choice based on research.
-
14022  const uint32_t roundCount = 2;
-
14023 
-
14024  // Execute defragmentation rounds (the main part).
-
14025  VkResult result = VK_SUCCESS;
-
14026  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
-
14027  {
-
14028  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove, !(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL));
-
14029  }
-
14030 
-
14031  return result;
-
14032 }
+
13988 }
+
13989 
+
13990 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
+
13991 {
+
13992  size_t result = 0;
+
13993  for(size_t i = 0; i < m_Blocks.size(); ++i)
+
13994  {
+
13995  if(m_Blocks[i]->m_HasNonMovableAllocations)
+
13996  {
+
13997  ++result;
+
13998  }
+
13999  }
+
14000  return result;
+
14001 }
+
14002 
+
14003 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
+
14004  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
+
14005  VkDeviceSize maxBytesToMove,
+
14006  uint32_t maxAllocationsToMove,
+
14007  VmaDefragmentationFlags flags)
+
14008 {
+
14009  if(!m_AllAllocations && m_AllocationCount == 0)
+
14010  {
+
14011  return VK_SUCCESS;
+
14012  }
+
14013 
+
14014  const size_t blockCount = m_Blocks.size();
+
14015  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
+
14016  {
+
14017  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
+
14018 
+
14019  if(m_AllAllocations)
+
14020  {
+
14021  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
+
14022  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
+
14023  it != pMetadata->m_Suballocations.end();
+
14024  ++it)
+
14025  {
+
14026  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
+
14027  {
+
14028  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
+
14029  pBlockInfo->m_Allocations.push_back(allocInfo);
+
14030  }
+
14031  }
+
14032  }
14033 
-
14034 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
-
14035  size_t dstBlockIndex, VkDeviceSize dstOffset,
-
14036  size_t srcBlockIndex, VkDeviceSize srcOffset)
-
14037 {
-
14038  if(dstBlockIndex < srcBlockIndex)
-
14039  {
-
14040  return true;
+
14034  pBlockInfo->CalcHasNonMovableAllocations();
+
14035 
+
14036  // This is a choice based on research.
+
14037  // Option 1:
+
14038  pBlockInfo->SortAllocationsByOffsetDescending();
+
14039  // Option 2:
+
14040  //pBlockInfo->SortAllocationsBySizeDescending();
14041  }
-
14042  if(dstBlockIndex > srcBlockIndex)
-
14043  {
-
14044  return false;
-
14045  }
-
14046  if(dstOffset < srcOffset)
-
14047  {
-
14048  return true;
-
14049  }
-
14050  return false;
-
14051 }
-
14052 
-
14053 ////////////////////////////////////////////////////////////////////////////////
-
14054 // VmaDefragmentationAlgorithm_Fast
+
14042 
+
14043  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
+
14044  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
+
14045 
+
14046  // This is a choice based on research.
+
14047  const uint32_t roundCount = 2;
+
14048 
+
14049  // Execute defragmentation rounds (the main part).
+
14050  VkResult result = VK_SUCCESS;
+
14051  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
+
14052  {
+
14053  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove, !(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL));
+
14054  }
14055 
-
14056 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
-
14057  VmaAllocator hAllocator,
-
14058  VmaBlockVector* pBlockVector,
-
14059  uint32_t currentFrameIndex,
-
14060  bool overlappingMoveSupported) :
-
14061  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
-
14062  m_OverlappingMoveSupported(overlappingMoveSupported),
-
14063  m_AllocationCount(0),
-
14064  m_AllAllocations(false),
-
14065  m_BytesMoved(0),
-
14066  m_AllocationsMoved(0),
-
14067  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
-
14068 {
-
14069  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
-
14070 
-
14071 }
-
14072 
-
14073 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
-
14074 {
-
14075 }
-
14076 
-
14077 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
-
14078  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
-
14079  VkDeviceSize maxBytesToMove,
-
14080  uint32_t maxAllocationsToMove,
-
14081  VmaDefragmentationFlags flags)
-
14082 {
-
14083  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
-
14084 
-
14085  const size_t blockCount = m_pBlockVector->GetBlockCount();
-
14086  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
-
14087  {
-
14088  return VK_SUCCESS;
-
14089  }
-
14090 
-
14091  PreprocessMetadata();
-
14092 
-
14093  // Sort blocks in order from most "destination" to most "source".
-
14094 
-
14095  m_BlockInfos.resize(blockCount);
-
14096  for(size_t i = 0; i < blockCount; ++i)
-
14097  {
-
14098  m_BlockInfos[i].origBlockIndex = i;
-
14099  }
-
14100 
-
14101  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
-
14102  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
-
14103  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
-
14104  });
-
14105 
-
14106  // THE MAIN ALGORITHM
-
14107 
-
14108  FreeSpaceDatabase freeSpaceDb;
+
14056  return result;
+
14057 }
+
14058 
+
14059 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
+
14060  size_t dstBlockIndex, VkDeviceSize dstOffset,
+
14061  size_t srcBlockIndex, VkDeviceSize srcOffset)
+
14062 {
+
14063  if(dstBlockIndex < srcBlockIndex)
+
14064  {
+
14065  return true;
+
14066  }
+
14067  if(dstBlockIndex > srcBlockIndex)
+
14068  {
+
14069  return false;
+
14070  }
+
14071  if(dstOffset < srcOffset)
+
14072  {
+
14073  return true;
+
14074  }
+
14075  return false;
+
14076 }
+
14077 
+
14078 ////////////////////////////////////////////////////////////////////////////////
+
14079 // VmaDefragmentationAlgorithm_Fast
+
14080 
+
14081 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
+
14082  VmaAllocator hAllocator,
+
14083  VmaBlockVector* pBlockVector,
+
14084  uint32_t currentFrameIndex,
+
14085  bool overlappingMoveSupported) :
+
14086  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
+
14087  m_OverlappingMoveSupported(overlappingMoveSupported),
+
14088  m_AllocationCount(0),
+
14089  m_AllAllocations(false),
+
14090  m_BytesMoved(0),
+
14091  m_AllocationsMoved(0),
+
14092  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
+
14093 {
+
14094  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
+
14095 
+
14096 }
+
14097 
+
14098 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
+
14099 {
+
14100 }
+
14101 
+
14102 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
+
14103  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
+
14104  VkDeviceSize maxBytesToMove,
+
14105  uint32_t maxAllocationsToMove,
+
14106  VmaDefragmentationFlags flags)
+
14107 {
+
14108  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
14109 
-
14110  size_t dstBlockInfoIndex = 0;
-
14111  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
-
14112  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
-
14113  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
-
14114  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
-
14115  VkDeviceSize dstOffset = 0;
-
14116 
-
14117  bool end = false;
-
14118  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
-
14119  {
-
14120  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
-
14121  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
-
14122  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
-
14123  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
-
14124  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
-
14125  {
-
14126  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
-
14127  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
-
14128  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
-
14129  if(m_AllocationsMoved == maxAllocationsToMove ||
-
14130  m_BytesMoved + srcAllocSize > maxBytesToMove)
-
14131  {
-
14132  end = true;
-
14133  break;
-
14134  }
-
14135  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
-
14136 
-
14137  VmaDefragmentationMove move = {};
-
14138  // Try to place it in one of free spaces from the database.
-
14139  size_t freeSpaceInfoIndex;
-
14140  VkDeviceSize dstAllocOffset;
-
14141  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
-
14142  freeSpaceInfoIndex, dstAllocOffset))
-
14143  {
-
14144  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
-
14145  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
-
14146  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
-
14147 
-
14148  // Same block
-
14149  if(freeSpaceInfoIndex == srcBlockInfoIndex)
-
14150  {
-
14151  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
-
14152 
-
14153  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
-
14154 
-
14155  VmaSuballocation suballoc = *srcSuballocIt;
-
14156  suballoc.offset = dstAllocOffset;
-
14157  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
-
14158  m_BytesMoved += srcAllocSize;
-
14159  ++m_AllocationsMoved;
-
14160 
-
14161  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
-
14162  ++nextSuballocIt;
-
14163  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
-
14164  srcSuballocIt = nextSuballocIt;
-
14165 
-
14166  InsertSuballoc(pFreeSpaceMetadata, suballoc);
-
14167 
-
14168  move.srcBlockIndex = srcOrigBlockIndex;
-
14169  move.dstBlockIndex = freeSpaceOrigBlockIndex;
-
14170  move.srcOffset = srcAllocOffset;
-
14171  move.dstOffset = dstAllocOffset;
-
14172  move.size = srcAllocSize;
-
14173 
-
14174  moves.push_back(move);
-
14175  }
-
14176  // Different block
-
14177  else
-
14178  {
-
14179  // MOVE OPTION 2: Move the allocation to a different block.
-
14180 
-
14181  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
-
14182 
-
14183  VmaSuballocation suballoc = *srcSuballocIt;
-
14184  suballoc.offset = dstAllocOffset;
-
14185  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
-
14186  m_BytesMoved += srcAllocSize;
-
14187  ++m_AllocationsMoved;
-
14188 
-
14189  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
-
14190  ++nextSuballocIt;
-
14191  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
-
14192  srcSuballocIt = nextSuballocIt;
-
14193 
-
14194  InsertSuballoc(pFreeSpaceMetadata, suballoc);
-
14195 
-
14196  move.srcBlockIndex = srcOrigBlockIndex;
-
14197  move.dstBlockIndex = freeSpaceOrigBlockIndex;
-
14198  move.srcOffset = srcAllocOffset;
-
14199  move.dstOffset = dstAllocOffset;
-
14200  move.size = srcAllocSize;
-
14201 
-
14202  moves.push_back(move);
-
14203  }
-
14204  }
-
14205  else
-
14206  {
-
14207  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
-
14208 
-
14209  // If the allocation doesn't fit before the end of dstBlock, move on to the next block.
-
14210  while(dstBlockInfoIndex < srcBlockInfoIndex &&
-
14211  dstAllocOffset + srcAllocSize > dstBlockSize)
-
14212  {
-
14213  // But before that, register remaining free space at the end of dst block.
-
14214  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
-
14215 
-
14216  ++dstBlockInfoIndex;
-
14217  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
-
14218  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
-
14219  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
-
14220  dstBlockSize = pDstMetadata->GetSize();
-
14221  dstOffset = 0;
-
14222  dstAllocOffset = 0;
-
14223  }
-
14224 
-
14225  // Same block
-
14226  if(dstBlockInfoIndex == srcBlockInfoIndex)
-
14227  {
-
14228  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
-
14229 
-
14230  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
-
14231 
-
14232  bool skipOver = overlap;
-
14233  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
-
14234  {
-
14235  // If the destination and source places overlap, skip the move if it would
-
14236  // shift the allocation by less than 1/64 of its size.
-
14237  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
-
14238  }
-
14239 
-
14240  if(skipOver)
-
14241  {
-
14242  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
-
14243 
-
14244  dstOffset = srcAllocOffset + srcAllocSize;
-
14245  ++srcSuballocIt;
-
14246  }
-
14247  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
-
14248  else
-
14249  {
-
14250  srcSuballocIt->offset = dstAllocOffset;
-
14251  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
-
14252  dstOffset = dstAllocOffset + srcAllocSize;
-
14253  m_BytesMoved += srcAllocSize;
-
14254  ++m_AllocationsMoved;
-
14255  ++srcSuballocIt;
-
14256 
-
14257  move.srcBlockIndex = srcOrigBlockIndex;
-
14258  move.dstBlockIndex = dstOrigBlockIndex;
-
14259  move.srcOffset = srcAllocOffset;
-
14260  move.dstOffset = dstAllocOffset;
-
14261  move.size = srcAllocSize;
-
14262 
-
14263  moves.push_back(move);
-
14264  }
-
14265  }
-
14266  // Different block
-
14267  else
-
14268  {
-
14269  // MOVE OPTION 2: Move the allocation to a different block.
-
14270 
-
14271  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
-
14272  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
-
14273 
-
14274  VmaSuballocation suballoc = *srcSuballocIt;
-
14275  suballoc.offset = dstAllocOffset;
-
14276  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
-
14277  dstOffset = dstAllocOffset + srcAllocSize;
-
14278  m_BytesMoved += srcAllocSize;
-
14279  ++m_AllocationsMoved;
-
14280 
-
14281  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
-
14282  ++nextSuballocIt;
-
14283  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
-
14284  srcSuballocIt = nextSuballocIt;
-
14285 
-
14286  pDstMetadata->m_Suballocations.push_back(suballoc);
-
14287 
-
14288  move.srcBlockIndex = srcOrigBlockIndex;
-
14289  move.dstBlockIndex = dstOrigBlockIndex;
-
14290  move.srcOffset = srcAllocOffset;
-
14291  move.dstOffset = dstAllocOffset;
-
14292  move.size = srcAllocSize;
-
14293 
-
14294  moves.push_back(move);
-
14295  }
-
14296  }
-
14297  }
-
14298  }
-
14299 
-
14300  m_BlockInfos.clear();
-
14301 
-
14302  PostprocessMetadata();
-
14303 
-
14304  return VK_SUCCESS;
-
14305 }
-
14306 
-
14307 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
-
14308 {
-
14309  const size_t blockCount = m_pBlockVector->GetBlockCount();
-
14310  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
-
14311  {
-
14312  VmaBlockMetadata_Generic* const pMetadata =
-
14313  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
-
14314  pMetadata->m_FreeCount = 0;
-
14315  pMetadata->m_SumFreeSize = pMetadata->GetSize();
-
14316  pMetadata->m_FreeSuballocationsBySize.clear();
-
14317  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
-
14318  it != pMetadata->m_Suballocations.end(); )
-
14319  {
-
14320  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
-
14321  {
-
14322  VmaSuballocationList::iterator nextIt = it;
-
14323  ++nextIt;
-
14324  pMetadata->m_Suballocations.erase(it);
-
14325  it = nextIt;
-
14326  }
-
14327  else
-
14328  {
-
14329  ++it;
-
14330  }
-
14331  }
-
14332  }
-
14333 }
-
14334 
-
14335 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
-
14336 {
-
14337  const size_t blockCount = m_pBlockVector->GetBlockCount();
-
14338  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
-
14339  {
-
14340  VmaBlockMetadata_Generic* const pMetadata =
-
14341  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
-
14342  const VkDeviceSize blockSize = pMetadata->GetSize();
-
14343 
-
14344  // No allocations in this block - entire area is free.
-
14345  if(pMetadata->m_Suballocations.empty())
-
14346  {
-
14347  pMetadata->m_FreeCount = 1;
-
14348  //pMetadata->m_SumFreeSize is already set to blockSize.
-
14349  VmaSuballocation suballoc = {
-
14350  0, // offset
-
14351  blockSize, // size
-
14352  VMA_NULL, // hAllocation
-
14353  VMA_SUBALLOCATION_TYPE_FREE };
-
14354  pMetadata->m_Suballocations.push_back(suballoc);
-
14355  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
+
14110  const size_t blockCount = m_pBlockVector->GetBlockCount();
+
14111  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
+
14112  {
+
14113  return VK_SUCCESS;
+
14114  }
+
14115 
+
14116  PreprocessMetadata();
+
14117 
+
14118  // Sort blocks in order from most "destination" to most "source".
+
14119 
+
14120  m_BlockInfos.resize(blockCount);
+
14121  for(size_t i = 0; i < blockCount; ++i)
+
14122  {
+
14123  m_BlockInfos[i].origBlockIndex = i;
+
14124  }
+
14125 
+
14126  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
+
14127  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
+
14128  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
+
14129  });
+
14130 
+
14131  // THE MAIN ALGORITHM
+
14132 
+
14133  FreeSpaceDatabase freeSpaceDb;
+
14134 
+
14135  size_t dstBlockInfoIndex = 0;
+
14136  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
+
14137  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
+
14138  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
+
14139  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
+
14140  VkDeviceSize dstOffset = 0;
+
14141 
+
14142  bool end = false;
+
14143  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
+
14144  {
+
14145  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
+
14146  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
+
14147  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
+
14148  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
+
14149  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
+
14150  {
+
14151  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
+
14152  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
+
14153  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
+
14154  if(m_AllocationsMoved == maxAllocationsToMove ||
+
14155  m_BytesMoved + srcAllocSize > maxBytesToMove)
+
14156  {
+
14157  end = true;
+
14158  break;
+
14159  }
+
14160  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
+
14161 
+
14162  VmaDefragmentationMove move = {};
+
14163  // Try to place it in one of free spaces from the database.
+
14164  size_t freeSpaceInfoIndex;
+
14165  VkDeviceSize dstAllocOffset;
+
14166  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
+
14167  freeSpaceInfoIndex, dstAllocOffset))
+
14168  {
+
14169  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
+
14170  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
+
14171  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
+
14172 
+
14173  // Same block
+
14174  if(freeSpaceInfoIndex == srcBlockInfoIndex)
+
14175  {
+
14176  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
+
14177 
+
14178  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
+
14179 
+
14180  VmaSuballocation suballoc = *srcSuballocIt;
+
14181  suballoc.offset = dstAllocOffset;
+
14182  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
+
14183  m_BytesMoved += srcAllocSize;
+
14184  ++m_AllocationsMoved;
+
14185 
+
14186  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
+
14187  ++nextSuballocIt;
+
14188  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
+
14189  srcSuballocIt = nextSuballocIt;
+
14190 
+
14191  InsertSuballoc(pFreeSpaceMetadata, suballoc);
+
14192 
+
14193  move.srcBlockIndex = srcOrigBlockIndex;
+
14194  move.dstBlockIndex = freeSpaceOrigBlockIndex;
+
14195  move.srcOffset = srcAllocOffset;
+
14196  move.dstOffset = dstAllocOffset;
+
14197  move.size = srcAllocSize;
+
14198 
+
14199  moves.push_back(move);
+
14200  }
+
14201  // Different block
+
14202  else
+
14203  {
+
14204  // MOVE OPTION 2: Move the allocation to a different block.
+
14205 
+
14206  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
+
14207 
+
14208  VmaSuballocation suballoc = *srcSuballocIt;
+
14209  suballoc.offset = dstAllocOffset;
+
14210  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
+
14211  m_BytesMoved += srcAllocSize;
+
14212  ++m_AllocationsMoved;
+
14213 
+
14214  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
+
14215  ++nextSuballocIt;
+
14216  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
+
14217  srcSuballocIt = nextSuballocIt;
+
14218 
+
14219  InsertSuballoc(pFreeSpaceMetadata, suballoc);
+
14220 
+
14221  move.srcBlockIndex = srcOrigBlockIndex;
+
14222  move.dstBlockIndex = freeSpaceOrigBlockIndex;
+
14223  move.srcOffset = srcAllocOffset;
+
14224  move.dstOffset = dstAllocOffset;
+
14225  move.size = srcAllocSize;
+
14226 
+
14227  moves.push_back(move);
+
14228  }
+
14229  }
+
14230  else
+
14231  {
+
14232  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
+
14233 
+
14234  // If the allocation doesn't fit before the end of dstBlock, move on to the next block.
+
14235  while(dstBlockInfoIndex < srcBlockInfoIndex &&
+
14236  dstAllocOffset + srcAllocSize > dstBlockSize)
+
14237  {
+
14238  // But before that, register remaining free space at the end of dst block.
+
14239  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
+
14240 
+
14241  ++dstBlockInfoIndex;
+
14242  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
+
14243  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
+
14244  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
+
14245  dstBlockSize = pDstMetadata->GetSize();
+
14246  dstOffset = 0;
+
14247  dstAllocOffset = 0;
+
14248  }
+
14249 
+
14250  // Same block
+
14251  if(dstBlockInfoIndex == srcBlockInfoIndex)
+
14252  {
+
14253  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
+
14254 
+
14255  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
+
14256 
+
14257  bool skipOver = overlap;
+
14258  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
+
14259  {
+
14260  // If the destination and source places overlap, skip the move if it would
+
14261  // shift the allocation by less than 1/64 of its size.
+
14262  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
+
14263  }
+
14264 
+
14265  if(skipOver)
+
14266  {
+
14267  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
+
14268 
+
14269  dstOffset = srcAllocOffset + srcAllocSize;
+
14270  ++srcSuballocIt;
+
14271  }
+
14272  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
+
14273  else
+
14274  {
+
14275  srcSuballocIt->offset = dstAllocOffset;
+
14276  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
+
14277  dstOffset = dstAllocOffset + srcAllocSize;
+
14278  m_BytesMoved += srcAllocSize;
+
14279  ++m_AllocationsMoved;
+
14280  ++srcSuballocIt;
+
14281 
+
14282  move.srcBlockIndex = srcOrigBlockIndex;
+
14283  move.dstBlockIndex = dstOrigBlockIndex;
+
14284  move.srcOffset = srcAllocOffset;
+
14285  move.dstOffset = dstAllocOffset;
+
14286  move.size = srcAllocSize;
+
14287 
+
14288  moves.push_back(move);
+
14289  }
+
14290  }
+
14291  // Different block
+
14292  else
+
14293  {
+
14294  // MOVE OPTION 2: Move the allocation to a different block.
+
14295 
+
14296  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
+
14297  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
+
14298 
+
14299  VmaSuballocation suballoc = *srcSuballocIt;
+
14300  suballoc.offset = dstAllocOffset;
+
14301  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
+
14302  dstOffset = dstAllocOffset + srcAllocSize;
+
14303  m_BytesMoved += srcAllocSize;
+
14304  ++m_AllocationsMoved;
+
14305 
+
14306  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
+
14307  ++nextSuballocIt;
+
14308  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
+
14309  srcSuballocIt = nextSuballocIt;
+
14310 
+
14311  pDstMetadata->m_Suballocations.push_back(suballoc);
+
14312 
+
14313  move.srcBlockIndex = srcOrigBlockIndex;
+
14314  move.dstBlockIndex = dstOrigBlockIndex;
+
14315  move.srcOffset = srcAllocOffset;
+
14316  move.dstOffset = dstAllocOffset;
+
14317  move.size = srcAllocSize;
+
14318 
+
14319  moves.push_back(move);
+
14320  }
+
14321  }
+
14322  }
+
14323  }
+
14324 
+
14325  m_BlockInfos.clear();
+
14326 
+
14327  PostprocessMetadata();
+
14328 
+
14329  return VK_SUCCESS;
+
14330 }
+
14331 
+
14332 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
+
14333 {
+
14334  const size_t blockCount = m_pBlockVector->GetBlockCount();
+
14335  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
+
14336  {
+
14337  VmaBlockMetadata_Generic* const pMetadata =
+
14338  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
+
14339  pMetadata->m_FreeCount = 0;
+
14340  pMetadata->m_SumFreeSize = pMetadata->GetSize();
+
14341  pMetadata->m_FreeSuballocationsBySize.clear();
+
14342  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
+
14343  it != pMetadata->m_Suballocations.end(); )
+
14344  {
+
14345  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
+
14346  {
+
14347  VmaSuballocationList::iterator nextIt = it;
+
14348  ++nextIt;
+
14349  pMetadata->m_Suballocations.erase(it);
+
14350  it = nextIt;
+
14351  }
+
14352  else
+
14353  {
+
14354  ++it;
+
14355  }
14356  }
-
14357  // There are some allocations in this block.
-
14358  else
-
14359  {
-
14360  VkDeviceSize offset = 0;
-
14361  VmaSuballocationList::iterator it;
-
14362  for(it = pMetadata->m_Suballocations.begin();
-
14363  it != pMetadata->m_Suballocations.end();
-
14364  ++it)
-
14365  {
-
14366  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
-
14367  VMA_ASSERT(it->offset >= offset);
-
14368 
-
14369  // Need to insert preceding free space.
-
14370  if(it->offset > offset)
-
14371  {
-
14372  ++pMetadata->m_FreeCount;
-
14373  const VkDeviceSize freeSize = it->offset - offset;
-
14374  VmaSuballocation suballoc = {
-
14375  offset, // offset
-
14376  freeSize, // size
-
14377  VMA_NULL, // hAllocation
-
14378  VMA_SUBALLOCATION_TYPE_FREE };
-
14379  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
-
14380  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
-
14381  {
-
14382  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
-
14383  }
-
14384  }
-
14385 
-
14386  pMetadata->m_SumFreeSize -= it->size;
-
14387  offset = it->offset + it->size;
-
14388  }
-
14389 
-
14390  // Need to insert trailing free space.
-
14391  if(offset < blockSize)
-
14392  {
-
14393  ++pMetadata->m_FreeCount;
-
14394  const VkDeviceSize freeSize = blockSize - offset;
-
14395  VmaSuballocation suballoc = {
-
14396  offset, // offset
-
14397  freeSize, // size
-
14398  VMA_NULL, // hAllocation
-
14399  VMA_SUBALLOCATION_TYPE_FREE };
-
14400  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
-
14401  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
-
14402  if(freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
-
14403  {
-
14404  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
-
14405  }
-
14406  }
-
14407 
-
14408  VMA_SORT(
-
14409  pMetadata->m_FreeSuballocationsBySize.begin(),
-
14410  pMetadata->m_FreeSuballocationsBySize.end(),
-
14411  VmaSuballocationItemSizeLess());
-
14412  }
-
14413 
-
14414  VMA_HEAVY_ASSERT(pMetadata->Validate());
-
14415  }
-
14416 }
-
14417 
-
14418 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
-
14419 {
-
14420  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
-
14421  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
-
14422  while(it != pMetadata->m_Suballocations.end())
-
14423  {
-
14424  if(it->offset < suballoc.offset)
-
14425  {
-
14426  ++it;
-
14427  } else break; // stop at the first suballocation at or past the insertion point
-
14428  }
-
14429  pMetadata->m_Suballocations.insert(it, suballoc);
-
14430 }
-
14431 
-
14432 ////////////////////////////////////////////////////////////////////////////////
-
14433 // VmaBlockVectorDefragmentationContext
-
14434 
-
14435 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
-
14436  VmaAllocator hAllocator,
-
14437  VmaPool hCustomPool,
-
14438  VmaBlockVector* pBlockVector,
-
14439  uint32_t currFrameIndex) :
-
14440  res(VK_SUCCESS),
-
14441  mutexLocked(false),
-
14442  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
-
14443  defragmentationMoves(VmaStlAllocator<VmaDefragmentationMove>(hAllocator->GetAllocationCallbacks())),
-
14444  defragmentationMovesProcessed(0),
-
14445  defragmentationMovesCommitted(0),
-
14446  hasDefragmentationPlan(0),
-
14447  m_hAllocator(hAllocator),
-
14448  m_hCustomPool(hCustomPool),
-
14449  m_pBlockVector(pBlockVector),
-
14450  m_CurrFrameIndex(currFrameIndex),
-
14451  m_pAlgorithm(VMA_NULL),
-
14452  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
-
14453  m_AllAllocations(false)
-
14454 {
+
14357  }
+
14358 }
+
14359 
+
14360 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
+
14361 {
+
14362  const size_t blockCount = m_pBlockVector->GetBlockCount();
+
14363  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
+
14364  {
+
14365  VmaBlockMetadata_Generic* const pMetadata =
+
14366  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
+
14367  const VkDeviceSize blockSize = pMetadata->GetSize();
+
14368 
+
14369  // No allocations in this block - entire area is free.
+
14370  if(pMetadata->m_Suballocations.empty())
+
14371  {
+
14372  pMetadata->m_FreeCount = 1;
+
14373  //pMetadata->m_SumFreeSize is already set to blockSize.
+
14374  VmaSuballocation suballoc = {
+
14375  0, // offset
+
14376  blockSize, // size
+
14377  VMA_NULL, // hAllocation
+
14378  VMA_SUBALLOCATION_TYPE_FREE };
+
14379  pMetadata->m_Suballocations.push_back(suballoc);
+
14380  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
+
14381  }
+
14382  // There are some allocations in this block.
+
14383  else
+
14384  {
+
14385  VkDeviceSize offset = 0;
+
14386  VmaSuballocationList::iterator it;
+
14387  for(it = pMetadata->m_Suballocations.begin();
+
14388  it != pMetadata->m_Suballocations.end();
+
14389  ++it)
+
14390  {
+
14391  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
+
14392  VMA_ASSERT(it->offset >= offset);
+
14393 
+
14394  // Need to insert preceding free space.
+
14395  if(it->offset > offset)
+
14396  {
+
14397  ++pMetadata->m_FreeCount;
+
14398  const VkDeviceSize freeSize = it->offset - offset;
+
14399  VmaSuballocation suballoc = {
+
14400  offset, // offset
+
14401  freeSize, // size
+
14402  VMA_NULL, // hAllocation
+
14403  VMA_SUBALLOCATION_TYPE_FREE };
+
14404  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
+
14405  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
+
14406  {
+
14407  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
+
14408  }
+
14409  }
+
14410 
+
14411  pMetadata->m_SumFreeSize -= it->size;
+
14412  offset = it->offset + it->size;
+
14413  }
+
14414 
+
14415  // Need to insert trailing free space.
+
14416  if(offset < blockSize)
+
14417  {
+
14418  ++pMetadata->m_FreeCount;
+
14419  const VkDeviceSize freeSize = blockSize - offset;
+
14420  VmaSuballocation suballoc = {
+
14421  offset, // offset
+
14422  freeSize, // size
+
14423  VMA_NULL, // hAllocation
+
14424  VMA_SUBALLOCATION_TYPE_FREE };
+
14425  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
+
14426  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
+
14427  if(freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
+
14428  {
+
14429  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
+
14430  }
+
14431  }
+
14432 
+
14433  VMA_SORT(
+
14434  pMetadata->m_FreeSuballocationsBySize.begin(),
+
14435  pMetadata->m_FreeSuballocationsBySize.end(),
+
14436  VmaSuballocationItemSizeLess());
+
14437  }
+
14438 
+
14439  VMA_HEAVY_ASSERT(pMetadata->Validate());
+
14440  }
+
14441 }
+
14442 
+
14443 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
+
14444 {
+
14445  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
+
14446  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
+
14447  while(it != pMetadata->m_Suballocations.end())
+
14448  {
+
14449  if(it->offset < suballoc.offset)
+
14450  {
+
14451  ++it;
+
14452  } else break; // stop at the first suballocation at or past the insertion point
+
14453  }
+
14454  pMetadata->m_Suballocations.insert(it, suballoc);
14455 }
14456 
-
14457 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
-
14458 {
-
14459  vma_delete(m_hAllocator, m_pAlgorithm);
-
14460 }
-
14461 
-
14462 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
-
14463 {
-
14464  AllocInfo info = { hAlloc, pChanged };
-
14465  m_Allocations.push_back(info);
-
14466 }
-
14467 
-
14468 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported, VmaDefragmentationFlags flags)
-
14469 {
-
14470  const bool allAllocations = m_AllAllocations ||
-
14471  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
-
14472 
-
14473  /********************************
-
14474  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
-
14475  ********************************/
-
14476 
-
14477  /*
-
14478  Fast algorithm is supported only when certain criteria are met:
-
14479  - VMA_DEBUG_MARGIN is 0.
-
14480  - All allocations in this block vector are movable.
-
14481  - There is no possibility of image/buffer granularity conflict.
-
14482  - The defragmentation is not incremental.
-
14483  */
-
14484  if(VMA_DEBUG_MARGIN == 0 &&
-
14485  allAllocations &&
-
14486  !m_pBlockVector->IsBufferImageGranularityConflictPossible() &&
-
14487  !(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL))
-
14488  {
-
14489  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
-
14490  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
-
14491  }
-
14492  else
-
14493  {
-
14494  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
-
14495  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
-
14496  }
+
14457 ////////////////////////////////////////////////////////////////////////////////
+
14458 // VmaBlockVectorDefragmentationContext
+
14459 
+
14460 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
+
14461  VmaAllocator hAllocator,
+
14462  VmaPool hCustomPool,
+
14463  VmaBlockVector* pBlockVector,
+
14464  uint32_t currFrameIndex) :
+
14465  res(VK_SUCCESS),
+
14466  mutexLocked(false),
+
14467  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
+
14468  defragmentationMoves(VmaStlAllocator<VmaDefragmentationMove>(hAllocator->GetAllocationCallbacks())),
+
14469  defragmentationMovesProcessed(0),
+
14470  defragmentationMovesCommitted(0),
+
14471  hasDefragmentationPlan(0),
+
14472  m_hAllocator(hAllocator),
+
14473  m_hCustomPool(hCustomPool),
+
14474  m_pBlockVector(pBlockVector),
+
14475  m_CurrFrameIndex(currFrameIndex),
+
14476  m_pAlgorithm(VMA_NULL),
+
14477  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
+
14478  m_AllAllocations(false)
+
14479 {
+
14480 }
+
14481 
+
14482 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
+
14483 {
+
14484  vma_delete(m_hAllocator, m_pAlgorithm);
+
14485 }
+
14486 
+
14487 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
+
14488 {
+
14489  AllocInfo info = { hAlloc, pChanged };
+
14490  m_Allocations.push_back(info);
+
14491 }
+
14492 
+
14493 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported, VmaDefragmentationFlags flags)
+
14494 {
+
14495  const bool allAllocations = m_AllAllocations ||
+
14496  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
14497 
-
14498  if(allAllocations)
-
14499  {
-
14500  m_pAlgorithm->AddAll();
-
14501  }
-
14502  else
-
14503  {
-
14504  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
-
14505  {
-
14506  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
-
14507  }
-
14508  }
-
14509 }
-
14510 
-
14511 ////////////////////////////////////////////////////////////////////////////////
-
14512 // VmaDefragmentationContext
-
14513 
-
14514 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
-
14515  VmaAllocator hAllocator,
-
14516  uint32_t currFrameIndex,
-
14517  uint32_t flags,
-
14518  VmaDefragmentationStats* pStats) :
-
14519  m_hAllocator(hAllocator),
-
14520  m_CurrFrameIndex(currFrameIndex),
-
14521  m_Flags(flags),
-
14522  m_pStats(pStats),
-
14523  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
-
14524 {
-
14525  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
-
14526 }
-
14527 
-
14528 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
-
14529 {
-
14530  for(size_t i = m_CustomPoolContexts.size(); i--; )
-
14531  {
-
14532  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
-
14533  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_Flags, m_pStats);
-
14534  vma_delete(m_hAllocator, pBlockVectorCtx);
-
14535  }
-
14536  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
-
14537  {
-
14538  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
-
14539  if(pBlockVectorCtx)
-
14540  {
-
14541  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_Flags, m_pStats);
-
14542  vma_delete(m_hAllocator, pBlockVectorCtx);
-
14543  }
-
14544  }
-
14545 }
-
14546 
-
14547 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, const VmaPool* pPools)
-
14548 {
-
14549  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
-
14550  {
-
14551  VmaPool pool = pPools[poolIndex];
-
14552  VMA_ASSERT(pool);
-
14553  // Pools with algorithm other than default are not defragmented.
-
14554  if(pool->m_BlockVector.GetAlgorithm() == 0)
-
14555  {
-
14556  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
-
14557 
-
14558  for(size_t i = m_CustomPoolContexts.size(); i--; )
-
14559  {
-
14560  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
-
14561  {
-
14562  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
-
14563  break;
-
14564  }
-
14565  }
-
14566 
-
14567  if(!pBlockVectorDefragCtx)
-
14568  {
-
14569  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
-
14570  m_hAllocator,
-
14571  pool,
-
14572  &pool->m_BlockVector,
-
14573  m_CurrFrameIndex);
-
14574  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
-
14575  }
-
14576 
-
14577  pBlockVectorDefragCtx->AddAll();
-
14578  }
-
14579  }
-
14580 }
-
14581 
-
14582 void VmaDefragmentationContext_T::AddAllocations(
-
14583  uint32_t allocationCount,
-
14584  const VmaAllocation* pAllocations,
-
14585  VkBool32* pAllocationsChanged)
-
14586 {
-
14587  // Dispatch pAllocations among defragmentators. Create them when necessary.
-
14588  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
-
14589  {
-
14590  const VmaAllocation hAlloc = pAllocations[allocIndex];
-
14591  VMA_ASSERT(hAlloc);
-
14592  // DedicatedAlloc cannot be defragmented.
-
14593  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
-
14594  // Lost allocation cannot be defragmented.
-
14595  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
-
14596  {
-
14597  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
-
14598 
-
14599  const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
-
14600  // This allocation belongs to custom pool.
-
14601  if(hAllocPool != VK_NULL_HANDLE)
-
14602  {
-
14603  // Pools with algorithm other than default are not defragmented.
-
14604  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
-
14605  {
-
14606  for(size_t i = m_CustomPoolContexts.size(); i--; )
-
14607  {
-
14608  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
-
14609  {
-
14610  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
-
14611  break;
-
14612  }
-
14613  }
-
14614  if(!pBlockVectorDefragCtx)
-
14615  {
-
14616  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
-
14617  m_hAllocator,
-
14618  hAllocPool,
-
14619  &hAllocPool->m_BlockVector,
-
14620  m_CurrFrameIndex);
-
14621  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
-
14622  }
-
14623  }
-
14624  }
-
14625  // This allocation belongs to default pool.
-
14626  else
+
14498  /********************************
+
14499  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
+
14500  ********************************/
+
14501 
+
14502  /*
+
14503  Fast algorithm is supported only when certain criteria are met:
+
14504  - VMA_DEBUG_MARGIN is 0.
+
14505  - All allocations in this block vector are movable.
+
14506  - There is no possibility of image/buffer granularity conflict.
+
14507  - The defragmentation is not incremental.
+
14508  */
+
14509  if(VMA_DEBUG_MARGIN == 0 &&
+
14510  allAllocations &&
+
14511  !m_pBlockVector->IsBufferImageGranularityConflictPossible() &&
+
14512  !(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL))
+
14513  {
+
14514  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
+
14515  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
+
14516  }
+
14517  else
+
14518  {
+
14519  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
+
14520  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
+
14521  }
+
14522 
+
14523  if(allAllocations)
+
14524  {
+
14525  m_pAlgorithm->AddAll();
+
14526  }
+
14527  else
+
14528  {
+
14529  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
+
14530  {
+
14531  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
+
14532  }
+
14533  }
+
14534 }
+
14535 
+
14536 ////////////////////////////////////////////////////////////////////////////////
+
14537 // VmaDefragmentationContext
+
14538 
+
14539 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
+
14540  VmaAllocator hAllocator,
+
14541  uint32_t currFrameIndex,
+
14542  uint32_t flags,
+
14543  VmaDefragmentationStats* pStats) :
+
14544  m_hAllocator(hAllocator),
+
14545  m_CurrFrameIndex(currFrameIndex),
+
14546  m_Flags(flags),
+
14547  m_pStats(pStats),
+
14548  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
+
14549 {
+
14550  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
+
14551 }
+
14552 
+
14553 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
+
14554 {
+
14555  for(size_t i = m_CustomPoolContexts.size(); i--; )
+
14556  {
+
14557  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
+
14558  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_Flags, m_pStats);
+
14559  vma_delete(m_hAllocator, pBlockVectorCtx);
+
14560  }
+
14561  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
+
14562  {
+
14563  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
+
14564  if(pBlockVectorCtx)
+
14565  {
+
14566  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_Flags, m_pStats);
+
14567  vma_delete(m_hAllocator, pBlockVectorCtx);
+
14568  }
+
14569  }
+
14570 }
+
14571 
+
14572 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, const VmaPool* pPools)
+
14573 {
+
14574  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
+
14575  {
+
14576  VmaPool pool = pPools[poolIndex];
+
14577  VMA_ASSERT(pool);
+
14578  // Pools with algorithm other than default are not defragmented.
+
14579  if(pool->m_BlockVector.GetAlgorithm() == 0)
+
14580  {
+
14581  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
+
14582 
+
14583  for(size_t i = m_CustomPoolContexts.size(); i--; )
+
14584  {
+
14585  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
+
14586  {
+
14587  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
+
14588  break;
+
14589  }
+
14590  }
+
14591 
+
14592  if(!pBlockVectorDefragCtx)
+
14593  {
+
14594  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
+
14595  m_hAllocator,
+
14596  pool,
+
14597  &pool->m_BlockVector,
+
14598  m_CurrFrameIndex);
+
14599  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
+
14600  }
+
14601 
+
14602  pBlockVectorDefragCtx->AddAll();
+
14603  }
+
14604  }
+
14605 }
+
14606 
+
14607 void VmaDefragmentationContext_T::AddAllocations(
+
14608  uint32_t allocationCount,
+
14609  const VmaAllocation* pAllocations,
+
14610  VkBool32* pAllocationsChanged)
+
14611 {
+
14612  // Dispatch pAllocations among defragmentators. Create them when necessary.
+
14613  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
+
14614  {
+
14615  const VmaAllocation hAlloc = pAllocations[allocIndex];
+
14616  VMA_ASSERT(hAlloc);
+
14617  // DedicatedAlloc cannot be defragmented.
+
14618  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
+
14619  // Lost allocation cannot be defragmented.
+
14620  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
+
14621  {
+
14622  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
+
14623 
+
14624  const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
+
14625  // This allocation belongs to custom pool.
+
14626  if(hAllocPool != VK_NULL_HANDLE)
14627  {
-
14628  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
-
14629  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
-
14630  if(!pBlockVectorDefragCtx)
-
14631  {
-
14632  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
-
14633  m_hAllocator,
-
14634  VMA_NULL, // hCustomPool
-
14635  m_hAllocator->m_pBlockVectors[memTypeIndex],
-
14636  m_CurrFrameIndex);
-
14637  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
-
14638  }
-
14639  }
-
14640 
-
14641  if(pBlockVectorDefragCtx)
-
14642  {
-
14643  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
-
14644  &pAllocationsChanged[allocIndex] : VMA_NULL;
-
14645  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
-
14646  }
-
14647  }
-
14648  }
-
14649 }
-
14650 
-
14651 VkResult VmaDefragmentationContext_T::Defragment(
-
14652  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
-
14653  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
-
14654  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags)
-
14655 {
-
14656  if(pStats)
-
14657  {
-
14658  memset(pStats, 0, sizeof(VmaDefragmentationStats));
-
14659  }
-
14660 
-
14661  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
-
14662  {
-
14663  // For incremental defragmentations, we just earmark how much we can move
-
14664  // The real meat is in the defragmentation steps
-
14665  m_MaxCpuBytesToMove = maxCpuBytesToMove;
-
14666  m_MaxCpuAllocationsToMove = maxCpuAllocationsToMove;
-
14667 
-
14668  m_MaxGpuBytesToMove = maxGpuBytesToMove;
-
14669  m_MaxGpuAllocationsToMove = maxGpuAllocationsToMove;
-
14670 
-
14671  if(m_MaxCpuBytesToMove == 0 && m_MaxCpuAllocationsToMove == 0 &&
-
14672  m_MaxGpuBytesToMove == 0 && m_MaxGpuAllocationsToMove == 0)
-
14673  return VK_SUCCESS;
-
14674 
-
14675  return VK_NOT_READY;
-
14676  }
-
14677 
-
14678  if(commandBuffer == VK_NULL_HANDLE)
-
14679  {
-
14680  maxGpuBytesToMove = 0;
-
14681  maxGpuAllocationsToMove = 0;
-
14682  }
-
14683 
-
14684  VkResult res = VK_SUCCESS;
+
14628  // Pools with algorithm other than default are not defragmented.
+
14629  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
+
14630  {
+
14631  for(size_t i = m_CustomPoolContexts.size(); i--; )
+
14632  {
+
14633  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
+
14634  {
+
14635  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
+
14636  break;
+
14637  }
+
14638  }
+
14639  if(!pBlockVectorDefragCtx)
+
14640  {
+
14641  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
+
14642  m_hAllocator,
+
14643  hAllocPool,
+
14644  &hAllocPool->m_BlockVector,
+
14645  m_CurrFrameIndex);
+
14646  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
+
14647  }
+
14648  }
+
14649  }
+
14650  // This allocation belongs to default pool.
+
14651  else
+
14652  {
+
14653  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
+
14654  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
+
14655  if(!pBlockVectorDefragCtx)
+
14656  {
+
14657  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
+
14658  m_hAllocator,
+
14659  VMA_NULL, // hCustomPool
+
14660  m_hAllocator->m_pBlockVectors[memTypeIndex],
+
14661  m_CurrFrameIndex);
+
14662  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
+
14663  }
+
14664  }
+
14665 
+
14666  if(pBlockVectorDefragCtx)
+
14667  {
+
14668  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
+
14669  &pAllocationsChanged[allocIndex] : VMA_NULL;
+
14670  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
+
14671  }
+
14672  }
+
14673  }
+
14674 }
+
14675 
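
For context: the branch above only creates a defragmentation context for a custom pool whose block vector uses the default allocation algorithm (GetAlgorithm() == 0); pools created with a linear or buddy algorithm are skipped. A minimal sketch of submitting such a pool for defragmentation, assuming an existing VmaAllocator "allocator" and an eligible VmaPool "pool":

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.poolCount = 1;
    defragInfo.pPools = &pool;                       // must not use a non-default pool algorithm
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;    // no byte limit
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX; // no allocation-count limit

    VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
    VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
    // ... perform or wait for any required transfers, then:
    vmaDefragmentationEnd(allocator, defragCtx);
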
+
14676 VkResult VmaDefragmentationContext_T::Defragment(
+
14677  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
+
14678  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
+
14679  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags)
+
14680 {
+
14681  if(pStats)
+
14682  {
+
14683  memset(pStats, 0, sizeof(VmaDefragmentationStats));
+
14684  }
14685 
-
14686  // Process default pools.
-
14687  for(uint32_t memTypeIndex = 0;
-
14688  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
-
14689  ++memTypeIndex)
-
14690  {
-
14691  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
-
14692  if(pBlockVectorCtx)
-
14693  {
-
14694  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
-
14695  pBlockVectorCtx->GetBlockVector()->Defragment(
-
14696  pBlockVectorCtx,
-
14697  pStats, flags,
-
14698  maxCpuBytesToMove, maxCpuAllocationsToMove,
-
14699  maxGpuBytesToMove, maxGpuAllocationsToMove,
-
14700  commandBuffer);
-
14701  if(pBlockVectorCtx->res != VK_SUCCESS)
-
14702  {
-
14703  res = pBlockVectorCtx->res;
-
14704  }
-
14705  }
-
14706  }
-
14707 
-
14708  // Process custom pools.
-
14709  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
-
14710  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
-
14711  ++customCtxIndex)
-
14712  {
-
14713  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
-
14714  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
-
14715  pBlockVectorCtx->GetBlockVector()->Defragment(
-
14716  pBlockVectorCtx,
-
14717  pStats, flags,
-
14718  maxCpuBytesToMove, maxCpuAllocationsToMove,
-
14719  maxGpuBytesToMove, maxGpuAllocationsToMove,
-
14720  commandBuffer);
-
14721  if(pBlockVectorCtx->res != VK_SUCCESS)
-
14722  {
-
14723  res = pBlockVectorCtx->res;
-
14724  }
-
14725  }
-
14726 
-
14727  return res;
-
14728 }
-
14729 
-
14730 VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo)
-
14731 {
-
14732  VmaDefragmentationPassMoveInfo* pCurrentMove = pInfo->pMoves;
-
14733  uint32_t movesLeft = pInfo->moveCount;
-
14734 
-
14735  // Process default pools.
-
14736  for(uint32_t memTypeIndex = 0;
-
14737  memTypeIndex < m_hAllocator->GetMemoryTypeCount();
-
14738  ++memTypeIndex)
-
14739  {
-
14740  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
-
14741  if(pBlockVectorCtx)
-
14742  {
-
14743  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
-
14744 
-
14745  if(!pBlockVectorCtx->hasDefragmentationPlan)
-
14746  {
-
14747  pBlockVectorCtx->GetBlockVector()->Defragment(
-
14748  pBlockVectorCtx,
-
14749  m_pStats, m_Flags,
-
14750  m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove,
-
14751  m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove,
-
14752  VK_NULL_HANDLE);
-
14753 
-
14754  if(pBlockVectorCtx->res < VK_SUCCESS)
-
14755  continue;
-
14756 
-
14757  pBlockVectorCtx->hasDefragmentationPlan = true;
-
14758  }
+
14686  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
+
14687  {
+
14688  // For incremental defragmentations, we just earmark how much we can move
+
14689  // The real meat is in the defragmentation steps
+
14690  m_MaxCpuBytesToMove = maxCpuBytesToMove;
+
14691  m_MaxCpuAllocationsToMove = maxCpuAllocationsToMove;
+
14692 
+
14693  m_MaxGpuBytesToMove = maxGpuBytesToMove;
+
14694  m_MaxGpuAllocationsToMove = maxGpuAllocationsToMove;
+
14695 
+
14696  if(m_MaxCpuBytesToMove == 0 && m_MaxCpuAllocationsToMove == 0 &&
+
14697  m_MaxGpuBytesToMove == 0 && m_MaxGpuAllocationsToMove == 0)
+
14698  return VK_SUCCESS;
+
14699 
+
14700  return VK_NOT_READY;
+
14701  }
+
14702 
+
14703  if(commandBuffer == VK_NULL_HANDLE)
+
14704  {
+
14705  maxGpuBytesToMove = 0;
+
14706  maxGpuAllocationsToMove = 0;
+
14707  }
+
14708 
+
14709  VkResult res = VK_SUCCESS;
+
14710 
+
14711  // Process default pools.
+
14712  for(uint32_t memTypeIndex = 0;
+
14713  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
+
14714  ++memTypeIndex)
+
14715  {
+
14716  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
+
14717  if(pBlockVectorCtx)
+
14718  {
+
14719  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
+
14720  pBlockVectorCtx->GetBlockVector()->Defragment(
+
14721  pBlockVectorCtx,
+
14722  pStats, flags,
+
14723  maxCpuBytesToMove, maxCpuAllocationsToMove,
+
14724  maxGpuBytesToMove, maxGpuAllocationsToMove,
+
14725  commandBuffer);
+
14726  if(pBlockVectorCtx->res != VK_SUCCESS)
+
14727  {
+
14728  res = pBlockVectorCtx->res;
+
14729  }
+
14730  }
+
14731  }
+
14732 
+
14733  // Process custom pools.
+
14734  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
+
14735  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
+
14736  ++customCtxIndex)
+
14737  {
+
14738  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
+
14739  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
+
14740  pBlockVectorCtx->GetBlockVector()->Defragment(
+
14741  pBlockVectorCtx,
+
14742  pStats, flags,
+
14743  maxCpuBytesToMove, maxCpuAllocationsToMove,
+
14744  maxGpuBytesToMove, maxGpuAllocationsToMove,
+
14745  commandBuffer);
+
14746  if(pBlockVectorCtx->res != VK_SUCCESS)
+
14747  {
+
14748  res = pBlockVectorCtx->res;
+
14749  }
+
14750  }
+
14751 
+
14752  return res;
+
14753 }
+
14754 
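
With VMA_DEFRAGMENTATION_FLAG_INCREMENTAL set, the function above only stores the move budgets and returns VK_NOT_READY (or VK_SUCCESS when every budget is zero); the actual moves are produced by the pass functions below. An illustrative call, where "allocator", "allocs" and "allocCount" are assumed to exist:

    VmaDefragmentationInfo2 info = {};
    info.flags = VMA_DEFRAGMENTATION_FLAG_INCREMENTAL;
    info.allocationCount = allocCount;
    info.pAllocations = allocs;
    info.maxCpuBytesToMove = VK_WHOLE_SIZE;
    info.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext ctx = VK_NULL_HANDLE;
    VkResult res = vmaDefragmentationBegin(allocator, &info, nullptr, &ctx);
    // Expect res == VK_NOT_READY here; the moves happen in the defragmentation passes.
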
+
14755 VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo)
+
14756 {
+
14757  VmaDefragmentationPassMoveInfo* pCurrentMove = pInfo->pMoves;
+
14758  uint32_t movesLeft = pInfo->moveCount;
14759 
-
14760  const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations(
-
14761  pBlockVectorCtx,
-
14762  pCurrentMove, movesLeft);
-
14763 
-
14764  movesLeft -= processed;
-
14765  pCurrentMove += processed;
-
14766  }
-
14767  }
-
14768 
-
14769  // Process custom pools.
-
14770  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
-
14771  customCtxIndex < customCtxCount;
-
14772  ++customCtxIndex)
-
14773  {
-
14774  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
-
14775  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
-
14776 
-
14777  if(!pBlockVectorCtx->hasDefragmentationPlan)
-
14778  {
-
14779  pBlockVectorCtx->GetBlockVector()->Defragment(
-
14780  pBlockVectorCtx,
-
14781  m_pStats, m_Flags,
-
14782  m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove,
-
14783  m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove,
-
14784  VK_NULL_HANDLE);
-
14785 
-
14786  if(pBlockVectorCtx->res < VK_SUCCESS)
-
14787  continue;
+
14760  // Process default pools.
+
14761  for(uint32_t memTypeIndex = 0;
+
14762  memTypeIndex < m_hAllocator->GetMemoryTypeCount();
+
14763  ++memTypeIndex)
+
14764  {
+
14765  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
+
14766  if(pBlockVectorCtx)
+
14767  {
+
14768  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
+
14769 
+
14770  if(!pBlockVectorCtx->hasDefragmentationPlan)
+
14771  {
+
14772  pBlockVectorCtx->GetBlockVector()->Defragment(
+
14773  pBlockVectorCtx,
+
14774  m_pStats, m_Flags,
+
14775  m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove,
+
14776  m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove,
+
14777  VK_NULL_HANDLE);
+
14778 
+
14779  if(pBlockVectorCtx->res < VK_SUCCESS)
+
14780  continue;
+
14781 
+
14782  pBlockVectorCtx->hasDefragmentationPlan = true;
+
14783  }
+
14784 
+
14785  const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations(
+
14786  pBlockVectorCtx,
+
14787  pCurrentMove, movesLeft);
14788 
-
14789  pBlockVectorCtx->hasDefragmentationPlan = true;
-
14790  }
-
14791 
-
14792  const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations(
-
14793  pBlockVectorCtx,
-
14794  pCurrentMove, movesLeft);
-
14795 
-
14796  movesLeft -= processed;
-
14797  pCurrentMove += processed;
-
14798  }
-
14799 
-
14800  pInfo->moveCount = pInfo->moveCount - movesLeft;
+
14789  movesLeft -= processed;
+
14790  pCurrentMove += processed;
+
14791  }
+
14792  }
+
14793 
+
14794  // Process custom pools.
+
14795  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
+
14796  customCtxIndex < customCtxCount;
+
14797  ++customCtxIndex)
+
14798  {
+
14799  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
+
14800  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
14801 
-
14802  return VK_SUCCESS;
-
14803 }
-
14804 VkResult VmaDefragmentationContext_T::DefragmentPassEnd()
-
14805 {
-
14806  VkResult res = VK_SUCCESS;
-
14807 
-
14808  // Process default pools.
-
14809  for(uint32_t memTypeIndex = 0;
-
14810  memTypeIndex < m_hAllocator->GetMemoryTypeCount();
-
14811  ++memTypeIndex)
-
14812  {
-
14813  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
-
14814  if(pBlockVectorCtx)
-
14815  {
-
14816  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
-
14817 
-
14818  if(!pBlockVectorCtx->hasDefragmentationPlan)
-
14819  {
-
14820  res = VK_NOT_READY;
-
14821  continue;
-
14822  }
-
14823 
-
14824  pBlockVectorCtx->GetBlockVector()->CommitDefragmentations(
-
14825  pBlockVectorCtx, m_pStats);
+
14802  if(!pBlockVectorCtx->hasDefragmentationPlan)
+
14803  {
+
14804  pBlockVectorCtx->GetBlockVector()->Defragment(
+
14805  pBlockVectorCtx,
+
14806  m_pStats, m_Flags,
+
14807  m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove,
+
14808  m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove,
+
14809  VK_NULL_HANDLE);
+
14810 
+
14811  if(pBlockVectorCtx->res < VK_SUCCESS)
+
14812  continue;
+
14813 
+
14814  pBlockVectorCtx->hasDefragmentationPlan = true;
+
14815  }
+
14816 
+
14817  const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations(
+
14818  pBlockVectorCtx,
+
14819  pCurrentMove, movesLeft);
+
14820 
+
14821  movesLeft -= processed;
+
14822  pCurrentMove += processed;
+
14823  }
+
14824 
+
14825  pInfo->moveCount = pInfo->moveCount - movesLeft;
14826 
-
14827  if(pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted)
-
14828  res = VK_NOT_READY;
-
14829  }
-
14830  }
-
14831 
-
14832  // Process custom pools.
-
14833  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
-
14834  customCtxIndex < customCtxCount;
-
14835  ++customCtxIndex)
-
14836  {
-
14837  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
-
14838  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
-
14839 
-
14840  if(!pBlockVectorCtx->hasDefragmentationPlan)
-
14841  {
-
14842  res = VK_NOT_READY;
-
14843  continue;
-
14844  }
-
14845 
-
14846  pBlockVectorCtx->GetBlockVector()->CommitDefragmentations(
-
14847  pBlockVectorCtx, m_pStats);
+
14827  return VK_SUCCESS;
+
14828 }
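
Note that VmaDefragmentationPassInfo::moveCount is in/out: the caller passes the capacity of pMoves, and the function rewrites it to the number of moves actually produced. Continuing the sketch above (the capacity of 16 is an arbitrary choice):

    VmaDefragmentationPassMoveInfo moves[16];
    VmaDefragmentationPassInfo passInfo = {};
    passInfo.moveCount = 16; // capacity of moves[]
    passInfo.pMoves = moves;
    vmaBeginDefragmentationPass(allocator, ctx, &passInfo);
    // passInfo.moveCount now holds how many entries of moves[] were filled;
    // each entry names an allocation plus its destination memory and offset.
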
+
14829 VkResult VmaDefragmentationContext_T::DefragmentPassEnd()
+
14830 {
+
14831  VkResult res = VK_SUCCESS;
+
14832 
+
14833  // Process default pools.
+
14834  for(uint32_t memTypeIndex = 0;
+
14835  memTypeIndex < m_hAllocator->GetMemoryTypeCount();
+
14836  ++memTypeIndex)
+
14837  {
+
14838  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
+
14839  if(pBlockVectorCtx)
+
14840  {
+
14841  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
+
14842 
+
14843  if(!pBlockVectorCtx->hasDefragmentationPlan)
+
14844  {
+
14845  res = VK_NOT_READY;
+
14846  continue;
+
14847  }
14848 
-
14849  if(pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted)
-
14850  res = VK_NOT_READY;
-
14851  }
-
14852 
-
14853  return res;
-
14854 }
-
14855 
-
14857 // VmaRecorder
-
14858 
-
14859 #if VMA_RECORDING_ENABLED
-
14860 
-
14861 VmaRecorder::VmaRecorder() :
-
14862  m_UseMutex(true),
-
14863  m_Flags(0),
-
14864  m_File(VMA_NULL),
-
14865  m_RecordingStartTime(std::chrono::high_resolution_clock::now())
-
14866 {
-
14867 }
-
14868 
-
14869 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
-
14870 {
-
14871  m_UseMutex = useMutex;
-
14872  m_Flags = settings.flags;
+
14849  pBlockVectorCtx->GetBlockVector()->CommitDefragmentations(
+
14850  pBlockVectorCtx, m_pStats);
+
14851 
+
14852  if(pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted)
+
14853  res = VK_NOT_READY;
+
14854  }
+
14855  }
+
14856 
+
14857  // Process custom pools.
+
14858  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
+
14859  customCtxIndex < customCtxCount;
+
14860  ++customCtxIndex)
+
14861  {
+
14862  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
+
14863  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
+
14864 
+
14865  if(!pBlockVectorCtx->hasDefragmentationPlan)
+
14866  {
+
14867  res = VK_NOT_READY;
+
14868  continue;
+
14869  }
+
14870 
+
14871  pBlockVectorCtx->GetBlockVector()->CommitDefragmentations(
+
14872  pBlockVectorCtx, m_pStats);
14873 
-
14874 #if defined(_WIN32)
-
14875  // Open file for writing.
-
14876  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
+
14874  if(pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted)
+
14875  res = VK_NOT_READY;
+
14876  }
14877 
-
14878  if(err != 0)
-
14879  {
-
14880  return VK_ERROR_INITIALIZATION_FAILED;
-
14881  }
-
14882 #else
-
14883  // Open file for writing.
-
14884  m_File = fopen(settings.pFilePath, "wb");
+
14878  return res;
+
14879 }
+
14880 
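
Because DefragmentPassEnd keeps returning VK_NOT_READY until every planned move has been committed, the natural client-side pattern is a loop. A sketch under the same assumptions as the snippets above:

    for(;;)
    {
        VmaDefragmentationPassMoveInfo moves[16];
        VmaDefragmentationPassInfo passInfo = {};
        passInfo.moveCount = 16;
        passInfo.pMoves = moves;
        vmaBeginDefragmentationPass(allocator, ctx, &passInfo);

        // ... copy the data of each allocation listed in moves[0..passInfo.moveCount) ...

        if(vmaEndDefragmentationPass(allocator, ctx) == VK_SUCCESS)
            break; // all planned moves committed
        // VK_NOT_READY: another pass is needed.
    }
    vmaDefragmentationEnd(allocator, ctx);
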
+
14882 // VmaRecorder
+
14883 
+
14884 #if VMA_RECORDING_ENABLED
14885 
-
14886  if(m_File == 0)
-
14887  {
-
14888  return VK_ERROR_INITIALIZATION_FAILED;
-
14889  }
-
14890 #endif
-
14891 
-
14892  // Write header.
-
14893  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
-
14894  fprintf(m_File, "%s\n", "1,8");
-
14895 
-
14896  return VK_SUCCESS;
-
14897 }
+
14886 VmaRecorder::VmaRecorder() :
+
14887  m_UseMutex(true),
+
14888  m_Flags(0),
+
14889  m_File(VMA_NULL),
+
14890  m_RecordingStartTime(std::chrono::high_resolution_clock::now())
+
14891 {
+
14892 }
+
14893 
+
14894 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
+
14895 {
+
14896  m_UseMutex = useMutex;
+
14897  m_Flags = settings.flags;
14898 
-
14899 VmaRecorder::~VmaRecorder()
-
14900 {
-
14901  if(m_File != VMA_NULL)
-
14902  {
-
14903  fclose(m_File);
-
14904  }
-
14905 }
-
14906 
-
14907 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
-
14908 {
-
14909  CallParams callParams;
-
14910  GetBasicParams(callParams);
-
14911 
-
14912  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
14913  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
-
14914  Flush();
-
14915 }
+
14899 #if defined(_WIN32)
+
14900  // Open file for writing.
+
14901  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
+
14902 
+
14903  if(err != 0)
+
14904  {
+
14905  return VK_ERROR_INITIALIZATION_FAILED;
+
14906  }
+
14907 #else
+
14908  // Open file for writing.
+
14909  m_File = fopen(settings.pFilePath, "wb");
+
14910 
+
14911  if(m_File == 0)
+
14912  {
+
14913  return VK_ERROR_INITIALIZATION_FAILED;
+
14914  }
+
14915 #endif
14916 
-
14917 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
-
14918 {
-
14919  CallParams callParams;
-
14920  GetBasicParams(callParams);
-
14921 
-
14922  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
14923  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
-
14924  Flush();
-
14925 }
-
14926 
-
14927 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
-
14928 {
-
14929  CallParams callParams;
-
14930  GetBasicParams(callParams);
+
14917  // Write header.
+
14918  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
+
14919  fprintf(m_File, "%s\n", "1,8");
+
14920 
+
14921  return VK_SUCCESS;
+
14922 }
+
14923 
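
Init receives its settings through VmaAllocatorCreateInfo::pRecordSettings, which is only honored when the header is compiled with VMA_RECORDING_ENABLED defined to 1. A sketch of enabling the recorder (the output path is an arbitrary assumption):

    VmaRecordSettings recordSettings = {};
    recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT; // fflush() after each call; see Flush() below
    recordSettings.pFilePath = "vma_calls.csv";             // hypothetical output file

    VmaAllocatorCreateInfo allocatorInfo = {};
    // ... instance, physicalDevice, device, etc. ...
    allocatorInfo.pRecordSettings = &recordSettings;
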
+
14924 VmaRecorder::~VmaRecorder()
+
14925 {
+
14926  if(m_File != VMA_NULL)
+
14927  {
+
14928  fclose(m_File);
+
14929  }
+
14930 }
14931 
-
14932  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
14933  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
-
14934  createInfo.memoryTypeIndex,
-
14935  createInfo.flags,
-
14936  createInfo.blockSize,
-
14937  (uint64_t)createInfo.minBlockCount,
-
14938  (uint64_t)createInfo.maxBlockCount,
-
14939  createInfo.frameInUseCount,
-
14940  pool);
-
14941  Flush();
-
14942 }
-
14943 
-
14944 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
-
14945 {
-
14946  CallParams callParams;
-
14947  GetBasicParams(callParams);
-
14948 
-
14949  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
14950  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
-
14951  pool);
-
14952  Flush();
-
14953 }
-
14954 
-
14955 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
-
14956  const VkMemoryRequirements& vkMemReq,
-
14957  const VmaAllocationCreateInfo& createInfo,
-
14958  VmaAllocation allocation)
-
14959 {
-
14960  CallParams callParams;
-
14961  GetBasicParams(callParams);
-
14962 
-
14963  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
14964  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
-
14965  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
-
14966  vkMemReq.size,
-
14967  vkMemReq.alignment,
-
14968  vkMemReq.memoryTypeBits,
-
14969  createInfo.flags,
-
14970  createInfo.usage,
-
14971  createInfo.requiredFlags,
-
14972  createInfo.preferredFlags,
-
14973  createInfo.memoryTypeBits,
-
14974  createInfo.pool,
-
14975  allocation,
-
14976  userDataStr.GetString());
+
14932 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
+
14933 {
+
14934  CallParams callParams;
+
14935  GetBasicParams(callParams);
+
14936 
+
14937  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
14938  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
+
14939  Flush();
+
14940 }
+
14941 
+
14942 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
+
14943 {
+
14944  CallParams callParams;
+
14945  GetBasicParams(callParams);
+
14946 
+
14947  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
14948  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
+
14949  Flush();
+
14950 }
+
14951 
+
14952 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
+
14953 {
+
14954  CallParams callParams;
+
14955  GetBasicParams(callParams);
+
14956 
+
14957  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
14958  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
+
14959  createInfo.memoryTypeIndex,
+
14960  createInfo.flags,
+
14961  createInfo.blockSize,
+
14962  (uint64_t)createInfo.minBlockCount,
+
14963  (uint64_t)createInfo.maxBlockCount,
+
14964  createInfo.frameInUseCount,
+
14965  pool);
+
14966  Flush();
+
14967 }
+
14968 
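
For reference, each Record* function emits one CSV line matching its format string; the call above would produce a line shaped like the following (all values made up for illustration):

    5324,0.061,0,vmaCreatePool,2,0,0,0,0,0,000001F0ABCD1230
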
+
14969 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
+
14970 {
+
14971  CallParams callParams;
+
14972  GetBasicParams(callParams);
+
14973 
+
14974  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
14975  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
+
14976  pool);
14977  Flush();
14978 }
14979 
-
14980 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
-
14981  const VkMemoryRequirements& vkMemReq,
-
14982  const VmaAllocationCreateInfo& createInfo,
-
14983  uint64_t allocationCount,
-
14984  const VmaAllocation* pAllocations)
-
14985 {
-
14986  CallParams callParams;
-
14987  GetBasicParams(callParams);
-
14988 
-
14989  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
14990  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
-
14991  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
-
14992  vkMemReq.size,
-
14993  vkMemReq.alignment,
-
14994  vkMemReq.memoryTypeBits,
-
14995  createInfo.flags,
-
14996  createInfo.usage,
-
14997  createInfo.requiredFlags,
-
14998  createInfo.preferredFlags,
-
14999  createInfo.memoryTypeBits,
-
15000  createInfo.pool);
-
15001  PrintPointerList(allocationCount, pAllocations);
-
15002  fprintf(m_File, ",%s\n", userDataStr.GetString());
-
15003  Flush();
-
15004 }
-
15005 
-
15006 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
-
15007  const VkMemoryRequirements& vkMemReq,
-
15008  bool requiresDedicatedAllocation,
-
15009  bool prefersDedicatedAllocation,
-
15010  const VmaAllocationCreateInfo& createInfo,
-
15011  VmaAllocation allocation)
-
15012 {
-
15013  CallParams callParams;
-
15014  GetBasicParams(callParams);
-
15015 
-
15016  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
15017  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
-
15018  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
-
15019  vkMemReq.size,
-
15020  vkMemReq.alignment,
-
15021  vkMemReq.memoryTypeBits,
-
15022  requiresDedicatedAllocation ? 1 : 0,
-
15023  prefersDedicatedAllocation ? 1 : 0,
-
15024  createInfo.flags,
-
15025  createInfo.usage,
-
15026  createInfo.requiredFlags,
-
15027  createInfo.preferredFlags,
-
15028  createInfo.memoryTypeBits,
-
15029  createInfo.pool,
-
15030  allocation,
-
15031  userDataStr.GetString());
-
15032  Flush();
-
15033 }
-
15034 
-
15035 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
-
15036  const VkMemoryRequirements& vkMemReq,
-
15037  bool requiresDedicatedAllocation,
-
15038  bool prefersDedicatedAllocation,
-
15039  const VmaAllocationCreateInfo& createInfo,
-
15040  VmaAllocation allocation)
-
15041 {
-
15042  CallParams callParams;
-
15043  GetBasicParams(callParams);
-
15044 
-
15045  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
15046  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
-
15047  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
-
15048  vkMemReq.size,
-
15049  vkMemReq.alignment,
-
15050  vkMemReq.memoryTypeBits,
-
15051  requiresDedicatedAllocation ? 1 : 0,
-
15052  prefersDedicatedAllocation ? 1 : 0,
-
15053  createInfo.flags,
-
15054  createInfo.usage,
-
15055  createInfo.requiredFlags,
-
15056  createInfo.preferredFlags,
-
15057  createInfo.memoryTypeBits,
-
15058  createInfo.pool,
-
15059  allocation,
-
15060  userDataStr.GetString());
-
15061  Flush();
-
15062 }
-
15063 
-
15064 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
+
14980 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
+
14981  const VkMemoryRequirements& vkMemReq,
+
14982  const VmaAllocationCreateInfo& createInfo,
+
14983  VmaAllocation allocation)
+
14984 {
+
14985  CallParams callParams;
+
14986  GetBasicParams(callParams);
+
14987 
+
14988  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
14989  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
+
14990  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
+
14991  vkMemReq.size,
+
14992  vkMemReq.alignment,
+
14993  vkMemReq.memoryTypeBits,
+
14994  createInfo.flags,
+
14995  createInfo.usage,
+
14996  createInfo.requiredFlags,
+
14997  createInfo.preferredFlags,
+
14998  createInfo.memoryTypeBits,
+
14999  createInfo.pool,
+
15000  allocation,
+
15001  userDataStr.GetString());
+
15002  Flush();
+
15003 }
+
15004 
+
15005 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
+
15006  const VkMemoryRequirements& vkMemReq,
+
15007  const VmaAllocationCreateInfo& createInfo,
+
15008  uint64_t allocationCount,
+
15009  const VmaAllocation* pAllocations)
+
15010 {
+
15011  CallParams callParams;
+
15012  GetBasicParams(callParams);
+
15013 
+
15014  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
15015  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
+
15016  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
+
15017  vkMemReq.size,
+
15018  vkMemReq.alignment,
+
15019  vkMemReq.memoryTypeBits,
+
15020  createInfo.flags,
+
15021  createInfo.usage,
+
15022  createInfo.requiredFlags,
+
15023  createInfo.preferredFlags,
+
15024  createInfo.memoryTypeBits,
+
15025  createInfo.pool);
+
15026  PrintPointerList(allocationCount, pAllocations);
+
15027  fprintf(m_File, ",%s\n", userDataStr.GetString());
+
15028  Flush();
+
15029 }
+
15030 
+
15031 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
+
15032  const VkMemoryRequirements& vkMemReq,
+
15033  bool requiresDedicatedAllocation,
+
15034  bool prefersDedicatedAllocation,
+
15035  const VmaAllocationCreateInfo& createInfo,
+
15036  VmaAllocation allocation)
+
15037 {
+
15038  CallParams callParams;
+
15039  GetBasicParams(callParams);
+
15040 
+
15041  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
15042  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
+
15043  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
+
15044  vkMemReq.size,
+
15045  vkMemReq.alignment,
+
15046  vkMemReq.memoryTypeBits,
+
15047  requiresDedicatedAllocation ? 1 : 0,
+
15048  prefersDedicatedAllocation ? 1 : 0,
+
15049  createInfo.flags,
+
15050  createInfo.usage,
+
15051  createInfo.requiredFlags,
+
15052  createInfo.preferredFlags,
+
15053  createInfo.memoryTypeBits,
+
15054  createInfo.pool,
+
15055  allocation,
+
15056  userDataStr.GetString());
+
15057  Flush();
+
15058 }
+
15059 
+
15060 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
+
15061  const VkMemoryRequirements& vkMemReq,
+
15062  bool requiresDedicatedAllocation,
+
15063  bool prefersDedicatedAllocation,
+
15064  const VmaAllocationCreateInfo& createInfo,
15065  VmaAllocation allocation)
15066 {
15067  CallParams callParams;
15068  GetBasicParams(callParams);
15069 
15070  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
15071  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
-
15072  allocation);
-
15073  Flush();
-
15074 }
-
15075 
-
15076 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
-
15077  uint64_t allocationCount,
-
15078  const VmaAllocation* pAllocations)
-
15079 {
-
15080  CallParams callParams;
-
15081  GetBasicParams(callParams);
-
15082 
-
15083  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
15084  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
-
15085  PrintPointerList(allocationCount, pAllocations);
-
15086  fprintf(m_File, "\n");
-
15087  Flush();
-
15088 }
-
15089 
-
15090 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
-
15091  VmaAllocation allocation,
-
15092  const void* pUserData)
-
15093 {
-
15094  CallParams callParams;
-
15095  GetBasicParams(callParams);
-
15096 
-
15097  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
15098  UserDataString userDataStr(
-
15099  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
-
15100  pUserData);
-
15101  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
-
15102  allocation,
-
15103  userDataStr.GetString());
-
15104  Flush();
-
15105 }
-
15106 
-
15107 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
-
15108  VmaAllocation allocation)
-
15109 {
-
15110  CallParams callParams;
-
15111  GetBasicParams(callParams);
-
15112 
-
15113  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
15114  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
-
15115  allocation);
-
15116  Flush();
-
15117 }
-
15118 
-
15119 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
-
15120  VmaAllocation allocation)
-
15121 {
-
15122  CallParams callParams;
-
15123  GetBasicParams(callParams);
-
15124 
-
15125  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
15126  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
-
15127  allocation);
-
15128  Flush();
-
15129 }
-
15130 
-
15131 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
-
15132  VmaAllocation allocation)
-
15133 {
-
15134  CallParams callParams;
-
15135  GetBasicParams(callParams);
-
15136 
-
15137  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
15138  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
-
15139  allocation);
-
15140  Flush();
-
15141 }
-
15142 
-
15143 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
-
15144  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
-
15145 {
-
15146  CallParams callParams;
-
15147  GetBasicParams(callParams);
-
15148 
-
15149  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
15150  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
-
15151  allocation,
-
15152  offset,
-
15153  size);
-
15154  Flush();
-
15155 }
-
15156 
-
15157 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
-
15158  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
-
15159 {
-
15160  CallParams callParams;
-
15161  GetBasicParams(callParams);
-
15162 
-
15163  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
15164  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
-
15165  allocation,
-
15166  offset,
-
15167  size);
-
15168  Flush();
-
15169 }
-
15170 
-
15171 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
-
15172  const VkBufferCreateInfo& bufCreateInfo,
-
15173  const VmaAllocationCreateInfo& allocCreateInfo,
-
15174  VmaAllocation allocation)
-
15175 {
-
15176  CallParams callParams;
-
15177  GetBasicParams(callParams);
-
15178 
-
15179  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
15180  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
-
15181  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
-
15182  bufCreateInfo.flags,
-
15183  bufCreateInfo.size,
-
15184  bufCreateInfo.usage,
-
15185  bufCreateInfo.sharingMode,
-
15186  allocCreateInfo.flags,
-
15187  allocCreateInfo.usage,
-
15188  allocCreateInfo.requiredFlags,
-
15189  allocCreateInfo.preferredFlags,
-
15190  allocCreateInfo.memoryTypeBits,
-
15191  allocCreateInfo.pool,
-
15192  allocation,
-
15193  userDataStr.GetString());
-
15194  Flush();
-
15195 }
-
15196 
-
15197 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
-
15198  const VkImageCreateInfo& imageCreateInfo,
-
15199  const VmaAllocationCreateInfo& allocCreateInfo,
-
15200  VmaAllocation allocation)
-
15201 {
-
15202  CallParams callParams;
-
15203  GetBasicParams(callParams);
-
15204 
-
15205  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
15206  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
-
15207  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
-
15208  imageCreateInfo.flags,
-
15209  imageCreateInfo.imageType,
-
15210  imageCreateInfo.format,
-
15211  imageCreateInfo.extent.width,
-
15212  imageCreateInfo.extent.height,
-
15213  imageCreateInfo.extent.depth,
-
15214  imageCreateInfo.mipLevels,
-
15215  imageCreateInfo.arrayLayers,
-
15216  imageCreateInfo.samples,
-
15217  imageCreateInfo.tiling,
-
15218  imageCreateInfo.usage,
-
15219  imageCreateInfo.sharingMode,
-
15220  imageCreateInfo.initialLayout,
-
15221  allocCreateInfo.flags,
-
15222  allocCreateInfo.usage,
-
15223  allocCreateInfo.requiredFlags,
-
15224  allocCreateInfo.preferredFlags,
-
15225  allocCreateInfo.memoryTypeBits,
-
15226  allocCreateInfo.pool,
-
15227  allocation,
-
15228  userDataStr.GetString());
-
15229  Flush();
-
15230 }
-
15231 
-
15232 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
-
15233  VmaAllocation allocation)
-
15234 {
-
15235  CallParams callParams;
-
15236  GetBasicParams(callParams);
-
15237 
-
15238  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
15239  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
-
15240  allocation);
-
15241  Flush();
-
15242 }
-
15243 
-
15244 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
-
15245  VmaAllocation allocation)
-
15246 {
-
15247  CallParams callParams;
-
15248  GetBasicParams(callParams);
-
15249 
-
15250  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
15251  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
-
15252  allocation);
-
15253  Flush();
-
15254 }
-
15255 
-
15256 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
-
15257  VmaAllocation allocation)
-
15258 {
-
15259  CallParams callParams;
-
15260  GetBasicParams(callParams);
-
15261 
-
15262  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
15263  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
-
15264  allocation);
-
15265  Flush();
-
15266 }
-
15267 
-
15268 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
-
15269  VmaAllocation allocation)
-
15270 {
-
15271  CallParams callParams;
-
15272  GetBasicParams(callParams);
-
15273 
-
15274  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
15275  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
-
15276  allocation);
-
15277  Flush();
-
15278 }
-
15279 
-
15280 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
-
15281  VmaPool pool)
-
15282 {
-
15283  CallParams callParams;
-
15284  GetBasicParams(callParams);
-
15285 
-
15286  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
15287  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
-
15288  pool);
-
15289  Flush();
-
15290 }
-
15291 
-
15292 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
-
15293  const VmaDefragmentationInfo2& info,
-
15294  VmaDefragmentationContext ctx)
+
15071  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
+
15072  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
+
15073  vkMemReq.size,
+
15074  vkMemReq.alignment,
+
15075  vkMemReq.memoryTypeBits,
+
15076  requiresDedicatedAllocation ? 1 : 0,
+
15077  prefersDedicatedAllocation ? 1 : 0,
+
15078  createInfo.flags,
+
15079  createInfo.usage,
+
15080  createInfo.requiredFlags,
+
15081  createInfo.preferredFlags,
+
15082  createInfo.memoryTypeBits,
+
15083  createInfo.pool,
+
15084  allocation,
+
15085  userDataStr.GetString());
+
15086  Flush();
+
15087 }
+
15088 
+
15089 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
+
15090  VmaAllocation allocation)
+
15091 {
+
15092  CallParams callParams;
+
15093  GetBasicParams(callParams);
+
15094 
+
15095  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
15096  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
+
15097  allocation);
+
15098  Flush();
+
15099 }
+
15100 
+
15101 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
+
15102  uint64_t allocationCount,
+
15103  const VmaAllocation* pAllocations)
+
15104 {
+
15105  CallParams callParams;
+
15106  GetBasicParams(callParams);
+
15107 
+
15108  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
15109  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
+
15110  PrintPointerList(allocationCount, pAllocations);
+
15111  fprintf(m_File, "\n");
+
15112  Flush();
+
15113 }
+
15114 
+
15115 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
+
15116  VmaAllocation allocation,
+
15117  const void* pUserData)
+
15118 {
+
15119  CallParams callParams;
+
15120  GetBasicParams(callParams);
+
15121 
+
15122  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
15123  UserDataString userDataStr(
+
15124  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
+
15125  pUserData);
+
15126  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
+
15127  allocation,
+
15128  userDataStr.GetString());
+
15129  Flush();
+
15130 }
+
15131 
+
15132 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
+
15133  VmaAllocation allocation)
+
15134 {
+
15135  CallParams callParams;
+
15136  GetBasicParams(callParams);
+
15137 
+
15138  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
15139  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
+
15140  allocation);
+
15141  Flush();
+
15142 }
+
15143 
+
15144 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
+
15145  VmaAllocation allocation)
+
15146 {
+
15147  CallParams callParams;
+
15148  GetBasicParams(callParams);
+
15149 
+
15150  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
15151  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
+
15152  allocation);
+
15153  Flush();
+
15154 }
+
15155 
+
15156 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
+
15157  VmaAllocation allocation)
+
15158 {
+
15159  CallParams callParams;
+
15160  GetBasicParams(callParams);
+
15161 
+
15162  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
15163  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
+
15164  allocation);
+
15165  Flush();
+
15166 }
+
15167 
+
15168 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
+
15169  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
+
15170 {
+
15171  CallParams callParams;
+
15172  GetBasicParams(callParams);
+
15173 
+
15174  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
15175  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
+
15176  allocation,
+
15177  offset,
+
15178  size);
+
15179  Flush();
+
15180 }
+
15181 
+
15182 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
+
15183  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
+
15184 {
+
15185  CallParams callParams;
+
15186  GetBasicParams(callParams);
+
15187 
+
15188  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
15189  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
+
15190  allocation,
+
15191  offset,
+
15192  size);
+
15193  Flush();
+
15194 }
+
15195 
+
15196 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
+
15197  const VkBufferCreateInfo& bufCreateInfo,
+
15198  const VmaAllocationCreateInfo& allocCreateInfo,
+
15199  VmaAllocation allocation)
+
15200 {
+
15201  CallParams callParams;
+
15202  GetBasicParams(callParams);
+
15203 
+
15204  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
15205  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
+
15206  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
+
15207  bufCreateInfo.flags,
+
15208  bufCreateInfo.size,
+
15209  bufCreateInfo.usage,
+
15210  bufCreateInfo.sharingMode,
+
15211  allocCreateInfo.flags,
+
15212  allocCreateInfo.usage,
+
15213  allocCreateInfo.requiredFlags,
+
15214  allocCreateInfo.preferredFlags,
+
15215  allocCreateInfo.memoryTypeBits,
+
15216  allocCreateInfo.pool,
+
15217  allocation,
+
15218  userDataStr.GetString());
+
15219  Flush();
+
15220 }
+
15221 
+
15222 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
+
15223  const VkImageCreateInfo& imageCreateInfo,
+
15224  const VmaAllocationCreateInfo& allocCreateInfo,
+
15225  VmaAllocation allocation)
+
15226 {
+
15227  CallParams callParams;
+
15228  GetBasicParams(callParams);
+
15229 
+
15230  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
15231  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
+
15232  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
+
15233  imageCreateInfo.flags,
+
15234  imageCreateInfo.imageType,
+
15235  imageCreateInfo.format,
+
15236  imageCreateInfo.extent.width,
+
15237  imageCreateInfo.extent.height,
+
15238  imageCreateInfo.extent.depth,
+
15239  imageCreateInfo.mipLevels,
+
15240  imageCreateInfo.arrayLayers,
+
15241  imageCreateInfo.samples,
+
15242  imageCreateInfo.tiling,
+
15243  imageCreateInfo.usage,
+
15244  imageCreateInfo.sharingMode,
+
15245  imageCreateInfo.initialLayout,
+
15246  allocCreateInfo.flags,
+
15247  allocCreateInfo.usage,
+
15248  allocCreateInfo.requiredFlags,
+
15249  allocCreateInfo.preferredFlags,
+
15250  allocCreateInfo.memoryTypeBits,
+
15251  allocCreateInfo.pool,
+
15252  allocation,
+
15253  userDataStr.GetString());
+
15254  Flush();
+
15255 }
+
15256 
+
15257 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
+
15258  VmaAllocation allocation)
+
15259 {
+
15260  CallParams callParams;
+
15261  GetBasicParams(callParams);
+
15262 
+
15263  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
15264  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
+
15265  allocation);
+
15266  Flush();
+
15267 }
+
15268 
+
15269 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
+
15270  VmaAllocation allocation)
+
15271 {
+
15272  CallParams callParams;
+
15273  GetBasicParams(callParams);
+
15274 
+
15275  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
15276  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
+
15277  allocation);
+
15278  Flush();
+
15279 }
+
15280 
+
15281 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
+
15282  VmaAllocation allocation)
+
15283 {
+
15284  CallParams callParams;
+
15285  GetBasicParams(callParams);
+
15286 
+
15287  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
15288  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
+
15289  allocation);
+
15290  Flush();
+
15291 }
+
15292 
+
15293 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
+
15294  VmaAllocation allocation)
15295 {
15296  CallParams callParams;
15297  GetBasicParams(callParams);
15298 
15299  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
15300  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
-
15301  info.flags);
-
15302  PrintPointerList(info.allocationCount, info.pAllocations);
-
15303  fprintf(m_File, ",");
-
15304  PrintPointerList(info.poolCount, info.pPools);
-
15305  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
-
15306  info.maxCpuBytesToMove,
-
15307  info.maxCpuAllocationsToMove,
-
15308  info.maxGpuBytesToMove,
-
15309  info.maxGpuAllocationsToMove,
-
15310  info.commandBuffer,
-
15311  ctx);
-
15312  Flush();
-
15313 }
-
15314 
-
15315 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
-
15316  VmaDefragmentationContext ctx)
-
15317 {
-
15318  CallParams callParams;
-
15319  GetBasicParams(callParams);
-
15320 
-
15321  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
15322  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
-
15323  ctx);
-
15324  Flush();
-
15325 }
-
15326 
-
15327 void VmaRecorder::RecordSetPoolName(uint32_t frameIndex,
-
15328  VmaPool pool,
-
15329  const char* name)
-
15330 {
-
15331  CallParams callParams;
-
15332  GetBasicParams(callParams);
-
15333 
-
15334  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
15335  fprintf(m_File, "%u,%.3f,%u,vmaSetPoolName,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
-
15336  pool, name != VMA_NULL ? name : "");
+
15300  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
+
15301  allocation);
+
15302  Flush();
+
15303 }
+
15304 
+
15305 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
+
15306  VmaPool pool)
+
15307 {
+
15308  CallParams callParams;
+
15309  GetBasicParams(callParams);
+
15310 
+
15311  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
15312  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
+
15313  pool);
+
15314  Flush();
+
15315 }
+
15316 
+
15317 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
+
15318  const VmaDefragmentationInfo2& info,
+
15319  VmaDefragmentationContext ctx)
+
15320 {
+
15321  CallParams callParams;
+
15322  GetBasicParams(callParams);
+
15323 
+
15324  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
15325  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
+
15326  info.flags);
+
15327  PrintPointerList(info.allocationCount, info.pAllocations);
+
15328  fprintf(m_File, ",");
+
15329  PrintPointerList(info.poolCount, info.pPools);
+
15330  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
+
15331  info.maxCpuBytesToMove,
+
15332  info.maxCpuAllocationsToMove,
+
15333  info.maxGpuBytesToMove,
+
15334  info.maxGpuAllocationsToMove,
+
15335  info.commandBuffer,
+
15336  ctx);
15337  Flush();
15338 }
15339 
-
15340 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
-
15341 {
-
15342  if(pUserData != VMA_NULL)
-
15343  {
-
15344  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
-
15345  {
-
15346  m_Str = (const char*)pUserData;
-
15347  }
-
15348  else
-
15349  {
-
15350  // If VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is not specified, convert the pointer's value to a string and store it.
-
15351  snprintf(m_PtrStr, 17, "%p", pUserData);
-
15352  m_Str = m_PtrStr;
-
15353  }
-
15354  }
-
15355  else
-
15356  {
-
15357  m_Str = "";
-
15358  }
-
15359 }
-
15360 
-
15361 void VmaRecorder::WriteConfiguration(
-
15362  const VkPhysicalDeviceProperties& devProps,
-
15363  const VkPhysicalDeviceMemoryProperties& memProps,
-
15364  uint32_t vulkanApiVersion,
-
15365  bool dedicatedAllocationExtensionEnabled,
-
15366  bool bindMemory2ExtensionEnabled,
-
15367  bool memoryBudgetExtensionEnabled,
-
15368  bool deviceCoherentMemoryExtensionEnabled)
-
15369 {
-
15370  fprintf(m_File, "Config,Begin\n");
-
15371 
-
15372  fprintf(m_File, "VulkanApiVersion,%u,%u\n", VK_VERSION_MAJOR(vulkanApiVersion), VK_VERSION_MINOR(vulkanApiVersion));
-
15373 
-
15374  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
-
15375  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
-
15376  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
-
15377  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
-
15378  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
-
15379  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
-
15380 
-
15381  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
-
15382  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
-
15383  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
-
15384 
-
15385  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
-
15386  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
-
15387  {
-
15388  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
-
15389  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
-
15390  }
-
15391  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
-
15392  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
-
15393  {
-
15394  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
-
15395  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
-
15396  }
-
15397 
-
15398  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
-
15399  fprintf(m_File, "Extension,VK_KHR_bind_memory2,%u\n", bindMemory2ExtensionEnabled ? 1 : 0);
-
15400  fprintf(m_File, "Extension,VK_EXT_memory_budget,%u\n", memoryBudgetExtensionEnabled ? 1 : 0);
-
15401  fprintf(m_File, "Extension,VK_AMD_device_coherent_memory,%u\n", deviceCoherentMemoryExtensionEnabled ? 1 : 0);
-
15402 
-
15403  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
-
15404  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
-
15405  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
-
15406  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
-
15407  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
-
15408  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
-
15409  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
-
15410  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
-
15411  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
-
15412 
-
15413  fprintf(m_File, "Config,End\n");
-
15414 }
-
15415 
-
15416 void VmaRecorder::GetBasicParams(CallParams& outParams)
-
15417 {
-
15418  #if defined(_WIN32)
-
15419  outParams.threadId = GetCurrentThreadId();
-
15420  #else
-
15421  // Use C++11 features to get thread id and convert it to uint32_t.
-
15422  // There is room for optimization since sstream is quite slow.
-
15423  // Is there a better way to convert std::this_thread::get_id() to uint32_t?
-
15424  std::thread::id thread_id = std::this_thread::get_id();
-
15425  stringstream thread_id_to_string_converter;
-
15426  thread_id_to_string_converter << thread_id;
-
15427  string thread_id_as_string = thread_id_to_string_converter.str();
-
15428  outParams.threadId = static_cast<uint32_t>(std::stoi(thread_id_as_string.c_str()));
-
15429  #endif
-
15430 
-
15431  auto current_time = std::chrono::high_resolution_clock::now();
-
15432 
-
15433  outParams.time = std::chrono::duration<double, std::chrono::seconds::period>(current_time - m_RecordingStartTime).count();
-
15434 }
-
15435 
-
15436 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
-
15437 {
-
15438  if(count)
-
15439  {
-
15440  fprintf(m_File, "%p", pItems[0]);
-
15441  for(uint64_t i = 1; i < count; ++i)
-
15442  {
-
15443  fprintf(m_File, " %p", pItems[i]);
-
15444  }
-
15445  }
-
15446 }
-
15447 
-
15448 void VmaRecorder::Flush()
-
15449 {
-
15450  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
-
15451  {
-
15452  fflush(m_File);
-
15453  }
-
15454 }
-
15455 
-
15456 #endif // #if VMA_RECORDING_ENABLED
+
15340 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
+
15341  VmaDefragmentationContext ctx)
+
15342 {
+
15343  CallParams callParams;
+
15344  GetBasicParams(callParams);
+
15345 
+
15346  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
15347  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
+
15348  ctx);
+
15349  Flush();
+
15350 }
+
15351 
+
15352 void VmaRecorder::RecordSetPoolName(uint32_t frameIndex,
+
15353  VmaPool pool,
+
15354  const char* name)
+
15355 {
+
15356  CallParams callParams;
+
15357  GetBasicParams(callParams);
+
15358 
+
15359  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
15360  fprintf(m_File, "%u,%.3f,%u,vmaSetPoolName,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
+
15361  pool, name != VMA_NULL ? name : "");
+
15362  Flush();
+
15363 }
+
15364 
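
The corresponding public entry point is vmaSetPoolName; a usage sketch, with "allocator" and "pool" assumed to exist:

    vmaSetPoolName(allocator, pool, "Texture pool"); // recorded as shown above
    vmaSetPoolName(allocator, pool, nullptr);        // clears the name; logged as an empty string
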
+
15365 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
+
15366 {
+
15367  if(pUserData != VMA_NULL)
+
15368  {
+
15369  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
+
15370  {
+
15371  m_Str = (const char*)pUserData;
+
15372  }
+
15373  else
+
15374  {
+
15375  // If VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is not specified, convert the pointer's value to a string and store it.
+
15376  snprintf(m_PtrStr, 17, "%p", pUserData);
+
15377  m_Str = m_PtrStr;
+
15378  }
+
15379  }
+
15380  else
+
15381  {
+
15382  m_Str = "";
+
15383  }
+
15384 }
+
15385 
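
In practice this means pUserData is only recorded as text when the copy-string flag was set at allocation time; otherwise only the pointer value appears in the log. An illustrative allocation setup:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT;
    allocCreateInfo.pUserData = (void*)"Player texture"; // logged verbatim
    // Without the flag, the same field would be logged as a pointer such as "000001F0ABCD1230".
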
+
15386 void VmaRecorder::WriteConfiguration(
+
15387  const VkPhysicalDeviceProperties& devProps,
+
15388  const VkPhysicalDeviceMemoryProperties& memProps,
+
15389  uint32_t vulkanApiVersion,
+
15390  bool dedicatedAllocationExtensionEnabled,
+
15391  bool bindMemory2ExtensionEnabled,
+
15392  bool memoryBudgetExtensionEnabled,
+
15393  bool deviceCoherentMemoryExtensionEnabled)
+
15394 {
+
15395  fprintf(m_File, "Config,Begin\n");
+
15396 
+
15397  fprintf(m_File, "VulkanApiVersion,%u,%u\n", VK_VERSION_MAJOR(vulkanApiVersion), VK_VERSION_MINOR(vulkanApiVersion));
+
15398 
+
15399  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
+
15400  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
+
15401  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
+
15402  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
+
15403  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
+
15404  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
+
15405 
+
15406  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
+
15407  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
+
15408  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
+
15409 
+
15410  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
+
15411  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
+
15412  {
+
15413  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
+
15414  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
+
15415  }
+
15416  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
+
15417  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
+
15418  {
+
15419  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
+
15420  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
+
15421  }
+
15422 
+
15423  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
+
15424  fprintf(m_File, "Extension,VK_KHR_bind_memory2,%u\n", bindMemory2ExtensionEnabled ? 1 : 0);
+
15425  fprintf(m_File, "Extension,VK_EXT_memory_budget,%u\n", memoryBudgetExtensionEnabled ? 1 : 0);
+
15426  fprintf(m_File, "Extension,VK_AMD_device_coherent_memory,%u\n", deviceCoherentMemoryExtensionEnabled ? 1 : 0);
+
15427 
+
15428  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
+
15429  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
+
15430  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
+
15431  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
+
15432  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
+
15433  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
+
15434  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
+
15435  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
+
15436  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
+
15437 
+
15438  fprintf(m_File, "Config,End\n");
+
15439 }
+
15440 
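WriteConfiguration dumps the device and build configuration as comma-separated lines between Config,Begin and Config,End. The VulkanApiVersion entry relies on Vulkan's standard version packing (major in bits 31..22, minor in bits 21..12, patch in bits 11..0); a minimal standalone check of that unpacking, illustrative only and not part of this patch:

    #include <cassert>
    #include <vulkan/vulkan.h>

    static void CheckVersionPacking()
    {
        // VK_MAKE_VERSION(major, minor, patch) == (major << 22) | (minor << 12) | patch
        const uint32_t v = VK_MAKE_VERSION(1, 2, 0);
        assert(VK_VERSION_MAJOR(v) == 1);
        assert(VK_VERSION_MINOR(v) == 2);
        assert(VK_VERSION_PATCH(v) == 0);
    }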
+
15441 void VmaRecorder::GetBasicParams(CallParams& outParams)
+
15442 {
+
15443  #if defined(_WIN32)
+
15444  outParams.threadId = GetCurrentThreadId();
+
15445  #else
+
15446  // Use C++11 features to get thread id and convert it to uint32_t.
+
15447  // There is room for optimization since sstream is quite slow.
+
15448  // Is there a better way to convert std::this_thread::get_id() to uint32_t?
+
15449  std::thread::id thread_id = std::this_thread::get_id();
+
15450  std::stringstream thread_id_to_string_converter;
+
15451  thread_id_to_string_converter << thread_id;
+
15452  std::string thread_id_as_string = thread_id_to_string_converter.str();
+
15453  outParams.threadId = static_cast<uint32_t>(std::stoi(thread_id_as_string.c_str()));
+
15454  #endif
+
15455 
+
15456  auto current_time = std::chrono::high_resolution_clock::now();
15457 
-
15459 // VmaAllocationObjectAllocator
+
15458  outParams.time = std::chrono::duration<double, std::chrono::seconds::period>(current_time - m_RecordingStartTime).count();
+
15459 }
15460 
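The comment above asks whether there is a faster way to convert std::this_thread::get_id() to uint32_t. One option, sketched here under the assumption that a stable per-thread value (rather than the exact numeric id that std::stoi recovers) is sufficient for the recording, is to hash the id directly; ThreadIdToU32 is a hypothetical helper, not part of VMA:

    #include <cstdint>
    #include <functional>
    #include <thread>

    // Hash the thread id instead of formatting it through a stringstream.
    // Avoids the stream's allocation and parsing cost; yields a different
    // (but still stable per-thread) value than the stoi round-trip.
    static uint32_t ThreadIdToU32()
    {
        const std::size_t h = std::hash<std::thread::id>{}(std::this_thread::get_id());
        return static_cast<uint32_t>(h); // truncated to 32 bits
    }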
-
15461 VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
-
15462  m_Allocator(pAllocationCallbacks, 1024)
-
15463 {
-
15464 }
-
15465 
-
15466 template<typename... Types> VmaAllocation VmaAllocationObjectAllocator::Allocate(Types... args)
-
15467 {
-
15468  VmaMutexLock mutexLock(m_Mutex);
-
15469  return m_Allocator.Alloc<Types...>(std::forward<Types>(args)...);
-
15470 }
-
15471 
-
15472 void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
-
15473 {
-
15474  VmaMutexLock mutexLock(m_Mutex);
-
15475  m_Allocator.Free(hAlloc);
-
15476 }
-
15477 
-
15479 // VmaAllocator_T
+
15461 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
+
15462 {
+
15463  if(count)
+
15464  {
+
15465  fprintf(m_File, "%p", pItems[0]);
+
15466  for(uint64_t i = 1; i < count; ++i)
+
15467  {
+
15468  fprintf(m_File, " %p", pItems[i]);
+
15469  }
+
15470  }
+
15471 }
+
15472 
+
15473 void VmaRecorder::Flush()
+
15474 {
+
15475  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
+
15476  {
+
15477  fflush(m_File);
+
15478  }
+
15479 }
15480 
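Flush honors VMA_RECORD_FLUSH_AFTER_CALL_BIT, trading throughput for a recording that stays readable even after a crash. A minimal user-side sketch of enabling the recorder with that flag (assuming the build defines VMA_RECORDING_ENABLED to 1; the file path is a placeholder):

    VmaRecordSettings recordSettings = {};
    recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT; // fflush() after every recorded call
    recordSettings.pFilePath = "vma_recording.csv";         // placeholder path

    VmaAllocatorCreateInfo allocatorInfo = {};
    // ... fill instance, physicalDevice, device, etc. ...
    allocatorInfo.pRecordSettings = &recordSettings;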
-
15481 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
-
15482  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
-
15483  m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0),
-
15484  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
-
15485  m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0),
-
15486  m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0),
-
15487  m_UseAmdDeviceCoherentMemory((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT) != 0),
-
15488  m_UseKhrBufferDeviceAddress((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT) != 0),
-
15489  m_hDevice(pCreateInfo->device),
-
15490  m_hInstance(pCreateInfo->instance),
-
15491  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
-
15492  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
-
15493  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
-
15494  m_AllocationObjectAllocator(&m_AllocationCallbacks),
-
15495  m_HeapSizeLimitMask(0),
-
15496  m_PreferredLargeHeapBlockSize(0),
-
15497  m_PhysicalDevice(pCreateInfo->physicalDevice),
-
15498  m_CurrentFrameIndex(0),
-
15499  m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
-
15500  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
-
15501  m_NextPoolId(0),
-
15502  m_GlobalMemoryTypeBits(UINT32_MAX)
-
15503 #if VMA_RECORDING_ENABLED
15504  ,m_pRecorder(VMA_NULL)
-
15505 #endif
-
15506 {
-
15507  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-
15508  {
-
15509  m_UseKhrDedicatedAllocation = false;
-
15510  m_UseKhrBindMemory2 = false;
-
15511  }
-
15512 
-
15513  if(VMA_DEBUG_DETECT_CORRUPTION)
-
15514  {
-
15515  // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
-
15516  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
-
15517  }
-
15518 
-
15519  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device && pCreateInfo->instance);
-
15520 
-
15521  if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
-
15522  {
-
15523 #if !(VMA_DEDICATED_ALLOCATION)
-
15524  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
15525  {
-
15526  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
-
15527  }
-
15528 #endif
-
15529 #if !(VMA_BIND_MEMORY2)
-
15530  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0)
-
15531  {
-
15532  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros.");
-
15533  }
-
15534 #endif
-
15535  }
-
15536 #if !(VMA_MEMORY_BUDGET)
-
15537  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0)
-
15538  {
-
15539  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros.");
-
15540  }
-
15541 #endif
-
15542 #if !(VMA_BUFFER_DEVICE_ADDRESS)
-
15543  if(m_UseKhrBufferDeviceAddress)
-
15544  {
-
15545  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT is set but required extension or Vulkan 1.2 is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
-
15546  }
-
15547 #endif
-
15548 #if VMA_VULKAN_VERSION < 1002000
-
15549  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 2, 0))
-
15550  {
-
15551  VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_2 but required Vulkan version is disabled by preprocessor macros.");
-
15552  }
+
15481 #endif // #if VMA_RECORDING_ENABLED
+
15482 
+
15484 // VmaAllocationObjectAllocator
+
15485 
+
15486 VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
+
15487  m_Allocator(pAllocationCallbacks, 1024)
+
15488 {
+
15489 }
+
15490 
+
15491 template<typename... Types> VmaAllocation VmaAllocationObjectAllocator::Allocate(Types... args)
+
15492 {
+
15493  VmaMutexLock mutexLock(m_Mutex);
+
15494  return m_Allocator.Alloc<Types...>(std::forward<Types>(args)...);
+
15495 }
+
15496 
+
15497 void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
+
15498 {
+
15499  VmaMutexLock mutexLock(m_Mutex);
+
15500  m_Allocator.Free(hAlloc);
+
15501 }
+
15502 
+
15504 // VmaAllocator_T
+
15505 
+
15506 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
+
15507  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
+
15508  m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0),
+
15509  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
+
15510  m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0),
+
15511  m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0),
+
15512  m_UseAmdDeviceCoherentMemory((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT) != 0),
+
15513  m_UseKhrBufferDeviceAddress((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT) != 0),
+
15514  m_hDevice(pCreateInfo->device),
+
15515  m_hInstance(pCreateInfo->instance),
+
15516  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
+
15517  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
+
15518  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
+
15519  m_AllocationObjectAllocator(&m_AllocationCallbacks),
+
15520  m_HeapSizeLimitMask(0),
+
15521  m_PreferredLargeHeapBlockSize(0),
+
15522  m_PhysicalDevice(pCreateInfo->physicalDevice),
+
15523  m_CurrentFrameIndex(0),
+
15524  m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
+
15525  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
+
15526  m_NextPoolId(0),
+
15527  m_GlobalMemoryTypeBits(UINT32_MAX)
+
15528 #if VMA_RECORDING_ENABLED
15529  ,m_pRecorder(VMA_NULL)
+
15530 #endif
+
15531 {
+
15532  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
+
15533  {
+
15534  m_UseKhrDedicatedAllocation = false;
+
15535  m_UseKhrBindMemory2 = false;
+
15536  }
+
15537 
+
15538  if(VMA_DEBUG_DETECT_CORRUPTION)
+
15539  {
+
15540  // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
+
15541  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
+
15542  }
+
15543 
+
15544  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device && pCreateInfo->instance);
+
15545 
+
15546  if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
+
15547  {
+
15548 #if !(VMA_DEDICATED_ALLOCATION)
+
15549  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
15550  {
+
15551  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
+
15552  }
15553 #endif
-
15554 #if VMA_VULKAN_VERSION < 1001000
-
15555  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-
15556  {
-
15557  VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros.");
-
15558  }
+
15554 #if !(VMA_BIND_MEMORY2)
+
15555  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0)
+
15556  {
+
15557  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros.");
+
15558  }
15559 #endif
-
15560 
-
15561  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
-
15562  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
-
15563  memset(&m_MemProps, 0, sizeof(m_MemProps));
-
15564 
-
15565  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
-
15566  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
-
15567  memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions));
-
15568 
-
15569  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
-
15570  {
-
15571  m_DeviceMemoryCallbacks.pUserData = pCreateInfo->pDeviceMemoryCallbacks->pUserData;
-
15572  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
-
15573  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
-
15574  }
-
15575 
-
15576  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
-
15577 
-
15578  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
-
15579  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
-
15580 
-
15581  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
-
15582  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
-
15583  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
-
15584  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
+
15560  }
+
15561 #if !(VMA_MEMORY_BUDGET)
+
15562  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0)
+
15563  {
+
15564  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros.");
+
15565  }
+
15566 #endif
+
15567 #if !(VMA_BUFFER_DEVICE_ADDRESS)
+
15568  if(m_UseKhrBufferDeviceAddress)
+
15569  {
+
15570  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT is set but required extension or Vulkan 1.2 is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
+
15571  }
+
15572 #endif
+
15573 #if VMA_VULKAN_VERSION < 1002000
+
15574  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 2, 0))
+
15575  {
+
15576  VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_2 but required Vulkan version is disabled by preprocessor macros.");
+
15577  }
+
15578 #endif
+
15579 #if VMA_VULKAN_VERSION < 1001000
+
15580  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
+
15581  {
+
15582  VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros.");
+
15583  }
+
15584 #endif
15585 
-
15586  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
-
15587  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
-
15588 
-
15589  m_GlobalMemoryTypeBits = CalculateGlobalMemoryTypeBits();
-
15590 
-
15591  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
-
15592  {
-
15593  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
-
15594  {
-
15595  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
-
15596  if(limit != VK_WHOLE_SIZE)
-
15597  {
-
15598  m_HeapSizeLimitMask |= 1u << heapIndex;
-
15599  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
-
15600  {
-
15601  m_MemProps.memoryHeaps[heapIndex].size = limit;
-
15602  }
-
15603  }
-
15604  }
-
15605  }
-
15606 
-
15607  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
-
15608  {
-
15609  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
+
15586  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
+
15587  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
+
15588  memset(&m_MemProps, 0, sizeof(m_MemProps));
+
15589 
+
15590  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
+
15591  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
+
15592  memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions));
+
15593 
+
15594  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
+
15595  {
+
15596  m_DeviceMemoryCallbacks.pUserData = pCreateInfo->pDeviceMemoryCallbacks->pUserData;
+
15597  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
+
15598  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
+
15599  }
+
15600 
+
15601  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
+
15602 
+
15603  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
+
15604  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
+
15605 
+
15606  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
+
15607  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
+
15608  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
+
15609  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
15610 
-
15611  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
-
15612  this,
-
15613  VK_NULL_HANDLE, // hParentPool
-
15614  memTypeIndex,
-
15615  preferredBlockSize,
-
15616  0,
-
15617  SIZE_MAX,
-
15618  GetBufferImageGranularity(),
-
15619  pCreateInfo->frameInUseCount,
-
15620  false, // explicitBlockSize
-
15621  false); // linearAlgorithm
-
15622  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
-
15623  // because minBlockCount is 0.
-
15624  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
-
15625 
-
15626  }
-
15627 }
-
15628 
-
15629 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
-
15630 {
-
15631  VkResult res = VK_SUCCESS;
-
15632 
-
15633  if(pCreateInfo->pRecordSettings != VMA_NULL &&
-
15634  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
-
15635  {
-
15636 #if VMA_RECORDING_ENABLED
-
15637  m_pRecorder = vma_new(this, VmaRecorder)();
-
15638  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
-
15639  if(res != VK_SUCCESS)
-
15640  {
-
15641  return res;
-
15642  }
-
15643  m_pRecorder->WriteConfiguration(
-
15644  m_PhysicalDeviceProperties,
-
15645  m_MemProps,
-
15646  m_VulkanApiVersion,
-
15647  m_UseKhrDedicatedAllocation,
-
15648  m_UseKhrBindMemory2,
-
15649  m_UseExtMemoryBudget,
-
15650  m_UseAmdDeviceCoherentMemory);
-
15651  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
-
15652 #else
-
15653  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
-
15654  return VK_ERROR_FEATURE_NOT_PRESENT;
-
15655 #endif
-
15656  }
+
15611  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
+
15612  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
+
15613 
+
15614  m_GlobalMemoryTypeBits = CalculateGlobalMemoryTypeBits();
+
15615 
+
15616  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
+
15617  {
+
15618  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
+
15619  {
+
15620  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
+
15621  if(limit != VK_WHOLE_SIZE)
+
15622  {
+
15623  m_HeapSizeLimitMask |= 1u << heapIndex;
+
15624  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
+
15625  {
+
15626  m_MemProps.memoryHeaps[heapIndex].size = limit;
+
15627  }
+
15628  }
+
15629  }
+
15630  }
+
15631 
+
15632  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
+
15633  {
+
15634  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
+
15635 
+
15636  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
+
15637  this,
+
15638  VK_NULL_HANDLE, // hParentPool
+
15639  memTypeIndex,
+
15640  preferredBlockSize,
+
15641  0,
+
15642  SIZE_MAX,
+
15643  GetBufferImageGranularity(),
+
15644  pCreateInfo->frameInUseCount,
+
15645  false, // explicitBlockSize
+
15646  false); // linearAlgorithm
+
15647  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
+
15648  // because minBlockCount is 0.
+
15649  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
+
15650 
+
15651  }
+
15652 }
+
15653 
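The constructor treats VK_WHOLE_SIZE in pHeapSizeLimit as "no limit" and otherwise clamps the heap size reported by the driver. A user-side sketch of capping a single heap (the 256 MiB figure is an arbitrary example):

    VkDeviceSize heapLimits[VK_MAX_MEMORY_HEAPS];
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    {
        heapLimits[i] = VK_WHOLE_SIZE; // no limit for this heap
    }
    heapLimits[0] = 256ull * 1024 * 1024; // cap heap 0 at 256 MiB

    VmaAllocatorCreateInfo allocatorInfo = {};
    // ... fill instance, physicalDevice, device, etc. ...
    allocatorInfo.pHeapSizeLimit = heapLimits;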
+
15654 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
+
15655 {
+
15656  VkResult res = VK_SUCCESS;
15657 
-
15658 #if VMA_MEMORY_BUDGET
-
15659  if(m_UseExtMemoryBudget)
+
15658  if(pCreateInfo->pRecordSettings != VMA_NULL &&
+
15659  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
15660  {
-
15661  UpdateVulkanBudget();
-
15662  }
-
15663 #endif // #if VMA_MEMORY_BUDGET
-
15664 
-
15665  return res;
-
15666 }
-
15667 
-
15668 VmaAllocator_T::~VmaAllocator_T()
-
15669 {
-
15670 #if VMA_RECORDING_ENABLED
-
15671  if(m_pRecorder != VMA_NULL)
-
15672  {
-
15673  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
-
15674  vma_delete(this, m_pRecorder);
-
15675  }
-
15676 #endif
-
15677 
-
15678  VMA_ASSERT(m_Pools.empty());
-
15679 
-
15680  for(size_t i = GetMemoryTypeCount(); i--; )
-
15681  {
-
15682  if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty())
-
15683  {
-
15684  VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
-
15685  }
-
15686 
-
15687  vma_delete(this, m_pDedicatedAllocations[i]);
-
15688  vma_delete(this, m_pBlockVectors[i]);
-
15689  }
-
15690 }
-
15691 
-
15692 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
-
15693 {
-
15694 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
-
15695  ImportVulkanFunctions_Static();
-
15696 #endif
-
15697 
-
15698  if(pVulkanFunctions != VMA_NULL)
-
15699  {
-
15700  ImportVulkanFunctions_Custom(pVulkanFunctions);
-
15701  }
-
15702 
-
15703 #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
-
15704  ImportVulkanFunctions_Dynamic();
-
15705 #endif
-
15706 
-
15707  ValidateVulkanFunctions();
-
15708 }
-
15709 
-
15710 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
+
15661 #if VMA_RECORDING_ENABLED
+
15662  m_pRecorder = vma_new(this, VmaRecorder)();
+
15663  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
+
15664  if(res != VK_SUCCESS)
+
15665  {
+
15666  return res;
+
15667  }
+
15668  m_pRecorder->WriteConfiguration(
+
15669  m_PhysicalDeviceProperties,
+
15670  m_MemProps,
+
15671  m_VulkanApiVersion,
+
15672  m_UseKhrDedicatedAllocation,
+
15673  m_UseKhrBindMemory2,
+
15674  m_UseExtMemoryBudget,
+
15675  m_UseAmdDeviceCoherentMemory);
+
15676  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
+
15677 #else
+
15678  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
+
15679  return VK_ERROR_FEATURE_NOT_PRESENT;
+
15680 #endif
+
15681  }
+
15682 
+
15683 #if VMA_MEMORY_BUDGET
+
15684  if(m_UseExtMemoryBudget)
+
15685  {
+
15686  UpdateVulkanBudget();
+
15687  }
+
15688 #endif // #if VMA_MEMORY_BUDGET
+
15689 
+
15690  return res;
+
15691 }
+
15692 
+
15693 VmaAllocator_T::~VmaAllocator_T()
+
15694 {
+
15695 #if VMA_RECORDING_ENABLED
+
15696  if(m_pRecorder != VMA_NULL)
+
15697  {
+
15698  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
+
15699  vma_delete(this, m_pRecorder);
+
15700  }
+
15701 #endif
+
15702 
+
15703  VMA_ASSERT(m_Pools.empty());
+
15704 
+
15705  for(size_t i = GetMemoryTypeCount(); i--; )
+
15706  {
+
15707  if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty())
+
15708  {
+
15709  VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
+
15710  }
15711 
-
15712 void VmaAllocator_T::ImportVulkanFunctions_Static()
-
15713 {
-
15714  // Vulkan 1.0
-
15715  m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
-
15716  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
-
15717  m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
-
15718  m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
-
15719  m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
-
15720  m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
-
15721  m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
-
15722  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
-
15723  m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
-
15724  m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
-
15725  m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
-
15726  m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
-
15727  m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
-
15728  m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
-
15729  m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
-
15730  m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
-
15731  m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
-
15732 
-
15733  // Vulkan 1.1
-
15734 #if VMA_VULKAN_VERSION >= 1001000
-
15735  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-
15736  {
-
15737  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2)vkGetBufferMemoryRequirements2;
-
15738  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2)vkGetImageMemoryRequirements2;
-
15739  m_VulkanFunctions.vkBindBufferMemory2KHR = (PFN_vkBindBufferMemory2)vkBindBufferMemory2;
-
15740  m_VulkanFunctions.vkBindImageMemory2KHR = (PFN_vkBindImageMemory2)vkBindImageMemory2;
-
15741  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR = (PFN_vkGetPhysicalDeviceMemoryProperties2)vkGetPhysicalDeviceMemoryProperties2;
-
15742  }
-
15743 #endif
-
15744 }
-
15745 
-
15746 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
-
15747 
-
15748 void VmaAllocator_T::ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions)
-
15749 {
-
15750  VMA_ASSERT(pVulkanFunctions != VMA_NULL);
-
15751 
-
15752 #define VMA_COPY_IF_NOT_NULL(funcName) \
-
15753  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
-
15754 
-
15755  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
-
15756  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
-
15757  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
-
15758  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
-
15759  VMA_COPY_IF_NOT_NULL(vkMapMemory);
-
15760  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
-
15761  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
-
15762  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
-
15763  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
-
15764  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
-
15765  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
-
15766  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
-
15767  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
-
15768  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
-
15769  VMA_COPY_IF_NOT_NULL(vkCreateImage);
-
15770  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
-
15771  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
+
15712  vma_delete(this, m_pDedicatedAllocations[i]);
+
15713  vma_delete(this, m_pBlockVectors[i]);
+
15714  }
+
15715 }
+
15716 
+
15717 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
+
15718 {
+
15719 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
+
15720  ImportVulkanFunctions_Static();
+
15721 #endif
+
15722 
+
15723  if(pVulkanFunctions != VMA_NULL)
+
15724  {
+
15725  ImportVulkanFunctions_Custom(pVulkanFunctions);
+
15726  }
+
15727 
+
15728 #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
+
15729  ImportVulkanFunctions_Dynamic();
+
15730 #endif
+
15731 
+
15732  ValidateVulkanFunctions();
+
15733 }
+
15734 
+
15735 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
+
15736 
+
15737 void VmaAllocator_T::ImportVulkanFunctions_Static()
+
15738 {
+
15739  // Vulkan 1.0
+
15740  m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
+
15741  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
+
15742  m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
+
15743  m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
+
15744  m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
+
15745  m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
+
15746  m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
+
15747  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
+
15748  m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
+
15749  m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
+
15750  m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
+
15751  m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
+
15752  m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
+
15753  m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
+
15754  m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
+
15755  m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
+
15756  m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
+
15757 
+
15758  // Vulkan 1.1
+
15759 #if VMA_VULKAN_VERSION >= 1001000
+
15760  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
+
15761  {
+
15762  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2)vkGetBufferMemoryRequirements2;
+
15763  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2)vkGetImageMemoryRequirements2;
+
15764  m_VulkanFunctions.vkBindBufferMemory2KHR = (PFN_vkBindBufferMemory2)vkBindBufferMemory2;
+
15765  m_VulkanFunctions.vkBindImageMemory2KHR = (PFN_vkBindImageMemory2)vkBindImageMemory2;
+
15766  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR = (PFN_vkGetPhysicalDeviceMemoryProperties2)vkGetPhysicalDeviceMemoryProperties2;
+
15767  }
+
15768 #endif
+
15769 }
+
15770 
+
15771 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
15772 
-
15773 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
-
15774  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
-
15775  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
-
15776 #endif
-
15777 
-
15778 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
-
15779  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR);
-
15780  VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR);
-
15781 #endif
-
15782 
-
15783 #if VMA_MEMORY_BUDGET
-
15784  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR);
-
15785 #endif
-
15786 
-
15787 #undef VMA_COPY_IF_NOT_NULL
-
15788 }
-
15789 
-
15790 #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
-
15791 
-
15792 void VmaAllocator_T::ImportVulkanFunctions_Dynamic()
-
15793 {
-
15794 #define VMA_FETCH_INSTANCE_FUNC(memberName, functionPointerType, functionNameString) \
-
15795  if(m_VulkanFunctions.memberName == VMA_NULL) \
-
15796  m_VulkanFunctions.memberName = \
-
15797  (functionPointerType)vkGetInstanceProcAddr(m_hInstance, functionNameString);
-
15798 #define VMA_FETCH_DEVICE_FUNC(memberName, functionPointerType, functionNameString) \
-
15799  if(m_VulkanFunctions.memberName == VMA_NULL) \
-
15800  m_VulkanFunctions.memberName = \
-
15801  (functionPointerType)vkGetDeviceProcAddr(m_hDevice, functionNameString);
+
15773 void VmaAllocator_T::ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions)
+
15774 {
+
15775  VMA_ASSERT(pVulkanFunctions != VMA_NULL);
+
15776 
+
15777 #define VMA_COPY_IF_NOT_NULL(funcName) \
+
15778  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
+
15779 
+
15780  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
+
15781  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
+
15782  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
+
15783  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
+
15784  VMA_COPY_IF_NOT_NULL(vkMapMemory);
+
15785  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
+
15786  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
+
15787  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
+
15788  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
+
15789  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
+
15790  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
+
15791  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
+
15792  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
+
15793  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
+
15794  VMA_COPY_IF_NOT_NULL(vkCreateImage);
+
15795  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
+
15796  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
+
15797 
+
15798 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
+
15799  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
+
15800  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
+
15801 #endif
15802 
-
15803  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceProperties, PFN_vkGetPhysicalDeviceProperties, "vkGetPhysicalDeviceProperties");
-
15804  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties, PFN_vkGetPhysicalDeviceMemoryProperties, "vkGetPhysicalDeviceMemoryProperties");
-
15805  VMA_FETCH_DEVICE_FUNC(vkAllocateMemory, PFN_vkAllocateMemory, "vkAllocateMemory");
-
15806  VMA_FETCH_DEVICE_FUNC(vkFreeMemory, PFN_vkFreeMemory, "vkFreeMemory");
-
15807  VMA_FETCH_DEVICE_FUNC(vkMapMemory, PFN_vkMapMemory, "vkMapMemory");
-
15808  VMA_FETCH_DEVICE_FUNC(vkUnmapMemory, PFN_vkUnmapMemory, "vkUnmapMemory");
-
15809  VMA_FETCH_DEVICE_FUNC(vkFlushMappedMemoryRanges, PFN_vkFlushMappedMemoryRanges, "vkFlushMappedMemoryRanges");
-
15810  VMA_FETCH_DEVICE_FUNC(vkInvalidateMappedMemoryRanges, PFN_vkInvalidateMappedMemoryRanges, "vkInvalidateMappedMemoryRanges");
-
15811  VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory, PFN_vkBindBufferMemory, "vkBindBufferMemory");
-
15812  VMA_FETCH_DEVICE_FUNC(vkBindImageMemory, PFN_vkBindImageMemory, "vkBindImageMemory");
-
15813  VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements, PFN_vkGetBufferMemoryRequirements, "vkGetBufferMemoryRequirements");
-
15814  VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements, PFN_vkGetImageMemoryRequirements, "vkGetImageMemoryRequirements");
-
15815  VMA_FETCH_DEVICE_FUNC(vkCreateBuffer, PFN_vkCreateBuffer, "vkCreateBuffer");
-
15816  VMA_FETCH_DEVICE_FUNC(vkDestroyBuffer, PFN_vkDestroyBuffer, "vkDestroyBuffer");
-
15817  VMA_FETCH_DEVICE_FUNC(vkCreateImage, PFN_vkCreateImage, "vkCreateImage");
-
15818  VMA_FETCH_DEVICE_FUNC(vkDestroyImage, PFN_vkDestroyImage, "vkDestroyImage");
-
15819  VMA_FETCH_DEVICE_FUNC(vkCmdCopyBuffer, PFN_vkCmdCopyBuffer, "vkCmdCopyBuffer");
-
15820 
-
15821 #if VMA_VULKAN_VERSION >= 1001000
-
15822  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-
15823  {
-
15824  VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2, "vkGetBufferMemoryRequirements2");
-
15825  VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2, "vkGetImageMemoryRequirements2");
-
15826  VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2, "vkBindBufferMemory2");
-
15827  VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2, "vkBindImageMemory2");
-
15828  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2, "vkGetPhysicalDeviceMemoryProperties2");
-
15829  }
-
15830 #endif
-
15831 
-
15832 #if VMA_DEDICATED_ALLOCATION
-
15833  if(m_UseKhrDedicatedAllocation)
-
15834  {
-
15835  VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2KHR, "vkGetBufferMemoryRequirements2KHR");
-
15836  VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2KHR, "vkGetImageMemoryRequirements2KHR");
-
15837  }
-
15838 #endif
-
15839 
-
15840 #if VMA_BIND_MEMORY2
-
15841  if(m_UseKhrBindMemory2)
-
15842  {
-
15843  VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2KHR, "vkBindBufferMemory2KHR");
-
15844  VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2KHR, "vkBindImageMemory2KHR");
-
15845  }
-
15846 #endif // #if VMA_BIND_MEMORY2
-
15847 
-
15848 #if VMA_MEMORY_BUDGET
-
15849  if(m_UseExtMemoryBudget)
-
15850  {
-
15851  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2KHR");
-
15852  }
-
15853 #endif // #if VMA_MEMORY_BUDGET
-
15854 
-
15855 #undef VMA_FETCH_DEVICE_FUNC
-
15856 #undef VMA_FETCH_INSTANCE_FUNC
-
15857 }
-
15858 
-
15859 #endif // #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
-
15860 
-
15861 void VmaAllocator_T::ValidateVulkanFunctions()
-
15862 {
-
15863  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
-
15864  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
-
15865  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
-
15866  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
-
15867  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
-
15868  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
-
15869  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
-
15870  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
-
15871  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
-
15872  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
-
15873  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
-
15874  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
-
15875  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
-
15876  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
-
15877  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
-
15878  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
-
15879  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
-
15880 
-
15881 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
-
15882  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation)
-
15883  {
-
15884  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
-
15885  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
-
15886  }
-
15887 #endif
-
15888 
-
15889 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
-
15890  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2)
-
15891  {
-
15892  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL);
-
15893  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL);
-
15894  }
-
15895 #endif
-
15896 
-
15897 #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
-
15898  if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-
15899  {
-
15900  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL);
-
15901  }
-
15902 #endif
-
15903 }
-
15904 
-
15905 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
-
15906 {
-
15907  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
-
15908  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
-
15909  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
-
15910  return VmaAlignUp(isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32);
-
15911 }
-
15912 
-
15913 VkResult VmaAllocator_T::AllocateMemoryOfType(
-
15914  VkDeviceSize size,
-
15915  VkDeviceSize alignment,
-
15916  bool dedicatedAllocation,
-
15917  VkBuffer dedicatedBuffer,
-
15918  VkBufferUsageFlags dedicatedBufferUsage,
-
15919  VkImage dedicatedImage,
-
15920  const VmaAllocationCreateInfo& createInfo,
-
15921  uint32_t memTypeIndex,
-
15922  VmaSuballocationType suballocType,
-
15923  size_t allocationCount,
-
15924  VmaAllocation* pAllocations)
-
15925 {
-
15926  VMA_ASSERT(pAllocations != VMA_NULL);
-
15927  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
-
15928 
-
15929  VmaAllocationCreateInfo finalCreateInfo = createInfo;
-
15930 
-
15931  // If memory type is not HOST_VISIBLE, disable MAPPED.
-
15932  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
-
15933  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
-
15934  {
-
15935  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
-
15936  }
-
15937  // If memory is lazily allocated, it should be always dedicated.
-
15938  if(finalCreateInfo.usage == VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED)
-
15939  {
-
15940  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
15941  }
-
15942 
-
15943  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
-
15944  VMA_ASSERT(blockVector);
-
15945 
-
15946  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
-
15947  bool preferDedicatedMemory =
-
15948  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
-
15949  dedicatedAllocation ||
-
15950  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
-
15951  size > preferredBlockSize / 2;
-
15952 
-
15953  if(preferDedicatedMemory &&
-
15954  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
-
15955  finalCreateInfo.pool == VK_NULL_HANDLE)
-
15956  {
-
15957  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
15958  }
-
15959 
-
15960  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
-
15961  {
-
15962  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
-
15963  {
-
15964  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-
15965  }
-
15966  else
-
15967  {
-
15968  return AllocateDedicatedMemory(
-
15969  size,
-
15970  suballocType,
-
15971  memTypeIndex,
-
15972  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
-
15973  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
-
15974  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
-
15975  finalCreateInfo.pUserData,
-
15976  dedicatedBuffer,
-
15977  dedicatedBufferUsage,
-
15978  dedicatedImage,
-
15979  allocationCount,
-
15980  pAllocations);
-
15981  }
-
15982  }
-
15983  else
-
15984  {
-
15985  VkResult res = blockVector->Allocate(
-
15986  m_CurrentFrameIndex.load(),
-
15987  size,
-
15988  alignment,
-
15989  finalCreateInfo,
-
15990  suballocType,
-
15991  allocationCount,
-
15992  pAllocations);
-
15993  if(res == VK_SUCCESS)
-
15994  {
-
15995  return res;
-
15996  }
-
15997 
-
15998  // Try dedicated memory.
-
15999  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
-
16000  {
-
16001  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-
16002  }
-
16003  else
-
16004  {
-
16005  res = AllocateDedicatedMemory(
-
16006  size,
-
16007  suballocType,
-
16008  memTypeIndex,
-
16009  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
-
16010  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
-
16011  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
-
16012  finalCreateInfo.pUserData,
-
16013  dedicatedBuffer,
-
16014  dedicatedBufferUsage,
-
16015  dedicatedImage,
-
16016  allocationCount,
-
16017  pAllocations);
-
16018  if(res == VK_SUCCESS)
-
16019  {
-
16020  // Succeeded: AllocateDedicatedMemory already filled pAllocations, nothing more to do here.
-
16021  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
-
16022  return VK_SUCCESS;
-
16023  }
-
16024  else
-
16025  {
-
16026  // Everything failed: Return error code.
-
16027  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
-
16028  return res;
-
16029  }
-
16030  }
-
16031  }
-
16032 }
-
16033 
-
16034 VkResult VmaAllocator_T::AllocateDedicatedMemory(
-
16035  VkDeviceSize size,
-
16036  VmaSuballocationType suballocType,
-
16037  uint32_t memTypeIndex,
-
16038  bool withinBudget,
-
16039  bool map,
-
16040  bool isUserDataString,
-
16041  void* pUserData,
-
16042  VkBuffer dedicatedBuffer,
-
16043  VkBufferUsageFlags dedicatedBufferUsage,
-
16044  VkImage dedicatedImage,
-
16045  size_t allocationCount,
-
16046  VmaAllocation* pAllocations)
-
16047 {
-
16048  VMA_ASSERT(allocationCount > 0 && pAllocations);
-
16049 
-
16050  if(withinBudget)
-
16051  {
-
16052  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
-
16053  VmaBudget heapBudget = {};
-
16054  GetBudget(&heapBudget, heapIndex, 1);
-
16055  if(heapBudget.usage + size * allocationCount > heapBudget.budget)
-
16056  {
-
16057  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-
16058  }
-
16059  }
-
16060 
-
16061  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
-
16062  allocInfo.memoryTypeIndex = memTypeIndex;
-
16063  allocInfo.allocationSize = size;
-
16064 
-
16065 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
-
16066  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
-
16067  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-
16068  {
-
16069  if(dedicatedBuffer != VK_NULL_HANDLE)
-
16070  {
-
16071  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
-
16072  dedicatedAllocInfo.buffer = dedicatedBuffer;
-
16073  VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
-
16074  }
-
16075  else if(dedicatedImage != VK_NULL_HANDLE)
-
16076  {
-
16077  dedicatedAllocInfo.image = dedicatedImage;
-
16078  VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
-
16079  }
-
16080  }
-
16081 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
-
16082 
-
16083 #if VMA_BUFFER_DEVICE_ADDRESS
-
16084  VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
-
16085  if(m_UseKhrBufferDeviceAddress)
-
16086  {
-
16087  bool canContainBufferWithDeviceAddress = true;
-
16088  if(dedicatedBuffer != VK_NULL_HANDLE)
-
16089  {
-
16090  canContainBufferWithDeviceAddress = dedicatedBufferUsage == UINT32_MAX || // Usage flags unknown
-
16091  (dedicatedBufferUsage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT) != 0;
-
16092  }
-
16093  else if(dedicatedImage != VK_NULL_HANDLE)
-
16094  {
-
16095  canContainBufferWithDeviceAddress = false;
-
16096  }
-
16097  if(canContainBufferWithDeviceAddress)
-
16098  {
-
16099  allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
-
16100  VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
-
16101  }
-
16102  }
-
16103 #endif // #if VMA_BUFFER_DEVICE_ADDRESS
-
16104 
-
16105  size_t allocIndex;
-
16106  VkResult res = VK_SUCCESS;
-
16107  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
-
16108  {
-
16109  res = AllocateDedicatedMemoryPage(
-
16110  size,
-
16111  suballocType,
-
16112  memTypeIndex,
-
16113  allocInfo,
-
16114  map,
-
16115  isUserDataString,
-
16116  pUserData,
-
16117  pAllocations + allocIndex);
-
16118  if(res != VK_SUCCESS)
+
15803 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
+
15804  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR);
+
15805  VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR);
+
15806 #endif
+
15807 
+
15808 #if VMA_MEMORY_BUDGET
+
15809  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR);
+
15810 #endif
+
15811 
+
15812 #undef VMA_COPY_IF_NOT_NULL
+
15813 }
+
15814 
+
15815 #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
+
15816 
+
15817 void VmaAllocator_T::ImportVulkanFunctions_Dynamic()
+
15818 {
+
15819 #define VMA_FETCH_INSTANCE_FUNC(memberName, functionPointerType, functionNameString) \
+
15820  if(m_VulkanFunctions.memberName == VMA_NULL) \
+
15821  m_VulkanFunctions.memberName = \
+
15822  (functionPointerType)vkGetInstanceProcAddr(m_hInstance, functionNameString);
+
15823 #define VMA_FETCH_DEVICE_FUNC(memberName, functionPointerType, functionNameString) \
+
15824  if(m_VulkanFunctions.memberName == VMA_NULL) \
+
15825  m_VulkanFunctions.memberName = \
+
15826  (functionPointerType)vkGetDeviceProcAddr(m_hDevice, functionNameString);
+
15827 
+
15828  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceProperties, PFN_vkGetPhysicalDeviceProperties, "vkGetPhysicalDeviceProperties");
+
15829  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties, PFN_vkGetPhysicalDeviceMemoryProperties, "vkGetPhysicalDeviceMemoryProperties");
+
15830  VMA_FETCH_DEVICE_FUNC(vkAllocateMemory, PFN_vkAllocateMemory, "vkAllocateMemory");
+
15831  VMA_FETCH_DEVICE_FUNC(vkFreeMemory, PFN_vkFreeMemory, "vkFreeMemory");
+
15832  VMA_FETCH_DEVICE_FUNC(vkMapMemory, PFN_vkMapMemory, "vkMapMemory");
+
15833  VMA_FETCH_DEVICE_FUNC(vkUnmapMemory, PFN_vkUnmapMemory, "vkUnmapMemory");
+
15834  VMA_FETCH_DEVICE_FUNC(vkFlushMappedMemoryRanges, PFN_vkFlushMappedMemoryRanges, "vkFlushMappedMemoryRanges");
+
15835  VMA_FETCH_DEVICE_FUNC(vkInvalidateMappedMemoryRanges, PFN_vkInvalidateMappedMemoryRanges, "vkInvalidateMappedMemoryRanges");
+
15836  VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory, PFN_vkBindBufferMemory, "vkBindBufferMemory");
+
15837  VMA_FETCH_DEVICE_FUNC(vkBindImageMemory, PFN_vkBindImageMemory, "vkBindImageMemory");
+
15838  VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements, PFN_vkGetBufferMemoryRequirements, "vkGetBufferMemoryRequirements");
+
15839  VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements, PFN_vkGetImageMemoryRequirements, "vkGetImageMemoryRequirements");
+
15840  VMA_FETCH_DEVICE_FUNC(vkCreateBuffer, PFN_vkCreateBuffer, "vkCreateBuffer");
+
15841  VMA_FETCH_DEVICE_FUNC(vkDestroyBuffer, PFN_vkDestroyBuffer, "vkDestroyBuffer");
+
15842  VMA_FETCH_DEVICE_FUNC(vkCreateImage, PFN_vkCreateImage, "vkCreateImage");
+
15843  VMA_FETCH_DEVICE_FUNC(vkDestroyImage, PFN_vkDestroyImage, "vkDestroyImage");
+
15844  VMA_FETCH_DEVICE_FUNC(vkCmdCopyBuffer, PFN_vkCmdCopyBuffer, "vkCmdCopyBuffer");
+
15845 
+
15846 #if VMA_VULKAN_VERSION >= 1001000
+
15847  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
+
15848  {
+
15849  VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2, "vkGetBufferMemoryRequirements2");
+
15850  VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2, "vkGetImageMemoryRequirements2");
+
15851  VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2, "vkBindBufferMemory2");
+
15852  VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2, "vkBindImageMemory2");
+
15853  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2, "vkGetPhysicalDeviceMemoryProperties2");
+
15854  }
+
15855 #endif
+
15856 
+
15857 #if VMA_DEDICATED_ALLOCATION
+
15858  if(m_UseKhrDedicatedAllocation)
+
15859  {
+
15860  VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2KHR, "vkGetBufferMemoryRequirements2KHR");
+
15861  VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2KHR, "vkGetImageMemoryRequirements2KHR");
+
15862  }
+
15863 #endif
+
15864 
+
15865 #if VMA_BIND_MEMORY2
+
15866  if(m_UseKhrBindMemory2)
+
15867  {
+
15868  VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2KHR, "vkBindBufferMemory2KHR");
+
15869  VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2KHR, "vkBindImageMemory2KHR");
+
15870  }
+
15871 #endif // #if VMA_BIND_MEMORY2
+
15872 
+
15873 #if VMA_MEMORY_BUDGET
+
15874  if(m_UseExtMemoryBudget)
+
15875  {
+
15876  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2KHR");
+
15877  }
+
15878 #endif // #if VMA_MEMORY_BUDGET
+
15879 
+
15880 #undef VMA_FETCH_DEVICE_FUNC
+
15881 #undef VMA_FETCH_INSTANCE_FUNC
+
15882 }
+
15883 
+
15884 #endif // #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
+
15885 
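For reference, one concrete expansion of the fetch macros above: with memberName = vkAllocateMemory, VMA_FETCH_DEVICE_FUNC resolves the entry point through vkGetDeviceProcAddr only if the user has not already supplied it:

    if(m_VulkanFunctions.vkAllocateMemory == VMA_NULL)
        m_VulkanFunctions.vkAllocateMemory =
            (PFN_vkAllocateMemory)vkGetDeviceProcAddr(m_hDevice, "vkAllocateMemory");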
+
15886 void VmaAllocator_T::ValidateVulkanFunctions()
+
15887 {
+
15888  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
+
15889  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
+
15890  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
+
15891  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
+
15892  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
+
15893  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
+
15894  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
+
15895  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
+
15896  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
+
15897  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
+
15898  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
+
15899  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
+
15900  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
+
15901  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
+
15902  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
+
15903  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
+
15904  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
+
15905 
+
15906 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
+
15907  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation)
+
15908  {
+
15909  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
+
15910  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
+
15911  }
+
15912 #endif
+
15913 
+
15914 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
+
15915  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2)
+
15916  {
+
15917  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL);
+
15918  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL);
+
15919  }
+
15920 #endif
+
15921 
+
15922 #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
+
15923  if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
+
15924  {
+
15925  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL);
+
15926  }
+
15927 #endif
+
15928 }
+
15929 
+
15930 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
+
15931 {
+
15932  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
+
15933  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
+
15934  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
+
15935  return VmaAlignUp(isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32);
+
15936 }
+
15937 
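A worked example of the heuristic in CalcPreferredBlockSize, assuming the library defaults of VMA_SMALL_HEAP_MAX_SIZE = 1 GiB and VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE = 256 MiB:

    // heapSize = 512 MiB -> small heap -> preferred block size = 512 MiB / 8 = 64 MiB
    // heapSize =   8 GiB -> large heap -> preferred block size = 256 MiB
    // Both results are already multiples of 32 bytes, so VmaAlignUp returns them unchanged.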
+
15938 VkResult VmaAllocator_T::AllocateMemoryOfType(
+
15939  VkDeviceSize size,
+
15940  VkDeviceSize alignment,
+
15941  bool dedicatedAllocation,
+
15942  VkBuffer dedicatedBuffer,
+
15943  VkBufferUsageFlags dedicatedBufferUsage,
+
15944  VkImage dedicatedImage,
+
15945  const VmaAllocationCreateInfo& createInfo,
+
15946  uint32_t memTypeIndex,
+
15947  VmaSuballocationType suballocType,
+
15948  size_t allocationCount,
+
15949  VmaAllocation* pAllocations)
+
15950 {
+
15951  VMA_ASSERT(pAllocations != VMA_NULL);
+
15952  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
+
15953 
+
15954  VmaAllocationCreateInfo finalCreateInfo = createInfo;
+
15955 
+
15956  // If memory type is not HOST_VISIBLE, disable MAPPED.
+
15957  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
+
15958  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
+
15959  {
+
15960  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
+
15961  }
+
15962  // If memory is lazily allocated, it should be always dedicated.
+
15963  if(finalCreateInfo.usage == VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED)
+
15964  {
+
15965  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
15966  }
+
15967 
+
15968  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
+
15969  VMA_ASSERT(blockVector);
+
15970 
+
15971  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
+
15972  bool preferDedicatedMemory =
+
15973  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
+
15974  dedicatedAllocation ||
+
15975  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
+
15976  size > preferredBlockSize / 2;
+
15977 
+
15978  if(preferDedicatedMemory &&
+
15979  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
+
15980  finalCreateInfo.pool == VK_NULL_HANDLE)
+
15981  {
+
15982  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
15983  }
+
15984 
+
15985  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
+
15986  {
+
15987  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
+
15988  {
+
15989  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
15990  }
+
15991  else
+
15992  {
+
15993  return AllocateDedicatedMemory(
+
15994  size,
+
15995  suballocType,
+
15996  memTypeIndex,
+
15997  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
+
15998  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
+
15999  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
+
16000  finalCreateInfo.pUserData,
+
16001  dedicatedBuffer,
+
16002  dedicatedBufferUsage,
+
16003  dedicatedImage,
+
16004  allocationCount,
+
16005  pAllocations);
+
16006  }
+
16007  }
+
16008  else
+
16009  {
+
16010  VkResult res = blockVector->Allocate(
+
16011  m_CurrentFrameIndex.load(),
+
16012  size,
+
16013  alignment,
+
16014  finalCreateInfo,
+
16015  suballocType,
+
16016  allocationCount,
+
16017  pAllocations);
+
16018  if(res == VK_SUCCESS)
+
16019  {
+
16020  return res;
+
16021  }
+
16022 
+
16023  // Try dedicated memory.
+
16024  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
+
16025  {
+
16026  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
16027  }
+
16028  else
+
16029  {
+
16030  res = AllocateDedicatedMemory(
+
16031  size,
+
16032  suballocType,
+
16033  memTypeIndex,
+
16034  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
+
16035  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
+
16036  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
+
16037  finalCreateInfo.pUserData,
+
16038  dedicatedBuffer,
+
16039  dedicatedBufferUsage,
+
16040  dedicatedImage,
+
16041  allocationCount,
+
16042  pAllocations);
+
16043  if(res == VK_SUCCESS)
+
16044  {
+
16045  // Succeeded: AllocateDedicatedMemory function already filled pAllocations, nothing more to do here.
+
16046  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
+
16047  return VK_SUCCESS;
+
16048  }
+
16049  else
+
16050  {
+
16051  // Everything failed: Return error code.
+
16052  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
+
16053  return res;
+
16054  }
+
16055  }
+
16056  }
+
16057 }
+
16058 
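For reference, the dedicated-vs-block decision above can also be steered explicitly from the public API. A minimal sketch of forcing the dedicated path, assuming a VmaAllocator `allocator` and a filled `VkBufferCreateInfo bufCreateInfo` created elsewhere:

    // Request a dedicated VkDeviceMemory block for this buffer, bypassing
    // the size > preferredBlockSize / 2 heuristic in AllocateMemoryOfType().
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;

    VkBuffer buf;
    VmaAllocation alloc;
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
        &buf, &alloc, nullptr);

Combining this flag with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT yields VK_ERROR_OUT_OF_DEVICE_MEMORY, as the branch above shows.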
+
16059 VkResult VmaAllocator_T::AllocateDedicatedMemory(
+
16060  VkDeviceSize size,
+
16061  VmaSuballocationType suballocType,
+
16062  uint32_t memTypeIndex,
+
16063  bool withinBudget,
+
16064  bool map,
+
16065  bool isUserDataString,
+
16066  void* pUserData,
+
16067  VkBuffer dedicatedBuffer,
+
16068  VkBufferUsageFlags dedicatedBufferUsage,
+
16069  VkImage dedicatedImage,
+
16070  size_t allocationCount,
+
16071  VmaAllocation* pAllocations)
+
16072 {
+
16073  VMA_ASSERT(allocationCount > 0 && pAllocations);
+
16074 
+
16075  if(withinBudget)
+
16076  {
+
16077  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
+
16078  VmaBudget heapBudget = {};
+
16079  GetBudget(&heapBudget, heapIndex, 1);
+
16080  if(heapBudget.usage + size * allocationCount > heapBudget.budget)
+
16081  {
+
16082  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
16083  }
+
16084  }
+
16085 
+
16086  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
+
16087  allocInfo.memoryTypeIndex = memTypeIndex;
+
16088  allocInfo.allocationSize = size;
+
16089 
+
16090 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
+
16091  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
+
16092  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
+
16093  {
+
16094  if(dedicatedBuffer != VK_NULL_HANDLE)
+
16095  {
+
16096  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
+
16097  dedicatedAllocInfo.buffer = dedicatedBuffer;
+
16098  VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
+
16099  }
+
16100  else if(dedicatedImage != VK_NULL_HANDLE)
+
16101  {
+
16102  dedicatedAllocInfo.image = dedicatedImage;
+
16103  VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
+
16104  }
+
16105  }
+
16106 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
+
16107 
+
16108 #if VMA_BUFFER_DEVICE_ADDRESS
+
16109  VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
+
16110  if(m_UseKhrBufferDeviceAddress)
+
16111  {
+
16112  bool canContainBufferWithDeviceAddress = true;
+
16113  if(dedicatedBuffer != VK_NULL_HANDLE)
+
16114  {
+
16115  canContainBufferWithDeviceAddress = dedicatedBufferUsage == UINT32_MAX || // Usage flags unknown
+
16116  (dedicatedBufferUsage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT) != 0;
+
16117  }
+
16118  else if(dedicatedImage != VK_NULL_HANDLE)
16119  {
-
16120  break;
+
16120  canContainBufferWithDeviceAddress = false;
16121  }
-
16122  }
-
16123 
-
16124  if(res == VK_SUCCESS)
-
16125  {
-
16126  // Register them in m_pDedicatedAllocations.
-
16127  {
-
16128  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
-
16129  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
-
16130  VMA_ASSERT(pDedicatedAllocations);
-
16131  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
-
16132  {
-
16133  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
-
16134  }
-
16135  }
-
16136 
-
16137  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
-
16138  }
-
16139  else
-
16140  {
-
16141  // Free all already created allocations.
-
16142  while(allocIndex--)
-
16143  {
-
16144  VmaAllocation currAlloc = pAllocations[allocIndex];
-
16145  VkDeviceMemory hMemory = currAlloc->GetMemory();
-
16146 
-
16147  /*
-
16148  There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
-
16149  before vkFreeMemory.
-
16150 
-
16151  if(currAlloc->GetMappedData() != VMA_NULL)
-
16152  {
-
16153  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
-
16154  }
-
16155  */
-
16156 
-
16157  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
-
16158  m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize());
-
16159  currAlloc->SetUserData(this, VMA_NULL);
-
16160  m_AllocationObjectAllocator.Free(currAlloc);
-
16161  }
-
16162 
-
16163  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
-
16164  }
-
16165 
-
16166  return res;
-
16167 }
-
16168 
-
16169 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
-
16170  VkDeviceSize size,
-
16171  VmaSuballocationType suballocType,
-
16172  uint32_t memTypeIndex,
-
16173  const VkMemoryAllocateInfo& allocInfo,
-
16174  bool map,
-
16175  bool isUserDataString,
-
16176  void* pUserData,
-
16177  VmaAllocation* pAllocation)
-
16178 {
-
16179  VkDeviceMemory hMemory = VK_NULL_HANDLE;
-
16180  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
-
16181  if(res < 0)
-
16182  {
-
16183  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
-
16184  return res;
-
16185  }
-
16186 
-
16187  void* pMappedData = VMA_NULL;
-
16188  if(map)
-
16189  {
-
16190  res = (*m_VulkanFunctions.vkMapMemory)(
-
16191  m_hDevice,
-
16192  hMemory,
-
16193  0,
-
16194  VK_WHOLE_SIZE,
-
16195  0,
-
16196  &pMappedData);
-
16197  if(res < 0)
-
16198  {
-
16199  VMA_DEBUG_LOG(" vkMapMemory FAILED");
-
16200  FreeVulkanMemory(memTypeIndex, size, hMemory);
-
16201  return res;
-
16202  }
-
16203  }
-
16204 
-
16205  *pAllocation = m_AllocationObjectAllocator.Allocate(m_CurrentFrameIndex.load(), isUserDataString);
-
16206  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
-
16207  (*pAllocation)->SetUserData(this, pUserData);
-
16208  m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size);
-
16209  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
-
16210  {
-
16211  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
-
16212  }
-
16213 
-
16214  return VK_SUCCESS;
-
16215 }
-
16216 
-
16217 void VmaAllocator_T::GetBufferMemoryRequirements(
-
16218  VkBuffer hBuffer,
-
16219  VkMemoryRequirements& memReq,
-
16220  bool& requiresDedicatedAllocation,
-
16221  bool& prefersDedicatedAllocation) const
-
16222 {
-
16223 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
-
16224  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-
16225  {
-
16226  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
-
16227  memReqInfo.buffer = hBuffer;
-
16228 
-
16229  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
-
16230 
-
16231  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
-
16232  VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
-
16233 
-
16234  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
-
16235 
-
16236  memReq = memReq2.memoryRequirements;
-
16237  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
-
16238  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
-
16239  }
-
16240  else
-
16241 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
-
16242  {
-
16243  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
-
16244  requiresDedicatedAllocation = false;
-
16245  prefersDedicatedAllocation = false;
-
16246  }
-
16247 }
-
16248 
-
16249 void VmaAllocator_T::GetImageMemoryRequirements(
-
16250  VkImage hImage,
-
16251  VkMemoryRequirements& memReq,
-
16252  bool& requiresDedicatedAllocation,
-
16253  bool& prefersDedicatedAllocation) const
-
16254 {
-
16255 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
-
16256  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-
16257  {
-
16258  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
-
16259  memReqInfo.image = hImage;
+
16122  if(canContainBufferWithDeviceAddress)
+
16123  {
+
16124  allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
+
16125  VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
+
16126  }
+
16127  }
+
16128 #endif // #if VMA_BUFFER_DEVICE_ADDRESS
+
16129 
+
16130  size_t allocIndex;
+
16131  VkResult res = VK_SUCCESS;
+
16132  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
+
16133  {
+
16134  res = AllocateDedicatedMemoryPage(
+
16135  size,
+
16136  suballocType,
+
16137  memTypeIndex,
+
16138  allocInfo,
+
16139  map,
+
16140  isUserDataString,
+
16141  pUserData,
+
16142  pAllocations + allocIndex);
+
16143  if(res != VK_SUCCESS)
+
16144  {
+
16145  break;
+
16146  }
+
16147  }
+
16148 
+
16149  if(res == VK_SUCCESS)
+
16150  {
+
16151  // Register them in m_pDedicatedAllocations.
+
16152  {
+
16153  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
+
16154  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
+
16155  VMA_ASSERT(pDedicatedAllocations);
+
16156  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
+
16157  {
+
16158  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
+
16159  }
+
16160  }
+
16161 
+
16162  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
+
16163  }
+
16164  else
+
16165  {
+
16166  // Free all already created allocations.
+
16167  while(allocIndex--)
+
16168  {
+
16169  VmaAllocation currAlloc = pAllocations[allocIndex];
+
16170  VkDeviceMemory hMemory = currAlloc->GetMemory();
+
16171 
+
16172  /*
+
16173  There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
+
16174  before vkFreeMemory.
+
16175 
+
16176  if(currAlloc->GetMappedData() != VMA_NULL)
+
16177  {
+
16178  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
+
16179  }
+
16180  */
+
16181 
+
16182  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
+
16183  m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize());
+
16184  currAlloc->SetUserData(this, VMA_NULL);
+
16185  m_AllocationObjectAllocator.Free(currAlloc);
+
16186  }
+
16187 
+
16188  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
+
16189  }
+
16190 
+
16191  return res;
+
16192 }
+
16193 
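The `withinBudget` early-out at the top of AllocateDedicatedMemory() corresponds to VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT on the public side. A sketch under the same assumptions as before (`allocator`, `bufCreateInfo`):

    // Fail with VK_ERROR_OUT_OF_DEVICE_MEMORY rather than exceed the heap
    // budget obtained via GetBudget(), mirroring the check above.
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT;

    VkBuffer buf;
    VmaAllocation alloc;
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
        &buf, &alloc, nullptr);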
+
16194 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
+
16195  VkDeviceSize size,
+
16196  VmaSuballocationType suballocType,
+
16197  uint32_t memTypeIndex,
+
16198  const VkMemoryAllocateInfo& allocInfo,
+
16199  bool map,
+
16200  bool isUserDataString,
+
16201  void* pUserData,
+
16202  VmaAllocation* pAllocation)
+
16203 {
+
16204  VkDeviceMemory hMemory = VK_NULL_HANDLE;
+
16205  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
+
16206  if(res < 0)
+
16207  {
+
16208  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
+
16209  return res;
+
16210  }
+
16211 
+
16212  void* pMappedData = VMA_NULL;
+
16213  if(map)
+
16214  {
+
16215  res = (*m_VulkanFunctions.vkMapMemory)(
+
16216  m_hDevice,
+
16217  hMemory,
+
16218  0,
+
16219  VK_WHOLE_SIZE,
+
16220  0,
+
16221  &pMappedData);
+
16222  if(res < 0)
+
16223  {
+
16224  VMA_DEBUG_LOG(" vkMapMemory FAILED");
+
16225  FreeVulkanMemory(memTypeIndex, size, hMemory);
+
16226  return res;
+
16227  }
+
16228  }
+
16229 
+
16230  *pAllocation = m_AllocationObjectAllocator.Allocate(m_CurrentFrameIndex.load(), isUserDataString);
+
16231  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
+
16232  (*pAllocation)->SetUserData(this, pUserData);
+
16233  m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size);
+
16234  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
+
16235  {
+
16236  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
+
16237  }
+
16238 
+
16239  return VK_SUCCESS;
+
16240 }
+
16241 
+
16242 void VmaAllocator_T::GetBufferMemoryRequirements(
+
16243  VkBuffer hBuffer,
+
16244  VkMemoryRequirements& memReq,
+
16245  bool& requiresDedicatedAllocation,
+
16246  bool& prefersDedicatedAllocation) const
+
16247 {
+
16248 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
+
16249  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
+
16250  {
+
16251  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
+
16252  memReqInfo.buffer = hBuffer;
+
16253 
+
16254  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
+
16255 
+
16256  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
+
16257  VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
+
16258 
+
16259  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
16260 
-
16261  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
-
16262 
-
16263  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
-
16264  VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
-
16265 
-
16266  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
-
16267 
-
16268  memReq = memReq2.memoryRequirements;
-
16269  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
-
16270  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
+
16261  memReq = memReq2.memoryRequirements;
+
16262  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
+
16263  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
+
16264  }
+
16265  else
+
16266 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
+
16267  {
+
16268  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
+
16269  requiresDedicatedAllocation = false;
+
16270  prefersDedicatedAllocation = false;
16271  }
-
16272  else
-
16273 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
-
16274  {
-
16275  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
-
16276  requiresDedicatedAllocation = false;
-
16277  prefersDedicatedAllocation = false;
-
16278  }
-
16279 }
-
16280 
-
16281 VkResult VmaAllocator_T::AllocateMemory(
-
16282  const VkMemoryRequirements& vkMemReq,
-
16283  bool requiresDedicatedAllocation,
-
16284  bool prefersDedicatedAllocation,
-
16285  VkBuffer dedicatedBuffer,
-
16286  VkBufferUsageFlags dedicatedBufferUsage,
-
16287  VkImage dedicatedImage,
-
16288  const VmaAllocationCreateInfo& createInfo,
-
16289  VmaSuballocationType suballocType,
-
16290  size_t allocationCount,
-
16291  VmaAllocation* pAllocations)
-
16292 {
-
16293  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
-
16294 
-
16295  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
-
16296 
-
16297  if(vkMemReq.size == 0)
-
16298  {
-
16299  return VK_ERROR_VALIDATION_FAILED_EXT;
-
16300  }
-
16301  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
-
16302  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
-
16303  {
-
16304  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
-
16305  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-
16306  }
-
16307  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
-
16308  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
-
16309  {
-
16310  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
-
16311  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-
16312  }
-
16313  if(requiresDedicatedAllocation)
-
16314  {
-
16315  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
-
16316  {
-
16317  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
-
16318  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-
16319  }
-
16320  if(createInfo.pool != VK_NULL_HANDLE)
-
16321  {
-
16322  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
-
16323  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-
16324  }
+
16272 }
+
16273 
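GetBufferMemoryRequirements() is also what runs when a buffer created outside VMA receives its memory from the allocator. A minimal sketch, assuming a VkBuffer `buf` created with plain vkCreateBuffer:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocation alloc;
    VkResult res = vmaAllocateMemoryForBuffer(allocator, buf, &allocCreateInfo,
        &alloc, nullptr);
    if(res == VK_SUCCESS)
    {
        // Binds at the allocation's offset inside its VkDeviceMemory block.
        res = vmaBindBufferMemory(allocator, alloc, buf);
    }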
+
16274 void VmaAllocator_T::GetImageMemoryRequirements(
+
16275  VkImage hImage,
+
16276  VkMemoryRequirements& memReq,
+
16277  bool& requiresDedicatedAllocation,
+
16278  bool& prefersDedicatedAllocation) const
+
16279 {
+
16280 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
+
16281  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
+
16282  {
+
16283  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
+
16284  memReqInfo.image = hImage;
+
16285 
+
16286  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
+
16287 
+
16288  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
+
16289  VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
+
16290 
+
16291  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
+
16292 
+
16293  memReq = memReq2.memoryRequirements;
+
16294  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
+
16295  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
+
16296  }
+
16297  else
+
16298 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
+
16299  {
+
16300  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
+
16301  requiresDedicatedAllocation = false;
+
16302  prefersDedicatedAllocation = false;
+
16303  }
+
16304 }
+
16305 
+
16306 VkResult VmaAllocator_T::AllocateMemory(
+
16307  const VkMemoryRequirements& vkMemReq,
+
16308  bool requiresDedicatedAllocation,
+
16309  bool prefersDedicatedAllocation,
+
16310  VkBuffer dedicatedBuffer,
+
16311  VkBufferUsageFlags dedicatedBufferUsage,
+
16312  VkImage dedicatedImage,
+
16313  const VmaAllocationCreateInfo& createInfo,
+
16314  VmaSuballocationType suballocType,
+
16315  size_t allocationCount,
+
16316  VmaAllocation* pAllocations)
+
16317 {
+
16318  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
+
16319 
+
16320  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
+
16321 
+
16322  if(vkMemReq.size == 0)
+
16323  {
+
16324  return VK_ERROR_VALIDATION_FAILED_EXT;
16325  }
-
16326  if((createInfo.pool != VK_NULL_HANDLE) &&
-
16327  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
+
16326  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
+
16327  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
16328  {
-
16329  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
+
16329  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
16330  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16331  }
-
16332 
-
16333  if(createInfo.pool != VK_NULL_HANDLE)
+
16332  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
+
16333  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
+
16334  {
-
16335  const VkDeviceSize alignmentForPool = VMA_MAX(
-
16336  vkMemReq.alignment,
-
16337  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
-
16338 
-
16339  VmaAllocationCreateInfo createInfoForPool = createInfo;
-
16340  // If memory type is not HOST_VISIBLE, disable MAPPED.
-
16341  if((createInfoForPool.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
-
16342  (m_MemProps.memoryTypes[createInfo.pool->m_BlockVector.GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
-
16343  {
-
16344  createInfoForPool.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
-
16345  }
-
16346 
-
16347  return createInfo.pool->m_BlockVector.Allocate(
-
16348  m_CurrentFrameIndex.load(),
-
16349  vkMemReq.size,
-
16350  alignmentForPool,
-
16351  createInfoForPool,
-
16352  suballocType,
-
16353  allocationCount,
-
16354  pAllocations);
-
16355  }
-
16356  else
-
16357  {
-
16358  // Bit mask of memory Vulkan types acceptable for this allocation.
-
16359  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
-
16360  uint32_t memTypeIndex = UINT32_MAX;
-
16361  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
-
16362  if(res == VK_SUCCESS)
-
16363  {
-
16364  VkDeviceSize alignmentForMemType = VMA_MAX(
-
16365  vkMemReq.alignment,
-
16366  GetMemoryTypeMinAlignment(memTypeIndex));
-
16367 
-
16368  res = AllocateMemoryOfType(
-
16369  vkMemReq.size,
-
16370  alignmentForMemType,
-
16371  requiresDedicatedAllocation || prefersDedicatedAllocation,
-
16372  dedicatedBuffer,
-
16373  dedicatedBufferUsage,
-
16374  dedicatedImage,
-
16375  createInfo,
-
16376  memTypeIndex,
-
16377  suballocType,
-
16378  allocationCount,
-
16379  pAllocations);
-
16380  // Succeeded on first try.
-
16381  if(res == VK_SUCCESS)
-
16382  {
-
16383  return res;
-
16384  }
-
16385  // Allocation from this memory type failed. Try other compatible memory types.
-
16386  else
-
16387  {
-
16388  for(;;)
-
16389  {
-
16390  // Remove old memTypeIndex from list of possibilities.
-
16391  memoryTypeBits &= ~(1u << memTypeIndex);
-
16392  // Find alternative memTypeIndex.
-
16393  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
-
16394  if(res == VK_SUCCESS)
-
16395  {
-
16396  alignmentForMemType = VMA_MAX(
-
16397  vkMemReq.alignment,
-
16398  GetMemoryTypeMinAlignment(memTypeIndex));
-
16399 
-
16400  res = AllocateMemoryOfType(
-
16401  vkMemReq.size,
-
16402  alignmentForMemType,
-
16403  requiresDedicatedAllocation || prefersDedicatedAllocation,
-
16404  dedicatedBuffer,
-
16405  dedicatedBufferUsage,
-
16406  dedicatedImage,
-
16407  createInfo,
-
16408  memTypeIndex,
-
16409  suballocType,
-
16410  allocationCount,
-
16411  pAllocations);
-
16412  // Allocation from this alternative memory type succeeded.
-
16413  if(res == VK_SUCCESS)
-
16414  {
-
16415  return res;
-
16416  }
-
16417  // else: Allocation from this memory type failed. Try next one - next loop iteration.
-
16418  }
-
16419  // No other matching memory type index could be found.
-
16420  else
-
16421  {
-
16422  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
-
16423  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-
16424  }
-
16425  }
-
16426  }
-
16427  }
-
16428  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
-
16429  else
-
16430  return res;
-
16431  }
-
16432 }
-
16433 
-
16434 void VmaAllocator_T::FreeMemory(
-
16435  size_t allocationCount,
-
16436  const VmaAllocation* pAllocations)
-
16437 {
-
16438  VMA_ASSERT(pAllocations);
-
16439 
-
16440  for(size_t allocIndex = allocationCount; allocIndex--; )
-
16441  {
-
16442  VmaAllocation allocation = pAllocations[allocIndex];
-
16443 
-
16444  if(allocation != VK_NULL_HANDLE)
-
16445  {
-
16446  if(TouchAllocation(allocation))
-
16447  {
-
16448  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
-
16449  {
-
16450  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
-
16451  }
-
16452 
-
16453  switch(allocation->GetType())
-
16454  {
-
16455  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
-
16456  {
-
16457  VmaBlockVector* pBlockVector = VMA_NULL;
-
16458  VmaPool hPool = allocation->GetBlock()->GetParentPool();
-
16459  if(hPool != VK_NULL_HANDLE)
-
16460  {
-
16461  pBlockVector = &hPool->m_BlockVector;
-
16462  }
-
16463  else
-
16464  {
-
16465  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
-
16466  pBlockVector = m_pBlockVectors[memTypeIndex];
-
16467  }
-
16468  pBlockVector->Free(allocation);
-
16469  }
-
16470  break;
-
16471  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
-
16472  FreeDedicatedMemory(allocation);
-
16473  break;
-
16474  default:
-
16475  VMA_ASSERT(0);
+
16335  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
+
16336  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
16337  }
+
16338  if(requiresDedicatedAllocation)
+
16339  {
+
16340  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
+
16341  {
+
16342  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
+
16343  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
16344  }
+
16345  if(createInfo.pool != VK_NULL_HANDLE)
+
16346  {
+
16347  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
+
16348  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
16349  }
+
16350  }
+
16351  if((createInfo.pool != VK_NULL_HANDLE) &&
+
16352  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
+
16353  {
+
16354  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
+
16355  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
16356  }
+
16357 
+
16358  if(createInfo.pool != VK_NULL_HANDLE)
+
16359  {
+
16360  const VkDeviceSize alignmentForPool = VMA_MAX(
+
16361  vkMemReq.alignment,
+
16362  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
+
16363 
+
16364  VmaAllocationCreateInfo createInfoForPool = createInfo;
+
16365  // If memory type is not HOST_VISIBLE, disable MAPPED.
+
16366  if((createInfoForPool.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
+
16367  (m_MemProps.memoryTypes[createInfo.pool->m_BlockVector.GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
+
16368  {
+
16369  createInfoForPool.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
+
16370  }
+
16371 
+
16372  return createInfo.pool->m_BlockVector.Allocate(
+
16373  m_CurrentFrameIndex.load(),
+
16374  vkMemReq.size,
+
16375  alignmentForPool,
+
16376  createInfoForPool,
+
16377  suballocType,
+
16378  allocationCount,
+
16379  pAllocations);
+
16380  }
+
16381  else
+
16382  {
+
16383  // Bit mask of memory Vulkan types acceptable for this allocation.
+
16384  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
+
16385  uint32_t memTypeIndex = UINT32_MAX;
+
16386  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
+
16387  if(res == VK_SUCCESS)
+
16388  {
+
16389  VkDeviceSize alignmentForMemType = VMA_MAX(
+
16390  vkMemReq.alignment,
+
16391  GetMemoryTypeMinAlignment(memTypeIndex));
+
16392 
+
16393  res = AllocateMemoryOfType(
+
16394  vkMemReq.size,
+
16395  alignmentForMemType,
+
16396  requiresDedicatedAllocation || prefersDedicatedAllocation,
+
16397  dedicatedBuffer,
+
16398  dedicatedBufferUsage,
+
16399  dedicatedImage,
+
16400  createInfo,
+
16401  memTypeIndex,
+
16402  suballocType,
+
16403  allocationCount,
+
16404  pAllocations);
+
16405  // Succeeded on first try.
+
16406  if(res == VK_SUCCESS)
+
16407  {
+
16408  return res;
+
16409  }
+
16410  // Allocation from this memory type failed. Try other compatible memory types.
+
16411  else
+
16412  {
+
16413  for(;;)
+
16414  {
+
16415  // Remove old memTypeIndex from list of possibilities.
+
16416  memoryTypeBits &= ~(1u << memTypeIndex);
+
16417  // Find alternative memTypeIndex.
+
16418  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
+
16419  if(res == VK_SUCCESS)
+
16420  {
+
16421  alignmentForMemType = VMA_MAX(
+
16422  vkMemReq.alignment,
+
16423  GetMemoryTypeMinAlignment(memTypeIndex));
+
16424 
+
16425  res = AllocateMemoryOfType(
+
16426  vkMemReq.size,
+
16427  alignmentForMemType,
+
16428  requiresDedicatedAllocation || prefersDedicatedAllocation,
+
16429  dedicatedBuffer,
+
16430  dedicatedBufferUsage,
+
16431  dedicatedImage,
+
16432  createInfo,
+
16433  memTypeIndex,
+
16434  suballocType,
+
16435  allocationCount,
+
16436  pAllocations);
+
16437  // Allocation from this alternative memory type succeeded.
+
16438  if(res == VK_SUCCESS)
+
16439  {
+
16440  return res;
+
16441  }
+
16442  // else: Allocation from this memory type failed. Try next one - next loop iteration.
+
16443  }
+
16444  // No other matching memory type index could be found.
+
16445  else
+
16446  {
+
16447  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
+
16448  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
16449  }
+
16450  }
+
16451  }
+
16452  }
+
16453  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
+
16454  else
+
16455  return res;
+
16456  }
+
16457 }
+
16458 
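The memory type selection and fallback loop inside AllocateMemory() can be previewed from user code with vmaFindMemoryTypeIndex(), e.g. to pick the memory type for a custom pool. A sketch, assuming `allocator`:

    // UINT32_MAX = no restriction from VkMemoryRequirements::memoryTypeBits.
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

    uint32_t memTypeIndex = UINT32_MAX;
    VkResult res = vmaFindMemoryTypeIndex(allocator, UINT32_MAX,
        &allocCreateInfo, &memTypeIndex);
    // VK_ERROR_FEATURE_NOT_PRESENT here means the fallback loop above
    // would run out of candidates as well.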
+
16459 void VmaAllocator_T::FreeMemory(
+
16460  size_t allocationCount,
+
16461  const VmaAllocation* pAllocations)
+
16462 {
+
16463  VMA_ASSERT(pAllocations);
+
16464 
+
16465  for(size_t allocIndex = allocationCount; allocIndex--; )
+
16466  {
+
16467  VmaAllocation allocation = pAllocations[allocIndex];
+
16468 
+
16469  if(allocation != VK_NULL_HANDLE)
+
16470  {
+
16471  if(TouchAllocation(allocation))
+
16472  {
+
16473  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
+
16474  {
+
16475  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
16476  }
-
16477  }
-
16478 
-
16479  // Do this regardless of whether the allocation is lost. Lost allocations still count toward Budget.AllocationBytes.
-
16480  m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize());
-
16481  allocation->SetUserData(this, VMA_NULL);
-
16482  m_AllocationObjectAllocator.Free(allocation);
-
16483  }
-
16484  }
-
16485 }
-
16486 
-
16487 VkResult VmaAllocator_T::ResizeAllocation(
-
16488  const VmaAllocation alloc,
-
16489  VkDeviceSize newSize)
-
16490 {
-
16491  // This function is deprecated and so it does nothing. It's left for backward compatibility.
-
16492  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
-
16493  {
-
16494  return VK_ERROR_VALIDATION_FAILED_EXT;
-
16495  }
-
16496  if(newSize == alloc->GetSize())
-
16497  {
-
16498  return VK_SUCCESS;
-
16499  }
-
16500  return VK_ERROR_OUT_OF_POOL_MEMORY;
-
16501 }
-
16502 
-
16503 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
-
16504 {
-
16505  // Initialize.
-
16506  InitStatInfo(pStats->total);
-
16507  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
-
16508  InitStatInfo(pStats->memoryType[i]);
-
16509  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
-
16510  InitStatInfo(pStats->memoryHeap[i]);
-
16511 
-
16512  // Process default pools.
-
16513  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
-
16514  {
-
16515  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
-
16516  VMA_ASSERT(pBlockVector);
-
16517  pBlockVector->AddStats(pStats);
-
16518  }
-
16519 
-
16520  // Process custom pools.
-
16521  {
-
16522  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
-
16523  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
-
16524  {
-
16525  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
-
16526  }
-
16527  }
-
16528 
-
16529  // Process dedicated allocations.
-
16530  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
-
16531  {
-
16532  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
-
16533  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
-
16534  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
-
16535  VMA_ASSERT(pDedicatedAllocVector);
-
16536  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
-
16537  {
-
16538  VmaStatInfo allocationStatInfo;
-
16539  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
-
16540  VmaAddStatInfo(pStats->total, allocationStatInfo);
-
16541  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
-
16542  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
-
16543  }
-
16544  }
-
16545 
-
16546  // Postprocess.
-
16547  VmaPostprocessCalcStatInfo(pStats->total);
-
16548  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
-
16549  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
-
16550  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
-
16551  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
-
16552 }
+
16477 
+
16478  switch(allocation->GetType())
+
16479  {
+
16480  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
+
16481  {
+
16482  VmaBlockVector* pBlockVector = VMA_NULL;
+
16483  VmaPool hPool = allocation->GetBlock()->GetParentPool();
+
16484  if(hPool != VK_NULL_HANDLE)
+
16485  {
+
16486  pBlockVector = &hPool->m_BlockVector;
+
16487  }
+
16488  else
+
16489  {
+
16490  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
+
16491  pBlockVector = m_pBlockVectors[memTypeIndex];
+
16492  }
+
16493  pBlockVector->Free(allocation);
+
16494  }
+
16495  break;
+
16496  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+
16497  FreeDedicatedMemory(allocation);
+
16498  break;
+
16499  default:
+
16500  VMA_ASSERT(0);
+
16501  }
+
16502  }
+
16503 
+
16504  // Do this regardless of whether the allocation is lost. Lost allocations still count toward Budget.AllocationBytes.
+
16505  m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize());
+
16506  allocation->SetUserData(this, VMA_NULL);
+
16507  m_AllocationObjectAllocator.Free(allocation);
+
16508  }
+
16509  }
+
16510 }
+
16511 
+
16512 VkResult VmaAllocator_T::ResizeAllocation(
+
16513  const VmaAllocation alloc,
+
16514  VkDeviceSize newSize)
+
16515 {
+
16516  // This function is deprecated and so it does nothing. It's left for backward compatibility.
+
16517  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
+
16518  {
+
16519  return VK_ERROR_VALIDATION_FAILED_EXT;
+
16520  }
+
16521  if(newSize == alloc->GetSize())
+
16522  {
+
16523  return VK_SUCCESS;
+
16524  }
+
16525  return VK_ERROR_OUT_OF_POOL_MEMORY;
+
16526 }
+
16527 
+
16528 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
+
16529 {
+
16530  // Initialize.
+
16531  InitStatInfo(pStats->total);
+
16532  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
+
16533  InitStatInfo(pStats->memoryType[i]);
+
16534  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
+
16535  InitStatInfo(pStats->memoryHeap[i]);
+
16536 
+
16537  // Process default pools.
+
16538  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
+
16539  {
+
16540  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
+
16541  VMA_ASSERT(pBlockVector);
+
16542  pBlockVector->AddStats(pStats);
+
16543  }
+
16544 
+
16545  // Process custom pools.
+
16546  {
+
16547  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
+
16548  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
+
16549  {
+
16550  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
+
16551  }
+
16552  }
16553 
-
16554 void VmaAllocator_T::GetBudget(VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount)
-
16555 {
-
16556 #if VMA_MEMORY_BUDGET
-
16557  if(m_UseExtMemoryBudget)
-
16558  {
-
16559  if(m_Budget.m_OperationsSinceBudgetFetch < 30)
-
16560  {
-
16561  VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex);
-
16562  for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
-
16563  {
-
16564  const uint32_t heapIndex = firstHeap + i;
-
16565 
-
16566  outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
-
16567  outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
-
16568 
-
16569  if(m_Budget.m_VulkanUsage[heapIndex] + outBudget->blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex])
-
16570  {
-
16571  outBudget->usage = m_Budget.m_VulkanUsage[heapIndex] +
-
16572  outBudget->blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
-
16573  }
-
16574  else
-
16575  {
-
16576  outBudget->usage = 0;
-
16577  }
+
16554  // Process dedicated allocations.
+
16555  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
+
16556  {
+
16557  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
+
16558  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
+
16559  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
+
16560  VMA_ASSERT(pDedicatedAllocVector);
+
16561  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
+
16562  {
+
16563  VmaStatInfo allocationStatInfo;
+
16564  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
+
16565  VmaAddStatInfo(pStats->total, allocationStatInfo);
+
16566  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
+
16567  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
+
16568  }
+
16569  }
+
16570 
+
16571  // Postprocess.
+
16572  VmaPostprocessCalcStatInfo(pStats->total);
+
16573  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
+
16574  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
+
16575  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
+
16576  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
+
16577 }
16578 
-
16579  // Have to take MIN with heap size because explicit HeapSizeLimit is included in it.
-
16580  outBudget->budget = VMA_MIN(
-
16581  m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size);
-
16582  }
-
16583  }
-
16584  else
+
16579 void VmaAllocator_T::GetBudget(VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount)
+
16580 {
+
16581 #if VMA_MEMORY_BUDGET
+
16582  if(m_UseExtMemoryBudget)
+
16583  {
+
16584  if(m_Budget.m_OperationsSinceBudgetFetch < 30)
16585  {
-
16586  UpdateVulkanBudget(); // Outside of mutex lock
-
16587  GetBudget(outBudget, firstHeap, heapCount); // Recursion
-
16588  }
-
16589  }
-
16590  else
-
16591 #endif
-
16592  {
-
16593  for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
-
16594  {
-
16595  const uint32_t heapIndex = firstHeap + i;
-
16596 
-
16597  outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
-
16598  outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
-
16599 
-
16600  outBudget->usage = outBudget->blockBytes;
-
16601  outBudget->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristic.
-
16602  }
-
16603  }
-
16604 }
-
16605 
-
16606 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
-
16607 
-
16608 VkResult VmaAllocator_T::DefragmentationBegin(
-
16609  const VmaDefragmentationInfo2& info,
-
16610  VmaDefragmentationStats* pStats,
-
16611  VmaDefragmentationContext* pContext)
-
16612 {
-
16613  if(info.pAllocationsChanged != VMA_NULL)
-
16614  {
-
16615  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
-
16616  }
-
16617 
-
16618  *pContext = vma_new(this, VmaDefragmentationContext_T)(
-
16619  this, m_CurrentFrameIndex.load(), info.flags, pStats);
-
16620 
-
16621  (*pContext)->AddPools(info.poolCount, info.pPools);
-
16622  (*pContext)->AddAllocations(
-
16623  info.allocationCount, info.pAllocations, info.pAllocationsChanged);
+
16586  VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex);
+
16587  for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
+
16588  {
+
16589  const uint32_t heapIndex = firstHeap + i;
+
16590 
+
16591  outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
+
16592  outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
+
16593 
+
16594  if(m_Budget.m_VulkanUsage[heapIndex] + outBudget->blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex])
+
16595  {
+
16596  outBudget->usage = m_Budget.m_VulkanUsage[heapIndex] +
+
16597  outBudget->blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
+
16598  }
+
16599  else
+
16600  {
+
16601  outBudget->usage = 0;
+
16602  }
+
16603 
+
16604  // Have to take MIN with heap size because explicit HeapSizeLimit is included in it.
+
16605  outBudget->budget = VMA_MIN(
+
16606  m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size);
+
16607  }
+
16608  }
+
16609  else
+
16610  {
+
16611  UpdateVulkanBudget(); // Outside of mutex lock
+
16612  GetBudget(outBudget, firstHeap, heapCount); // Recursion
+
16613  }
+
16614  }
+
16615  else
+
16616 #endif
+
16617  {
+
16618  for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
+
16619  {
+
16620  const uint32_t heapIndex = firstHeap + i;
+
16621 
+
16622  outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
+
16623  outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
16624 
-
16625  VkResult res = (*pContext)->Defragment(
-
16626  info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
-
16627  info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
-
16628  info.commandBuffer, pStats, info.flags);
-
16629 
-
16630  if(res != VK_NOT_READY)
-
16631  {
-
16632  vma_delete(this, *pContext);
-
16633  *pContext = VMA_NULL;
-
16634  }
-
16635 
-
16636  return res;
-
16637 }
-
16638 
-
16639 VkResult VmaAllocator_T::DefragmentationEnd(
-
16640  VmaDefragmentationContext context)
-
16641 {
-
16642  vma_delete(this, context);
-
16643  return VK_SUCCESS;
-
16644 }
+
16625  outBudget->usage = outBudget->blockBytes;
+
16626  outBudget->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristic.
+
16627  }
+
16628  }
+
16629 }
+
16630 
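GetBudget() backs the public vmaGetBudget(), which fills one VmaBudget per memory heap: fresh VK_EXT_memory_budget numbers when available, otherwise the 80%-of-heap-size fallback seen above. A sketch, assuming `allocator`:

    const VkPhysicalDeviceMemoryProperties* memProps = nullptr;
    vmaGetMemoryProperties(allocator, &memProps);

    VmaBudget budgets[VK_MAX_MEMORY_HEAPS] = {};
    vmaGetBudget(allocator, budgets);
    for(uint32_t heapIndex = 0; heapIndex < memProps->memoryHeapCount; ++heapIndex)
    {
        if(budgets[heapIndex].usage > budgets[heapIndex].budget)
        {
            // Over budget on this heap - consider freeing or streaming out resources.
        }
    }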
+
16631 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
+
16632 
+
16633 VkResult VmaAllocator_T::DefragmentationBegin(
+
16634  const VmaDefragmentationInfo2& info,
+
16635  VmaDefragmentationStats* pStats,
+
16636  VmaDefragmentationContext* pContext)
+
16637 {
+
16638  if(info.pAllocationsChanged != VMA_NULL)
+
16639  {
+
16640  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
+
16641  }
+
16642 
+
16643  *pContext = vma_new(this, VmaDefragmentationContext_T)(
+
16644  this, m_CurrentFrameIndex.load(), info.flags, pStats);
16645 
-
16646 VkResult VmaAllocator_T::DefragmentationPassBegin(
-
16647  VmaDefragmentationPassInfo* pInfo,
-
16648  VmaDefragmentationContext context)
-
16649 {
-
16650  return context->DefragmentPassBegin(pInfo);
-
16651 }
-
16652 VkResult VmaAllocator_T::DefragmentationPassEnd(
-
16653  VmaDefragmentationContext context)
-
16654 {
-
16655  return context->DefragmentPassEnd();
-
16656 
-
16657 }
-
16658 
-
16659 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
-
16660 {
-
16661  if(hAllocation->CanBecomeLost())
-
16662  {
-
16663  /*
-
16664  Warning: This is a carefully designed algorithm.
-
16665  Do not modify unless you really know what you're doing :)
-
16666  */
-
16667  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
-
16668  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
-
16669  for(;;)
-
16670  {
-
16671  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
-
16672  {
-
16673  pAllocationInfo->memoryType = UINT32_MAX;
-
16674  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
-
16675  pAllocationInfo->offset = 0;
-
16676  pAllocationInfo->size = hAllocation->GetSize();
-
16677  pAllocationInfo->pMappedData = VMA_NULL;
-
16678  pAllocationInfo->pUserData = hAllocation->GetUserData();
-
16679  return;
-
16680  }
-
16681  else if(localLastUseFrameIndex == localCurrFrameIndex)
-
16682  {
-
16683  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
-
16684  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
-
16685  pAllocationInfo->offset = hAllocation->GetOffset();
-
16686  pAllocationInfo->size = hAllocation->GetSize();
-
16687  pAllocationInfo->pMappedData = VMA_NULL;
-
16688  pAllocationInfo->pUserData = hAllocation->GetUserData();
-
16689  return;
-
16690  }
-
16691  else // Last use time earlier than current time.
-
16692  {
-
16693  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
-
16694  {
-
16695  localLastUseFrameIndex = localCurrFrameIndex;
-
16696  }
-
16697  }
-
16698  }
-
16699  }
-
16700  else
-
16701  {
-
16702 #if VMA_STATS_STRING_ENABLED
-
16703  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
-
16704  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
-
16705  for(;;)
-
16706  {
-
16707  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
-
16708  if(localLastUseFrameIndex == localCurrFrameIndex)
-
16709  {
-
16710  break;
-
16711  }
-
16712  else // Last use time earlier than current time.
-
16713  {
-
16714  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
-
16715  {
-
16716  localLastUseFrameIndex = localCurrFrameIndex;
-
16717  }
-
16718  }
-
16719  }
-
16720 #endif
-
16721 
-
16722  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
-
16723  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
-
16724  pAllocationInfo->offset = hAllocation->GetOffset();
-
16725  pAllocationInfo->size = hAllocation->GetSize();
-
16726  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
-
16727  pAllocationInfo->pUserData = hAllocation->GetUserData();
-
16728  }
-
16729 }
-
16730 
-
16731 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
-
16732 {
-
16733  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
-
16734  if(hAllocation->CanBecomeLost())
-
16735  {
-
16736  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
-
16737  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
-
16738  for(;;)
-
16739  {
-
16740  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
-
16741  {
-
16742  return false;
+
16646  (*pContext)->AddPools(info.poolCount, info.pPools);
+
16647  (*pContext)->AddAllocations(
+
16648  info.allocationCount, info.pAllocations, info.pAllocationsChanged);
+
16649 
+
16650  VkResult res = (*pContext)->Defragment(
+
16651  info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
+
16652  info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
+
16653  info.commandBuffer, pStats, info.flags);
+
16654 
+
16655  if(res != VK_NOT_READY)
+
16656  {
+
16657  vma_delete(this, *pContext);
+
16658  *pContext = VMA_NULL;
+
16659  }
+
16660 
+
16661  return res;
+
16662 }
+
16663 
+
16664 VkResult VmaAllocator_T::DefragmentationEnd(
+
16665  VmaDefragmentationContext context)
+
16666 {
+
16667  vma_delete(this, context);
+
16668  return VK_SUCCESS;
+
16669 }
+
16670 
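DefragmentationBegin()/DefragmentationEnd() implement the public pair vmaDefragmentationBegin()/vmaDefragmentationEnd(). A CPU-only sketch, assuming a std::vector<VmaAllocation> `allocs` gathered elsewhere:

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = (uint32_t)allocs.size();
    defragInfo.pAllocations = allocs.data();
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
    defragInfo.commandBuffer = VK_NULL_HANDLE; // no GPU-side moves

    VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
    VmaDefragmentationStats stats = {};
    VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, &stats, &defragCtx);
    // Safe even when Begin already finished: the res != VK_NOT_READY branch
    // above has then deleted the context and returned VK_NULL_HANDLE.
    vmaDefragmentationEnd(allocator, defragCtx);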
+
16671 VkResult VmaAllocator_T::DefragmentationPassBegin(
+
16672  VmaDefragmentationPassInfo* pInfo,
+
16673  VmaDefragmentationContext context)
+
16674 {
+
16675  return context->DefragmentPassBegin(pInfo);
+
16676 }
+
16677 VkResult VmaAllocator_T::DefragmentationPassEnd(
+
16678  VmaDefragmentationContext context)
+
16679 {
+
16680  return context->DefragmentPassEnd();
+
16681 
+
16682 }
+
16683 
+
16684 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
+
16685 {
+
16686  if(hAllocation->CanBecomeLost())
+
16687  {
+
16688  /*
+
16689  Warning: This is a carefully designed algorithm.
+
16690  Do not modify unless you really know what you're doing :)
+
16691  */
+
16692  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
+
16693  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
+
16694  for(;;)
+
16695  {
+
16696  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
+
16697  {
+
16698  pAllocationInfo->memoryType = UINT32_MAX;
+
16699  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
+
16700  pAllocationInfo->offset = 0;
+
16701  pAllocationInfo->size = hAllocation->GetSize();
+
16702  pAllocationInfo->pMappedData = VMA_NULL;
+
16703  pAllocationInfo->pUserData = hAllocation->GetUserData();
+
16704  return;
+
16705  }
+
16706  else if(localLastUseFrameIndex == localCurrFrameIndex)
+
16707  {
+
16708  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
+
16709  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
+
16710  pAllocationInfo->offset = hAllocation->GetOffset();
+
16711  pAllocationInfo->size = hAllocation->GetSize();
+
16712  pAllocationInfo->pMappedData = VMA_NULL;
+
16713  pAllocationInfo->pUserData = hAllocation->GetUserData();
+
16714  return;
+
16715  }
+
16716  else // Last use time earlier than current time.
+
16717  {
+
16718  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
+
16719  {
+
16720  localLastUseFrameIndex = localCurrFrameIndex;
+
16721  }
+
16722  }
+
16723  }
+
16724  }
+
16725  else
+
16726  {
+
16727 #if VMA_STATS_STRING_ENABLED
+
16728  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
+
16729  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
+
16730  for(;;)
+
16731  {
+
16732  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
+
16733  if(localLastUseFrameIndex == localCurrFrameIndex)
+
16734  {
+
16735  break;
+
16736  }
+
16737  else // Last use time earlier than current time.
+
16738  {
+
16739  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
+
16740  {
+
16741  localLastUseFrameIndex = localCurrFrameIndex;
+
16742  }
16743  }
-
16744  else if(localLastUseFrameIndex == localCurrFrameIndex)
-
16745  {
-
16746  return true;
-
16747  }
-
16748  else // Last use time earlier than current time.
-
16749  {
-
16750  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
-
16751  {
-
16752  localLastUseFrameIndex = localCurrFrameIndex;
-
16753  }
-
16754  }
-
16755  }
-
16756  }
-
16757  else
-
16758  {
-
16759 #if VMA_STATS_STRING_ENABLED
-
16760  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
-
16761  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
-
16762  for(;;)
-
16763  {
-
16764  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
-
16765  if(localLastUseFrameIndex == localCurrFrameIndex)
+
16744  }
+
16745 #endif
+
16746 
+
16747  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
+
16748  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
+
16749  pAllocationInfo->offset = hAllocation->GetOffset();
+
16750  pAllocationInfo->size = hAllocation->GetSize();
+
16751  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
+
16752  pAllocationInfo->pUserData = hAllocation->GetUserData();
+
16753  }
+
16754 }
+
16755 
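On the user side this is vmaGetAllocationInfo(). With lost allocations enabled, the first branch above is observable as a sentinel value. A sketch, assuming a VmaAllocation `alloc`:

    VmaAllocationInfo info = {};
    vmaGetAllocationInfo(allocator, alloc, &info);
    if(info.memoryType == UINT32_MAX)
    {
        // Allocation became lost: deviceMemory is VK_NULL_HANDLE and only
        // size/pUserData remain meaningful. Recreate the resource.
    }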
+
16756 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
+
16757 {
+
16758  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
+
16759  if(hAllocation->CanBecomeLost())
+
16760  {
+
16761  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
+
16762  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
+
16763  for(;;)
+
16764  {
+
16765  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
16766  {
-
16767  break;
+
16767  return false;
16768  }
-
16769  else // Last use time earlier than current time.
+
16769  else if(localLastUseFrameIndex == localCurrFrameIndex)
16770  {
-
16771  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
-
16772  {
-
16773  localLastUseFrameIndex = localCurrFrameIndex;
-
16774  }
-
16775  }
-
16776  }
-
16777 #endif
-
16778 
-
16779  return true;
-
16780  }
-
16781 }
-
16782 
-
16783 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
-
16784 {
-
16785  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
-
16786 
-
16787  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
-
16788 
-
16789  if(newCreateInfo.maxBlockCount == 0)
-
16790  {
-
16791  newCreateInfo.maxBlockCount = SIZE_MAX;
-
16792  }
-
16793  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
-
16794  {
-
16795  return VK_ERROR_INITIALIZATION_FAILED;
-
16796  }
-
16797  // Memory type index out of range or forbidden.
-
16798  if(pCreateInfo->memoryTypeIndex >= GetMemoryTypeCount() ||
-
16799  ((1u << pCreateInfo->memoryTypeIndex) & m_GlobalMemoryTypeBits) == 0)
-
16800  {
-
16801  return VK_ERROR_FEATURE_NOT_PRESENT;
-
16802  }
+
16771  return true;
+
16772  }
+
16773  else // Last use time earlier than current time.
+
16774  {
+
16775  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
+
16776  {
+
16777  localLastUseFrameIndex = localCurrFrameIndex;
+
16778  }
+
16779  }
+
16780  }
+
16781  }
+
16782  else
+
16783  {
+
16784 #if VMA_STATS_STRING_ENABLED
+
16785  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
+
16786  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
+
16787  for(;;)
+
16788  {
+
16789  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
+
16790  if(localLastUseFrameIndex == localCurrFrameIndex)
+
16791  {
+
16792  break;
+
16793  }
+
16794  else // Last use time earlier than current time.
+
16795  {
+
16796  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
+
16797  {
+
16798  localLastUseFrameIndex = localCurrFrameIndex;
+
16799  }
+
16800  }
+
16801  }
+
16802 #endif
16803 
-
16804  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
-
16805 
-
16806  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
+
16804  return true;
+
16805  }
+
16806 }
16807 
-
16808  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
-
16809  if(res != VK_SUCCESS)
-
16810  {
-
16811  vma_delete(this, *pPool);
-
16812  *pPool = VMA_NULL;
-
16813  return res;
-
16814  }
-
16815 
-
16816  // Add to m_Pools.
-
16817  {
-
16818  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
-
16819  (*pPool)->SetId(m_NextPoolId++);
-
16820  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
+
16808 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
+
16809 {
+
16810  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
+
16811 
+
16812  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
+
16813 
+
16814  if(newCreateInfo.maxBlockCount == 0)
+
16815  {
+
16816  newCreateInfo.maxBlockCount = SIZE_MAX;
+
16817  }
+
16818  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
+
16819  {
+
16820  return VK_ERROR_INITIALIZATION_FAILED;
16821  }
-
16822 
-
16823  return VK_SUCCESS;
-
16824 }
-
16825 
-
16826 void VmaAllocator_T::DestroyPool(VmaPool pool)
-
16827 {
-
16828  // Remove from m_Pools.
-
16829  {
-
16830  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
-
16831  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
-
16832  VMA_ASSERT(success && "Pool not found in Allocator.");
-
16833  }
-
16834 
-
16835  vma_delete(this, pool);
-
16836 }
-
16837 
-
16838 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
-
16839 {
-
16840  pool->m_BlockVector.GetPoolStats(pPoolStats);
-
16841 }
-
16842 
-
16843 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
-
16844 {
-
16845  m_CurrentFrameIndex.store(frameIndex);
-
16846 
-
16847 #if VMA_MEMORY_BUDGET
-
16848  if(m_UseExtMemoryBudget)
-
16849  {
-
16850  UpdateVulkanBudget();
-
16851  }
-
16852 #endif // #if VMA_MEMORY_BUDGET
-
16853 }
-
16854 
-
16855 void VmaAllocator_T::MakePoolAllocationsLost(
-
16856  VmaPool hPool,
-
16857  size_t* pLostAllocationCount)
-
16858 {
-
16859  hPool->m_BlockVector.MakePoolAllocationsLost(
-
16860  m_CurrentFrameIndex.load(),
-
16861  pLostAllocationCount);
-
16862 }
-
16863 
-
16864 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
-
16865 {
-
16866  return hPool->m_BlockVector.CheckCorruption();
-
16867 }
-
16868 
-
16869 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
-
16870 {
-
16871  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
-
16872 
-
16873  // Process default pools.
-
16874  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
-
16875  {
-
16876  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
-
16877  {
-
16878  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
-
16879  VMA_ASSERT(pBlockVector);
-
16880  VkResult localRes = pBlockVector->CheckCorruption();
-
16881  switch(localRes)
-
16882  {
-
16883  case VK_ERROR_FEATURE_NOT_PRESENT:
-
16884  break;
-
16885  case VK_SUCCESS:
-
16886  finalRes = VK_SUCCESS;
-
16887  break;
-
16888  default:
-
16889  return localRes;
-
16890  }
-
16891  }
-
16892  }
+
16822  // Memory type index out of range or forbidden.
+
16823  if(pCreateInfo->memoryTypeIndex >= GetMemoryTypeCount() ||
+
16824  ((1u << pCreateInfo->memoryTypeIndex) & m_GlobalMemoryTypeBits) == 0)
+
16825  {
+
16826  return VK_ERROR_FEATURE_NOT_PRESENT;
+
16827  }
+
16828 
+
16829  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
+
16830 
+
16831  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
+
16832 
+
16833  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
+
16834  if(res != VK_SUCCESS)
+
16835  {
+
16836  vma_delete(this, *pPool);
+
16837  *pPool = VMA_NULL;
+
16838  return res;
+
16839  }
+
16840 
+
16841  // Add to m_Pools.
+
16842  {
+
16843  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
+
16844  (*pPool)->SetId(m_NextPoolId++);
+
16845  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
+
16846  }
+
16847 
+
16848  return VK_SUCCESS;
+
16849 }
+
16850 
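// Editorial usage sketch (not part of the header): CreatePool() above backs the
// public vmaCreatePool(). The helper name ExampleCreatePool and its parameters
// are hypothetical; memTypeIndex must pass the range/forbidden-type checks above.
static VkResult ExampleCreatePool(VmaAllocator allocator, uint32_t memTypeIndex, VmaPool* pPool)
{
    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.maxBlockCount = 0; // 0 is promoted to SIZE_MAX by CreatePool().
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, pPool);
    // When the pool is no longer needed: vmaDestroyPool(allocator, *pPool);
    return res;
}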
+
16851 void VmaAllocator_T::DestroyPool(VmaPool pool)
+
16852 {
+
16853  // Remove from m_Pools.
+
16854  {
+
16855  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
+
16856  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
+
16857  VMA_ASSERT(success && "Pool not found in Allocator.");
+
16858  }
+
16859 
+
16860  vma_delete(this, pool);
+
16861 }
+
16862 
+
16863 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
+
16864 {
+
16865  pool->m_BlockVector.GetPoolStats(pPoolStats);
+
16866 }
+
16867 
+
16868 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
+
16869 {
+
16870  m_CurrentFrameIndex.store(frameIndex);
+
16871 
+
16872 #if VMA_MEMORY_BUDGET
+
16873  if(m_UseExtMemoryBudget)
+
16874  {
+
16875  UpdateVulkanBudget();
+
16876  }
+
16877 #endif // #if VMA_MEMORY_BUDGET
+
16878 }
+
16879 
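// Editorial usage sketch: SetCurrentFrameIndex() is reached via the public
// vmaSetCurrentFrameIndex(), typically once per frame, so that lost-allocation
// bookkeeping and (with VK_EXT_memory_budget) the cached budget stay fresh.
// The helper name and frameCounter are hypothetical.
static void ExampleAdvanceFrame(VmaAllocator allocator, uint32_t frameCounter)
{
    // Any increasing value is fine as long as it is never VMA_FRAME_INDEX_LOST.
    vmaSetCurrentFrameIndex(allocator, frameCounter);
}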
+
16880 void VmaAllocator_T::MakePoolAllocationsLost(
+
16881  VmaPool hPool,
+
16882  size_t* pLostAllocationCount)
+
16883 {
+
16884  hPool->m_BlockVector.MakePoolAllocationsLost(
+
16885  m_CurrentFrameIndex.load(),
+
16886  pLostAllocationCount);
+
16887 }
+
16888 
+
16889 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
+
16890 {
+
16891  return hPool->m_BlockVector.CheckCorruption();
+
16892 }
16893 
-
16894  // Process custom pools.
-
16895  {
-
16896  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
-
16897  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
-
16898  {
-
16899  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
-
16900  {
-
16901  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
-
16902  switch(localRes)
-
16903  {
-
16904  case VK_ERROR_FEATURE_NOT_PRESENT:
-
16905  break;
-
16906  case VK_SUCCESS:
-
16907  finalRes = VK_SUCCESS;
-
16908  break;
-
16909  default:
-
16910  return localRes;
-
16911  }
-
16912  }
-
16913  }
-
16914  }
-
16915 
-
16916  return finalRes;
-
16917 }
+
16894 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
+
16895 {
+
16896  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
+
16897 
+
16898  // Process default pools.
+
16899  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
+
16900  {
+
16901  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
+
16902  {
+
16903  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
+
16904  VMA_ASSERT(pBlockVector);
+
16905  VkResult localRes = pBlockVector->CheckCorruption();
+
16906  switch(localRes)
+
16907  {
+
16908  case VK_ERROR_FEATURE_NOT_PRESENT:
+
16909  break;
+
16910  case VK_SUCCESS:
+
16911  finalRes = VK_SUCCESS;
+
16912  break;
+
16913  default:
+
16914  return localRes;
+
16915  }
+
16916  }
+
16917  }
16918 
-
16919 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
-
16920 {
-
16921  *pAllocation = m_AllocationObjectAllocator.Allocate(VMA_FRAME_INDEX_LOST, false);
-
16922  (*pAllocation)->InitLost();
-
16923 }
-
16924 
-
16925 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
-
16926 {
-
16927  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
-
16928 
-
16929  // HeapSizeLimit is in effect for this heap.
-
16930  if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0)
-
16931  {
-
16932  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
-
16933  VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex];
-
16934  for(;;)
-
16935  {
-
16936  const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize;
-
16937  if(blockBytesAfterAllocation > heapSize)
-
16938  {
-
16939  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-
16940  }
-
16941  if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation))
-
16942  {
-
16943  break;
-
16944  }
-
16945  }
-
16946  }
-
16947  else
-
16948  {
-
16949  m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize;
-
16950  }
-
16951 
-
16952  // VULKAN CALL vkAllocateMemory.
-
16953  VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
-
16954 
-
16955  if(res == VK_SUCCESS)
+
16919  // Process custom pools.
+
16920  {
+
16921  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
+
16922  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
+
16923  {
+
16924  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
+
16925  {
+
16926  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
+
16927  switch(localRes)
+
16928  {
+
16929  case VK_ERROR_FEATURE_NOT_PRESENT:
+
16930  break;
+
16931  case VK_SUCCESS:
+
16932  finalRes = VK_SUCCESS;
+
16933  break;
+
16934  default:
+
16935  return localRes;
+
16936  }
+
16937  }
+
16938  }
+
16939  }
+
16940 
+
16941  return finalRes;
+
16942 }
+
16943 
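// Editorial usage sketch: CheckCorruption() above backs the public
// vmaCheckCorruption(). Passing UINT32_MAX inspects all memory types;
// VK_ERROR_FEATURE_NOT_PRESENT means no inspected block vector had corruption
// detection enabled. The helper name is hypothetical.
static bool ExampleMemoryIsClean(VmaAllocator allocator)
{
    const VkResult res = vmaCheckCorruption(allocator, UINT32_MAX);
    // Treat "nothing was checkable" the same as "checked and clean" here.
    return res == VK_SUCCESS || res == VK_ERROR_FEATURE_NOT_PRESENT;
}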
+
16944 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
+
16945 {
+
16946  *pAllocation = m_AllocationObjectAllocator.Allocate(VMA_FRAME_INDEX_LOST, false);
+
16947  (*pAllocation)->InitLost();
+
16948 }
+
16949 
+
16950 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
+
16951 {
+
16952  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
+
16953 
+
16954  // HeapSizeLimit is in effect for this heap.
+
16955  if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0)
16956  {
-
16957 #if VMA_MEMORY_BUDGET
-
16958  ++m_Budget.m_OperationsSinceBudgetFetch;
-
16959 #endif
-
16960 
-
16961  // Informative callback.
-
16962  if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
-
16963  {
-
16964  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize, m_DeviceMemoryCallbacks.pUserData);
-
16965  }
-
16966  }
-
16967  else
-
16968  {
-
16969  m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize;
-
16970  }
-
16971 
-
16972  return res;
-
16973 }
-
16974 
-
16975 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
-
16976 {
-
16977  // Informative callback.
-
16978  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
-
16979  {
-
16980  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size, m_DeviceMemoryCallbacks.pUserData);
-
16981  }
-
16982 
-
16983  // VULKAN CALL vkFreeMemory.
-
16984  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
+
16957  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
+
16958  VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex];
+
16959  for(;;)
+
16960  {
+
16961  const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize;
+
16962  if(blockBytesAfterAllocation > heapSize)
+
16963  {
+
16964  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
16965  }
+
16966  if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation))
+
16967  {
+
16968  break;
+
16969  }
+
16970  }
+
16971  }
+
16972  else
+
16973  {
+
16974  m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize;
+
16975  }
+
16976 
+
16977  // VULKAN CALL vkAllocateMemory.
+
16978  VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
+
16979 
+
16980  if(res == VK_SUCCESS)
+
16981  {
+
16982 #if VMA_MEMORY_BUDGET
+
16983  ++m_Budget.m_OperationsSinceBudgetFetch;
+
16984 #endif
16985 
-
16986  m_Budget.m_BlockBytes[MemoryTypeIndexToHeapIndex(memoryType)] -= size;
-
16987 }
-
16988 
-
16989 VkResult VmaAllocator_T::BindVulkanBuffer(
-
16990  VkDeviceMemory memory,
-
16991  VkDeviceSize memoryOffset,
-
16992  VkBuffer buffer,
-
16993  const void* pNext)
-
16994 {
-
16995  if(pNext != VMA_NULL)
-
16996  {
-
16997 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
-
16998  if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
-
16999  m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL)
-
17000  {
-
17001  VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR };
-
17002  bindBufferMemoryInfo.pNext = pNext;
-
17003  bindBufferMemoryInfo.buffer = buffer;
-
17004  bindBufferMemoryInfo.memory = memory;
-
17005  bindBufferMemoryInfo.memoryOffset = memoryOffset;
-
17006  return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
-
17007  }
-
17008  else
-
17009 #endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
-
17010  {
-
17011  return VK_ERROR_EXTENSION_NOT_PRESENT;
-
17012  }
-
17013  }
-
17014  else
-
17015  {
-
17016  return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset);
-
17017  }
-
17018 }
-
17019 
-
17020 VkResult VmaAllocator_T::BindVulkanImage(
-
17021  VkDeviceMemory memory,
-
17022  VkDeviceSize memoryOffset,
-
17023  VkImage image,
-
17024  const void* pNext)
-
17025 {
-
17026  if(pNext != VMA_NULL)
-
17027  {
-
17028 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
-
17029  if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
-
17030  m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL)
-
17031  {
-
17032  VkBindImageMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR };
-
17033  bindBufferMemoryInfo.pNext = pNext;
-
17034  bindBufferMemoryInfo.image = image;
-
17035  bindBufferMemoryInfo.memory = memory;
-
17036  bindBufferMemoryInfo.memoryOffset = memoryOffset;
-
17037  return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
-
17038  }
-
17039  else
-
17040 #endif // #if VMA_BIND_MEMORY2
-
17041  {
-
17042  return VK_ERROR_EXTENSION_NOT_PRESENT;
-
17043  }
-
17044  }
-
17045  else
-
17046  {
-
17047  return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset);
-
17048  }
-
17049 }
-
17050 
-
17051 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
-
17052 {
-
17053  if(hAllocation->CanBecomeLost())
-
17054  {
-
17055  return VK_ERROR_MEMORY_MAP_FAILED;
-
17056  }
-
17057 
-
17058  switch(hAllocation->GetType())
-
17059  {
-
17060  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
-
17061  {
-
17062  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
-
17063  char *pBytes = VMA_NULL;
-
17064  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
-
17065  if(res == VK_SUCCESS)
-
17066  {
-
17067  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
-
17068  hAllocation->BlockAllocMap();
-
17069  }
-
17070  return res;
-
17071  }
-
17072  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
-
17073  return hAllocation->DedicatedAllocMap(this, ppData);
-
17074  default:
-
17075  VMA_ASSERT(0);
-
17076  return VK_ERROR_MEMORY_MAP_FAILED;
-
17077  }
-
17078 }
-
17079 
-
17080 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
-
17081 {
-
17082  switch(hAllocation->GetType())
-
17083  {
-
17084  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
-
17085  {
-
17086  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
-
17087  hAllocation->BlockAllocUnmap();
-
17088  pBlock->Unmap(this, 1);
-
17089  }
-
17090  break;
-
17091  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
-
17092  hAllocation->DedicatedAllocUnmap(this);
-
17093  break;
-
17094  default:
-
17095  VMA_ASSERT(0);
-
17096  }
-
17097 }
-
17098 
-
17099 VkResult VmaAllocator_T::BindBufferMemory(
-
17100  VmaAllocation hAllocation,
-
17101  VkDeviceSize allocationLocalOffset,
-
17102  VkBuffer hBuffer,
-
17103  const void* pNext)
-
17104 {
-
17105  VkResult res = VK_SUCCESS;
-
17106  switch(hAllocation->GetType())
-
17107  {
-
17108  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
-
17109  res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext);
-
17110  break;
-
17111  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
-
17112  {
-
17113  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
-
17114  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
-
17115  res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext);
-
17116  break;
-
17117  }
-
17118  default:
-
17119  VMA_ASSERT(0);
-
17120  }
-
17121  return res;
+
16986  // Informative callback.
+
16987  if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
+
16988  {
+
16989  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize, m_DeviceMemoryCallbacks.pUserData);
+
16990  }
+
16991  }
+
16992  else
+
16993  {
+
16994  m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize;
+
16995  }
+
16996 
+
16997  return res;
+
16998 }
+
16999 
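// Editorial note: the compare_exchange_strong() loop above makes
// "grow m_BlockBytes only while the heap limit still holds" atomic under
// concurrent allocations. A minimal standalone sketch of the same pattern,
// with hypothetical names (std::atomic is already available in this header):
static bool ExampleTryReserve(std::atomic<VkDeviceSize>& used, VkDeviceSize limit, VkDeviceSize size)
{
    VkDeviceSize current = used.load();
    for(;;)
    {
        if(current + size > limit)
        {
            return false; // Corresponds to VK_ERROR_OUT_OF_DEVICE_MEMORY above.
        }
        // On failure, `current` is reloaded with the latest value and we retry.
        if(used.compare_exchange_strong(current, current + size))
        {
            return true;
        }
    }
}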
+
17000 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
+
17001 {
+
17002  // Informative callback.
+
17003  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
+
17004  {
+
17005  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size, m_DeviceMemoryCallbacks.pUserData);
+
17006  }
+
17007 
+
17008  // VULKAN CALL vkFreeMemory.
+
17009  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
+
17010 
+
17011  m_Budget.m_BlockBytes[MemoryTypeIndexToHeapIndex(memoryType)] -= size;
+
17012 }
+
17013 
+
17014 VkResult VmaAllocator_T::BindVulkanBuffer(
+
17015  VkDeviceMemory memory,
+
17016  VkDeviceSize memoryOffset,
+
17017  VkBuffer buffer,
+
17018  const void* pNext)
+
17019 {
+
17020  if(pNext != VMA_NULL)
+
17021  {
+
17022 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
+
17023  if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
+
17024  m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL)
+
17025  {
+
17026  VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR };
+
17027  bindBufferMemoryInfo.pNext = pNext;
+
17028  bindBufferMemoryInfo.buffer = buffer;
+
17029  bindBufferMemoryInfo.memory = memory;
+
17030  bindBufferMemoryInfo.memoryOffset = memoryOffset;
+
17031  return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
+
17032  }
+
17033  else
+
17034 #endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
+
17035  {
+
17036  return VK_ERROR_EXTENSION_NOT_PRESENT;
+
17037  }
+
17038  }
+
17039  else
+
17040  {
+
17041  return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset);
+
17042  }
+
17043 }
+
17044 
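// Editorial usage sketch: a non-null pNext reaches the vkBindBufferMemory2KHR
// path above and therefore requires VK_KHR_bind_memory2 or Vulkan >= 1.1;
// otherwise VK_ERROR_EXTENSION_NOT_PRESENT is returned. The public entry point
// is vmaBindBufferMemory2(); helper name and parameters are hypothetical.
static VkResult ExampleBindWithPNext(VmaAllocator allocator, VmaAllocation alloc, VkBuffer buf, const void* pNextChain)
{
    // The offset is relative to the beginning of `alloc`, not of the whole VkDeviceMemory block.
    return vmaBindBufferMemory2(allocator, alloc, 0, buf, pNextChain);
}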
+
17045 VkResult VmaAllocator_T::BindVulkanImage(
+
17046  VkDeviceMemory memory,
+
17047  VkDeviceSize memoryOffset,
+
17048  VkImage image,
+
17049  const void* pNext)
+
17050 {
+
17051  if(pNext != VMA_NULL)
+
17052  {
+
17053 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
+
17054  if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
+
17055  m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL)
+
17056  {
+
17057  VkBindImageMemoryInfoKHR bindImageMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR };
+
17058  bindImageMemoryInfo.pNext = pNext;
+
17059  bindImageMemoryInfo.image = image;
+
17060  bindImageMemoryInfo.memory = memory;
+
17061  bindImageMemoryInfo.memoryOffset = memoryOffset;
+
17062  return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindImageMemoryInfo);

+
17063  }
+
17064  else
+
17065 #endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
+
17066  {
+
17067  return VK_ERROR_EXTENSION_NOT_PRESENT;
+
17068  }
+
17069  }
+
17070  else
+
17071  {
+
17072  return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset);
+
17073  }
+
17074 }
+
17075 
+
17076 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
+
17077 {
+
17078  if(hAllocation->CanBecomeLost())
+
17079  {
+
17080  return VK_ERROR_MEMORY_MAP_FAILED;
+
17081  }
+
17082 
+
17083  switch(hAllocation->GetType())
+
17084  {
+
17085  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
+
17086  {
+
17087  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
+
17088  char *pBytes = VMA_NULL;
+
17089  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
+
17090  if(res == VK_SUCCESS)
+
17091  {
+
17092  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
+
17093  hAllocation->BlockAllocMap();
+
17094  }
+
17095  return res;
+
17096  }
+
17097  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+
17098  return hAllocation->DedicatedAllocMap(this, ppData);
+
17099  default:
+
17100  VMA_ASSERT(0);
+
17101  return VK_ERROR_MEMORY_MAP_FAILED;
+
17102  }
+
17103 }
+
17104 
+
17105 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
+
17106 {
+
17107  switch(hAllocation->GetType())
+
17108  {
+
17109  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
+
17110  {
+
17111  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
+
17112  hAllocation->BlockAllocUnmap();
+
17113  pBlock->Unmap(this, 1);
+
17114  }
+
17115  break;
+
17116  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+
17117  hAllocation->DedicatedAllocUnmap(this);
+
17118  break;
+
17119  default:
+
17120  VMA_ASSERT(0);
+
17121  }
17122 }
17123 
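// Editorial usage sketch: Map()/Unmap() are reference counted per allocation
// (see BlockAllocMap()/BlockAllocUnmap() above), so nested pairs are legal as
// long as they balance. Helper name and parameters are hypothetical.
static void ExampleNestedMap(VmaAllocator allocator, VmaAllocation alloc)
{
    void* p1 = VMA_NULL;
    void* p2 = VMA_NULL;
    if(vmaMapMemory(allocator, alloc, &p1) == VK_SUCCESS)
    {
        if(vmaMapMemory(allocator, alloc, &p2) == VK_SUCCESS) // same pointer, count == 2
        {
            vmaUnmapMemory(allocator, alloc);
        }
        vmaUnmapMemory(allocator, alloc); // count back to 0
    }
}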
-
17124 VkResult VmaAllocator_T::BindImageMemory(
+
17124 VkResult VmaAllocator_T::BindBufferMemory(
17125  VmaAllocation hAllocation,
17126  VkDeviceSize allocationLocalOffset,
-
17127  VkImage hImage,
+
17127  VkBuffer hBuffer,
17128  const void* pNext)
17129 {
17130  VkResult res = VK_SUCCESS;
17131  switch(hAllocation->GetType())
17132  {
17133  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
-
17134  res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext);
+
17134  res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext);
17135  break;
17136  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17137  {
-
17138  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
-
17139  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
-
17140  res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext);
+
17138  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
+
17139  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
+
17140  res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext);
17141  break;
17142  }
17143  default:
@@ -14078,1906 +14078,1931 @@ $(function() {
17146  return res;
17147 }
17148 
-
17149 VkResult VmaAllocator_T::FlushOrInvalidateAllocation(
+
17149 VkResult VmaAllocator_T::BindImageMemory(
17150  VmaAllocation hAllocation,
-
17151  VkDeviceSize offset, VkDeviceSize size,
-
17152  VMA_CACHE_OPERATION op)
-
17153 {
-
17154  VkResult res = VK_SUCCESS;
-
17155 
-
17156  VkMappedMemoryRange memRange = {};
-
17157  if(GetFlushOrInvalidateRange(hAllocation, offset, size, memRange))
-
17158  {
-
17159  switch(op)
-
17160  {
-
17161  case VMA_CACHE_FLUSH:
-
17162  res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
-
17163  break;
-
17164  case VMA_CACHE_INVALIDATE:
-
17165  res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
-
17166  break;
-
17167  default:
-
17168  VMA_ASSERT(0);
-
17169  }
+
17151  VkDeviceSize allocationLocalOffset,
+
17152  VkImage hImage,
+
17153  const void* pNext)
+
17154 {
+
17155  VkResult res = VK_SUCCESS;
+
17156  switch(hAllocation->GetType())
+
17157  {
+
17158  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+
17159  res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext);
+
17160  break;
+
17161  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
+
17162  {
+
17163  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
+
17164  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
+
17165  res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext);
+
17166  break;
+
17167  }
+
17168  default:
+
17169  VMA_ASSERT(0);
17170  }
-
17171  // else: Just ignore this call.
-
17172  return res;
-
17173 }
-
17174 
-
17175 VkResult VmaAllocator_T::FlushOrInvalidateAllocations(
-
17176  uint32_t allocationCount,
-
17177  const VmaAllocation* allocations,
-
17178  const VkDeviceSize* offsets, const VkDeviceSize* sizes,
-
17179  VMA_CACHE_OPERATION op)
-
17180 {
-
17181  typedef VmaStlAllocator<VkMappedMemoryRange> RangeAllocator;
-
17182  typedef VmaSmallVector<VkMappedMemoryRange, RangeAllocator, 16> RangeVector;
-
17183  RangeVector ranges = RangeVector(RangeAllocator(GetAllocationCallbacks()));
-
17184 
-
17185  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
-
17186  {
-
17187  const VmaAllocation alloc = allocations[allocIndex];
-
17188  const VkDeviceSize offset = offsets != VMA_NULL ? offsets[allocIndex] : 0;
-
17189  const VkDeviceSize size = sizes != VMA_NULL ? sizes[allocIndex] : VK_WHOLE_SIZE;
-
17190  VkMappedMemoryRange newRange;
-
17191  if(GetFlushOrInvalidateRange(alloc, offset, size, newRange))
-
17192  {
-
17193  ranges.push_back(newRange);
+
17171  return res;
+
17172 }
+
17173 
+
17174 VkResult VmaAllocator_T::FlushOrInvalidateAllocation(
+
17175  VmaAllocation hAllocation,
+
17176  VkDeviceSize offset, VkDeviceSize size,
+
17177  VMA_CACHE_OPERATION op)
+
17178 {
+
17179  VkResult res = VK_SUCCESS;
+
17180 
+
17181  VkMappedMemoryRange memRange = {};
+
17182  if(GetFlushOrInvalidateRange(hAllocation, offset, size, memRange))
+
17183  {
+
17184  switch(op)
+
17185  {
+
17186  case VMA_CACHE_FLUSH:
+
17187  res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
+
17188  break;
+
17189  case VMA_CACHE_INVALIDATE:
+
17190  res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
+
17191  break;
+
17192  default:
+
17193  VMA_ASSERT(0);
17194  }
17195  }
-
17196 
-
17197  VkResult res = VK_SUCCESS;
-
17198  if(!ranges.empty())
-
17199  {
-
17200  switch(op)
-
17201  {
-
17202  case VMA_CACHE_FLUSH:
-
17203  res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
-
17204  break;
-
17205  case VMA_CACHE_INVALIDATE:
-
17206  res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
-
17207  break;
-
17208  default:
-
17209  VMA_ASSERT(0);
-
17210  }
-
17211  }
-
17212  // else: Just ignore this call.
-
17213  return res;
-
17214 }
-
17215 
-
17216 void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation)
-
17217 {
-
17218  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
-
17219 
-
17220  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
-
17221  {
-
17222  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
-
17223  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
-
17224  VMA_ASSERT(pDedicatedAllocations);
-
17225  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
-
17226  VMA_ASSERT(success);
-
17227  }
-
17228 
-
17229  VkDeviceMemory hMemory = allocation->GetMemory();
-
17230 
-
17231  /*
-
17232  There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
-
17233  before vkFreeMemory.
-
17234 
-
17235  if(allocation->GetMappedData() != VMA_NULL)
-
17236  {
-
17237  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
-
17238  }
-
17239  */
-
17240 
-
17241  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
-
17242 
-
17243  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
-
17244 }
-
17245 
-
17246 uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
-
17247 {
-
17248  VkBufferCreateInfo dummyBufCreateInfo;
-
17249  VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
-
17250 
-
17251  uint32_t memoryTypeBits = 0;
-
17252 
-
17253  // Create buffer.
-
17254  VkBuffer buf = VK_NULL_HANDLE;
-
17255  VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
-
17256  m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
-
17257  if(res == VK_SUCCESS)
-
17258  {
-
17259  // Query for supported memory types.
-
17260  VkMemoryRequirements memReq;
-
17261  (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
-
17262  memoryTypeBits = memReq.memoryTypeBits;
-
17263 
-
17264  // Destroy buffer.
-
17265  (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
-
17266  }
+
17196  // else: Just ignore this call.
+
17197  return res;
+
17198 }
+
17199 
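// Editorial usage sketch: FlushOrInvalidateAllocation() backs the public
// vmaFlushAllocation()/vmaInvalidateAllocation(). For HOST_COHERENT memory
// types GetFlushOrInvalidateRange() returns false and the call is a no-op, so
// flushing unconditionally after a CPU write is safe. Helper name and
// parameters are hypothetical.
static void ExampleFlushAfterCpuWrite(VmaAllocator allocator, VmaAllocation alloc)
{
    // ... write through the mapped pointer first ...
    vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
}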
+
17200 VkResult VmaAllocator_T::FlushOrInvalidateAllocations(
+
17201  uint32_t allocationCount,
+
17202  const VmaAllocation* allocations,
+
17203  const VkDeviceSize* offsets, const VkDeviceSize* sizes,
+
17204  VMA_CACHE_OPERATION op)
+
17205 {
+
17206  typedef VmaStlAllocator<VkMappedMemoryRange> RangeAllocator;
+
17207  typedef VmaSmallVector<VkMappedMemoryRange, RangeAllocator, 16> RangeVector;
+
17208  RangeVector ranges = RangeVector(RangeAllocator(GetAllocationCallbacks()));
+
17209 
+
17210  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
+
17211  {
+
17212  const VmaAllocation alloc = allocations[allocIndex];
+
17213  const VkDeviceSize offset = offsets != VMA_NULL ? offsets[allocIndex] : 0;
+
17214  const VkDeviceSize size = sizes != VMA_NULL ? sizes[allocIndex] : VK_WHOLE_SIZE;
+
17215  VkMappedMemoryRange newRange;
+
17216  if(GetFlushOrInvalidateRange(alloc, offset, size, newRange))
+
17217  {
+
17218  ranges.push_back(newRange);
+
17219  }
+
17220  }
+
17221 
+
17222  VkResult res = VK_SUCCESS;
+
17223  if(!ranges.empty())
+
17224  {
+
17225  switch(op)
+
17226  {
+
17227  case VMA_CACHE_FLUSH:
+
17228  res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
+
17229  break;
+
17230  case VMA_CACHE_INVALIDATE:
+
17231  res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
+
17232  break;
+
17233  default:
+
17234  VMA_ASSERT(0);
+
17235  }
+
17236  }
+
17237  // else: Just ignore this call.
+
17238  return res;
+
17239 }
+
17240 
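// Editorial usage sketch: the batched variant above turns many allocations
// into a single vkFlushMappedMemoryRanges() call. Per the loop above, passing
// null offsets/sizes means offset 0 and VK_WHOLE_SIZE for every element.
// Helper name and parameters are hypothetical.
static void ExampleBatchFlush(VmaAllocator allocator, uint32_t count, const VmaAllocation* allocs)
{
    vmaFlushAllocations(allocator, count, allocs, VMA_NULL, VMA_NULL);
}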
+
17241 void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation)
+
17242 {
+
17243  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
+
17244 
+
17245  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
+
17246  {
+
17247  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
+
17248  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
+
17249  VMA_ASSERT(pDedicatedAllocations);
+
17250  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
+
17251  VMA_ASSERT(success);
+
17252  }
+
17253 
+
17254  VkDeviceMemory hMemory = allocation->GetMemory();
+
17255 
+
17256  /*
+
17257  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
+
17258  before vkFreeMemory.
+
17259 
+
17260  if(allocation->GetMappedData() != VMA_NULL)
+
17261  {
+
17262  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
+
17263  }
+
17264  */
+
17265 
+
17266  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
17267 
-
17268  return memoryTypeBits;
+
17268  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
17269 }
17270 
-
17271 uint32_t VmaAllocator_T::CalculateGlobalMemoryTypeBits() const
+
17271 uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
17272 {
-
17273  // Make sure memory information is already fetched.
-
17274  VMA_ASSERT(GetMemoryTypeCount() > 0);
+
17273  VkBufferCreateInfo dummyBufCreateInfo;
+
17274  VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
17275 
-
17276  uint32_t memoryTypeBits = UINT32_MAX;
+
17276  uint32_t memoryTypeBits = 0;
17277 
-
17278  if(!m_UseAmdDeviceCoherentMemory)
-
17279  {
-
17280  // Exclude memory types that have VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD.
-
17281  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
-
17282  {
-
17283  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
-
17284  {
-
17285  memoryTypeBits &= ~(1u << memTypeIndex);
-
17286  }
-
17287  }
-
17288  }
-
17289 
-
17290  return memoryTypeBits;
-
17291 }
+
17278  // Create buffer.
+
17279  VkBuffer buf = VK_NULL_HANDLE;
+
17280  VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
+
17281  m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
+
17282  if(res == VK_SUCCESS)
+
17283  {
+
17284  // Query for supported memory types.
+
17285  VkMemoryRequirements memReq;
+
17286  (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
+
17287  memoryTypeBits = memReq.memoryTypeBits;
+
17288 
+
17289  // Destroy buffer.
+
17290  (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
+
17291  }
17292 
-
17293 bool VmaAllocator_T::GetFlushOrInvalidateRange(
-
17294  VmaAllocation allocation,
-
17295  VkDeviceSize offset, VkDeviceSize size,
-
17296  VkMappedMemoryRange& outRange) const
+
17293  return memoryTypeBits;
+
17294 }
+
17295 
+
17296 uint32_t VmaAllocator_T::CalculateGlobalMemoryTypeBits() const
17297 {
-
17298  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
-
17299  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
-
17300  {
-
17301  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
-
17302  const VkDeviceSize allocationSize = allocation->GetSize();
-
17303  VMA_ASSERT(offset <= allocationSize);
-
17304 
-
17305  outRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
-
17306  outRange.pNext = VMA_NULL;
-
17307  outRange.memory = allocation->GetMemory();
-
17308 
-
17309  switch(allocation->GetType())
-
17310  {
-
17311  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
-
17312  outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
-
17313  if(size == VK_WHOLE_SIZE)
-
17314  {
-
17315  outRange.size = allocationSize - outRange.offset;
-
17316  }
-
17317  else
-
17318  {
-
17319  VMA_ASSERT(offset + size <= allocationSize);
-
17320  outRange.size = VMA_MIN(
-
17321  VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize),
-
17322  allocationSize - outRange.offset);
-
17323  }
-
17324  break;
-
17325  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
-
17326  {
-
17327  // 1. Still within this allocation.
-
17328  outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
-
17329  if(size == VK_WHOLE_SIZE)
-
17330  {
-
17331  size = allocationSize - offset;
-
17332  }
-
17333  else
-
17334  {
-
17335  VMA_ASSERT(offset + size <= allocationSize);
-
17336  }
-
17337  outRange.size = VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize);
-
17338 
-
17339  // 2. Adjust to whole block.
-
17340  const VkDeviceSize allocationOffset = allocation->GetOffset();
-
17341  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
-
17342  const VkDeviceSize blockSize = allocation->GetBlock()->m_pMetadata->GetSize();
-
17343  outRange.offset += allocationOffset;
-
17344  outRange.size = VMA_MIN(outRange.size, blockSize - outRange.offset);
-
17345 
-
17346  break;
-
17347  }
-
17348  default:
-
17349  VMA_ASSERT(0);
-
17350  }
-
17351  return true;
-
17352  }
-
17353  return false;
-
17354 }
-
17355 
-
17356 #if VMA_MEMORY_BUDGET
-
17357 
-
17358 void VmaAllocator_T::UpdateVulkanBudget()
-
17359 {
-
17360  VMA_ASSERT(m_UseExtMemoryBudget);
-
17361 
-
17362  VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR };
+
17298  // Make sure memory information is already fetched.
+
17299  VMA_ASSERT(GetMemoryTypeCount() > 0);
+
17300 
+
17301  uint32_t memoryTypeBits = UINT32_MAX;
+
17302 
+
17303  if(!m_UseAmdDeviceCoherentMemory)
+
17304  {
+
17305  // Exclude memory types that have VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD.
+
17306  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
+
17307  {
+
17308  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
+
17309  {
+
17310  memoryTypeBits &= ~(1u << memTypeIndex);
+
17311  }
+
17312  }
+
17313  }
+
17314 
+
17315  return memoryTypeBits;
+
17316 }
+
17317 
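// Editorial worked example for the exclusion above: with 4 memory types where
// only type 2 carries VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD and the
// extension was not enabled, the mask starts at UINT32_MAX and bit 2 is
// cleared: for the four real types the usable bits become 0b1011, so type 2
// is invisible to the allocator (and rejected by CreatePool() above).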
+
17318 bool VmaAllocator_T::GetFlushOrInvalidateRange(
+
17319  VmaAllocation allocation,
+
17320  VkDeviceSize offset, VkDeviceSize size,
+
17321  VkMappedMemoryRange& outRange) const
+
17322 {
+
17323  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
+
17324  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
+
17325  {
+
17326  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
+
17327  const VkDeviceSize allocationSize = allocation->GetSize();
+
17328  VMA_ASSERT(offset <= allocationSize);
+
17329 
+
17330  outRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
+
17331  outRange.pNext = VMA_NULL;
+
17332  outRange.memory = allocation->GetMemory();
+
17333 
+
17334  switch(allocation->GetType())
+
17335  {
+
17336  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+
17337  outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
+
17338  if(size == VK_WHOLE_SIZE)
+
17339  {
+
17340  outRange.size = allocationSize - outRange.offset;
+
17341  }
+
17342  else
+
17343  {
+
17344  VMA_ASSERT(offset + size <= allocationSize);
+
17345  outRange.size = VMA_MIN(
+
17346  VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize),
+
17347  allocationSize - outRange.offset);
+
17348  }
+
17349  break;
+
17350  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
+
17351  {
+
17352  // 1. Still within this allocation.
+
17353  outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
+
17354  if(size == VK_WHOLE_SIZE)
+
17355  {
+
17356  size = allocationSize - offset;
+
17357  }
+
17358  else
+
17359  {
+
17360  VMA_ASSERT(offset + size <= allocationSize);
+
17361  }
+
17362  outRange.size = VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize);
17363 
-
17364  VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT };
-
17365  VmaPnextChainPushFront(&memProps, &budgetProps);
-
17366 
-
17367  GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps);
-
17368 
-
17369  {
-
17370  VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex);
-
17371 
-
17372  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
-
17373  {
-
17374  m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex];
-
17375  m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex];
-
17376  m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load();
-
17377 
-
17378  // Some bugged drivers return the budget incorrectly, e.g. 0 or much bigger than heap size.
-
17379  if(m_Budget.m_VulkanBudget[heapIndex] == 0)
-
17380  {
-
17381  m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics.
-
17382  }
-
17383  else if(m_Budget.m_VulkanBudget[heapIndex] > m_MemProps.memoryHeaps[heapIndex].size)
-
17384  {
-
17385  m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size;
-
17386  }
-
17387  if(m_Budget.m_VulkanUsage[heapIndex] == 0 && m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] > 0)
-
17388  {
-
17389  m_Budget.m_VulkanUsage[heapIndex] = m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
-
17390  }
-
17391  }
-
17392  m_Budget.m_OperationsSinceBudgetFetch = 0;
-
17393  }
-
17394 }
-
17395 
-
17396 #endif // #if VMA_MEMORY_BUDGET
-
17397 
-
17398 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
-
17399 {
-
17400  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
-
17401  !hAllocation->CanBecomeLost() &&
-
17402  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
-
17403  {
-
17404  void* pData = VMA_NULL;
-
17405  VkResult res = Map(hAllocation, &pData);
-
17406  if(res == VK_SUCCESS)
-
17407  {
-
17408  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
-
17409  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
-
17410  Unmap(hAllocation);
-
17411  }
-
17412  else
-
17413  {
-
17414  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
-
17415  }
-
17416  }
-
17417 }
-
17418 
-
17419 uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
-
17420 {
-
17421  uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
-
17422  if(memoryTypeBits == UINT32_MAX)
-
17423  {
-
17424  memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
-
17425  m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
-
17426  }
-
17427  return memoryTypeBits;
-
17428 }
-
17429 
-
17430 #if VMA_STATS_STRING_ENABLED
-
17431 
-
17432 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
-
17433 {
-
17434  bool dedicatedAllocationsStarted = false;
-
17435  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
-
17436  {
-
17437  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
-
17438  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
-
17439  VMA_ASSERT(pDedicatedAllocVector);
-
17440  if(pDedicatedAllocVector->empty() == false)
-
17441  {
-
17442  if(dedicatedAllocationsStarted == false)
-
17443  {
-
17444  dedicatedAllocationsStarted = true;
-
17445  json.WriteString("DedicatedAllocations");
-
17446  json.BeginObject();
-
17447  }
-
17448 
-
17449  json.BeginString("Type ");
-
17450  json.ContinueString(memTypeIndex);
-
17451  json.EndString();
-
17452 
-
17453  json.BeginArray();
+
17364  // 2. Adjust to whole block.
+
17365  const VkDeviceSize allocationOffset = allocation->GetOffset();
+
17366  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
+
17367  const VkDeviceSize blockSize = allocation->GetBlock()->m_pMetadata->GetSize();
+
17368  outRange.offset += allocationOffset;
+
17369  outRange.size = VMA_MIN(outRange.size, blockSize - outRange.offset);
+
17370 
+
17371  break;
+
17372  }
+
17373  default:
+
17374  VMA_ASSERT(0);
+
17375  }
+
17376  return true;
+
17377  }
+
17378  return false;
+
17379 }
+
17380 
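// Editorial worked example for the dedicated path above, assuming
// nonCoherentAtomSize = 64, allocation size = 1024, offset = 100, size = 200:
//   outRange.offset = VmaAlignDown(100, 64) = 64
//   outRange.size   = VmaAlignUp(200 + (100 - 64), 64) = VmaAlignUp(236, 64) = 256
// (VMA_MIN with 1024 - 64 = 960 leaves 256 unchanged), so the range [64, 320)
// covers the requested [100, 300) at atom-size granularity.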
+
17381 #if VMA_MEMORY_BUDGET
+
17382 
+
17383 void VmaAllocator_T::UpdateVulkanBudget()
+
17384 {
+
17385  VMA_ASSERT(m_UseExtMemoryBudget);
+
17386 
+
17387  VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR };
+
17388 
+
17389  VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT };
+
17390  VmaPnextChainPushFront(&memProps, &budgetProps);
+
17391 
+
17392  GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps);
+
17393 
+
17394  {
+
17395  VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex);
+
17396 
+
17397  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
+
17398  {
+
17399  m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex];
+
17400  m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex];
+
17401  m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load();
+
17402 
+
17403  // Some buggy drivers return the budget incorrectly, e.g. 0 or much bigger than the heap size.
+
17404  if(m_Budget.m_VulkanBudget[heapIndex] == 0)
+
17405  {
+
17406  m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics.
+
17407  }
+
17408  else if(m_Budget.m_VulkanBudget[heapIndex] > m_MemProps.memoryHeaps[heapIndex].size)
+
17409  {
+
17410  m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size;
+
17411  }
+
17412  if(m_Budget.m_VulkanUsage[heapIndex] == 0 && m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] > 0)
+
17413  {
+
17414  m_Budget.m_VulkanUsage[heapIndex] = m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
+
17415  }
+
17416  }
+
17417  m_Budget.m_OperationsSinceBudgetFetch = 0;
+
17418  }
+
17419 }
+
17420 
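// Editorial worked example for the workaround above: on an 8 GiB heap a driver
// reporting heapBudget == 0 gets the 80% fallback,
//   8589934592 * 8 / 10 = 6871947673 bytes (~6.4 GiB),
// while a reported budget larger than the heap is clamped to the heap size.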
+
17421 #endif // #if VMA_MEMORY_BUDGET
+
17422 
+
17423 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
+
17424 {
+
17425  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
+
17426  !hAllocation->CanBecomeLost() &&
+
17427  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
+
17428  {
+
17429  void* pData = VMA_NULL;
+
17430  VkResult res = Map(hAllocation, &pData);
+
17431  if(res == VK_SUCCESS)
+
17432  {
+
17433  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
+
17434  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
+
17435  Unmap(hAllocation);
+
17436  }
+
17437  else
+
17438  {
+
17439  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
+
17440  }
+
17441  }
+
17442 }
+
17443 
+
17444 uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
+
17445 {
+
17446  uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
+
17447  if(memoryTypeBits == UINT32_MAX)
+
17448  {
+
17449  memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
+
17450  m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
+
17451  }
+
17452  return memoryTypeBits;
+
17453 }
17454 
-
17455  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
-
17456  {
-
17457  json.BeginObject(true);
-
17458  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
-
17459  hAlloc->PrintParameters(json);
-
17460  json.EndObject();
-
17461  }
-
17462 
-
17463  json.EndArray();
-
17464  }
-
17465  }
-
17466  if(dedicatedAllocationsStarted)
-
17467  {
-
17468  json.EndObject();
-
17469  }
-
17470 
-
17471  {
-
17472  bool allocationsStarted = false;
-
17473  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
-
17474  {
-
17475  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
-
17476  {
-
17477  if(allocationsStarted == false)
-
17478  {
-
17479  allocationsStarted = true;
-
17480  json.WriteString("DefaultPools");
-
17481  json.BeginObject();
-
17482  }
-
17483 
-
17484  json.BeginString("Type ");
-
17485  json.ContinueString(memTypeIndex);
-
17486  json.EndString();
+
17455 #if VMA_STATS_STRING_ENABLED
+
17456 
+
17457 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
+
17458 {
+
17459  bool dedicatedAllocationsStarted = false;
+
17460  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
+
17461  {
+
17462  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
+
17463  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
+
17464  VMA_ASSERT(pDedicatedAllocVector);
+
17465  if(pDedicatedAllocVector->empty() == false)
+
17466  {
+
17467  if(dedicatedAllocationsStarted == false)
+
17468  {
+
17469  dedicatedAllocationsStarted = true;
+
17470  json.WriteString("DedicatedAllocations");
+
17471  json.BeginObject();
+
17472  }
+
17473 
+
17474  json.BeginString("Type ");
+
17475  json.ContinueString(memTypeIndex);
+
17476  json.EndString();
+
17477 
+
17478  json.BeginArray();
+
17479 
+
17480  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
+
17481  {
+
17482  json.BeginObject(true);
+
17483  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
+
17484  hAlloc->PrintParameters(json);
+
17485  json.EndObject();
+
17486  }
17487 
-
17488  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
-
17489  }
-
17490  }
-
17491  if(allocationsStarted)
-
17492  {
-
17493  json.EndObject();
-
17494  }
-
17495  }
-
17496 
-
17497  // Custom pools
-
17498  {
-
17499  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
-
17500  const size_t poolCount = m_Pools.size();
-
17501  if(poolCount > 0)
-
17502  {
-
17503  json.WriteString("Pools");
-
17504  json.BeginObject();
-
17505  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
-
17506  {
-
17507  json.BeginString();
-
17508  json.ContinueString(m_Pools[poolIndex]->GetId());
-
17509  json.EndString();
-
17510 
-
17511  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
-
17512  }
-
17513  json.EndObject();
-
17514  }
-
17515  }
-
17516 }
-
17517 
-
17518 #endif // #if VMA_STATS_STRING_ENABLED
-
17519 
-
17521 // Public interface
-
17522 
-
17523 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
-
17524  const VmaAllocatorCreateInfo* pCreateInfo,
-
17525  VmaAllocator* pAllocator)
-
17526 {
-
17527  VMA_ASSERT(pCreateInfo && pAllocator);
-
17528  VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 ||
-
17529  (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 2));
-
17530  VMA_DEBUG_LOG("vmaCreateAllocator");
-
17531  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
-
17532  return (*pAllocator)->Init(pCreateInfo);
-
17533 }
-
17534 
-
17535 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
-
17536  VmaAllocator allocator)
-
17537 {
-
17538  if(allocator != VK_NULL_HANDLE)
-
17539  {
-
17540  VMA_DEBUG_LOG("vmaDestroyAllocator");
-
17541  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
-
17542  vma_delete(&allocationCallbacks, allocator);
-
17543  }
-
17544 }
-
17545 
-
17546 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo* pAllocatorInfo)
-
17547 {
-
17548  VMA_ASSERT(allocator && pAllocatorInfo);
-
17549  pAllocatorInfo->instance = allocator->m_hInstance;
-
17550  pAllocatorInfo->physicalDevice = allocator->GetPhysicalDevice();
-
17551  pAllocatorInfo->device = allocator->m_hDevice;
-
17552 }
-
17553 
-
17554 VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
-
17555  VmaAllocator allocator,
-
17556  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
-
17557 {
-
17558  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
-
17559  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
-
17560 }
-
17561 
-
17562 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
-
17563  VmaAllocator allocator,
-
17564  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
-
17565 {
-
17566  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
-
17567  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
-
17568 }
-
17569 
-
17570 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
-
17571  VmaAllocator allocator,
-
17572  uint32_t memoryTypeIndex,
-
17573  VkMemoryPropertyFlags* pFlags)
-
17574 {
-
17575  VMA_ASSERT(allocator && pFlags);
-
17576  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
-
17577  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
-
17578 }
-
17579 
-
17580 VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
-
17581  VmaAllocator allocator,
-
17582  uint32_t frameIndex)
-
17583 {
-
17584  VMA_ASSERT(allocator);
-
17585  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
+
17488  json.EndArray();
+
17489  }
+
17490  }
+
17491  if(dedicatedAllocationsStarted)
+
17492  {
+
17493  json.EndObject();
+
17494  }
+
17495 
+
17496  {
+
17497  bool allocationsStarted = false;
+
17498  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
+
17499  {
+
17500  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
+
17501  {
+
17502  if(allocationsStarted == false)
+
17503  {
+
17504  allocationsStarted = true;
+
17505  json.WriteString("DefaultPools");
+
17506  json.BeginObject();
+
17507  }
+
17508 
+
17509  json.BeginString("Type ");
+
17510  json.ContinueString(memTypeIndex);
+
17511  json.EndString();
+
17512 
+
17513  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
+
17514  }
+
17515  }
+
17516  if(allocationsStarted)
+
17517  {
+
17518  json.EndObject();
+
17519  }
+
17520  }
+
17521 
+
17522  // Custom pools
+
17523  {
+
17524  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
+
17525  const size_t poolCount = m_Pools.size();
+
17526  if(poolCount > 0)
+
17527  {
+
17528  json.WriteString("Pools");
+
17529  json.BeginObject();
+
17530  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
+
17531  {
+
17532  json.BeginString();
+
17533  json.ContinueString(m_Pools[poolIndex]->GetId());
+
17534  json.EndString();
+
17535 
+
17536  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
+
17537  }
+
17538  json.EndObject();
+
17539  }
+
17540  }
+
17541 }
+
17542 
+
17543 #endif // #if VMA_STATS_STRING_ENABLED
+
17544 
+
17546 // Public interface
+
17547 
+
17548 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
+
17549  const VmaAllocatorCreateInfo* pCreateInfo,
+
17550  VmaAllocator* pAllocator)
+
17551 {
+
17552  VMA_ASSERT(pCreateInfo && pAllocator);
+
17553  VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 ||
+
17554  (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 2));
+
17555  VMA_DEBUG_LOG("vmaCreateAllocator");
+
17556  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
+
17557  return (*pAllocator)->Init(pCreateInfo);
+
17558 }
+
17559 
+
17560 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
+
17561  VmaAllocator allocator)
+
17562 {
+
17563  if(allocator != VK_NULL_HANDLE)
+
17564  {
+
17565  VMA_DEBUG_LOG("vmaDestroyAllocator");
+
17566  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
+
17567  vma_delete(&allocationCallbacks, allocator);
+
17568  }
+
17569 }
+
17570 
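// Editorial usage sketch: minimal allocator setup consistent with the asserts
// in vmaCreateAllocator() above (vulkanApiVersion must be 0 or a 1.0-1.2
// version). Helper name is hypothetical; the handles are assumed valid.
static VmaAllocator ExampleCreateAllocator(VkInstance instance, VkPhysicalDevice physicalDevice, VkDevice device)
{
    VmaAllocatorCreateInfo createInfo = {};
    createInfo.instance = instance;
    createInfo.physicalDevice = physicalDevice;
    createInfo.device = device;
    createInfo.vulkanApiVersion = VK_API_VERSION_1_0;
    VmaAllocator allocator = VK_NULL_HANDLE;
    if(vmaCreateAllocator(&createInfo, &allocator) != VK_SUCCESS)
    {
        return VK_NULL_HANDLE;
    }
    return allocator; // Pair with vmaDestroyAllocator(allocator) at shutdown.
}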
+
17571 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo* pAllocatorInfo)
+
17572 {
+
17573  VMA_ASSERT(allocator && pAllocatorInfo);
+
17574  pAllocatorInfo->instance = allocator->m_hInstance;
+
17575  pAllocatorInfo->physicalDevice = allocator->GetPhysicalDevice();
+
17576  pAllocatorInfo->device = allocator->m_hDevice;
+
17577 }
+
17578 
+
17579 VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
+
17580  VmaAllocator allocator,
+
17581  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
+
17582 {
+
17583  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
+
17584  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
+
17585 }
17586 
-
17587  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
17588 
-
17589  allocator->SetCurrentFrameIndex(frameIndex);
-
17590 }
-
17591 
-
17592 VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
-
17593  VmaAllocator allocator,
-
17594  VmaStats* pStats)
-
17595 {
-
17596  VMA_ASSERT(allocator && pStats);
-
17597  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
17598  allocator->CalculateStats(pStats);
-
17599 }
-
17600 
-
17601 VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
-
17602  VmaAllocator allocator,
-
17603  VmaBudget* pBudget)
-
17604 {
-
17605  VMA_ASSERT(allocator && pBudget);
-
17606  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
17607  allocator->GetBudget(pBudget, 0, allocator->GetMemoryHeapCount());
-
17608 }
-
17609 
-
17610 #if VMA_STATS_STRING_ENABLED
+
17587 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
+
17588  VmaAllocator allocator,
+
17589  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
+
17590 {
+
17591  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
+
17592  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
+
17593 }
+
17594 
+
17595 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
+
17596  VmaAllocator allocator,
+
17597  uint32_t memoryTypeIndex,
+
17598  VkMemoryPropertyFlags* pFlags)
+
17599 {
+
17600  VMA_ASSERT(allocator && pFlags);
+
17601  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
+
17602  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
+
17603 }
+
17604 
+
17605 VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
+
17606  VmaAllocator allocator,
+
17607  uint32_t frameIndex)
+
17608 {
+
17609  VMA_ASSERT(allocator);
+
17610  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
17611 
-
17612 VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
-
17613  VmaAllocator allocator,
-
17614  char** ppStatsString,
-
17615  VkBool32 detailedMap)
-
17616 {
-
17617  VMA_ASSERT(allocator && ppStatsString);
-
17618  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
17619 
-
17620  VmaStringBuilder sb(allocator);
-
17621  {
-
17622  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
-
17623  json.BeginObject();
-
17624 
-
17625  VmaBudget budget[VK_MAX_MEMORY_HEAPS];
-
17626  allocator->GetBudget(budget, 0, allocator->GetMemoryHeapCount());
-
17627 
-
17628  VmaStats stats;
-
17629  allocator->CalculateStats(&stats);
-
17630 
-
17631  json.WriteString("Total");
-
17632  VmaPrintStatInfo(json, stats.total);
-
17633 
-
17634  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
-
17635  {
-
17636  json.BeginString("Heap ");
-
17637  json.ContinueString(heapIndex);
-
17638  json.EndString();
-
17639  json.BeginObject();
-
17640 
-
17641  json.WriteString("Size");
-
17642  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
-
17643 
-
17644  json.WriteString("Flags");
-
17645  json.BeginArray(true);
-
17646  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
-
17647  {
-
17648  json.WriteString("DEVICE_LOCAL");
-
17649  }
-
17650  json.EndArray();
-
17651 
-
17652  json.WriteString("Budget");
-
17653  json.BeginObject();
-
17654  {
-
17655  json.WriteString("BlockBytes");
-
17656  json.WriteNumber(budget[heapIndex].blockBytes);
-
17657  json.WriteString("AllocationBytes");
-
17658  json.WriteNumber(budget[heapIndex].allocationBytes);
-
17659  json.WriteString("Usage");
-
17660  json.WriteNumber(budget[heapIndex].usage);
-
17661  json.WriteString("Budget");
-
17662  json.WriteNumber(budget[heapIndex].budget);
-
17663  }
-
17664  json.EndObject();
+
17612  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
17613 
+
17614  allocator->SetCurrentFrameIndex(frameIndex);
+
17615 }
+
17616 
+
17617 VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
+
17618  VmaAllocator allocator,
+
17619  VmaStats* pStats)
+
17620 {
+
17621  VMA_ASSERT(allocator && pStats);
+
17622  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
17623  allocator->CalculateStats(pStats);
+
17624 }
+
17625 
+
17626 VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
+
17627  VmaAllocator allocator,
+
17628  VmaBudget* pBudget)
+
17629 {
+
17630  VMA_ASSERT(allocator && pBudget);
+
17631  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
17632  allocator->GetBudget(pBudget, 0, allocator->GetMemoryHeapCount());
+
17633 }
+
17634 
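// Editorial usage sketch: vmaGetBudget() fills one VmaBudget per memory heap,
// so the destination array needs at least GetMemoryHeapCount() elements;
// VK_MAX_MEMORY_HEAPS is always sufficient. Helper name is hypothetical.
static void ExampleQueryBudget(VmaAllocator allocator)
{
    VmaBudget budget[VK_MAX_MEMORY_HEAPS] = {};
    vmaGetBudget(allocator, budget);
    // budget[0].usage vs. budget[0].budget estimates how close heap 0 is to its
    // limit; values refresh via UpdateVulkanBudget() when the extension is used.
}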
+
17635 #if VMA_STATS_STRING_ENABLED
+
17636 
+
17637 VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
+
17638  VmaAllocator allocator,
+
17639  char** ppStatsString,
+
17640  VkBool32 detailedMap)
+
17641 {
+
17642  VMA_ASSERT(allocator && ppStatsString);
+
17643  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
17644 
+
17645  VmaStringBuilder sb(allocator);
+
17646  {
+
17647  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
+
17648  json.BeginObject();
+
17649 
+
17650  VmaBudget budget[VK_MAX_MEMORY_HEAPS];
+
17651  allocator->GetBudget(budget, 0, allocator->GetMemoryHeapCount());
+
17652 
+
17653  VmaStats stats;
+
17654  allocator->CalculateStats(&stats);
+
17655 
+
17656  json.WriteString("Total");
+
17657  VmaPrintStatInfo(json, stats.total);
+
17658 
+
17659  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
+
17660  {
+
17661  json.BeginString("Heap ");
+
17662  json.ContinueString(heapIndex);
+
17663  json.EndString();
+
17664  json.BeginObject();
17665 
-
17666  if(stats.memoryHeap[heapIndex].blockCount > 0)
-
17667  {
-
17668  json.WriteString("Stats");
-
17669  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
-
17670  }
-
17671 
-
17672  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
-
17673  {
-
17674  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
-
17675  {
-
17676  json.BeginString("Type ");
-
17677  json.ContinueString(typeIndex);
-
17678  json.EndString();
-
17679 
-
17680  json.BeginObject();
-
17681 
-
17682  json.WriteString("Flags");
-
17683  json.BeginArray(true);
-
17684  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
-
17685  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
-
17686  {
-
17687  json.WriteString("DEVICE_LOCAL");
-
17688  }
-
17689  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
-
17690  {
-
17691  json.WriteString("HOST_VISIBLE");
-
17692  }
-
17693  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
-
17694  {
-
17695  json.WriteString("HOST_COHERENT");
-
17696  }
-
17697  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
-
17698  {
-
17699  json.WriteString("HOST_CACHED");
-
17700  }
-
17701  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
-
17702  {
-
17703  json.WriteString("LAZILY_ALLOCATED");
-
17704  }
-
17705  if((flags & VK_MEMORY_PROPERTY_PROTECTED_BIT) != 0)
-
17706  {
-
17707  json.WriteString(" PROTECTED");
-
17708  }
-
17709  if((flags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
-
17710  {
-
17711  json.WriteString(" DEVICE_COHERENT");
-
17712  }
-
17713  if((flags & VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY) != 0)
-
17714  {
-
17715  json.WriteString(" DEVICE_UNCACHED");
-
17716  }
-
17717  json.EndArray();
-
17718 
-
17719  if(stats.memoryType[typeIndex].blockCount > 0)
-
17720  {
-
17721  json.WriteString("Stats");
-
17722  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
-
17723  }
-
17724 
-
17725  json.EndObject();
-
17726  }
-
17727  }
-
17728 
-
17729  json.EndObject();
-
17730  }
-
17731  if(detailedMap == VK_TRUE)
-
17732  {
-
17733  allocator->PrintDetailedMap(json);
-
17734  }
-
17735 
-
17736  json.EndObject();
-
17737  }
-
17738 
-
17739  const size_t len = sb.GetLength();
-
17740  char* const pChars = vma_new_array(allocator, char, len + 1);
-
17741  if(len > 0)
-
17742  {
-
17743  memcpy(pChars, sb.GetData(), len);
-
17744  }
-
17745  pChars[len] = '\0';
-
17746  *ppStatsString = pChars;
-
17747 }
-
17748 
-
17749 VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
-
17750  VmaAllocator allocator,
-
17751  char* pStatsString)
-
17752 {
-
17753  if(pStatsString != VMA_NULL)
-
17754  {
-
17755  VMA_ASSERT(allocator);
-
17756  size_t len = strlen(pStatsString);
-
17757  vma_delete_array(allocator, pStatsString, len + 1);
-
17758  }
-
17759 }
+
17666  json.WriteString("Size");
+
17667  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
+
17668 
+
17669  json.WriteString("Flags");
+
17670  json.BeginArray(true);
+
17671  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
+
17672  {
+
17673  json.WriteString("DEVICE_LOCAL");
+
17674  }
+
17675  json.EndArray();
+
17676 
+
17677  json.WriteString("Budget");
+
17678  json.BeginObject();
+
17679  {
+
17680  json.WriteString("BlockBytes");
+
17681  json.WriteNumber(budget[heapIndex].blockBytes);
+
17682  json.WriteString("AllocationBytes");
+
17683  json.WriteNumber(budget[heapIndex].allocationBytes);
+
17684  json.WriteString("Usage");
+
17685  json.WriteNumber(budget[heapIndex].usage);
+
17686  json.WriteString("Budget");
+
17687  json.WriteNumber(budget[heapIndex].budget);
+
17688  }
+
17689  json.EndObject();
+
17690 
+
17691  if(stats.memoryHeap[heapIndex].blockCount > 0)
+
17692  {
+
17693  json.WriteString("Stats");
+
17694  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
+
17695  }
+
17696 
+
17697  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
+
17698  {
+
17699  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
+
17700  {
+
17701  json.BeginString("Type ");
+
17702  json.ContinueString(typeIndex);
+
17703  json.EndString();
+
17704 
+
17705  json.BeginObject();
+
17706 
+
17707  json.WriteString("Flags");
+
17708  json.BeginArray(true);
+
17709  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
+
17710  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
+
17711  {
+
17712  json.WriteString("DEVICE_LOCAL");
+
17713  }
+
17714  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
+
17715  {
+
17716  json.WriteString("HOST_VISIBLE");
+
17717  }
+
17718  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
+
17719  {
+
17720  json.WriteString("HOST_COHERENT");
+
17721  }
+
17722  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
+
17723  {
+
17724  json.WriteString("HOST_CACHED");
+
17725  }
+
17726  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
+
17727  {
+
17728  json.WriteString("LAZILY_ALLOCATED");
+
17729  }
+
17730  if((flags & VK_MEMORY_PROPERTY_PROTECTED_BIT) != 0)
+
17731  {
+
17732  json.WriteString(" PROTECTED");
+
17733  }
+
17734  if((flags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
+
17735  {
+
17736  json.WriteString(" DEVICE_COHERENT");
+
17737  }
+
17738  if((flags & VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY) != 0)
+
17739  {
+
17740  json.WriteString(" DEVICE_UNCACHED");
+
17741  }
+
17742  json.EndArray();
+
17743 
+
17744  if(stats.memoryType[typeIndex].blockCount > 0)
+
17745  {
+
17746  json.WriteString("Stats");
+
17747  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
+
17748  }
+
17749 
+
17750  json.EndObject();
+
17751  }
+
17752  }
+
17753 
+
17754  json.EndObject();
+
17755  }
+
17756  if(detailedMap == VK_TRUE)
+
17757  {
+
17758  allocator->PrintDetailedMap(json);
+
17759  }
17760 
-
17761 #endif // #if VMA_STATS_STRING_ENABLED
-
17762 
-
17763 /*
-
17764 This function is not protected by any mutex because it just reads immutable data.
-
17765 */
-
17766 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
-
17767  VmaAllocator allocator,
-
17768  uint32_t memoryTypeBits,
-
17769  const VmaAllocationCreateInfo* pAllocationCreateInfo,
-
17770  uint32_t* pMemoryTypeIndex)
-
17771 {
-
17772  VMA_ASSERT(allocator != VK_NULL_HANDLE);
-
17773  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
-
17774  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
-
17775 
-
17776  memoryTypeBits &= allocator->GetGlobalMemoryTypeBits();
-
17777 
-
17778  if(pAllocationCreateInfo->memoryTypeBits != 0)
+
17761  json.EndObject();
+
17762  }
+
17763 
+
17764  const size_t len = sb.GetLength();
+
17765  char* const pChars = vma_new_array(allocator, char, len + 1);
+
17766  if(len > 0)
+
17767  {
+
17768  memcpy(pChars, sb.GetData(), len);
+
17769  }
+
17770  pChars[len] = '\0';
+
17771  *ppStatsString = pChars;
+
17772 }
+
17773 
+
17774 VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
+
17775  VmaAllocator allocator,
+
17776  char* pStatsString)
+
17777 {
+
17778  if(pStatsString != VMA_NULL)
17779  {
-
17780  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
-
17781  }
-
17782 
-
17783  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
-
17784  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
-
17785  uint32_t notPreferredFlags = 0;
-
17786 
-
17787  // Convert usage to requiredFlags and preferredFlags.
-
17788  switch(pAllocationCreateInfo->usage)
-
17789  {
-
17790  case VMA_MEMORY_USAGE_UNKNOWN:
-
17791  break;
-
17792  case VMA_MEMORY_USAGE_GPU_ONLY:
-
17793  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
-
17794  {
-
17795  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-
17796  }
-
17797  break;
-
17798  case VMA_MEMORY_USAGE_CPU_ONLY:
-
17799  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
-
17800  break;
-
17801  case VMA_MEMORY_USAGE_CPU_TO_GPU:
-
17802  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
-
17803  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
-
17804  {
-
17805  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-
17806  }
-
17807  break;
-
17808  case VMA_MEMORY_USAGE_GPU_TO_CPU:
-
17809  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
-
17810  preferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
-
17811  break;
-
17812  case VMA_MEMORY_USAGE_CPU_COPY:
-
17813  notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-
17814  break;
-
17815  case VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED:
-
17816  requiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
-
17817  break;
-
17818  default:
-
17819  VMA_ASSERT(0);
-
17820  break;
-
17821  }
-
17822 
-
17823  // Avoid DEVICE_COHERENT unless explicitly requested.
-
17824  if(((pAllocationCreateInfo->requiredFlags | pAllocationCreateInfo->preferredFlags) &
-
17825  (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0)
-
17826  {
-
17827  notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY;
-
17828  }
-
17829 
-
17830  *pMemoryTypeIndex = UINT32_MAX;
-
17831  uint32_t minCost = UINT32_MAX;
-
17832  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
-
17833  memTypeIndex < allocator->GetMemoryTypeCount();
-
17834  ++memTypeIndex, memTypeBit <<= 1)
-
17835  {
-
17836  // This memory type is acceptable according to memoryTypeBits bitmask.
-
17837  if((memTypeBit & memoryTypeBits) != 0)
-
17838  {
-
17839  const VkMemoryPropertyFlags currFlags =
-
17840  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
-
17841  // This memory type contains requiredFlags.
-
17842  if((requiredFlags & ~currFlags) == 0)
-
17843  {
-
17844  // Calculate cost as number of bits from preferredFlags not present in this memory type.
-
17845  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags) +
-
17846  VmaCountBitsSet(currFlags & notPreferredFlags);
-
17847  // Remember memory type with lowest cost.
-
17848  if(currCost < minCost)
-
17849  {
-
17850  *pMemoryTypeIndex = memTypeIndex;
-
17851  if(currCost == 0)
-
17852  {
-
17853  return VK_SUCCESS;
-
17854  }
-
17855  minCost = currCost;
-
17856  }
-
17857  }
-
17858  }
-
17859  }
-
17860  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
-
17861 }
-
17862 
-
17863 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
-
17864  VmaAllocator allocator,
-
17865  const VkBufferCreateInfo* pBufferCreateInfo,
-
17866  const VmaAllocationCreateInfo* pAllocationCreateInfo,
-
17867  uint32_t* pMemoryTypeIndex)
-
17868 {
-
17869  VMA_ASSERT(allocator != VK_NULL_HANDLE);
-
17870  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
-
17871  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
-
17872  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
-
17873 
-
17874  const VkDevice hDev = allocator->m_hDevice;
-
17875  VkBuffer hBuffer = VK_NULL_HANDLE;
-
17876  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
-
17877  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
-
17878  if(res == VK_SUCCESS)
-
17879  {
-
17880  VkMemoryRequirements memReq = {};
-
17881  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
-
17882  hDev, hBuffer, &memReq);
-
17883 
-
17884  res = vmaFindMemoryTypeIndex(
-
17885  allocator,
-
17886  memReq.memoryTypeBits,
-
17887  pAllocationCreateInfo,
-
17888  pMemoryTypeIndex);
-
17889 
-
17890  allocator->GetVulkanFunctions().vkDestroyBuffer(
-
17891  hDev, hBuffer, allocator->GetAllocationCallbacks());
-
17892  }
-
17893  return res;
-
17894 }
-
17895 
-
17896 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
-
17897  VmaAllocator allocator,
-
17898  const VkImageCreateInfo* pImageCreateInfo,
-
17899  const VmaAllocationCreateInfo* pAllocationCreateInfo,
-
17900  uint32_t* pMemoryTypeIndex)
-
17901 {
-
17902  VMA_ASSERT(allocator != VK_NULL_HANDLE);
-
17903  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
-
17904  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
-
17905  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
-
17906 
-
17907  const VkDevice hDev = allocator->m_hDevice;
-
17908  VkImage hImage = VK_NULL_HANDLE;
-
17909  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
-
17910  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
-
17911  if(res == VK_SUCCESS)
-
17912  {
-
17913  VkMemoryRequirements memReq = {};
-
17914  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
-
17915  hDev, hImage, &memReq);
-
17916 
-
17917  res = vmaFindMemoryTypeIndex(
-
17918  allocator,
-
17919  memReq.memoryTypeBits,
-
17920  pAllocationCreateInfo,
-
17921  pMemoryTypeIndex);
-
17922 
-
17923  allocator->GetVulkanFunctions().vkDestroyImage(
-
17924  hDev, hImage, allocator->GetAllocationCallbacks());
-
17925  }
-
17926  return res;
-
17927 }
-
17928 
-
17929 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
-
17930  VmaAllocator allocator,
-
17931  const VmaPoolCreateInfo* pCreateInfo,
-
17932  VmaPool* pPool)
-
17933 {
-
17934  VMA_ASSERT(allocator && pCreateInfo && pPool);
-
17935 
-
17936  VMA_DEBUG_LOG("vmaCreatePool");
-
17937 
-
17938  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
17939 
-
17940  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
-
17941 
-
17942 #if VMA_RECORDING_ENABLED
-
17943  if(allocator->GetRecorder() != VMA_NULL)
-
17944  {
-
17945  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
-
17946  }
-
17947 #endif
-
17948 
-
17949  return res;
-
17950 }
-
17951 
-
17952 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
-
17953  VmaAllocator allocator,
-
17954  VmaPool pool)
-
17955 {
-
17956  VMA_ASSERT(allocator);
-
17957 
-
17958  if(pool == VK_NULL_HANDLE)
-
17959  {
-
17960  return;
-
17961  }
+
17780  VMA_ASSERT(allocator);
+
17781  size_t len = strlen(pStatsString);
+
17782  vma_delete_array(allocator, pStatsString, len + 1);
+
17783  }
+
17784 }
+
17785 
+
17786 #endif // #if VMA_STATS_STRING_ENABLED
+
17787 
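
The pair above is the full lifecycle of a stats string: vmaBuildStatsString allocates the JSON text through the allocator's own callbacks, so it must be released with vmaFreeStatsString, never free(). A minimal sketch, assuming an existing `allocator`:

    char* statsString = VMA_NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE adds the detailed map
    // statsString now holds a JSON document with the "Total", per-heap
    // "Budget"/"Stats" and per-type "Flags"/"Stats" sections built above.
    printf("%s\n", statsString); // needs <cstdio>
    vmaFreeStatsString(allocator, statsString);
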
+
17788 /*
+
17789 This function is not protected by any mutex because it just reads immutable data.
+
17790 */
+
17791 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
+
17792  VmaAllocator allocator,
+
17793  uint32_t memoryTypeBits,
+
17794  const VmaAllocationCreateInfo* pAllocationCreateInfo,
+
17795  uint32_t* pMemoryTypeIndex)
+
17796 {
+
17797  VMA_ASSERT(allocator != VK_NULL_HANDLE);
+
17798  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
+
17799  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
+
17800 
+
17801  memoryTypeBits &= allocator->GetGlobalMemoryTypeBits();
+
17802 
+
17803  if(pAllocationCreateInfo->memoryTypeBits != 0)
+
17804  {
+
17805  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
+
17806  }
+
17807 
+
17808  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
+
17809  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
+
17810  uint32_t notPreferredFlags = 0;
+
17811 
+
17812  // Convert usage to requiredFlags and preferredFlags.
+
17813  switch(pAllocationCreateInfo->usage)
+
17814  {
+
17815  case VMA_MEMORY_USAGE_UNKNOWN:
+
17816  break;
+
17817  case VMA_MEMORY_USAGE_GPU_ONLY:
+
17818  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
+
17819  {
+
17820  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+
17821  }
+
17822  break;
+
17823  case VMA_MEMORY_USAGE_CPU_ONLY:
+
17824  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
+
17825  break;
+
17826  case VMA_MEMORY_USAGE_CPU_TO_GPU:
+
17827  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+
17828  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
+
17829  {
+
17830  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+
17831  }
+
17832  break;
+
17833  case VMA_MEMORY_USAGE_GPU_TO_CPU:
+
17834  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+
17835  preferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
+
17836  break;
+
17837  case VMA_MEMORY_USAGE_CPU_COPY:
+
17838  notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+
17839  break;
+
17840  case VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED:
+
17841  requiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
+
17842  break;
+
17843  default:
+
17844  VMA_ASSERT(0);
+
17845  break;
+
17846  }
+
17847 
+
17848  // Avoid DEVICE_COHERENT unless explicitly requested.
+
17849  if(((pAllocationCreateInfo->requiredFlags | pAllocationCreateInfo->preferredFlags) &
+
17850  (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0)
+
17851  {
+
17852  notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY;
+
17853  }
+
17854 
+
17855  *pMemoryTypeIndex = UINT32_MAX;
+
17856  uint32_t minCost = UINT32_MAX;
+
17857  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
+
17858  memTypeIndex < allocator->GetMemoryTypeCount();
+
17859  ++memTypeIndex, memTypeBit <<= 1)
+
17860  {
+
17861  // This memory type is acceptable according to memoryTypeBits bitmask.
+
17862  if((memTypeBit & memoryTypeBits) != 0)
+
17863  {
+
17864  const VkMemoryPropertyFlags currFlags =
+
17865  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
+
17866  // This memory type contains requiredFlags.
+
17867  if((requiredFlags & ~currFlags) == 0)
+
17868  {
+
17869  // Calculate cost as number of bits from preferredFlags not present in this memory type.
+
17870  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags) +
+
17871  VmaCountBitsSet(currFlags & notPreferredFlags);
+
17872  // Remember memory type with lowest cost.
+
17873  if(currCost < minCost)
+
17874  {
+
17875  *pMemoryTypeIndex = memTypeIndex;
+
17876  if(currCost == 0)
+
17877  {
+
17878  return VK_SUCCESS;
+
17879  }
+
17880  minCost = currCost;
+
17881  }
+
17882  }
+
17883  }
+
17884  }
+
17885  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
+
17886 }
+
17887 
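
The loop above implements a simple cost model: a memory type is admissible only if it contains every bit of requiredFlags, and among admissible types the winner is the one missing the fewest preferredFlags bits and carrying the fewest notPreferredFlags bits; a cost of 0 returns immediately. A minimal sketch of calling it directly, assuming an existing `allocator`:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
    // Per the switch above, this usage requires HOST_VISIBLE and, on a
    // discrete GPU, prefers DEVICE_LOCAL.

    uint32_t memTypeIndex = UINT32_MAX;
    VkResult res = vmaFindMemoryTypeIndex(
        allocator,
        UINT32_MAX, // accept any type; intersected with the global and per-request masks above
        &allocCreateInfo,
        &memTypeIndex);
    // VK_ERROR_FEATURE_NOT_PRESENT means no admissible memory type exists.
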
+
17888 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
+
17889  VmaAllocator allocator,
+
17890  const VkBufferCreateInfo* pBufferCreateInfo,
+
17891  const VmaAllocationCreateInfo* pAllocationCreateInfo,
+
17892  uint32_t* pMemoryTypeIndex)
+
17893 {
+
17894  VMA_ASSERT(allocator != VK_NULL_HANDLE);
+
17895  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
+
17896  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
+
17897  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
+
17898 
+
17899  const VkDevice hDev = allocator->m_hDevice;
+
17900  VkBuffer hBuffer = VK_NULL_HANDLE;
+
17901  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
+
17902  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
+
17903  if(res == VK_SUCCESS)
+
17904  {
+
17905  VkMemoryRequirements memReq = {};
+
17906  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
+
17907  hDev, hBuffer, &memReq);
+
17908 
+
17909  res = vmaFindMemoryTypeIndex(
+
17910  allocator,
+
17911  memReq.memoryTypeBits,
+
17912  pAllocationCreateInfo,
+
17913  pMemoryTypeIndex);
+
17914 
+
17915  allocator->GetVulkanFunctions().vkDestroyBuffer(
+
17916  hDev, hBuffer, allocator->GetAllocationCallbacks());
+
17917  }
+
17918  return res;
+
17919 }
+
17920 
+
17921 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
+
17922  VmaAllocator allocator,
+
17923  const VkImageCreateInfo* pImageCreateInfo,
+
17924  const VmaAllocationCreateInfo* pAllocationCreateInfo,
+
17925  uint32_t* pMemoryTypeIndex)
+
17926 {
+
17927  VMA_ASSERT(allocator != VK_NULL_HANDLE);
+
17928  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
+
17929  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
+
17930  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
+
17931 
+
17932  const VkDevice hDev = allocator->m_hDevice;
+
17933  VkImage hImage = VK_NULL_HANDLE;
+
17934  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
+
17935  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
+
17936  if(res == VK_SUCCESS)
+
17937  {
+
17938  VkMemoryRequirements memReq = {};
+
17939  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
+
17940  hDev, hImage, &memReq);
+
17941 
+
17942  res = vmaFindMemoryTypeIndex(
+
17943  allocator,
+
17944  memReq.memoryTypeBits,
+
17945  pAllocationCreateInfo,
+
17946  pMemoryTypeIndex);
+
17947 
+
17948  allocator->GetVulkanFunctions().vkDestroyImage(
+
17949  hDev, hImage, allocator->GetAllocationCallbacks());
+
17950  }
+
17951  return res;
+
17952 }
+
17953 
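
Both helpers above create a temporary dummy resource only to read its VkMemoryRequirements, then destroy it before returning; the surviving memory type index is typically used to configure a custom pool via vmaCreatePool, defined just below. A sketch under that assumption, with `allocator` supplied by the application:

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 1024;
    bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndexForBufferInfo(
        allocator, &bufCreateInfo, &allocCreateInfo, &memTypeIndex);

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex; // other members left at defaults here

    VmaPool pool;
    res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
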
+
17954 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
+
17955  VmaAllocator allocator,
+
17956  const VmaPoolCreateInfo* pCreateInfo,
+
17957  VmaPool* pPool)
+
17958 {
+
17959  VMA_ASSERT(allocator && pCreateInfo && pPool);
+
17960 
+
17961  VMA_DEBUG_LOG("vmaCreatePool");
17962 
-
17963  VMA_DEBUG_LOG("vmaDestroyPool");
+
17963  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17964 
-
17965  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
17965  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
17966 
17967 #if VMA_RECORDING_ENABLED
17968  if(allocator->GetRecorder() != VMA_NULL)
17969  {
-
17970  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
+
17970  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
17971  }
17972 #endif
-
17973 
-
17974  allocator->DestroyPool(pool);
+
17973 
+
17974  return res;
17975 }
17976 
-
17977 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
+
17977 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
17978  VmaAllocator allocator,
-
17979  VmaPool pool,
-
17980  VmaPoolStats* pPoolStats)
-
17981 {
-
17982  VMA_ASSERT(allocator && pool && pPoolStats);
-
17983 
-
17984  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
17985 
-
17986  allocator->GetPoolStats(pool, pPoolStats);
-
17987 }
-
17988 
-
17989 VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
-
17990  VmaAllocator allocator,
-
17991  VmaPool pool,
-
17992  size_t* pLostAllocationCount)
-
17993 {
-
17994  VMA_ASSERT(allocator && pool);
-
17995 
-
17996  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
17997 
-
17998 #if VMA_RECORDING_ENABLED
-
17999  if(allocator->GetRecorder() != VMA_NULL)
-
18000  {
-
18001  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
-
18002  }
-
18003 #endif
-
18004 
-
18005  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
-
18006 }
-
18007 
-
18008 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
-
18009 {
-
18010  VMA_ASSERT(allocator && pool);
-
18011 
-
18012  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
17979  VmaPool pool)
+
17980 {
+
17981  VMA_ASSERT(allocator);
+
17982 
+
17983  if(pool == VK_NULL_HANDLE)
+
17984  {
+
17985  return;
+
17986  }
+
17987 
+
17988  VMA_DEBUG_LOG("vmaDestroyPool");
+
17989 
+
17990  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
17991 
+
17992 #if VMA_RECORDING_ENABLED
+
17993  if(allocator->GetRecorder() != VMA_NULL)
+
17994  {
+
17995  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
+
17996  }
+
17997 #endif
+
17998 
+
17999  allocator->DestroyPool(pool);
+
18000 }
+
18001 
+
18002 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
+
18003  VmaAllocator allocator,
+
18004  VmaPool pool,
+
18005  VmaPoolStats* pPoolStats)
+
18006 {
+
18007  VMA_ASSERT(allocator && pool && pPoolStats);
+
18008 
+
18009  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
18010 
+
18011  allocator->GetPoolStats(pool, pPoolStats);
+
18012 }
18013 
-
18014  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
-
18015 
-
18016  return allocator->CheckPoolCorruption(pool);
-
18017 }
-
18018 
-
18019 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
-
18020  VmaAllocator allocator,
-
18021  VmaPool pool,
-
18022  const char** ppName)
-
18023 {
-
18024  VMA_ASSERT(allocator && pool && ppName);
-
18025 
-
18026  VMA_DEBUG_LOG("vmaGetPoolName");
-
18027 
-
18028  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
18014 VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
+
18015  VmaAllocator allocator,
+
18016  VmaPool pool,
+
18017  size_t* pLostAllocationCount)
+
18018 {
+
18019  VMA_ASSERT(allocator && pool);
+
18020 
+
18021  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
18022 
+
18023 #if VMA_RECORDING_ENABLED
+
18024  if(allocator->GetRecorder() != VMA_NULL)
+
18025  {
+
18026  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
+
18027  }
+
18028 #endif
18029 
-
18030  *ppName = pool->GetName();
+
18030  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
18031 }
18032 
-
18033 VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
-
18034  VmaAllocator allocator,
-
18035  VmaPool pool,
-
18036  const char* pName)
-
18037 {
-
18038  VMA_ASSERT(allocator && pool);
-
18039 
-
18040  VMA_DEBUG_LOG("vmaSetPoolName");
-
18041 
-
18042  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
18033 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
+
18034 {
+
18035  VMA_ASSERT(allocator && pool);
+
18036 
+
18037  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
18038 
+
18039  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
+
18040 
+
18041  return allocator->CheckPoolCorruption(pool);
+
18042 }
18043 
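
A short sketch of inspecting a pool such as the one above; note that vmaCheckPoolCorruption is only meaningful when margins and corruption detection are compiled in (VMA_DEBUG_MARGIN / VMA_DEBUG_DETECT_CORRUPTION), and `allocator` and `pool` are assumed to exist:

    VmaPoolStats poolStats = {};
    vmaGetPoolStats(allocator, pool, &poolStats);
    // poolStats.size vs. poolStats.unusedSize shows how full the pool's blocks are.

    VkResult res = vmaCheckPoolCorruption(allocator, pool);
    // VK_SUCCESS: margins intact; VK_ERROR_FEATURE_NOT_PRESENT: detection disabled.
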
-
18044  pool->SetName(pName);
-
18045 
-
18046 #if VMA_RECORDING_ENABLED
-
18047  if(allocator->GetRecorder() != VMA_NULL)
-
18048  {
-
18049  allocator->GetRecorder()->RecordSetPoolName(allocator->GetCurrentFrameIndex(), pool, pName);
-
18050  }
-
18051 #endif
-
18052 }
-
18053 
-
18054 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
-
18055  VmaAllocator allocator,
-
18056  const VkMemoryRequirements* pVkMemoryRequirements,
-
18057  const VmaAllocationCreateInfo* pCreateInfo,
-
18058  VmaAllocation* pAllocation,
-
18059  VmaAllocationInfo* pAllocationInfo)
-
18060 {
-
18061  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
-
18062 
-
18063  VMA_DEBUG_LOG("vmaAllocateMemory");
+
18044 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
+
18045  VmaAllocator allocator,
+
18046  VmaPool pool,
+
18047  const char** ppName)
+
18048 {
+
18049  VMA_ASSERT(allocator && pool && ppName);
+
18050 
+
18051  VMA_DEBUG_LOG("vmaGetPoolName");
+
18052 
+
18053  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
18054 
+
18055  *ppName = pool->GetName();
+
18056 }
+
18057 
+
18058 VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
+
18059  VmaAllocator allocator,
+
18060  VmaPool pool,
+
18061  const char* pName)
+
18062 {
+
18063  VMA_ASSERT(allocator && pool);
18064 
-
18065  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
18065  VMA_DEBUG_LOG("vmaSetPoolName");
18066 
-
18067  VkResult result = allocator->AllocateMemory(
-
18068  *pVkMemoryRequirements,
-
18069  false, // requiresDedicatedAllocation
-
18070  false, // prefersDedicatedAllocation
-
18071  VK_NULL_HANDLE, // dedicatedBuffer
-
18072  UINT32_MAX, // dedicatedBufferUsage
-
18073  VK_NULL_HANDLE, // dedicatedImage
-
18074  *pCreateInfo,
-
18075  VMA_SUBALLOCATION_TYPE_UNKNOWN,
-
18076  1, // allocationCount
-
18077  pAllocation);
+
18067  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
18068 
+
18069  pool->SetName(pName);
+
18070 
+
18071 #if VMA_RECORDING_ENABLED
+
18072  if(allocator->GetRecorder() != VMA_NULL)
+
18073  {
+
18074  allocator->GetRecorder()->RecordSetPoolName(allocator->GetCurrentFrameIndex(), pool, pName);
+
18075  }
+
18076 #endif
+
18077 }
18078 
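
The name is copied into the pool's own storage, so the caller's string need not stay alive, and it is also reported in the statistics dump. A trivial sketch with an assumed `pool`:

    vmaSetPoolName(allocator, pool, "texture pool");

    const char* pName = VMA_NULL;
    vmaGetPoolName(allocator, pool, &pName); // returns the stored copy, or null if unset
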
-
18079 #if VMA_RECORDING_ENABLED
-
18080  if(allocator->GetRecorder() != VMA_NULL)
-
18081  {
-
18082  allocator->GetRecorder()->RecordAllocateMemory(
-
18083  allocator->GetCurrentFrameIndex(),
-
18084  *pVkMemoryRequirements,
-
18085  *pCreateInfo,
-
18086  *pAllocation);
-
18087  }
-
18088 #endif
-
18089 
-
18090  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
-
18091  {
-
18092  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
-
18093  }
-
18094 
-
18095  return result;
-
18096 }
-
18097 
-
18098 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
-
18099  VmaAllocator allocator,
-
18100  const VkMemoryRequirements* pVkMemoryRequirements,
-
18101  const VmaAllocationCreateInfo* pCreateInfo,
-
18102  size_t allocationCount,
-
18103  VmaAllocation* pAllocations,
-
18104  VmaAllocationInfo* pAllocationInfo)
-
18105 {
-
18106  if(allocationCount == 0)
-
18107  {
-
18108  return VK_SUCCESS;
-
18109  }
-
18110 
-
18111  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
-
18112 
-
18113  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
-
18114 
-
18115  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
18116 
-
18117  VkResult result = allocator->AllocateMemory(
-
18118  *pVkMemoryRequirements,
-
18119  false, // requiresDedicatedAllocation
-
18120  false, // prefersDedicatedAllocation
-
18121  VK_NULL_HANDLE, // dedicatedBuffer
-
18122  UINT32_MAX, // dedicatedBufferUsage
-
18123  VK_NULL_HANDLE, // dedicatedImage
-
18124  *pCreateInfo,
-
18125  VMA_SUBALLOCATION_TYPE_UNKNOWN,
-
18126  allocationCount,
-
18127  pAllocations);
-
18128 
-
18129 #if VMA_RECORDING_ENABLED
-
18130  if(allocator->GetRecorder() != VMA_NULL)
-
18131  {
-
18132  allocator->GetRecorder()->RecordAllocateMemoryPages(
-
18133  allocator->GetCurrentFrameIndex(),
-
18134  *pVkMemoryRequirements,
-
18135  *pCreateInfo,
-
18136  (uint64_t)allocationCount,
-
18137  pAllocations);
-
18138  }
-
18139 #endif
-
18140 
-
18141  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
-
18142  {
-
18143  for(size_t i = 0; i < allocationCount; ++i)
-
18144  {
-
18145  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
-
18146  }
-
18147  }
-
18148 
-
18149  return result;
-
18150 }
-
18151 
-
18152 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
-
18153  VmaAllocator allocator,
-
18154  VkBuffer buffer,
-
18155  const VmaAllocationCreateInfo* pCreateInfo,
-
18156  VmaAllocation* pAllocation,
-
18157  VmaAllocationInfo* pAllocationInfo)
-
18158 {
-
18159  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
-
18160 
-
18161  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
-
18162 
-
18163  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
18164 
-
18165  VkMemoryRequirements vkMemReq = {};
-
18166  bool requiresDedicatedAllocation = false;
-
18167  bool prefersDedicatedAllocation = false;
-
18168  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
-
18169  requiresDedicatedAllocation,
-
18170  prefersDedicatedAllocation);
-
18171 
-
18172  VkResult result = allocator->AllocateMemory(
-
18173  vkMemReq,
-
18174  requiresDedicatedAllocation,
-
18175  prefersDedicatedAllocation,
-
18176  buffer, // dedicatedBuffer
-
18177  UINT32_MAX, // dedicatedBufferUsage
-
18178  VK_NULL_HANDLE, // dedicatedImage
-
18179  *pCreateInfo,
-
18180  VMA_SUBALLOCATION_TYPE_BUFFER,
-
18181  1, // allocationCount
-
18182  pAllocation);
-
18183 
-
18184 #if VMA_RECORDING_ENABLED
-
18185  if(allocator->GetRecorder() != VMA_NULL)
-
18186  {
-
18187  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
-
18188  allocator->GetCurrentFrameIndex(),
-
18189  vkMemReq,
-
18190  requiresDedicatedAllocation,
-
18191  prefersDedicatedAllocation,
-
18192  *pCreateInfo,
-
18193  *pAllocation);
-
18194  }
-
18195 #endif
+
18079 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
+
18080  VmaAllocator allocator,
+
18081  const VkMemoryRequirements* pVkMemoryRequirements,
+
18082  const VmaAllocationCreateInfo* pCreateInfo,
+
18083  VmaAllocation* pAllocation,
+
18084  VmaAllocationInfo* pAllocationInfo)
+
18085 {
+
18086  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
+
18087 
+
18088  VMA_DEBUG_LOG("vmaAllocateMemory");
+
18089 
+
18090  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
18091 
+
18092  VkResult result = allocator->AllocateMemory(
+
18093  *pVkMemoryRequirements,
+
18094  false, // requiresDedicatedAllocation
+
18095  false, // prefersDedicatedAllocation
+
18096  VK_NULL_HANDLE, // dedicatedBuffer
+
18097  UINT32_MAX, // dedicatedBufferUsage
+
18098  VK_NULL_HANDLE, // dedicatedImage
+
18099  *pCreateInfo,
+
18100  VMA_SUBALLOCATION_TYPE_UNKNOWN,
+
18101  1, // allocationCount
+
18102  pAllocation);
+
18103 
+
18104 #if VMA_RECORDING_ENABLED
+
18105  if(allocator->GetRecorder() != VMA_NULL)
+
18106  {
+
18107  allocator->GetRecorder()->RecordAllocateMemory(
+
18108  allocator->GetCurrentFrameIndex(),
+
18109  *pVkMemoryRequirements,
+
18110  *pCreateInfo,
+
18111  *pAllocation);
+
18112  }
+
18113 #endif
+
18114 
+
18115  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
+
18116  {
+
18117  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
+
18118  }
+
18119 
+
18120  return result;
+
18121 }
+
18122 
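
vmaAllocateMemory is the low-level entry point for callers that already hold VkMemoryRequirements; note from the guard above that pAllocationInfo is optional and is only filled on VK_SUCCESS. A sketch with `allocator` assumed and requirements normally obtained from vkGetBufferMemoryRequirements or vkGetImageMemoryRequirements:

    VkMemoryRequirements memReq = {};
    memReq.size = 65536;
    memReq.alignment = 256;
    memReq.memoryTypeBits = UINT32_MAX;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocation allocation;
    VmaAllocationInfo allocInfo;
    VkResult res = vmaAllocateMemory(
        allocator, &memReq, &allocCreateInfo, &allocation, &allocInfo);
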
+
18123 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
+
18124  VmaAllocator allocator,
+
18125  const VkMemoryRequirements* pVkMemoryRequirements,
+
18126  const VmaAllocationCreateInfo* pCreateInfo,
+
18127  size_t allocationCount,
+
18128  VmaAllocation* pAllocations,
+
18129  VmaAllocationInfo* pAllocationInfo)
+
18130 {
+
18131  if(allocationCount == 0)
+
18132  {
+
18133  return VK_SUCCESS;
+
18134  }
+
18135 
+
18136  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
+
18137 
+
18138  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
+
18139 
+
18140  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
18141 
+
18142  VkResult result = allocator->AllocateMemory(
+
18143  *pVkMemoryRequirements,
+
18144  false, // requiresDedicatedAllocation
+
18145  false, // prefersDedicatedAllocation
+
18146  VK_NULL_HANDLE, // dedicatedBuffer
+
18147  UINT32_MAX, // dedicatedBufferUsage
+
18148  VK_NULL_HANDLE, // dedicatedImage
+
18149  *pCreateInfo,
+
18150  VMA_SUBALLOCATION_TYPE_UNKNOWN,
+
18151  allocationCount,
+
18152  pAllocations);
+
18153 
+
18154 #if VMA_RECORDING_ENABLED
+
18155  if(allocator->GetRecorder() != VMA_NULL)
+
18156  {
+
18157  allocator->GetRecorder()->RecordAllocateMemoryPages(
+
18158  allocator->GetCurrentFrameIndex(),
+
18159  *pVkMemoryRequirements,
+
18160  *pCreateInfo,
+
18161  (uint64_t)allocationCount,
+
18162  pAllocations);
+
18163  }
+
18164 #endif
+
18165 
+
18166  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
+
18167  {
+
18168  for(size_t i = 0; i < allocationCount; ++i)
+
18169  {
+
18170  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
+
18171  }
+
18172  }
+
18173 
+
18174  return result;
+
18175 }
+
18176 
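
The pages variant above performs several independent allocations with identical parameters in one locked pass; allocationCount == 0 is an explicit no-op returning VK_SUCCESS, and on failure the allocations already made within the call are freed, so the output is effectively all-or-nothing. A sketch reusing memReq and allocCreateInfo from the previous example:

    VmaAllocation pages[4] = {};
    VmaAllocationInfo pageInfos[4] = {};
    VkResult res = vmaAllocateMemoryPages(
        allocator, &memReq, &allocCreateInfo,
        4, // allocationCount
        pages, pageInfos);
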
+
18177 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
+
18178  VmaAllocator allocator,
+
18179  VkBuffer buffer,
+
18180  const VmaAllocationCreateInfo* pCreateInfo,
+
18181  VmaAllocation* pAllocation,
+
18182  VmaAllocationInfo* pAllocationInfo)
+
18183 {
+
18184  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
+
18185 
+
18186  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
+
18187 
+
18188  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
18189 
+
18190  VkMemoryRequirements vkMemReq = {};
+
18191  bool requiresDedicatedAllocation = false;
+
18192  bool prefersDedicatedAllocation = false;
+
18193  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
+
18194  requiresDedicatedAllocation,
+
18195  prefersDedicatedAllocation);
18196 
-
18197  if(pAllocationInfo && result == VK_SUCCESS)
-
18198  {
-
18199  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
-
18200  }
-
18201 
-
18202  return result;
-
18203 }
-
18204 
-
18205 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
-
18206  VmaAllocator allocator,
-
18207  VkImage image,
-
18208  const VmaAllocationCreateInfo* pCreateInfo,
-
18209  VmaAllocation* pAllocation,
-
18210  VmaAllocationInfo* pAllocationInfo)
-
18211 {
-
18212  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
-
18213 
-
18214  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
-
18215 
-
18216  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
18217 
-
18218  VkMemoryRequirements vkMemReq = {};
-
18219  bool requiresDedicatedAllocation = false;
-
18220  bool prefersDedicatedAllocation = false;
-
18221  allocator->GetImageMemoryRequirements(image, vkMemReq,
-
18222  requiresDedicatedAllocation, prefersDedicatedAllocation);
-
18223 
-
18224  VkResult result = allocator->AllocateMemory(
-
18225  vkMemReq,
-
18226  requiresDedicatedAllocation,
-
18227  prefersDedicatedAllocation,
-
18228  VK_NULL_HANDLE, // dedicatedBuffer
-
18229  UINT32_MAX, // dedicatedBufferUsage
-
18230  image, // dedicatedImage
-
18231  *pCreateInfo,
-
18232  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
-
18233  1, // allocationCount
-
18234  pAllocation);
-
18235 
-
18236 #if VMA_RECORDING_ENABLED
-
18237  if(allocator->GetRecorder() != VMA_NULL)
-
18238  {
-
18239  allocator->GetRecorder()->RecordAllocateMemoryForImage(
-
18240  allocator->GetCurrentFrameIndex(),
-
18241  vkMemReq,
-
18242  requiresDedicatedAllocation,
-
18243  prefersDedicatedAllocation,
-
18244  *pCreateInfo,
-
18245  *pAllocation);
-
18246  }
-
18247 #endif
+
18197  VkResult result = allocator->AllocateMemory(
+
18198  vkMemReq,
+
18199  requiresDedicatedAllocation,
+
18200  prefersDedicatedAllocation,
+
18201  buffer, // dedicatedBuffer
+
18202  UINT32_MAX, // dedicatedBufferUsage
+
18203  VK_NULL_HANDLE, // dedicatedImage
+
18204  *pCreateInfo,
+
18205  VMA_SUBALLOCATION_TYPE_BUFFER,
+
18206  1, // allocationCount
+
18207  pAllocation);
+
18208 
+
18209 #if VMA_RECORDING_ENABLED
+
18210  if(allocator->GetRecorder() != VMA_NULL)
+
18211  {
+
18212  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
+
18213  allocator->GetCurrentFrameIndex(),
+
18214  vkMemReq,
+
18215  requiresDedicatedAllocation,
+
18216  prefersDedicatedAllocation,
+
18217  *pCreateInfo,
+
18218  *pAllocation);
+
18219  }
+
18220 #endif
+
18221 
+
18222  if(pAllocationInfo && result == VK_SUCCESS)
+
18223  {
+
18224  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
+
18225  }
+
18226 
+
18227  return result;
+
18228 }
+
18229 
+
18230 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
+
18231  VmaAllocator allocator,
+
18232  VkImage image,
+
18233  const VmaAllocationCreateInfo* pCreateInfo,
+
18234  VmaAllocation* pAllocation,
+
18235  VmaAllocationInfo* pAllocationInfo)
+
18236 {
+
18237  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
+
18238 
+
18239  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
+
18240 
+
18241  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
18242 
+
18243  VkMemoryRequirements vkMemReq = {};
+
18244  bool requiresDedicatedAllocation = false;
+
18245  bool prefersDedicatedAllocation = false;
+
18246  allocator->GetImageMemoryRequirements(image, vkMemReq,
+
18247  requiresDedicatedAllocation, prefersDedicatedAllocation);
18248 
-
18249  if(pAllocationInfo && result == VK_SUCCESS)
-
18250  {
-
18251  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
-
18252  }
-
18253 
-
18254  return result;
-
18255 }
-
18256 
-
18257 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
-
18258  VmaAllocator allocator,
-
18259  VmaAllocation allocation)
-
18260 {
-
18261  VMA_ASSERT(allocator);
-
18262 
-
18263  if(allocation == VK_NULL_HANDLE)
-
18264  {
-
18265  return;
-
18266  }
-
18267 
-
18268  VMA_DEBUG_LOG("vmaFreeMemory");
-
18269 
-
18270  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
18271 
-
18272 #if VMA_RECORDING_ENABLED
-
18273  if(allocator->GetRecorder() != VMA_NULL)
-
18274  {
-
18275  allocator->GetRecorder()->RecordFreeMemory(
-
18276  allocator->GetCurrentFrameIndex(),
-
18277  allocation);
-
18278  }
-
18279 #endif
-
18280 
-
18281  allocator->FreeMemory(
-
18282  1, // allocationCount
-
18283  &allocation);
-
18284 }
-
18285 
-
18286 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
-
18287  VmaAllocator allocator,
-
18288  size_t allocationCount,
-
18289  const VmaAllocation* pAllocations)
-
18290 {
-
18291  if(allocationCount == 0)
-
18292  {
-
18293  return;
-
18294  }
-
18295 
-
18296  VMA_ASSERT(allocator);
-
18297 
-
18298  VMA_DEBUG_LOG("vmaFreeMemoryPages");
-
18299 
-
18300  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
18301 
-
18302 #if VMA_RECORDING_ENABLED
-
18303  if(allocator->GetRecorder() != VMA_NULL)
-
18304  {
-
18305  allocator->GetRecorder()->RecordFreeMemoryPages(
-
18306  allocator->GetCurrentFrameIndex(),
-
18307  (uint64_t)allocationCount,
-
18308  pAllocations);
-
18309  }
-
18310 #endif
-
18311 
-
18312  allocator->FreeMemory(allocationCount, pAllocations);
-
18313 }
-
18314 
-
18315 VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation(
-
18316  VmaAllocator allocator,
-
18317  VmaAllocation allocation,
-
18318  VkDeviceSize newSize)
-
18319 {
-
18320  VMA_ASSERT(allocator && allocation);
-
18321 
-
18322  VMA_DEBUG_LOG("vmaResizeAllocation");
-
18323 
-
18324  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
18325 
-
18326  return allocator->ResizeAllocation(allocation, newSize);
-
18327 }
-
18328 
-
18329 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
-
18330  VmaAllocator allocator,
-
18331  VmaAllocation allocation,
-
18332  VmaAllocationInfo* pAllocationInfo)
-
18333 {
-
18334  VMA_ASSERT(allocator && allocation && pAllocationInfo);
-
18335 
-
18336  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
18337 
-
18338 #if VMA_RECORDING_ENABLED
-
18339  if(allocator->GetRecorder() != VMA_NULL)
-
18340  {
-
18341  allocator->GetRecorder()->RecordGetAllocationInfo(
-
18342  allocator->GetCurrentFrameIndex(),
-
18343  allocation);
-
18344  }
-
18345 #endif
-
18346 
-
18347  allocator->GetAllocationInfo(allocation, pAllocationInfo);
-
18348 }
-
18349 
-
18350 VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
-
18351  VmaAllocator allocator,
-
18352  VmaAllocation allocation)
-
18353 {
-
18354  VMA_ASSERT(allocator && allocation);
-
18355 
-
18356  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
18357 
-
18358 #if VMA_RECORDING_ENABLED
-
18359  if(allocator->GetRecorder() != VMA_NULL)
-
18360  {
-
18361  allocator->GetRecorder()->RecordTouchAllocation(
-
18362  allocator->GetCurrentFrameIndex(),
-
18363  allocation);
-
18364  }
-
18365 #endif
-
18366 
-
18367  return allocator->TouchAllocation(allocation);
-
18368 }
-
18369 
-
18370 VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
-
18371  VmaAllocator allocator,
-
18372  VmaAllocation allocation,
-
18373  void* pUserData)
-
18374 {
-
18375  VMA_ASSERT(allocator && allocation);
-
18376 
-
18377  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
18378 
-
18379  allocation->SetUserData(allocator, pUserData);
+
18249  VkResult result = allocator->AllocateMemory(
+
18250  vkMemReq,
+
18251  requiresDedicatedAllocation,
+
18252  prefersDedicatedAllocation,
+
18253  VK_NULL_HANDLE, // dedicatedBuffer
+
18254  UINT32_MAX, // dedicatedBufferUsage
+
18255  image, // dedicatedImage
+
18256  *pCreateInfo,
+
18257  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
+
18258  1, // allocationCount
+
18259  pAllocation);
+
18260 
+
18261 #if VMA_RECORDING_ENABLED
+
18262  if(allocator->GetRecorder() != VMA_NULL)
+
18263  {
+
18264  allocator->GetRecorder()->RecordAllocateMemoryForImage(
+
18265  allocator->GetCurrentFrameIndex(),
+
18266  vkMemReq,
+
18267  requiresDedicatedAllocation,
+
18268  prefersDedicatedAllocation,
+
18269  *pCreateInfo,
+
18270  *pAllocation);
+
18271  }
+
18272 #endif
+
18273 
+
18274  if(pAllocationInfo && result == VK_SUCCESS)
+
18275  {
+
18276  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
+
18277  }
+
18278 
+
18279  return result;
+
18280 }
+
18281 
+
18282 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
+
18283  VmaAllocator allocator,
+
18284  VmaAllocation allocation)
+
18285 {
+
18286  VMA_ASSERT(allocator);
+
18287 
+
18288  if(allocation == VK_NULL_HANDLE)
+
18289  {
+
18290  return;
+
18291  }
+
18292 
+
18293  VMA_DEBUG_LOG("vmaFreeMemory");
+
18294 
+
18295  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
18296 
+
18297 #if VMA_RECORDING_ENABLED
+
18298  if(allocator->GetRecorder() != VMA_NULL)
+
18299  {
+
18300  allocator->GetRecorder()->RecordFreeMemory(
+
18301  allocator->GetCurrentFrameIndex(),
+
18302  allocation);
+
18303  }
+
18304 #endif
+
18305 
+
18306  allocator->FreeMemory(
+
18307  1, // allocationCount
+
18308  &allocation);
+
18309 }
+
18310 
+
18311 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
+
18312  VmaAllocator allocator,
+
18313  size_t allocationCount,
+
18314  const VmaAllocation* pAllocations)
+
18315 {
+
18316  if(allocationCount == 0)
+
18317  {
+
18318  return;
+
18319  }
+
18320 
+
18321  VMA_ASSERT(allocator);
+
18322 
+
18323  VMA_DEBUG_LOG("vmaFreeMemoryPages");
+
18324 
+
18325  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
18326 
+
18327 #if VMA_RECORDING_ENABLED
+
18328  if(allocator->GetRecorder() != VMA_NULL)
+
18329  {
+
18330  allocator->GetRecorder()->RecordFreeMemoryPages(
+
18331  allocator->GetCurrentFrameIndex(),
+
18332  (uint64_t)allocationCount,
+
18333  pAllocations);
+
18334  }
+
18335 #endif
+
18336 
+
18337  allocator->FreeMemory(allocationCount, pAllocations);
+
18338 }
+
18339 
+
18340 VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation(
+
18341  VmaAllocator allocator,
+
18342  VmaAllocation allocation,
+
18343  VkDeviceSize newSize)
+
18344 {
+
18345  VMA_ASSERT(allocator && allocation);
+
18346 
+
18347  VMA_DEBUG_LOG("vmaResizeAllocation");
+
18348 
+
18349  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
18350 
+
18351  return allocator->ResizeAllocation(allocation, newSize);
+
18352 }
+
18353 
+
18354 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
+
18355  VmaAllocator allocator,
+
18356  VmaAllocation allocation,
+
18357  VmaAllocationInfo* pAllocationInfo)
+
18358 {
+
18359  VMA_ASSERT(allocator && allocation && pAllocationInfo);
+
18360 
+
18361  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
18362 
+
18363 #if VMA_RECORDING_ENABLED
+
18364  if(allocator->GetRecorder() != VMA_NULL)
+
18365  {
+
18366  allocator->GetRecorder()->RecordGetAllocationInfo(
+
18367  allocator->GetCurrentFrameIndex(),
+
18368  allocation);
+
18369  }
+
18370 #endif
+
18371 
+
18372  allocator->GetAllocationInfo(allocation, pAllocationInfo);
+
18373 }
+
18374 
+
18375 VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
+
18376  VmaAllocator allocator,
+
18377  VmaAllocation allocation)
+
18378 {
+
18379  VMA_ASSERT(allocator && allocation);
18380 
-
18381 #if VMA_RECORDING_ENABLED
-
18382  if(allocator->GetRecorder() != VMA_NULL)
-
18383  {
-
18384  allocator->GetRecorder()->RecordSetAllocationUserData(
-
18385  allocator->GetCurrentFrameIndex(),
-
18386  allocation,
-
18387  pUserData);
-
18388  }
-
18389 #endif
-
18390 }
+
18381  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
18382 
+
18383 #if VMA_RECORDING_ENABLED
+
18384  if(allocator->GetRecorder() != VMA_NULL)
+
18385  {
+
18386  allocator->GetRecorder()->RecordTouchAllocation(
+
18387  allocator->GetCurrentFrameIndex(),
+
18388  allocation);
+
18389  }
+
18390 #endif
18391 
-
18392 VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
-
18393  VmaAllocator allocator,
-
18394  VmaAllocation* pAllocation)
-
18395 {
-
18396  VMA_ASSERT(allocator && pAllocation);
-
18397 
-
18398  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
-
18399 
-
18400  allocator->CreateLostAllocation(pAllocation);
+
18392  return allocator->TouchAllocation(allocation);
+
18393 }
+
18394 
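
vmaTouchAllocation is the per-frame liveness check for allocations created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT: it updates the allocation's last-use frame index and reports whether it is still backed by memory. A sketch, assuming such an `allocation`:

    if(vmaTouchAllocation(allocator, allocation) == VK_FALSE)
    {
        // The allocation has been lost - free it and recreate the resource.
    }
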
+
18395 VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
+
18396  VmaAllocator allocator,
+
18397  VmaAllocation allocation,
+
18398  void* pUserData)
+
18399 {
+
18400  VMA_ASSERT(allocator && allocation);
18401 
-
18402 #if VMA_RECORDING_ENABLED
-
18403  if(allocator->GetRecorder() != VMA_NULL)
-
18404  {
-
18405  allocator->GetRecorder()->RecordCreateLostAllocation(
-
18406  allocator->GetCurrentFrameIndex(),
-
18407  *pAllocation);
-
18408  }
-
18409 #endif
-
18410 }
-
18411 
-
18412 VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
-
18413  VmaAllocator allocator,
-
18414  VmaAllocation allocation,
-
18415  void** ppData)
-
18416 {
-
18417  VMA_ASSERT(allocator && allocation && ppData);
-
18418 
-
18419  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
18420 
-
18421  VkResult res = allocator->Map(allocation, ppData);
+
18402  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
18403 
+
18404  allocation->SetUserData(allocator, pUserData);
+
18405 
+
18406 #if VMA_RECORDING_ENABLED
+
18407  if(allocator->GetRecorder() != VMA_NULL)
+
18408  {
+
18409  allocator->GetRecorder()->RecordSetAllocationUserData(
+
18410  allocator->GetCurrentFrameIndex(),
+
18411  allocation,
+
18412  pUserData);
+
18413  }
+
18414 #endif
+
18415 }
+
18416 
+
18417 VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
+
18418  VmaAllocator allocator,
+
18419  VmaAllocation* pAllocation)
+
18420 {
+
18421  VMA_ASSERT(allocator && pAllocation);
18422 
-
18423 #if VMA_RECORDING_ENABLED
-
18424  if(allocator->GetRecorder() != VMA_NULL)
-
18425  {
-
18426  allocator->GetRecorder()->RecordMapMemory(
-
18427  allocator->GetCurrentFrameIndex(),
-
18428  allocation);
-
18429  }
-
18430 #endif
-
18431 
-
18432  return res;
-
18433 }
-
18434 
-
18435 VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
-
18436  VmaAllocator allocator,
-
18437  VmaAllocation allocation)
-
18438 {
-
18439  VMA_ASSERT(allocator && allocation);
-
18440 
-
18441  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
18442 
-
18443 #if VMA_RECORDING_ENABLED
-
18444  if(allocator->GetRecorder() != VMA_NULL)
-
18445  {
-
18446  allocator->GetRecorder()->RecordUnmapMemory(
-
18447  allocator->GetCurrentFrameIndex(),
-
18448  allocation);
-
18449  }
-
18450 #endif
-
18451 
-
18452  allocator->Unmap(allocation);
-
18453 }
-
18454 
-
18455 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
-
18456 {
-
18457  VMA_ASSERT(allocator && allocation);
-
18458 
-
18459  VMA_DEBUG_LOG("vmaFlushAllocation");
-
18460 
-
18461  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
18462 
-
18463  const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
-
18464 
-
18465 #if VMA_RECORDING_ENABLED
-
18466  if(allocator->GetRecorder() != VMA_NULL)
-
18467  {
-
18468  allocator->GetRecorder()->RecordFlushAllocation(
-
18469  allocator->GetCurrentFrameIndex(),
-
18470  allocation, offset, size);
-
18471  }
-
18472 #endif
-
18473 
-
18474  return res;
-
18475 }
+
18423  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
+
18424 
+
18425  allocator->CreateLostAllocation(pAllocation);
+
18426 
+
18427 #if VMA_RECORDING_ENABLED
+
18428  if(allocator->GetRecorder() != VMA_NULL)
+
18429  {
+
18430  allocator->GetRecorder()->RecordCreateLostAllocation(
+
18431  allocator->GetCurrentFrameIndex(),
+
18432  *pAllocation);
+
18433  }
+
18434 #endif
+
18435 }
+
18436 
+
18437 VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
+
18438  VmaAllocator allocator,
+
18439  VmaAllocation allocation,
+
18440  void** ppData)
+
18441 {
+
18442  VMA_ASSERT(allocator && allocation && ppData);
+
18443 
+
18444  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
18445 
+
18446  VkResult res = allocator->Map(allocation, ppData);
+
18447 
+
18448 #if VMA_RECORDING_ENABLED
+
18449  if(allocator->GetRecorder() != VMA_NULL)
+
18450  {
+
18451  allocator->GetRecorder()->RecordMapMemory(
+
18452  allocator->GetCurrentFrameIndex(),
+
18453  allocation);
+
18454  }
+
18455 #endif
+
18456 
+
18457  return res;
+
18458 }
+
18459 
+
18460 VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
+
18461  VmaAllocator allocator,
+
18462  VmaAllocation allocation)
+
18463 {
+
18464  VMA_ASSERT(allocator && allocation);
+
18465 
+
18466  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
18467 
+
18468 #if VMA_RECORDING_ENABLED
+
18469  if(allocator->GetRecorder() != VMA_NULL)
+
18470  {
+
18471  allocator->GetRecorder()->RecordUnmapMemory(
+
18472  allocator->GetCurrentFrameIndex(),
+
18473  allocation);
+
18474  }
+
18475 #endif
18476 
-
18477 VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
-
18478 {
-
18479  VMA_ASSERT(allocator && allocation);
-
18480 
-
18481  VMA_DEBUG_LOG("vmaInvalidateAllocation");
-
18482 
-
18483  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
18484 
-
18485  const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
-
18486 
-
18487 #if VMA_RECORDING_ENABLED
-
18488  if(allocator->GetRecorder() != VMA_NULL)
-
18489  {
-
18490  allocator->GetRecorder()->RecordInvalidateAllocation(
-
18491  allocator->GetCurrentFrameIndex(),
-
18492  allocation, offset, size);
-
18493  }
-
18494 #endif
-
18495 
-
18496  return res;
-
18497 }
+
18477  allocator->Unmap(allocation);
+
18478 }
+
18479 
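
Mapping is reference-counted inside the allocator, so nested vmaMapMemory/vmaUnmapMemory pairs on the same allocation are legal, but every successful map must be matched by an unmap. A minimal write-through sketch, with `srcData` and `srcSize` assumed to come from the application:

    void* mappedData = VMA_NULL;
    VkResult res = vmaMapMemory(allocator, allocation, &mappedData);
    if(res == VK_SUCCESS)
    {
        memcpy(mappedData, srcData, srcSize); // needs <cstring>
        vmaUnmapMemory(allocator, allocation);
    }
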
+
18480 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
+
18481 {
+
18482  VMA_ASSERT(allocator && allocation);
+
18483 
+
18484  VMA_DEBUG_LOG("vmaFlushAllocation");
+
18485 
+
18486  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
18487 
+
18488  const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
+
18489 
+
18490 #if VMA_RECORDING_ENABLED
+
18491  if(allocator->GetRecorder() != VMA_NULL)
+
18492  {
+
18493  allocator->GetRecorder()->RecordFlushAllocation(
+
18494  allocator->GetCurrentFrameIndex(),
+
18495  allocation, offset, size);
+
18496  }
+
18497 #endif
18498 
-
18499 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
-
18500  VmaAllocator allocator,
-
18501  uint32_t allocationCount,
-
18502  const VmaAllocation* allocations,
-
18503  const VkDeviceSize* offsets,
-
18504  const VkDeviceSize* sizes)
-
18505 {
-
18506  VMA_ASSERT(allocator);
+
18499  return res;
+
18500 }
+
18501 
+
18502 VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
+
18503 {
+
18504  VMA_ASSERT(allocator && allocation);
+
18505 
+
18506  VMA_DEBUG_LOG("vmaInvalidateAllocation");
18507 
-
18508  if(allocationCount == 0)
-
18509  {
-
18510  return VK_SUCCESS;
-
18511  }
-
18512 
-
18513  VMA_ASSERT(allocations);
-
18514 
-
18515  VMA_DEBUG_LOG("vmaFlushAllocations");
-
18516 
-
18517  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
18518 
-
18519  const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_FLUSH);
+
18508  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
18509 
+
18510  const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
+
18511 
+
18512 #if VMA_RECORDING_ENABLED
+
18513  if(allocator->GetRecorder() != VMA_NULL)
+
18514  {
+
18515  allocator->GetRecorder()->RecordInvalidateAllocation(
+
18516  allocator->GetCurrentFrameIndex(),
+
18517  allocation, offset, size);
+
18518  }
+
18519 #endif
18520 
-
18521 #if VMA_RECORDING_ENABLED
-
18522  if(allocator->GetRecorder() != VMA_NULL)
-
18523  {
-
18524  //TODO
-
18525  }
-
18526 #endif
-
18527 
-
18528  return res;
-
18529 }
-
18530 
-
18531 VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(
-
18532  VmaAllocator allocator,
-
18533  uint32_t allocationCount,
-
18534  const VmaAllocation* allocations,
-
18535  const VkDeviceSize* offsets,
-
18536  const VkDeviceSize* sizes)
-
18537 {
-
18538  VMA_ASSERT(allocator);
+
18521  return res;
+
18522 }
+
18523 
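
Both calls above matter only for host-visible memory types that lack VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; on coherent types the allocator turns them into no-ops internally. The pattern is flush after CPU writes, invalidate before CPU reads, with `allocation` assumed to be mapped:

    // After writing through the mapped pointer:
    vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);

    // Before reading data the GPU has written:
    vmaInvalidateAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
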
+
18524 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
+
18525  VmaAllocator allocator,
+
18526  uint32_t allocationCount,
+
18527  const VmaAllocation* allocations,
+
18528  const VkDeviceSize* offsets,
+
18529  const VkDeviceSize* sizes)
+
18530 {
+
18531  VMA_ASSERT(allocator);
+
18532 
+
18533  if(allocationCount == 0)
+
18534  {
+
18535  return VK_SUCCESS;
+
18536  }
+
18537 
+
18538  VMA_ASSERT(allocations);
18539 
-
18540  if(allocationCount == 0)
-
18541  {
-
18542  return VK_SUCCESS;
-
18543  }
-
18544 
-
18545  VMA_ASSERT(allocations);
-
18546 
-
18547  VMA_DEBUG_LOG("vmaInvalidateAllocations");
-
18548 
-
18549  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
18550 
-
18551  const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_INVALIDATE);
+
18540  VMA_DEBUG_LOG("vmaFlushAllocations");
+
18541 
+
18542  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
18543 
+
18544  const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_FLUSH);
+
18545 
+
18546 #if VMA_RECORDING_ENABLED
+
18547  if(allocator->GetRecorder() != VMA_NULL)
+
18548  {
+
18549  //TODO
+
18550  }
+
18551 #endif
18552 
-
18553 #if VMA_RECORDING_ENABLED
-
18554  if(allocator->GetRecorder() != VMA_NULL)
-
18555  {
-
18556  //TODO
-
18557  }
-
18558 #endif
-
18559 
-
18560  return res;
-
18561 }
-
18562 
-
18563 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
-
18564 {
-
18565  VMA_ASSERT(allocator);
-
18566 
-
18567  VMA_DEBUG_LOG("vmaCheckCorruption");
-
18568 
-
18569  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
18570 
-
18571  return allocator->CheckCorruption(memoryTypeBits);
-
18572 }
+
18553  return res;
+
18554 }
+
18555 
+
18556 VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(
+
18557  VmaAllocator allocator,
+
18558  uint32_t allocationCount,
+
18559  const VmaAllocation* allocations,
+
18560  const VkDeviceSize* offsets,
+
18561  const VkDeviceSize* sizes)
+
18562 {
+
18563  VMA_ASSERT(allocator);
+
18564 
+
18565  if(allocationCount == 0)
+
18566  {
+
18567  return VK_SUCCESS;
+
18568  }
+
18569 
+
18570  VMA_ASSERT(allocations);
+
18571 
+
18572  VMA_DEBUG_LOG("vmaInvalidateAllocations");
18573 
-
18574 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
-
18575  VmaAllocator allocator,
-
18576  const VmaAllocation* pAllocations,
-
18577  size_t allocationCount,
-
18578  VkBool32* pAllocationsChanged,
-
18579  const VmaDefragmentationInfo *pDefragmentationInfo,
-
18580  VmaDefragmentationStats* pDefragmentationStats)
-
18581 {
-
18582  // Deprecated interface, reimplemented using new one.
-
18583 
-
18584  VmaDefragmentationInfo2 info2 = {};
-
18585  info2.allocationCount = (uint32_t)allocationCount;
-
18586  info2.pAllocations = pAllocations;
-
18587  info2.pAllocationsChanged = pAllocationsChanged;
-
18588  if(pDefragmentationInfo != VMA_NULL)
-
18589  {
-
18590  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
-
18591  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
-
18592  }
-
18593  else
-
18594  {
-
18595  info2.maxCpuAllocationsToMove = UINT32_MAX;
-
18596  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
-
18597  }
-
18598  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
-
18599 
-
18600  VmaDefragmentationContext ctx;
-
18601  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
-
18602  if(res == VK_NOT_READY)
-
18603  {
-
18604  res = vmaDefragmentationEnd( allocator, ctx);
-
18605  }
-
18606  return res;
-
18607 }
+
18574  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
18575 
+
18576  const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_INVALIDATE);
+
18577 
+
18578 #if VMA_RECORDING_ENABLED
+
18579  if(allocator->GetRecorder() != VMA_NULL)
+
18580  {
+
18581  //TODO
+
18582  }
+
18583 #endif
+
18584 
+
18585  return res;
+
18586 }
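
For reference, not part of this patch: the batch variants above take parallel arrays of offsets and sizes. A sketch with two existing allocations; passing null for both arrays is assumed here to mean "each allocation from offset 0, whole size", per the declarations earlier in this header:

    VmaAllocation allocs[2] = { allocA, allocB };
    VkResult res = vmaFlushAllocations(allocator, 2, allocs, nullptr, nullptr);
    // ... device consumes the data; before reading results back:
    res = vmaInvalidateAllocations(allocator, 2, allocs, nullptr, nullptr);

One vkFlushMappedMemoryRanges call with many ranges is cheaper than many single-range calls, which is the motivation for the batch API.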
+
18587 
+
18588 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
+
18589 {
+
18590  VMA_ASSERT(allocator);
+
18591 
+
18592  VMA_DEBUG_LOG("vmaCheckCorruption");
+
18593 
+
18594  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
18595 
+
18596  return allocator->CheckCorruption(memoryTypeBits);
+
18597 }
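
For reference, not part of this patch: vmaCheckCorruption scans the debug margins around allocations in the given memory types. A sketch; UINT32_MAX checks every memory type, and the call only does real work in builds with VMA_DEBUG_MARGIN and corruption detection enabled:

    VkResult res = vmaCheckCorruption(allocator, UINT32_MAX);
    if(res == VK_ERROR_FEATURE_NOT_PRESENT)
    {
        // Corruption detection was not compiled in.
    }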
+
18598 
+
18599 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
+
18600  VmaAllocator allocator,
+
18601  const VmaAllocation* pAllocations,
+
18602  size_t allocationCount,
+
18603  VkBool32* pAllocationsChanged,
+
18604  const VmaDefragmentationInfo *pDefragmentationInfo,
+
18605  VmaDefragmentationStats* pDefragmentationStats)
+
18606 {
+
18607  // Deprecated interface, reimplemented using new one.
18608 
-
18609 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
-
18610  VmaAllocator allocator,
-
18611  const VmaDefragmentationInfo2* pInfo,
-
18612  VmaDefragmentationStats* pStats,
-
18613  VmaDefragmentationContext *pContext)
-
18614 {
-
18615  VMA_ASSERT(allocator && pInfo && pContext);
-
18616 
-
18617  // Degenerate case: Nothing to defragment.
-
18618  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
+
18609  VmaDefragmentationInfo2 info2 = {};
+
18610  info2.allocationCount = (uint32_t)allocationCount;
+
18611  info2.pAllocations = pAllocations;
+
18612  info2.pAllocationsChanged = pAllocationsChanged;
+
18613  if(pDefragmentationInfo != VMA_NULL)
+
18614  {
+
18615  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
+
18616  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
+
18617  }
+
18618  else
18619  {
-
18620  return VK_SUCCESS;
-
18621  }
-
18622 
-
18623  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
-
18624  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
-
18625  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
-
18626  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
-
18627 
-
18628  VMA_DEBUG_LOG("vmaDefragmentationBegin");
-
18629 
-
18630  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
18631 
-
18632  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
+
18620  info2.maxCpuAllocationsToMove = UINT32_MAX;
+
18621  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
+
18622  }
+
18623  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
+
18624 
+
18625  VmaDefragmentationContext ctx;
+
18626  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
+
18627  if(res == VK_NOT_READY)
+
18628  {
+
18629  res = vmaDefragmentationEnd( allocator, ctx);
+
18630  }
+
18631  return res;
+
18632 }
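
As the comment above says, vmaDefragment is the deprecated interface, reimplemented on top of VmaDefragmentationInfo2; note how it maps the old maxAllocationsToMove/maxBytesToMove onto the CPU limits only. Legacy call sites look roughly like this sketch (allocs, allocCount, and changed are hypothetical); new code should call vmaDefragmentationBegin/End directly, as sketched after vmaDefragmentationEnd below:

    VmaDefragmentationStats stats = {};
    VkResult res = vmaDefragment(allocator, allocs, allocCount, changed, nullptr, &stats);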
18633 
-
18634 #if VMA_RECORDING_ENABLED
-
18635  if(allocator->GetRecorder() != VMA_NULL)
-
18636  {
-
18637  allocator->GetRecorder()->RecordDefragmentationBegin(
-
18638  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
-
18639  }
-
18640 #endif
+
18634 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
+
18635  VmaAllocator allocator,
+
18636  const VmaDefragmentationInfo2* pInfo,
+
18637  VmaDefragmentationStats* pStats,
+
18638  VmaDefragmentationContext *pContext)
+
18639 {
+
18640  VMA_ASSERT(allocator && pInfo && pContext);
18641 
-
18642  return res;
-
18643 }
-
18644 
-
18645 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
-
18646  VmaAllocator allocator,
-
18647  VmaDefragmentationContext context)
-
18648 {
-
18649  VMA_ASSERT(allocator);
-
18650 
-
18651  VMA_DEBUG_LOG("vmaDefragmentationEnd");
+
18642  // Degenerate case: Nothing to defragment.
+
18643  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
+
18644  {
+
18645  return VK_SUCCESS;
+
18646  }
+
18647 
+
18648  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
+
18649  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
+
18650  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
+
18651  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
18652 
-
18653  if(context != VK_NULL_HANDLE)
-
18654  {
-
18655  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
18653  VMA_DEBUG_LOG("vmaDefragmentationBegin");
+
18654 
+
18655  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18656 
-
18657 #if VMA_RECORDING_ENABLED
-
18658  if(allocator->GetRecorder() != VMA_NULL)
-
18659  {
-
18660  allocator->GetRecorder()->RecordDefragmentationEnd(
-
18661  allocator->GetCurrentFrameIndex(), context);
-
18662  }
-
18663 #endif
-
18664 
-
18665  return allocator->DefragmentationEnd(context);
-
18666  }
-
18667  else
-
18668  {
-
18669  return VK_SUCCESS;
-
18670  }
-
18671 }
-
18672 
-
18673 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
-
18674  VmaAllocator allocator,
-
18675  VmaDefragmentationContext context,
-
18676  VmaDefragmentationPassInfo* pInfo
-
18677  )
-
18678 {
-
18679  VMA_ASSERT(allocator);
-
18680  VMA_ASSERT(pInfo);
+
18657  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
+
18658 
+
18659 #if VMA_RECORDING_ENABLED
+
18660  if(allocator->GetRecorder() != VMA_NULL)
+
18661  {
+
18662  allocator->GetRecorder()->RecordDefragmentationBegin(
+
18663  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
+
18664  }
+
18665 #endif
+
18666 
+
18667  return res;
+
18668 }
+
18669 
+
18670 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
+
18671  VmaAllocator allocator,
+
18672  VmaDefragmentationContext context)
+
18673 {
+
18674  VMA_ASSERT(allocator);
+
18675 
+
18676  VMA_DEBUG_LOG("vmaDefragmentationEnd");
+
18677 
+
18678  if(context != VK_NULL_HANDLE)
+
18679  {
+
18680  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18681 
-
18682  VMA_DEBUG_LOG("vmaBeginDefragmentationPass");
-
18683 
-
18684  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
18685 
-
18686  if(context == VK_NULL_HANDLE)
-
18687  {
-
18688  pInfo->moveCount = 0;
-
18689  return VK_SUCCESS;
-
18690  }
-
18691 
-
18692  return allocator->DefragmentationPassBegin(pInfo, context);
-
18693 }
-
18694 VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
-
18695  VmaAllocator allocator,
-
18696  VmaDefragmentationContext context)
-
18697 {
-
18698  VMA_ASSERT(allocator);
-
18699 
-
18700  VMA_DEBUG_LOG("vmaEndDefragmentationPass");
-
18701  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
18702 
-
18703  if(context == VK_NULL_HANDLE)
-
18704  return VK_SUCCESS;
-
18705 
-
18706  return allocator->DefragmentationPassEnd(context);
-
18707 }
+
18682 #if VMA_RECORDING_ENABLED
+
18683  if(allocator->GetRecorder() != VMA_NULL)
+
18684  {
+
18685  allocator->GetRecorder()->RecordDefragmentationEnd(
+
18686  allocator->GetCurrentFrameIndex(), context);
+
18687  }
+
18688 #endif
+
18689 
+
18690  return allocator->DefragmentationEnd(context);
+
18691  }
+
18692  else
+
18693  {
+
18694  return VK_SUCCESS;
+
18695  }
+
18696 }
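
For reference, not part of this patch: a sketch of CPU-side defragmentation through the current interface, assuming allocator and a populated std::vector<VmaAllocation> allocations (requires <vector>):

    std::vector<VkBool32> changed(allocations.size());

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = (uint32_t)allocations.size();
    defragInfo.pAllocations = allocations.data();
    defragInfo.pAllocationsChanged = changed.data();
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationStats stats = {};
    VmaDefragmentationContext ctx = VK_NULL_HANDLE;
    VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, &stats, &ctx);
    if(res >= 0)
    {
        // With no command buffer supplied, the moves are performed on the
        // CPU inside Begin; End releases the context.
        res = vmaDefragmentationEnd(allocator, ctx);
    }
    // Any buffer or image whose changed[i] is VK_TRUE must be recreated and
    // rebound, since its allocation now lives elsewhere.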
+
18697 
+
18698 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
+
18699  VmaAllocator allocator,
+
18700  VmaDefragmentationContext context,
+
18701  VmaDefragmentationPassInfo* pInfo
+
18702  )
+
18703 {
+
18704  VMA_ASSERT(allocator);
+
18705  VMA_ASSERT(pInfo);
+
18706 
+
18707  VMA_DEBUG_LOG("vmaBeginDefragmentationPass");
18708 
-
18709 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
-
18710  VmaAllocator allocator,
-
18711  VmaAllocation allocation,
-
18712  VkBuffer buffer)
-
18713 {
-
18714  VMA_ASSERT(allocator && allocation && buffer);
-
18715 
-
18716  VMA_DEBUG_LOG("vmaBindBufferMemory");
-
18717 
-
18718  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
18719 
-
18720  return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL);
-
18721 }
-
18722 
-
18723 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
-
18724  VmaAllocator allocator,
-
18725  VmaAllocation allocation,
-
18726  VkDeviceSize allocationLocalOffset,
-
18727  VkBuffer buffer,
-
18728  const void* pNext)
-
18729 {
-
18730  VMA_ASSERT(allocator && allocation && buffer);
-
18731 
-
18732  VMA_DEBUG_LOG("vmaBindBufferMemory2");
+
18709  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
18710 
+
18711  if(context == VK_NULL_HANDLE)
+
18712  {
+
18713  pInfo->moveCount = 0;
+
18714  return VK_SUCCESS;
+
18715  }
+
18716 
+
18717  return allocator->DefragmentationPassBegin(pInfo, context);
+
18718 }
+
18719 VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
+
18720  VmaAllocator allocator,
+
18721  VmaDefragmentationContext context)
+
18722 {
+
18723  VMA_ASSERT(allocator);
+
18724 
+
18725  VMA_DEBUG_LOG("vmaEndDefragmentationPass");
+
18726  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
18727 
+
18728  if(context == VK_NULL_HANDLE)
+
18729  return VK_SUCCESS;
+
18730 
+
18731  return allocator->DefragmentationPassEnd(context);
+
18732 }
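
For reference, not part of this patch: the pass functions above drive incremental defragmentation. This is a bare-bones outline only; the exact in/out contract of VmaDefragmentationPassInfo (in particular who provides the moves array) follows its declaration earlier in this header, and the assumption here is that VK_NOT_READY from vmaBeginDefragmentationPass signals that more passes are needed:

    for(;;)
    {
        VmaDefragmentationPassInfo passInfo = {};
        VkResult res = vmaBeginDefragmentationPass(allocator, ctx, &passInfo);
        // ... perform or record the passInfo.moveCount requested copies ...
        vmaEndDefragmentationPass(allocator, ctx);
        if(res != VK_NOT_READY)
            break;
    }
    vmaDefragmentationEnd(allocator, ctx);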
18733 
-
18734  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
18735 
-
18736  return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext);
-
18737 }
-
18738 
-
18739 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
-
18740  VmaAllocator allocator,
-
18741  VmaAllocation allocation,
-
18742  VkImage image)
-
18743 {
-
18744  VMA_ASSERT(allocator && allocation && image);
-
18745 
-
18746  VMA_DEBUG_LOG("vmaBindImageMemory");
+
18734 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
+
18735  VmaAllocator allocator,
+
18736  VmaAllocation allocation,
+
18737  VkBuffer buffer)
+
18738 {
+
18739  VMA_ASSERT(allocator && allocation && buffer);
+
18740 
+
18741  VMA_DEBUG_LOG("vmaBindBufferMemory");
+
18742 
+
18743  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
18744 
+
18745  return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL);
+
18746 }
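
For reference, not part of this patch: vmaBindBufferMemory pairs with the separate-allocation path. A sketch, assuming buffer was already created with vkCreateBuffer:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocation alloc;
    VkResult res = vmaAllocateMemoryForBuffer(allocator, buffer, &allocCreateInfo, &alloc, nullptr);
    if(res == VK_SUCCESS)
    {
        res = vmaBindBufferMemory(allocator, alloc, buffer);
    }

Letting vmaCreateBuffer do both steps is simpler; the split exists for callers that must own the VkBuffer lifetime themselves.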
18747 
-
18748  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
18749 
-
18750  return allocator->BindImageMemory(allocation, 0, image, VMA_NULL);
-
18751 }
-
18752 
-
18753 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
-
18754  VmaAllocator allocator,
-
18755  VmaAllocation allocation,
-
18756  VkDeviceSize allocationLocalOffset,
-
18757  VkImage image,
-
18758  const void* pNext)
-
18759 {
-
18760  VMA_ASSERT(allocator && allocation && image);
-
18761 
-
18762  VMA_DEBUG_LOG("vmaBindImageMemory2");
+
18748 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
+
18749  VmaAllocator allocator,
+
18750  VmaAllocation allocation,
+
18751  VkDeviceSize allocationLocalOffset,
+
18752  VkBuffer buffer,
+
18753  const void* pNext)
+
18754 {
+
18755  VMA_ASSERT(allocator && allocation && buffer);
+
18756 
+
18757  VMA_DEBUG_LOG("vmaBindBufferMemory2");
+
18758 
+
18759  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
18760 
+
18761  return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext);
+
18762 }
18763 
-
18764  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
18765 
-
18766  return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext);
-
18767 }
-
18768 
-
18769 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
-
18770  VmaAllocator allocator,
-
18771  const VkBufferCreateInfo* pBufferCreateInfo,
-
18772  const VmaAllocationCreateInfo* pAllocationCreateInfo,
-
18773  VkBuffer* pBuffer,
-
18774  VmaAllocation* pAllocation,
-
18775  VmaAllocationInfo* pAllocationInfo)
-
18776 {
-
18777  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
-
18778 
-
18779  if(pBufferCreateInfo->size == 0)
-
18780  {
-
18781  return VK_ERROR_VALIDATION_FAILED_EXT;
-
18782  }
-
18783  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&
-
18784  !allocator->m_UseKhrBufferDeviceAddress)
-
18785  {
-
18786  VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.");
-
18787  return VK_ERROR_VALIDATION_FAILED_EXT;
-
18788  }
-
18789 
-
18790  VMA_DEBUG_LOG("vmaCreateBuffer");
-
18791 
-
18792  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
18764 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
+
18765  VmaAllocator allocator,
+
18766  VmaAllocation allocation,
+
18767  VkImage image)
+
18768 {
+
18769  VMA_ASSERT(allocator && allocation && image);
+
18770 
+
18771  VMA_DEBUG_LOG("vmaBindImageMemory");
+
18772 
+
18773  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
18774 
+
18775  return allocator->BindImageMemory(allocation, 0, image, VMA_NULL);
+
18776 }
+
18777 
+
18778 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
+
18779  VmaAllocator allocator,
+
18780  VmaAllocation allocation,
+
18781  VkDeviceSize allocationLocalOffset,
+
18782  VkImage image,
+
18783  const void* pNext)
+
18784 {
+
18785  VMA_ASSERT(allocator && allocation && image);
+
18786 
+
18787  VMA_DEBUG_LOG("vmaBindImageMemory2");
+
18788 
+
18789  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
18790 
+
18791  return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext);
+
18792 }
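
For reference, not part of this patch: the *2 variants exist so callers can bind at a non-zero offset inside the allocation and chain a pNext through to vkBindBufferMemory2 / vkBindImageMemory2 (VK_KHR_bind_memory2 or Vulkan 1.1). With offset 0 and a null pNext they are equivalent to the plain variants:

    VkResult res = vmaBindImageMemory2(allocator, alloc,
        0,        // allocationLocalOffset
        image,
        nullptr); // pNext, e.g. a VkBindImagePlaneMemoryInfo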
18793 
-
18794  *pBuffer = VK_NULL_HANDLE;
-
18795  *pAllocation = VK_NULL_HANDLE;
-
18796 
-
18797  // 1. Create VkBuffer.
-
18798  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
-
18799  allocator->m_hDevice,
-
18800  pBufferCreateInfo,
-
18801  allocator->GetAllocationCallbacks(),
-
18802  pBuffer);
-
18803  if(res >= 0)
-
18804  {
-
18805  // 2. vkGetBufferMemoryRequirements.
-
18806  VkMemoryRequirements vkMemReq = {};
-
18807  bool requiresDedicatedAllocation = false;
-
18808  bool prefersDedicatedAllocation = false;
-
18809  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
-
18810  requiresDedicatedAllocation, prefersDedicatedAllocation);
-
18811 
-
18812  // 3. Allocate memory using allocator.
-
18813  res = allocator->AllocateMemory(
-
18814  vkMemReq,
-
18815  requiresDedicatedAllocation,
-
18816  prefersDedicatedAllocation,
-
18817  *pBuffer, // dedicatedBuffer
-
18818  pBufferCreateInfo->usage, // dedicatedBufferUsage
-
18819  VK_NULL_HANDLE, // dedicatedImage
-
18820  *pAllocationCreateInfo,
-
18821  VMA_SUBALLOCATION_TYPE_BUFFER,
-
18822  1, // allocationCount
-
18823  pAllocation);
-
18824 
-
18825 #if VMA_RECORDING_ENABLED
-
18826  if(allocator->GetRecorder() != VMA_NULL)
-
18827  {
-
18828  allocator->GetRecorder()->RecordCreateBuffer(
-
18829  allocator->GetCurrentFrameIndex(),
-
18830  *pBufferCreateInfo,
-
18831  *pAllocationCreateInfo,
-
18832  *pAllocation);
-
18833  }
-
18834 #endif
-
18835 
-
18836  if(res >= 0)
-
18837  {
-
18838  // 3. Bind buffer with memory.
-
18839  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
-
18840  {
-
18841  res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
-
18842  }
-
18843  if(res >= 0)
-
18844  {
-
18845  // All steps succeeded.
-
18846  #if VMA_STATS_STRING_ENABLED
-
18847  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
-
18848  #endif
-
18849  if(pAllocationInfo != VMA_NULL)
-
18850  {
-
18851  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
-
18852  }
-
18853 
-
18854  return VK_SUCCESS;
-
18855  }
-
18856  allocator->FreeMemory(
-
18857  1, // allocationCount
-
18858  pAllocation);
-
18859  *pAllocation = VK_NULL_HANDLE;
-
18860  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
-
18861  *pBuffer = VK_NULL_HANDLE;
-
18862  return res;
-
18863  }
-
18864  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
-
18865  *pBuffer = VK_NULL_HANDLE;
-
18866  return res;
-
18867  }
-
18868  return res;
-
18869 }
-
18870 
-
18871 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
-
18872  VmaAllocator allocator,
-
18873  VkBuffer buffer,
-
18874  VmaAllocation allocation)
-
18875 {
-
18876  VMA_ASSERT(allocator);
-
18877 
-
18878  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
-
18879  {
-
18880  return;
-
18881  }
-
18882 
-
18883  VMA_DEBUG_LOG("vmaDestroyBuffer");
-
18884 
-
18885  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
18886 
-
18887 #if VMA_RECORDING_ENABLED
-
18888  if(allocator->GetRecorder() != VMA_NULL)
-
18889  {
-
18890  allocator->GetRecorder()->RecordDestroyBuffer(
-
18891  allocator->GetCurrentFrameIndex(),
-
18892  allocation);
-
18893  }
-
18894 #endif
+
18794 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
+
18795  VmaAllocator allocator,
+
18796  const VkBufferCreateInfo* pBufferCreateInfo,
+
18797  const VmaAllocationCreateInfo* pAllocationCreateInfo,
+
18798  VkBuffer* pBuffer,
+
18799  VmaAllocation* pAllocation,
+
18800  VmaAllocationInfo* pAllocationInfo)
+
18801 {
+
18802  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
+
18803 
+
18804  if(pBufferCreateInfo->size == 0)
+
18805  {
+
18806  return VK_ERROR_VALIDATION_FAILED_EXT;
+
18807  }
+
18808  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&
+
18809  !allocator->m_UseKhrBufferDeviceAddress)
+
18810  {
+
18811  VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.");
+
18812  return VK_ERROR_VALIDATION_FAILED_EXT;
+
18813  }
+
18814 
+
18815  VMA_DEBUG_LOG("vmaCreateBuffer");
+
18816 
+
18817  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
18818 
+
18819  *pBuffer = VK_NULL_HANDLE;
+
18820  *pAllocation = VK_NULL_HANDLE;
+
18821 
+
18822  // 1. Create VkBuffer.
+
18823  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
+
18824  allocator->m_hDevice,
+
18825  pBufferCreateInfo,
+
18826  allocator->GetAllocationCallbacks(),
+
18827  pBuffer);
+
18828  if(res >= 0)
+
18829  {
+
18830  // 2. vkGetBufferMemoryRequirements.
+
18831  VkMemoryRequirements vkMemReq = {};
+
18832  bool requiresDedicatedAllocation = false;
+
18833  bool prefersDedicatedAllocation = false;
+
18834  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
+
18835  requiresDedicatedAllocation, prefersDedicatedAllocation);
+
18836 
+
18837  // 3. Allocate memory using allocator.
+
18838  res = allocator->AllocateMemory(
+
18839  vkMemReq,
+
18840  requiresDedicatedAllocation,
+
18841  prefersDedicatedAllocation,
+
18842  *pBuffer, // dedicatedBuffer
+
18843  pBufferCreateInfo->usage, // dedicatedBufferUsage
+
18844  VK_NULL_HANDLE, // dedicatedImage
+
18845  *pAllocationCreateInfo,
+
18846  VMA_SUBALLOCATION_TYPE_BUFFER,
+
18847  1, // allocationCount
+
18848  pAllocation);
+
18849 
+
18850 #if VMA_RECORDING_ENABLED
+
18851  if(allocator->GetRecorder() != VMA_NULL)
+
18852  {
+
18853  allocator->GetRecorder()->RecordCreateBuffer(
+
18854  allocator->GetCurrentFrameIndex(),
+
18855  *pBufferCreateInfo,
+
18856  *pAllocationCreateInfo,
+
18857  *pAllocation);
+
18858  }
+
18859 #endif
+
18860 
+
18861  if(res >= 0)
+
18862  {
+
18863  // 3. Bind buffer with memory.
+
18864  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
+
18865  {
+
18866  res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
+
18867  }
+
18868  if(res >= 0)
+
18869  {
+
18870  // All steps succeeded.
+
18871  #if VMA_STATS_STRING_ENABLED
+
18872  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
+
18873  #endif
+
18874  if(pAllocationInfo != VMA_NULL)
+
18875  {
+
18876  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
+
18877  }
+
18878 
+
18879  return VK_SUCCESS;
+
18880  }
+
18881  allocator->FreeMemory(
+
18882  1, // allocationCount
+
18883  pAllocation);
+
18884  *pAllocation = VK_NULL_HANDLE;
+
18885  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
+
18886  *pBuffer = VK_NULL_HANDLE;
+
18887  return res;
+
18888  }
+
18889  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
+
18890  *pBuffer = VK_NULL_HANDLE;
+
18891  return res;
+
18892  }
+
18893  return res;
+
18894 }
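
For reference, not part of this patch: typical vmaCreateBuffer usage, here a persistently mapped staging buffer; a sketch assuming an existing allocator:

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;

    VkBuffer buf;
    VmaAllocation alloc;
    VmaAllocationInfo allocInfo;
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
        &buf, &alloc, &allocInfo);
    // On success, allocInfo.pMappedData points at the mapped bytes.

Note the early validation above: a zero-size buffer, or VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT without the corresponding allocator flag, fails with VK_ERROR_VALIDATION_FAILED_EXT before anything is created.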
18895 
-
18896  if(buffer != VK_NULL_HANDLE)
-
18897  {
-
18898  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
-
18899  }
-
18900 
-
18901  if(allocation != VK_NULL_HANDLE)
-
18902  {
-
18903  allocator->FreeMemory(
-
18904  1, // allocationCount
-
18905  &allocation);
+
18896 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
+
18897  VmaAllocator allocator,
+
18898  VkBuffer buffer,
+
18899  VmaAllocation allocation)
+
18900 {
+
18901  VMA_ASSERT(allocator);
+
18902 
+
18903  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
+
18904  {
+
18905  return;
18906  }
-
18907 }
-
18908 
-
18909 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
-
18910  VmaAllocator allocator,
-
18911  const VkImageCreateInfo* pImageCreateInfo,
-
18912  const VmaAllocationCreateInfo* pAllocationCreateInfo,
-
18913  VkImage* pImage,
-
18914  VmaAllocation* pAllocation,
-
18915  VmaAllocationInfo* pAllocationInfo)
-
18916 {
-
18917  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
-
18918 
-
18919  if(pImageCreateInfo->extent.width == 0 ||
-
18920  pImageCreateInfo->extent.height == 0 ||
-
18921  pImageCreateInfo->extent.depth == 0 ||
-
18922  pImageCreateInfo->mipLevels == 0 ||
-
18923  pImageCreateInfo->arrayLayers == 0)
-
18924  {
-
18925  return VK_ERROR_VALIDATION_FAILED_EXT;
-
18926  }
-
18927 
-
18928  VMA_DEBUG_LOG("vmaCreateImage");
-
18929 
-
18930  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
18931 
-
18932  *pImage = VK_NULL_HANDLE;
-
18933  *pAllocation = VK_NULL_HANDLE;
-
18934 
-
18935  // 1. Create VkImage.
-
18936  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
-
18937  allocator->m_hDevice,
-
18938  pImageCreateInfo,
-
18939  allocator->GetAllocationCallbacks(),
-
18940  pImage);
-
18941  if(res >= 0)
-
18942  {
-
18943  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
-
18944  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
-
18945  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
-
18946 
-
18947  // 2. Allocate memory using allocator.
-
18948  VkMemoryRequirements vkMemReq = {};
-
18949  bool requiresDedicatedAllocation = false;
-
18950  bool prefersDedicatedAllocation = false;
-
18951  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
-
18952  requiresDedicatedAllocation, prefersDedicatedAllocation);
-
18953 
-
18954  res = allocator->AllocateMemory(
-
18955  vkMemReq,
-
18956  requiresDedicatedAllocation,
-
18957  prefersDedicatedAllocation,
-
18958  VK_NULL_HANDLE, // dedicatedBuffer
-
18959  UINT32_MAX, // dedicatedBufferUsage
-
18960  *pImage, // dedicatedImage
-
18961  *pAllocationCreateInfo,
-
18962  suballocType,
-
18963  1, // allocationCount
-
18964  pAllocation);
-
18965 
-
18966 #if VMA_RECORDING_ENABLED
-
18967  if(allocator->GetRecorder() != VMA_NULL)
-
18968  {
-
18969  allocator->GetRecorder()->RecordCreateImage(
-
18970  allocator->GetCurrentFrameIndex(),
-
18971  *pImageCreateInfo,
-
18972  *pAllocationCreateInfo,
-
18973  *pAllocation);
-
18974  }
-
18975 #endif
-
18976 
-
18977  if(res >= 0)
-
18978  {
-
18979  // 3. Bind image with memory.
-
18980  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
-
18981  {
-
18982  res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL);
-
18983  }
-
18984  if(res >= 0)
-
18985  {
-
18986  // All steps succeeded.
-
18987  #if VMA_STATS_STRING_ENABLED
-
18988  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
-
18989  #endif
-
18990  if(pAllocationInfo != VMA_NULL)
-
18991  {
-
18992  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
-
18993  }
-
18994 
-
18995  return VK_SUCCESS;
-
18996  }
-
18997  allocator->FreeMemory(
-
18998  1, // allocationCount
-
18999  pAllocation);
-
19000  *pAllocation = VK_NULL_HANDLE;
-
19001  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
-
19002  *pImage = VK_NULL_HANDLE;
-
19003  return res;
-
19004  }
-
19005  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
-
19006  *pImage = VK_NULL_HANDLE;
-
19007  return res;
-
19008  }
-
19009  return res;
-
19010 }
-
19011 
-
19012 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
-
19013  VmaAllocator allocator,
-
19014  VkImage image,
-
19015  VmaAllocation allocation)
-
19016 {
-
19017  VMA_ASSERT(allocator);
-
19018 
-
19019  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
-
19020  {
-
19021  return;
-
19022  }
-
19023 
-
19024  VMA_DEBUG_LOG("vmaDestroyImage");
-
19025 
-
19026  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
19027 
-
19028 #if VMA_RECORDING_ENABLED
-
19029  if(allocator->GetRecorder() != VMA_NULL)
-
19030  {
-
19031  allocator->GetRecorder()->RecordDestroyImage(
-
19032  allocator->GetCurrentFrameIndex(),
-
19033  allocation);
-
19034  }
-
19035 #endif
+
18907 
+
18908  VMA_DEBUG_LOG("vmaDestroyBuffer");
+
18909 
+
18910  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
18911 
+
18912 #if VMA_RECORDING_ENABLED
+
18913  if(allocator->GetRecorder() != VMA_NULL)
+
18914  {
+
18915  allocator->GetRecorder()->RecordDestroyBuffer(
+
18916  allocator->GetCurrentFrameIndex(),
+
18917  allocation);
+
18918  }
+
18919 #endif
+
18920 
+
18921  if(buffer != VK_NULL_HANDLE)
+
18922  {
+
18923  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
+
18924  }
+
18925 
+
18926  if(allocation != VK_NULL_HANDLE)
+
18927  {
+
18928  allocator->FreeMemory(
+
18929  1, // allocationCount
+
18930  &allocation);
+
18931  }
+
18932 }
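
As the null checks above show, passing VK_NULL_HANDLE for both the buffer and the allocation makes vmaDestroyBuffer a no-op, so unconditional cleanup is safe:

    vmaDestroyBuffer(allocator, buf, alloc); // destroys the VkBuffer and frees its memory in one call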
+
18933 
+
18934 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
+
18935  VmaAllocator allocator,
+
18936  const VkImageCreateInfo* pImageCreateInfo,
+
18937  const VmaAllocationCreateInfo* pAllocationCreateInfo,
+
18938  VkImage* pImage,
+
18939  VmaAllocation* pAllocation,
+
18940  VmaAllocationInfo* pAllocationInfo)
+
18941 {
+
18942  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
+
18943 
+
18944  if(pImageCreateInfo->extent.width == 0 ||
+
18945  pImageCreateInfo->extent.height == 0 ||
+
18946  pImageCreateInfo->extent.depth == 0 ||
+
18947  pImageCreateInfo->mipLevels == 0 ||
+
18948  pImageCreateInfo->arrayLayers == 0)
+
18949  {
+
18950  return VK_ERROR_VALIDATION_FAILED_EXT;
+
18951  }
+
18952 
+
18953  VMA_DEBUG_LOG("vmaCreateImage");
+
18954 
+
18955  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
18956 
+
18957  *pImage = VK_NULL_HANDLE;
+
18958  *pAllocation = VK_NULL_HANDLE;
+
18959 
+
18960  // 1. Create VkImage.
+
18961  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
+
18962  allocator->m_hDevice,
+
18963  pImageCreateInfo,
+
18964  allocator->GetAllocationCallbacks(),
+
18965  pImage);
+
18966  if(res >= 0)
+
18967  {
+
18968  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
+
18969  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
+
18970  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
+
18971 
+
18972  // 2. Allocate memory using allocator.
+
18973  VkMemoryRequirements vkMemReq = {};
+
18974  bool requiresDedicatedAllocation = false;
+
18975  bool prefersDedicatedAllocation = false;
+
18976  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
+
18977  requiresDedicatedAllocation, prefersDedicatedAllocation);
+
18978 
+
18979  res = allocator->AllocateMemory(
+
18980  vkMemReq,
+
18981  requiresDedicatedAllocation,
+
18982  prefersDedicatedAllocation,
+
18983  VK_NULL_HANDLE, // dedicatedBuffer
+
18984  UINT32_MAX, // dedicatedBufferUsage
+
18985  *pImage, // dedicatedImage
+
18986  *pAllocationCreateInfo,
+
18987  suballocType,
+
18988  1, // allocationCount
+
18989  pAllocation);
+
18990 
+
18991 #if VMA_RECORDING_ENABLED
+
18992  if(allocator->GetRecorder() != VMA_NULL)
+
18993  {
+
18994  allocator->GetRecorder()->RecordCreateImage(
+
18995  allocator->GetCurrentFrameIndex(),
+
18996  *pImageCreateInfo,
+
18997  *pAllocationCreateInfo,
+
18998  *pAllocation);
+
18999  }
+
19000 #endif
+
19001 
+
19002  if(res >= 0)
+
19003  {
+
19004  // 3. Bind image with memory.
+
19005  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
+
19006  {
+
19007  res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL);
+
19008  }
+
19009  if(res >= 0)
+
19010  {
+
19011  // All steps succeeded.
+
19012  #if VMA_STATS_STRING_ENABLED
+
19013  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
+
19014  #endif
+
19015  if(pAllocationInfo != VMA_NULL)
+
19016  {
+
19017  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
+
19018  }
+
19019 
+
19020  return VK_SUCCESS;
+
19021  }
+
19022  allocator->FreeMemory(
+
19023  1, // allocationCount
+
19024  pAllocation);
+
19025  *pAllocation = VK_NULL_HANDLE;
+
19026  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
+
19027  *pImage = VK_NULL_HANDLE;
+
19028  return res;
+
19029  }
+
19030  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
+
19031  *pImage = VK_NULL_HANDLE;
+
19032  return res;
+
19033  }
+
19034  return res;
+
19035 }
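
For reference, not part of this patch: vmaCreateImage mirrors vmaCreateBuffer, with the added tiling check that selects the linear or optimal suballocation type. A sketch creating a sampled 2D texture in device-local memory:

    VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
    imgCreateInfo.extent = { 1024, 1024, 1 };
    imgCreateInfo.mipLevels = 1;
    imgCreateInfo.arrayLayers = 1;
    imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    imgCreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
    imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkImage image;
    VmaAllocation alloc;
    VkResult res = vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo,
        &image, &alloc, nullptr);
    // ...
    vmaDestroyImage(allocator, image, alloc);

Note the guard above: any zero extent, mip count, or array layer count is rejected up front with VK_ERROR_VALIDATION_FAILED_EXT.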
19036 
-
19037  if(image != VK_NULL_HANDLE)
-
19038  {
-
19039  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
-
19040  }
-
19041  if(allocation != VK_NULL_HANDLE)
-
19042  {
-
19043  allocator->FreeMemory(
-
19044  1, // allocationCount
-
19045  &allocation);
-
19046  }
-
19047 }
+
19037 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
+
19038  VmaAllocator allocator,
+
19039  VkImage image,
+
19040  VmaAllocation allocation)
+
19041 {
+
19042  VMA_ASSERT(allocator);
+
19043 
+
19044  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
+
19045  {
+
19046  return;
+
19047  }
19048 
-
19049 #endif // #ifdef VMA_IMPLEMENTATION
+
19049  VMA_DEBUG_LOG("vmaDestroyImage");
+
19050 
+
19051  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
19052 
+
19053 #if VMA_RECORDING_ENABLED
+
19054  if(allocator->GetRecorder() != VMA_NULL)
+
19055  {
+
19056  allocator->GetRecorder()->RecordDestroyImage(
+
19057  allocator->GetCurrentFrameIndex(),
+
19058  allocation);
+
19059  }
+
19060 #endif
+
19061 
+
19062  if(image != VK_NULL_HANDLE)
+
19063  {
+
19064  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
+
19065  }
+
19066  if(allocation != VK_NULL_HANDLE)
+
19067  {
+
19068  allocator->FreeMemory(
+
19069  1, // allocationCount
+
19070  &allocation);
+
19071  }
+
19072 }
+
19073 
+
19074 #endif // #ifdef VMA_IMPLEMENTATION
diff --git a/src/vk_mem_alloc.h b/src/vk_mem_alloc.h
index 33b6db4..aaaa778 100644
--- a/src/vk_mem_alloc.h
+++ b/src/vk_mem_alloc.h
@@ -513,7 +513,7 @@ VmaAllocation alloc;
 VmaAllocationInfo allocInfo;
 vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
 
-if(allocInfo.pUserData != nullptr)
+if(allocInfo.pMappedData != nullptr)
 {
     // Allocation ended up in mappable memory.
     // It's persistently mapped. You can access it directly.
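
This final hunk is the substantive fix of the patch: whether an allocation is persistently mapped must be tested through VmaAllocationInfo::pMappedData, which is filled in when VMA_ALLOCATION_CREATE_MAPPED_BIT took effect, not through the unrelated pUserData. A sketch of the corrected pattern, with hypothetical myData/myDataSize (requires <cstring>):

    if(allocInfo.pMappedData != nullptr)
    {
        // Persistently mapped: write through the pointer directly.
        memcpy(allocInfo.pMappedData, myData, myDataSize);
    }
    else
    {
        // Not persistently mapped: map temporarily. vmaMapMemory fails if
        // the memory is not HOST_VISIBLE.
        void* mapped = nullptr;
        if(vmaMapMemory(allocator, alloc, &mapped) == VK_SUCCESS)
        {
            memcpy(mapped, myData, myDataSize);
            vmaUnmapMemory(allocator, alloc);
        }
    }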