diff --git a/bin/VmaReplay_Release_vs2019.exe b/bin/VmaReplay_Release_vs2019.exe
index f8712c8..68c4280 100644
Binary files a/bin/VmaReplay_Release_vs2019.exe and b/bin/VmaReplay_Release_vs2019.exe differ
diff --git a/bin/VulkanSample_Release_vs2019.exe b/bin/VulkanSample_Release_vs2019.exe
index fa83c3a..39f91a5 100644
Binary files a/bin/VulkanSample_Release_vs2019.exe and b/bin/VulkanSample_Release_vs2019.exe differ
diff --git a/docs/html/defragmentation.html b/docs/html/defragmentation.html
index 2d15f36..b0ae912 100644
--- a/docs/html/defragmentation.html
+++ b/docs/html/defragmentation.html
@@ -132,12 +132,12 @@ Defragmenting CPU memory
You may try not to block your entire program waiting until defragmentation finishes, but instead run it in the background, as long as you carefully fulfill the requirements described for function vmaDefragmentationBegin().
- Only allocations made in memory types that have VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT and VK_MEMORY_PROPERTY_HOST_COHERENT_BIT flags can be compacted. You may pass other allocations but it makes no sense - these will never be moved.
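
A minimal sketch of such background-friendly, CPU-side defragmentation with the vmaDefragmentationBegin()/vmaDefragmentationEnd() pair from this version of the library; the allocator, the allocation array and its count are assumptions of the example:

    // Defragment host-visible, host-coherent allocations without a command buffer.
    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocCount;          // assumed caller-side data
    defragInfo.pAllocations = allocations;            // HOST_VISIBLE | HOST_COHERENT only
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
    VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, NULL, &defragCtx);
    // ... the rest of the program may keep running, but the allocations being
    // moved must not be used until the operation is ended ...
    if(res >= VK_SUCCESS)
        vmaDefragmentationEnd(allocator, defragCtx);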
@@ -3313,43 +3310,6 @@ Functions
This function always fails when called for an allocation that was created with the VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag. Such allocations cannot be mapped.
This function doesn't automatically flush or invalidate caches. If the allocation is made from a memory type that is not HOST_COHERENT, you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by the Vulkan specification.
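
For example, a write through a mapped pointer on a non-coherent memory type needs an explicit flush; a sketch, where the allocator, the host-visible allocation and the data being written are assumed to exist:

    void* mapped = NULL;
    VkResult res = vmaMapMemory(allocator, allocation, &mapped);
    if(res == VK_SUCCESS)
    {
        memcpy(mapped, &data, sizeof(data));
        // Required on memory types without VK_MEMORY_PROPERTY_HOST_COHERENT_BIT.
        vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
        vmaUnmapMemory(allocator, allocation);
    }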
-
-◆ vmaResizeAllocation()
-
- VkResult vmaResizeAllocation(
-     VmaAllocator allocator,
-     VmaAllocation allocation,
-     VkDeviceSize newSize);
-
 Deprecated.
-
- Deprecated:
- In version 2.2.0 it used to try to change allocation's size without moving or reallocating it. In current version it returns VK_SUCCESS only if newSize equals current allocation's size. Otherwise returns VK_ERROR_OUT_OF_POOL_MEMORY, indicating that allocation's size could not be changed.
-
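
For reference, a sketch of the deprecated 2.x semantics that this diff removes; `allocator` and `alloc` are assumed to exist, and the return values follow the deprecation note above:

    VmaAllocationInfo info;
    vmaGetAllocationInfo(allocator, alloc, &info);
    VkResult res = vmaResizeAllocation(allocator, alloc, info.size);        // VK_SUCCESS (no-op)
    res = vmaResizeAllocation(allocator, alloc, info.size + 1024);          // VK_ERROR_OUT_OF_POOL_MEMORY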
diff --git a/docs/html/vk__mem__alloc_8h_source.html b/docs/html/vk__mem__alloc_8h_source.html
index 5aef1d2..1d862db 100644
--- a/docs/html/vk__mem__alloc_8h_source.html
+++ b/docs/html/vk__mem__alloc_8h_source.html
@@ -636,15431 +636,15693 @@ $(function() {
 3368     size_t allocationCount,
 3369     const VmaAllocation VMA_NULLABLE * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations);
- 3381     VkDeviceSize newSize);
 3426     void* VMA_NULLABLE pUserData);
 3483     void* VMA_NULLABLE * VMA_NOT_NULL ppData);
 3521     VkDeviceSize offset,
 3548     VkDeviceSize offset,
 3567     uint32_t allocationCount,
 3568     const VmaAllocation VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
 3569     const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
 3570     const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);
 3588     uint32_t allocationCount,
 3589     const VmaAllocation VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
 3590     const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
 3591     const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);
 3670     const VmaPool VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(poolCount) pPools;
 3704     VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory;
 3842     const VmaAllocation VMA_NOT_NULL * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations,
 3843     size_t allocationCount,
 3844     VkBool32* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationsChanged,
 3863     VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer);
 3878     VkDeviceSize allocationLocalOffset,
 3879     VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
 3880     const void* VMA_NULLABLE pNext);
 3897     VkImage VMA_NOT_NULL_NON_DISPATCHABLE image);
 3912     VkDeviceSize allocationLocalOffset,
 3913     VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
 3914     const void* VMA_NULLABLE pNext);
 3948     const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
 3950     VkBuffer VMA_NULLABLE_NON_DISPATCHABLE * VMA_NOT_NULL pBuffer,
 3967     VkBuffer VMA_NULLABLE_NON_DISPATCHABLE buffer,
 3973     const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
 3975     VkImage VMA_NULLABLE_NON_DISPATCHABLE * VMA_NOT_NULL pImage,
 3992     VkImage VMA_NULLABLE_NON_DISPATCHABLE image,
 4002 #if defined(__cplusplus) && defined(__INTELLISENSE__)
 4003 #define VMA_IMPLEMENTATION
 4006 #ifdef VMA_IMPLEMENTATION
 4007 #undef VMA_IMPLEMENTATION
 4014 #if VMA_RECORDING_ENABLED
 4017 #include <windows.h>
 4037 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
 4038 #define VMA_STATIC_VULKAN_FUNCTIONS 1
 4047 #if !defined(VMA_DYNAMIC_VULKAN_FUNCTIONS)
 4048 #define VMA_DYNAMIC_VULKAN_FUNCTIONS 1
 4049 #if defined(VK_NO_PROTOTYPES)
 4050 extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;
 4051 extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;
 4064 #if VMA_USE_STL_CONTAINERS
 4065 #define VMA_USE_STL_VECTOR 1
 4066 #define VMA_USE_STL_UNORDERED_MAP 1
 4067 #define VMA_USE_STL_LIST 1
 4070 #ifndef VMA_USE_STL_SHARED_MUTEX
 4072 #if __cplusplus >= 201703L
 4073 #define VMA_USE_STL_SHARED_MUTEX 1
 4077 #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
 4078 #define VMA_USE_STL_SHARED_MUTEX 1
 4080 #define VMA_USE_STL_SHARED_MUTEX 0
 4088 #if VMA_USE_STL_VECTOR
 4092 #if VMA_USE_STL_UNORDERED_MAP
 4093 #include <unordered_map>
 4096 #if VMA_USE_STL_LIST
 4105 #include <algorithm>
 4110 #define VMA_NULL nullptr
 4113 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
 4115 static void* vma_aligned_alloc(size_t alignment, size_t size)
 4118     if(alignment < sizeof(void*))
 4120         alignment = sizeof(void*);
 4123     return memalign(alignment, size);
 4125 #elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC))
 4128 #if defined(__APPLE__)
 4129 #include <AvailabilityMacros.h>
 4132 static void* vma_aligned_alloc(size_t alignment, size_t size)
 4134 #if defined(__APPLE__) && (defined(MAC_OS_X_VERSION_10_16) || defined(__IPHONE_14_0))
 4135 #if MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_16 || __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_14_0
 4142     if (__builtin_available(macOS 10.15, iOS 13, *))
 4143         return aligned_alloc(alignment, size);
 4147     if(alignment < sizeof(void*))
 4149         alignment = sizeof(void*);
 4153     if(posix_memalign(&pointer, alignment, size) == 0)
 4157 #elif defined(_WIN32)
 4158 static void* vma_aligned_alloc(size_t alignment, size_t size)
 4160     return _aligned_malloc(size, alignment);
 4163 static void* vma_aligned_alloc(size_t alignment, size_t size)
 4165     return aligned_alloc(alignment, size);
 4170 static void vma_aligned_free(void* ptr)
 4175 static void vma_aligned_free(void* ptr)
 4189 #define VMA_ASSERT(expr)
 4191 #define VMA_ASSERT(expr) assert(expr)
 4197 #ifndef VMA_HEAVY_ASSERT
 4199 #define VMA_HEAVY_ASSERT(expr)
 4201 #define VMA_HEAVY_ASSERT(expr)
 4205 #ifndef VMA_ALIGN_OF
 4206 #define VMA_ALIGN_OF(type) (__alignof(type))
 4209 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
 4210 #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) vma_aligned_alloc((alignment), (size))
 4213 #ifndef VMA_SYSTEM_ALIGNED_FREE
 4215 #if defined(VMA_SYSTEM_FREE)
 4216 #define VMA_SYSTEM_ALIGNED_FREE(ptr) VMA_SYSTEM_FREE(ptr)
 4218 #define VMA_SYSTEM_ALIGNED_FREE(ptr) vma_aligned_free(ptr)
 4223 #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
 4227 #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
 4231 #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
 4235 #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
 4238 #ifndef VMA_DEBUG_LOG
 4239 #define VMA_DEBUG_LOG(format, ...)
 4249 #if VMA_STATS_STRING_ENABLED
 4250 static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
 4252     snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
 4254 static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
 4256     snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
 4258 static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
 4260     snprintf(outStr, strLen, "%p", ptr);
 4268     void Lock() { m_Mutex.lock(); }
 4269     void Unlock() { m_Mutex.unlock(); }
 4270     bool TryLock() { return m_Mutex.try_lock(); }
 4274 #define VMA_MUTEX VmaMutex
 4278 #ifndef VMA_RW_MUTEX
 4279 #if VMA_USE_STL_SHARED_MUTEX
 4281 #include <shared_mutex>
 4285     void LockRead() { m_Mutex.lock_shared(); }
 4286     void UnlockRead() { m_Mutex.unlock_shared(); }
 4287     bool TryLockRead() { return m_Mutex.try_lock_shared(); }
 4288     void LockWrite() { m_Mutex.lock(); }
 4289     void UnlockWrite() { m_Mutex.unlock(); }
 4290     bool TryLockWrite() { return m_Mutex.try_lock(); }
 4292     std::shared_mutex m_Mutex;
 4294 #define VMA_RW_MUTEX VmaRWMutex
 4295 #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
 4301     VmaRWMutex() { InitializeSRWLock(&m_Lock); }
 4302     void LockRead() { AcquireSRWLockShared(&m_Lock); }
 4303     void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
 4304     bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; }
 4305     void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
 4306     void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
 4307     bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; }
 4311 #define VMA_RW_MUTEX VmaRWMutex
 4317     void LockRead() { m_Mutex.Lock(); }
 4318     void UnlockRead() { m_Mutex.Unlock(); }
 4319     bool TryLockRead() { return m_Mutex.TryLock(); }
 4320     void LockWrite() { m_Mutex.Lock(); }
 4321     void UnlockWrite() { m_Mutex.Unlock(); }
 4322     bool TryLockWrite() { return m_Mutex.TryLock(); }
 4326 #define VMA_RW_MUTEX VmaRWMutex
 4333 #ifndef VMA_ATOMIC_UINT32
 4335 #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
 4338 #ifndef VMA_ATOMIC_UINT64
 4340 #define VMA_ATOMIC_UINT64 std::atomic<uint64_t>
 4343 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
 4348 #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
 4351 #ifndef VMA_DEBUG_ALIGNMENT
 4356 #define VMA_DEBUG_ALIGNMENT (1)
 4359 #ifndef VMA_DEBUG_MARGIN
 4364 #define VMA_DEBUG_MARGIN (0)
 4367 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
 4372 #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
 4375 #ifndef VMA_DEBUG_DETECT_CORRUPTION
 4381 #define VMA_DEBUG_DETECT_CORRUPTION (0)
 4384 #ifndef VMA_DEBUG_GLOBAL_MUTEX
 4389 #define VMA_DEBUG_GLOBAL_MUTEX (0)
 4392 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
 4397 #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
+ 4400 #ifndef VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT
+ 4405 #define VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT (0)
 4408 #ifndef VMA_SMALL_HEAP_MAX_SIZE
 4410 #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
 4413 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
 4415 #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
 4418 #ifndef VMA_CLASS_NO_COPY
 4419 #define VMA_CLASS_NO_COPY(className) \
 4421     className(const className&) = delete; \
 4422     className& operator=(const className&) = delete;
 4425 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
 4428 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
 4430 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
 4431 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
 4439 static const uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040;
 4440 static const uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080;
 4441 static const uint32_t VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY = 0x00020000;
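
The newly added VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT switch is, like the other VMA_DEBUG_* knobs above, meant to be overridden before the implementation include. A sketch; the exact runtime effect (assumed here to be enforcing VkPhysicalDeviceLimits::maxMemoryAllocationCount in debug builds) is not shown in this listing:

    #define VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT 1
    #define VMA_IMPLEMENTATION
    #include "vk_mem_alloc.h"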
 4443 static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
 4445 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
 4446     VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
 4449 static inline uint32_t VmaCountBitsSet(uint32_t v)
 4451     uint32_t c = v - ((v >> 1) & 0x55555555);
 4452     c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
 4453     c = ((c >> 4) + c) & 0x0F0F0F0F;
 4454     c = ((c >> 8) + c) & 0x00FF00FF;
 4455     c = ((c >> 16) + c) & 0x0000FFFF;
 4464 template <typename T>
 4465 inline bool VmaIsPow2(T x)
 4467     return (x & (x-1)) == 0;
 4472 template <typename T>
 4473 static inline T VmaAlignUp(T val, T alignment)
 4475     VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
 4476     return (val + alignment - 1) & ~(alignment - 1);
 4480 template <typename T>
 4481 static inline T VmaAlignDown(T val, T alignment)
 4483     VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
 4484     return val & ~(alignment - 1);
 4488 template <typename T>
 4489 static inline T VmaRoundDiv(T x, T y)
 4491     return (x + (y / (T)2)) / y;
 4495 static inline uint32_t VmaNextPow2(uint32_t v)
 4506 static inline uint64_t VmaNextPow2(uint64_t v)
 4520 static inline uint32_t VmaPrevPow2(uint32_t v)
 4530 static inline uint64_t VmaPrevPow2(uint64_t v)
 4542 static inline bool VmaStrIsEmpty(const char* pStr)
 4544     return pStr == VMA_NULL || *pStr == '\0';
 4547 #if VMA_STATS_STRING_ENABLED
 4549 static const char* VmaAlgorithmToStr(uint32_t algorithm)
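
VmaAlignUp/VmaAlignDown rely on the power-of-two bit trick visible above; a short worked check (the values are illustrative):

    // (val + alignment - 1) & ~(alignment - 1) rounds up to the next multiple:
    static_assert(((13 + 8 - 1) & ~(8 - 1)) == 16, "align 13 up to 16");
    // val & ~(alignment - 1) rounds down to the previous multiple:
    static_assert((13 & ~(8 - 1)) == 8, "align 13 down to 8");
    // The VMA_HEAVY_ASSERT(VmaIsPow2(alignment)) guards the precondition:
    // the mask only works when `alignment` is a power of two.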
 4569 template<typename Iterator, typename Compare>
 4570 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
 4572     Iterator centerValue = end; --centerValue;
 4573     Iterator insertIndex = beg;
 4574     for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
 4576         if(cmp(*memTypeIndex, *centerValue))
 4578             if(insertIndex != memTypeIndex)
 4580                 VMA_SWAP(*memTypeIndex, *insertIndex);
 4585     if(insertIndex != centerValue)
 4587         VMA_SWAP(*insertIndex, *centerValue);
 4592 template<typename Iterator, typename Compare>
 4593 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
 4597     Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
 4598     VmaQuickSort<Iterator, Compare>(beg, it, cmp);
 4599     VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
 4603 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
 4614 static inline bool VmaBlocksOnSamePage(
 4615     VkDeviceSize resourceAOffset,
 4616     VkDeviceSize resourceASize,
 4617     VkDeviceSize resourceBOffset,
 4618     VkDeviceSize pageSize)
 4620     VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
 4621     VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
 4622     VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
 4623     VkDeviceSize resourceBStart = resourceBOffset;
 4624     VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
 4625     return resourceAEndPage == resourceBStartPage;
 4628 enum VmaSuballocationType
 4630     VMA_SUBALLOCATION_TYPE_FREE = 0,
 4631     VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
 4632     VMA_SUBALLOCATION_TYPE_BUFFER = 2,
 4633     VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
 4634     VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
 4635     VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
 4636     VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
 4645 static inline bool VmaIsBufferImageGranularityConflict(
 4646     VmaSuballocationType suballocType1,
 4647     VmaSuballocationType suballocType2)
 4649     if(suballocType1 > suballocType2)
 4651         VMA_SWAP(suballocType1, suballocType2);
 4654     switch(suballocType1)
 4656     case VMA_SUBALLOCATION_TYPE_FREE:
 4658     case VMA_SUBALLOCATION_TYPE_UNKNOWN:
 4660     case VMA_SUBALLOCATION_TYPE_BUFFER:
 4662             suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
 4663             suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
 4664     case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
 4666             suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
 4667             suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
 4668             suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
 4669     case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
 4671             suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
 4672     case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
 4680 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
 4682 #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
 4683     uint32_t* pDst = (uint32_t*)((char*)pData + offset);
 4684     const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
 4685     for(size_t i = 0; i < numberCount; ++i, ++pDst)
 4687         *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
 4694 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
 4696 #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
 4697     const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
 4698     const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
 4699     for(size_t i = 0; i < numberCount; ++i, ++pSrc)
 4701         if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
 4714 static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
 4716     memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
 4717     outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
 4718     outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
 4719     outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE;
 4725     VMA_CLASS_NO_COPY(VmaMutexLock)
 4727     VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
 4728         m_pMutex(useMutex ? &mutex : VMA_NULL)
 4729     { if(m_pMutex) { m_pMutex->Lock(); } }
 4731     { if(m_pMutex) { m_pMutex->Unlock(); } }
 4733     VMA_MUTEX* m_pMutex;
 4737 struct VmaMutexLockRead
 4739     VMA_CLASS_NO_COPY(VmaMutexLockRead)
 4741     VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
 4742         m_pMutex(useMutex ? &mutex : VMA_NULL)
 4743     { if(m_pMutex) { m_pMutex->LockRead(); } }
 4744     ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
 4746     VMA_RW_MUTEX* m_pMutex;
 4750 struct VmaMutexLockWrite
 4752     VMA_CLASS_NO_COPY(VmaMutexLockWrite)
 4754     VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
 4755         m_pMutex(useMutex ? &mutex : VMA_NULL)
 4756     { if(m_pMutex) { m_pMutex->LockWrite(); } }
 4757     ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
 4759     VMA_RW_MUTEX* m_pMutex;
 4762 #if VMA_DEBUG_GLOBAL_MUTEX
 4763 static VMA_MUTEX gDebugGlobalMutex;
 4764 #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
 4766 #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
 4770 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
 4781 template <typename CmpLess, typename IterT, typename KeyT>
 4782 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, const CmpLess& cmp)
 4784     size_t down = 0, up = (end - beg);
 4787         const size_t mid = down + (up - down) / 2;
 4788         if(cmp(*(beg+mid), key))
 4800 template<typename CmpLess, typename IterT, typename KeyT>
 4801 IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
 4803     IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
 4804         beg, end, value, cmp);
 4806         (!cmp(*it, value) && !cmp(value, *it)))
 4818 template<typename T>
 4819 static bool VmaValidatePointerArray(uint32_t count, const T* arr)
 4821     for(uint32_t i = 0; i < count; ++i)
 4823         const T iPtr = arr[i];
 4824         if(iPtr == VMA_NULL)
 4828         for(uint32_t j = i + 1; j < count; ++j)
 4839 template<typename MainT, typename NewT>
 4840 static inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct)
 4842     newStruct->pNext = mainStruct->pNext;
 4843     mainStruct->pNext = newStruct;
 4849 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
 4851     void* result = VMA_NULL;
 4852     if((pAllocationCallbacks != VMA_NULL) &&
 4853         (pAllocationCallbacks->pfnAllocation != VMA_NULL))
 4855         result = (*pAllocationCallbacks->pfnAllocation)(
 4856             pAllocationCallbacks->pUserData,
 4859             VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
 4863         result = VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
 4865     VMA_ASSERT(result != VMA_NULL && "CPU memory allocation failed.");
 4869 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
 4871     if((pAllocationCallbacks != VMA_NULL) &&
 4872         (pAllocationCallbacks->pfnFree != VMA_NULL))
 4874         (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
 4878         VMA_SYSTEM_ALIGNED_FREE(ptr);
 4882 template<typename T>
 4883 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
 4885     return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
 4888 template<typename T>
 4889 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
 4891     return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
 4894 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
 4896 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
 4898 template<typename T>
 4899 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
 4902     VmaFree(pAllocationCallbacks, ptr);
 4905 template<typename T>
 4906 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
 4910         for(size_t i = count; i--; )
 4914         VmaFree(pAllocationCallbacks, ptr);
 4918 static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr)
 4920     if(srcStr != VMA_NULL)
 4922         const size_t len = strlen(srcStr);
 4923         char* const result = vma_new_array(allocs, char, len + 1);
 4924         memcpy(result, srcStr, len + 1);
 4933 static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str)
 4937         const size_t len = strlen(str);
 4938         vma_delete_array(allocs, str, len + 1);
 4943 template<typename T>
 4944 class VmaStlAllocator
 4947     const VkAllocationCallbacks* const m_pCallbacks;
 4948     typedef T value_type;
 4950     VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
 4951     template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
 4953     T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
 4954     void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
 4956     template<typename U>
 4957     bool operator==(const VmaStlAllocator<U>& rhs) const
 4959         return m_pCallbacks == rhs.m_pCallbacks;
 4961     template<typename U>
 4962     bool operator!=(const VmaStlAllocator<U>& rhs) const
 4964         return m_pCallbacks != rhs.m_pCallbacks;
 4967     VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
+ 4970 #if VMA_USE_STL_VECTOR
+
+ 4972 #define VmaVector std::vector
- 4974 #if VMA_USE_STL_VECTOR
-
- 4976 #define VmaVector std::vector
-
- 4978 template<
typename T,
typename allocatorT>
- 4979 static void VmaVectorInsert(std::vector<T, allocatorT>& vec,
size_t index,
const T& item)
-
- 4981 vec.insert(vec.begin() + index, item);
-
-
- 4984 template<
typename T,
typename allocatorT>
- 4985 static void VmaVectorRemove(std::vector<T, allocatorT>& vec,
size_t index)
-
- 4987 vec.erase(vec.begin() + index);
-
-
-
-
-
-
-
- 4995 template<
typename T,
typename AllocatorT>
-
-
-
- 4999 typedef T value_type;
-
- 5001 VmaVector(
const AllocatorT& allocator) :
- 5002 m_Allocator(allocator),
-
-
-
-
-
-
- 5009 VmaVector(
size_t count,
const AllocatorT& allocator) :
- 5010 m_Allocator(allocator),
- 5011 m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
-
-
-
-
-
-
-
- 5019 VmaVector(
size_t count,
const T& value,
const AllocatorT& allocator)
- 5020 : VmaVector(count, allocator) {}
-
- 5022 VmaVector(
const VmaVector<T, AllocatorT>& src) :
- 5023 m_Allocator(src.m_Allocator),
- 5024 m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
- 5025 m_Count(src.m_Count),
- 5026 m_Capacity(src.m_Count)
-
-
-
- 5030 memcpy(m_pArray, src.m_pArray, m_Count *
sizeof(T));
-
-
-
-
-
- 5036 VmaFree(m_Allocator.m_pCallbacks, m_pArray);
-
-
- 5039 VmaVector& operator=(
const VmaVector<T, AllocatorT>& rhs)
-
-
-
- 5043 resize(rhs.m_Count);
-
-
- 5046 memcpy(m_pArray, rhs.m_pArray, m_Count *
sizeof(T));
-
-
-
-
-
- 5052 bool empty()
const {
return m_Count == 0; }
- 5053 size_t size()
const {
return m_Count; }
- 5054 T* data() {
return m_pArray; }
- 5055 const T* data()
const {
return m_pArray; }
-
- 5057 T& operator[](
size_t index)
-
- 5059 VMA_HEAVY_ASSERT(index < m_Count);
- 5060 return m_pArray[index];
-
- 5062 const T& operator[](
size_t index)
const
-
- 5064 VMA_HEAVY_ASSERT(index < m_Count);
- 5065 return m_pArray[index];
-
-
-
-
- 5070 VMA_HEAVY_ASSERT(m_Count > 0);
-
-
- 5073 const T& front()
const
-
- 5075 VMA_HEAVY_ASSERT(m_Count > 0);
-
-
-
-
- 5080 VMA_HEAVY_ASSERT(m_Count > 0);
- 5081 return m_pArray[m_Count - 1];
-
- 5083 const T& back()
const
-
- 5085 VMA_HEAVY_ASSERT(m_Count > 0);
- 5086 return m_pArray[m_Count - 1];
-
+ 4974 template<
typename T,
typename allocatorT>
+ 4975 static void VmaVectorInsert(std::vector<T, allocatorT>& vec,
size_t index,
const T& item)
+
+ 4977 vec.insert(vec.begin() + index, item);
+
+
+ 4980 template<
typename T,
typename allocatorT>
+ 4981 static void VmaVectorRemove(std::vector<T, allocatorT>& vec,
size_t index)
+
+ 4983 vec.erase(vec.begin() + index);
+
+
+
+
+
+
+
+ 4991 template<
typename T,
typename AllocatorT>
+
+
+
+ 4995 typedef T value_type;
+
+ 4997 VmaVector(
const AllocatorT& allocator) :
+ 4998 m_Allocator(allocator),
+
+
+
+
+
+
+ 5005 VmaVector(
size_t count,
const AllocatorT& allocator) :
+ 5006 m_Allocator(allocator),
+ 5007 m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
+
+
+
+
+
+
+
+ 5015 VmaVector(
size_t count,
const T& value,
const AllocatorT& allocator)
+ 5016 : VmaVector(count, allocator) {}
+
+ 5018 VmaVector(
const VmaVector<T, AllocatorT>& src) :
+ 5019 m_Allocator(src.m_Allocator),
+ 5020 m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
+ 5021 m_Count(src.m_Count),
+ 5022 m_Capacity(src.m_Count)
+
+
+
+ 5026 memcpy(m_pArray, src.m_pArray, m_Count *
sizeof(T));
+
+
+
+
+
+ 5032 VmaFree(m_Allocator.m_pCallbacks, m_pArray);
+
+
+ 5035 VmaVector& operator=(
const VmaVector<T, AllocatorT>& rhs)
+
+
+
+ 5039 resize(rhs.m_Count);
+
+
+ 5042 memcpy(m_pArray, rhs.m_pArray, m_Count *
sizeof(T));
+
+
+
+
+
+ 5048 bool empty()
const {
return m_Count == 0; }
+ 5049 size_t size()
const {
return m_Count; }
+ 5050 T* data() {
return m_pArray; }
+ 5051 const T* data()
const {
return m_pArray; }
+
+ 5053 T& operator[](
size_t index)
+
+ 5055 VMA_HEAVY_ASSERT(index < m_Count);
+ 5056 return m_pArray[index];
+
+ 5058 const T& operator[](
size_t index)
const
+
+ 5060 VMA_HEAVY_ASSERT(index < m_Count);
+ 5061 return m_pArray[index];
+
+
+
+
+ 5066 VMA_HEAVY_ASSERT(m_Count > 0);
+
+
+ 5069 const T& front()
const
+
+ 5071 VMA_HEAVY_ASSERT(m_Count > 0);
+
+
+
+
+ 5076 VMA_HEAVY_ASSERT(m_Count > 0);
+ 5077 return m_pArray[m_Count - 1];
+
+ 5079 const T& back()
const
+
+ 5081 VMA_HEAVY_ASSERT(m_Count > 0);
+ 5082 return m_pArray[m_Count - 1];
+
+
+ 5085 void reserve(
size_t newCapacity,
bool freeMemory =
false)
+
+ 5087 newCapacity = VMA_MAX(newCapacity, m_Count);
- 5089 void reserve(
size_t newCapacity,
bool freeMemory =
false)
-
- 5091 newCapacity = VMA_MAX(newCapacity, m_Count);
-
- 5093 if((newCapacity < m_Capacity) && !freeMemory)
-
- 5095 newCapacity = m_Capacity;
-
-
- 5098 if(newCapacity != m_Capacity)
-
- 5100 T*
const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
-
-
- 5103 memcpy(newArray, m_pArray, m_Count *
sizeof(T));
-
- 5105 VmaFree(m_Allocator.m_pCallbacks, m_pArray);
- 5106 m_Capacity = newCapacity;
- 5107 m_pArray = newArray;
-
-
-
- 5111 void resize(
size_t newCount,
bool freeMemory =
false)
-
- 5113 size_t newCapacity = m_Capacity;
- 5114 if(newCount > m_Capacity)
+ 5089 if((newCapacity < m_Capacity) && !freeMemory)
+
+ 5091 newCapacity = m_Capacity;
+
+
+ 5094 if(newCapacity != m_Capacity)
+
+ 5096 T*
const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
+
+
+ 5099 memcpy(newArray, m_pArray, m_Count *
sizeof(T));
+
+ 5101 VmaFree(m_Allocator.m_pCallbacks, m_pArray);
+ 5102 m_Capacity = newCapacity;
+ 5103 m_pArray = newArray;
+
+
+
+ 5107 void resize(
size_t newCount,
bool freeMemory =
false)
+
+ 5109 size_t newCapacity = m_Capacity;
+ 5110 if(newCount > m_Capacity)
+
+ 5112 newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (
size_t)8));
+
+
- 5116 newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (
size_t)8));
+ 5116 newCapacity = newCount;
-
-
- 5120 newCapacity = newCount;
-
-
- 5123 if(newCapacity != m_Capacity)
-
- 5125 T*
const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
- 5126 const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
- 5127 if(elementsToCopy != 0)
-
- 5129 memcpy(newArray, m_pArray, elementsToCopy *
sizeof(T));
-
- 5131 VmaFree(m_Allocator.m_pCallbacks, m_pArray);
- 5132 m_Capacity = newCapacity;
- 5133 m_pArray = newArray;
-
-
-
-
-
- 5139 void clear(
bool freeMemory =
false)
-
- 5141 resize(0, freeMemory);
-
-
- 5144 void insert(
size_t index,
const T& src)
-
- 5146 VMA_HEAVY_ASSERT(index <= m_Count);
- 5147 const size_t oldCount = size();
- 5148 resize(oldCount + 1);
- 5149 if(index < oldCount)
-
- 5151 memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) *
sizeof(T));
-
- 5153 m_pArray[index] = src;
-
-
- 5156 void remove(
size_t index)
-
- 5158 VMA_HEAVY_ASSERT(index < m_Count);
- 5159 const size_t oldCount = size();
- 5160 if(index < oldCount - 1)
-
- 5162 memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) *
sizeof(T));
-
- 5164 resize(oldCount - 1);
-
-
- 5167 void push_back(
const T& src)
-
- 5169 const size_t newIndex = size();
- 5170 resize(newIndex + 1);
- 5171 m_pArray[newIndex] = src;
-
-
-
-
- 5176 VMA_HEAVY_ASSERT(m_Count > 0);
-
-
-
- 5180 void push_front(
const T& src)
-
-
-
-
-
-
- 5187 VMA_HEAVY_ASSERT(m_Count > 0);
-
-
-
- 5191 typedef T* iterator;
-
- 5193 iterator begin() {
return m_pArray; }
- 5194 iterator end() {
return m_pArray + m_Count; }
-
-
- 5197 AllocatorT m_Allocator;
-
-
-
-
-
- 5203 template<
typename T,
typename allocatorT>
- 5204 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec,
size_t index,
const T& item)
-
- 5206 vec.insert(index, item);
-
-
- 5209 template<
typename T,
typename allocatorT>
- 5210 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec,
size_t index)
-
-
-
-
-
-
- 5217 template<
typename CmpLess,
typename VectorT>
- 5218 size_t VmaVectorInsertSorted(VectorT& vector,
const typename VectorT::value_type& value)
-
- 5220 const size_t indexToInsert = VmaBinaryFindFirstNotLess(
-
- 5222 vector.data() + vector.size(),
-
- 5224 CmpLess()) - vector.data();
- 5225 VmaVectorInsert(vector, indexToInsert, value);
- 5226 return indexToInsert;
-
-
- 5229 template<
typename CmpLess,
typename VectorT>
- 5230 bool VmaVectorRemoveSorted(VectorT& vector,
const typename VectorT::value_type& value)
-
-
- 5233 typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
-
-
-
-
- 5238 if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
-
- 5240 size_t indexToRemove = it - vector.begin();
- 5241 VmaVectorRemove(vector, indexToRemove);
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- 5258 template<
typename T,
typename AllocatorT,
size_t N>
- 5259 class VmaSmallVector
-
-
- 5262 typedef T value_type;
-
- 5264 VmaSmallVector(
const AllocatorT& allocator) :
-
- 5266 m_DynamicArray(allocator)
-
-
- 5269 VmaSmallVector(
size_t count,
const AllocatorT& allocator) :
-
- 5271 m_DynamicArray(count > N ? count : 0, allocator)
-
-
- 5274 template<
typename SrcT,
typename SrcAllocatorT,
size_t SrcN>
- 5275 VmaSmallVector(
const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>& src) =
delete;
- 5276 template<
typename SrcT,
typename SrcAllocatorT,
size_t SrcN>
- 5277 VmaSmallVector<T, AllocatorT, N>& operator=(
const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>& rhs) =
delete;
-
- 5279 bool empty()
const {
return m_Count == 0; }
- 5280 size_t size()
const {
return m_Count; }
- 5281 T* data() {
return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
- 5282 const T* data()
const {
return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
-
- 5284 T& operator[](
size_t index)
-
- 5286 VMA_HEAVY_ASSERT(index < m_Count);
- 5287 return data()[index];
-
- 5289 const T& operator[](
size_t index)
const
-
- 5291 VMA_HEAVY_ASSERT(index < m_Count);
- 5292 return data()[index];
-
-
-
-
- 5297 VMA_HEAVY_ASSERT(m_Count > 0);
-
-
- 5300 const T& front()
const
-
- 5302 VMA_HEAVY_ASSERT(m_Count > 0);
-
-
-
-
- 5307 VMA_HEAVY_ASSERT(m_Count > 0);
- 5308 return data()[m_Count - 1];
-
- 5310 const T& back()
const
-
- 5312 VMA_HEAVY_ASSERT(m_Count > 0);
- 5313 return data()[m_Count - 1];
-
-
- 5316 void resize(
size_t newCount,
bool freeMemory =
false)
-
- 5318 if(newCount > N && m_Count > N)
-
-
- 5321 m_DynamicArray.resize(newCount, freeMemory);
-
- 5323 else if(newCount > N && m_Count <= N)
-
-
- 5326 m_DynamicArray.resize(newCount, freeMemory);
-
-
- 5329 memcpy(m_DynamicArray.data(), m_StaticArray, m_Count *
sizeof(T));
-
-
- 5332 else if(newCount <= N && m_Count > N)
-
-
-
-
- 5337 memcpy(m_StaticArray, m_DynamicArray.data(), newCount *
sizeof(T));
-
- 5339 m_DynamicArray.resize(0, freeMemory);
+ [renumbered listing resumes inside VmaVector<T, AllocatorT>::resize():]

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
            if(elementsToCopy != 0)
            {
                memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }
        m_Count = newCount;
    }

    void clear(bool freeMemory = false)
    {
        resize(0, freeMemory);
    }

    void insert(size_t index, const T& src)
    {
        VMA_HEAVY_ASSERT(index <= m_Count);
        const size_t oldCount = size();
        resize(oldCount + 1);
        if(index < oldCount)
        {
            memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
        }
        m_pArray[index] = src;
    }

    void remove(size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        const size_t oldCount = size();
        if(index < oldCount - 1)
        {
            memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
        }
        resize(oldCount - 1);
    }

    void push_back(const T& src)
    {
        const size_t newIndex = size();
        resize(newIndex + 1);
        m_pArray[newIndex] = src;
    }

    void pop_back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        resize(size() - 1);
    }

    void push_front(const T& src)
    {
        insert(0, src);
    }

    void pop_front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        remove(0);
    }

    typedef T* iterator;

    iterator begin() { return m_pArray; }
    iterator end() { return m_pArray + m_Count; }

private:
    AllocatorT m_Allocator;
    T* m_pArray;
    size_t m_Count;
    size_t m_Capacity;
};

template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}

template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        CmpLess()) - vector.data();
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}

template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    {
        size_t indexToRemove = it - vector.begin();
        VmaVectorRemove(vector, indexToRemove);
        return true;
    }
    return false;
}
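VmaVectorInsertSorted and VmaVectorRemoveSorted keep a VmaVector usable as a sorted set: both binary-search for the first element not less than the value, and the remove path tests equivalence with the comparator alone, so element types only need an ordering (CmpLess), not operator==. A standalone sketch of the same discipline using std::vector and std::lower_bound (illustrative names, not VMA API):

// Standalone sketch, not part of VMA: the equivalence test
// !(a < b) && !(b < a) mirrors VmaVectorRemoveSorted.
#include <vector>
#include <algorithm>
#include <cassert>

template<typename T, typename CmpLess>
size_t InsertSorted(std::vector<T>& v, const T& value, CmpLess cmp)
{
    // First element not less than value; inserting here keeps v sorted.
    const auto it = std::lower_bound(v.begin(), v.end(), value, cmp);
    const size_t index = it - v.begin();
    v.insert(it, value);
    return index;
}

template<typename T, typename CmpLess>
bool RemoveSorted(std::vector<T>& v, const T& value, CmpLess cmp)
{
    const auto it = std::lower_bound(v.begin(), v.end(), value, cmp);
    // Equivalence without operator==: neither element orders before the other.
    if(it != v.end() && !cmp(*it, value) && !cmp(value, *it))
    {
        v.erase(it);
        return true;
    }
    return false;
}

int main()
{
    std::vector<int> v;
    InsertSorted(v, 3, std::less<int>());
    InsertSorted(v, 1, std::less<int>());
    InsertSorted(v, 2, std::less<int>());
    assert(v[0] == 1 && v[1] == 2 && v[2] == 3);
    assert(RemoveSorted(v, 2, std::less<int>()));
    assert(!RemoveSorted(v, 5, std::less<int>()));
    return 0;
}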
- [previous listing lines 5348-5445 removed: the same code at the old numbering]
+ [renumbered listing:]

template<typename T, typename AllocatorT, size_t N>
class VmaSmallVector
{
public:
    typedef T value_type;

    VmaSmallVector(const AllocatorT& allocator) :
        m_Count(0),
        m_DynamicArray(allocator)
    {
    }
    VmaSmallVector(size_t count, const AllocatorT& allocator) :
        m_Count(count),
        m_DynamicArray(count > N ? count : 0, allocator)
    {
    }
    template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
    VmaSmallVector(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>& src) = delete;
    template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
    VmaSmallVector<T, AllocatorT, N>& operator=(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>& rhs) = delete;

    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    T* data() { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
    const T* data() const { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }

    T& operator[](size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return data()[index];
    }
    const T& operator[](size_t index) const
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return data()[index];
    }

    T& front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return data()[0];
    }
    const T& front() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return data()[0];
    }
    T& back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return data()[m_Count - 1];
    }
    const T& back() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return data()[m_Count - 1];
    }

    void resize(size_t newCount, bool freeMemory = false)
    {
        if(newCount > N && m_Count > N)
        {
            // Any direction, staying in m_DynamicArray.
            m_DynamicArray.resize(newCount, freeMemory);
        }
        else if(newCount > N && m_Count <= N)
        {
            // Growing, moving from m_StaticArray to m_DynamicArray.
            m_DynamicArray.resize(newCount, freeMemory);
            if(m_Count > 0)
            {
                memcpy(m_DynamicArray.data(), m_StaticArray, m_Count * sizeof(T));
            }
        }
        else if(newCount <= N && m_Count > N)
        {
            // Shrinking, moving from m_DynamicArray to m_StaticArray.
            if(newCount > 0)
            {
                memcpy(m_StaticArray, m_DynamicArray.data(), newCount * sizeof(T));
            }
            m_DynamicArray.resize(0, freeMemory);
        }
        else
        {
            // Any direction, staying in m_StaticArray - nothing to do here.
        }
        m_Count = newCount;
    }

    void clear(bool freeMemory = false)
    {
        m_DynamicArray.clear(freeMemory);
        m_Count = 0;
    }

    void insert(size_t index, const T& src)
    {
        VMA_HEAVY_ASSERT(index <= m_Count);
        const size_t oldCount = size();
        resize(oldCount + 1);
        T* const dataPtr = data();
        if(index < oldCount)
        {
            memmove(dataPtr + (index + 1), dataPtr + index, (oldCount - index) * sizeof(T));
        }
        dataPtr[index] = src;
    }

    void remove(size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        const size_t oldCount = size();
        if(index < oldCount - 1)
        {
            T* const dataPtr = data();
            memmove(dataPtr + index, dataPtr + (index + 1), (oldCount - index - 1) * sizeof(T));
        }
        resize(oldCount - 1);
    }

    void push_back(const T& src)
    {
        const size_t newIndex = size();
        resize(newIndex + 1);
        data()[newIndex] = src;
    }

    void pop_back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        resize(size() - 1);
    }

    void push_front(const T& src)
    {
        insert(0, src);
    }

    void pop_front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        remove(0);
    }

    typedef T* iterator;

    iterator begin() { return data(); }
    iterator end() { return data() + m_Count; }

private:
    size_t m_Count;
    T m_StaticArray[N]; // Used when m_Count <= N.
    VmaVector<T, AllocatorT> m_DynamicArray; // Used when m_Count > N.
};
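VmaSmallVector is a small-buffer-optimized container: up to N elements live in the inline m_StaticArray, larger counts spill into the heap-backed m_DynamicArray, and resize() memcpy's across the boundary, which is why it is only suitable for trivially copyable T. A minimal standalone sketch of the same storage switch (illustrative, not the VMA class):

// Standalone sketch of small-buffer storage switching, assuming trivially
// copyable T; all names here are illustrative.
#include <cstring>
#include <cstdlib>
#include <cassert>

template<typename T, size_t N>
class SmallVec
{
public:
    ~SmallVec() { free(m_Heap); }
    size_t size() const { return m_Count; }
    T* data() { return m_Count > N ? m_Heap : m_Static; }

    void resize(size_t newCount)
    {
        if(newCount > N && m_Count <= N)
        {
            // Growing past N: spill the inline elements to the heap.
            m_Heap = static_cast<T*>(malloc(newCount * sizeof(T)));
            memcpy(m_Heap, m_Static, m_Count * sizeof(T));
        }
        else if(newCount > N && m_Count > N)
        {
            m_Heap = static_cast<T*>(realloc(m_Heap, newCount * sizeof(T)));
        }
        else if(newCount <= N && m_Count > N)
        {
            // Shrinking to N or fewer: copy back into the inline array.
            memcpy(m_Static, m_Heap, newCount * sizeof(T));
            free(m_Heap);
            m_Heap = nullptr;
        }
        m_Count = newCount;
    }

    void push_back(const T& v) { resize(m_Count + 1); data()[m_Count - 1] = v; }

private:
    size_t m_Count = 0;
    T m_Static[N];
    T* m_Heap = nullptr;
};

int main()
{
    SmallVec<int, 4> v;
    for(int i = 0; i < 8; ++i) v.push_back(i); // crosses the inline/heap boundary at the 5th element
    assert(v.size() == 8 && v.data()[7] == 7);
    return 0;
}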
- [previous listing lines 5448-5540 removed: the same VmaPoolAllocator code at the old numbering]
+ [renumbered listing:]

/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
    ~VmaPoolAllocator();
    template<typename... Types> T* Alloc(Types... args);
    void Free(T* ptr);

private:
    union Item
    {
        uint32_t NextFreeIndex;
        alignas(T) char Value[sizeof(T)];
    };

    struct ItemBlock
    {
        Item* pItems;
        uint32_t Capacity;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    const uint32_t m_FirstBlockCapacity;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};

template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_FirstBlockCapacity(firstBlockCapacity),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(m_FirstBlockCapacity > 1);
}

template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
    m_ItemBlocks.clear();
}

template<typename T>
template<typename... Types> T* VmaPoolAllocator<T>::Alloc(Types... args)
{
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            block.FirstFreeIndex = pItem->NextFreeIndex;
            T* result = (T*)&pItem->Value;
            new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
            return result;
        }
    }

    // No block has free item: Create new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    T* result = (T*)&pItem->Value;
    new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
    return result;
}

template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
        {
            ptr->~T(); // Explicit destructor call.
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}

template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
        m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;

    const ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
        newBlockCapacity,
        0 };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}
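VmaPoolAllocator threads a singly linked free list through the unused slots of each block (the union overlays NextFreeIndex on the object storage), so Alloc() pops the head in O(1); blocks grow by a 3/2 factor and Free() searches blocks newest-first for the owning range, while the memcpy of the pointer avoids a type-punning cast. A standalone single-block sketch of the free-list mechanics (illustrative, not the VMA class):

// Standalone sketch: one fixed-capacity block showing O(1) alloc/free.
#include <cstdint>
#include <new>
#include <cassert>

struct Payload { int x, y; };

class FixedPool
{
public:
    static const uint32_t CAPACITY = 64;

    FixedPool()
    {
        // Thread a singly linked free list through the unused slots.
        for(uint32_t i = 0; i < CAPACITY - 1; ++i)
            m_Items[i].NextFreeIndex = i + 1;
        m_Items[CAPACITY - 1].NextFreeIndex = UINT32_MAX;
        m_FirstFreeIndex = 0;
    }

    Payload* Alloc()
    {
        if(m_FirstFreeIndex == UINT32_MAX)
            return nullptr; // Full; VMA would create another, 1.5x larger block.
        Item* const item = &m_Items[m_FirstFreeIndex];
        m_FirstFreeIndex = item->NextFreeIndex;
        return new(item->Value) Payload(); // Placement-new into the slot.
    }

    void Free(Payload* p)
    {
        p->~Payload();
        Item* const item = reinterpret_cast<Item*>(p);
        const uint32_t index = static_cast<uint32_t>(item - m_Items);
        assert(index < CAPACITY);
        item->NextFreeIndex = m_FirstFreeIndex; // Push slot back on the free list.
        m_FirstFreeIndex = index;
    }

private:
    union Item
    {
        uint32_t NextFreeIndex;                        // Valid while the slot is free.
        alignas(Payload) char Value[sizeof(Payload)];  // Valid while allocated.
    };

    Item m_Items[CAPACITY];
    uint32_t m_FirstFreeIndex;
};

int main()
{
    FixedPool pool;
    Payload* a = pool.Alloc();
    Payload* b = pool.Alloc();
    pool.Free(a);
    Payload* c = pool.Alloc(); // Reuses the slot a occupied (LIFO free list).
    assert(c == a && b != c);
    return 0;
}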
- [previous listing lines 5546-6007 removed: the same VmaRawList/VmaList code at the old numbering]
+ [renumbered listing:]

#if VMA_USE_STL_LIST

#define VmaList std::list

#else // #if VMA_USE_STL_LIST

template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};

// Doubly linked list.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;
};

template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}

template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
}

template<typename T>
void VmaRawList<T>::Clear()
{
    if(IsEmpty() == false)
    {
        ItemType* pItem = m_pBack;
        while(pItem != VMA_NULL)
        {
            ItemType* const pPrevItem = pItem->pPrev;
            m_ItemAllocator.Free(pItem);
            pItem = pPrevItem;
        }
        m_pFront = VMA_NULL;
        m_pBack = VMA_NULL;
        m_Count = 0;
    }
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pNext = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pPrev = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pPrev = m_pBack;
        m_pBack->pNext = pNewItem;
        m_pBack = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pPrev = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pNext = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pNext = m_pFront;
        m_pFront->pPrev = pNewItem;
        m_pFront = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
void VmaRawList<T>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pBackItem = m_pBack;
    ItemType* const pPrevItem = pBackItem->pPrev;
    if(pPrevItem != VMA_NULL)
    {
        pPrevItem->pNext = VMA_NULL;
    }
    m_pBack = pPrevItem;
    m_ItemAllocator.Free(pBackItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pFrontItem = m_pFront;
    ItemType* const pNextItem = pFrontItem->pNext;
    if(pNextItem != VMA_NULL)
    {
        pNextItem->pPrev = VMA_NULL;
    }
    m_pFront = pNextItem;
    m_ItemAllocator.Free(pFrontItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};

#endif // #if VMA_USE_STL_LIST
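VmaList is an STL-flavored facade over VmaRawList: nodes come from a VmaPoolAllocator (first block of 128 items), end() is represented by a null item, and operator-- from end() steps to Back(). A usage sketch, assuming the surrounding VMA definitions (VmaList, VmaStlAllocator, VMA_ASSERT, VkAllocationCallbacks) are in scope:

// Usage sketch only; iteration and erase mirror std::list, but nodes are
// recycled through the pool allocator rather than the system heap.
void SketchListUsage(const VkAllocationCallbacks* pCallbacks)
{
    VmaStlAllocator<int> alloc(pCallbacks);
    VmaList<int, VmaStlAllocator<int>> list(alloc);

    list.push_back(1);
    list.push_back(2);
    list.push_back(3);

    // Erase the middle element, as with std::list; the iterator is not
    // used after erase().
    for(VmaList<int, VmaStlAllocator<int>>::iterator it = list.begin(); it != list.end(); ++it)
    {
        if(*it == 2)
        {
            list.erase(it);
            break;
        }
    }
    VMA_ASSERT(list.size() == 2);
}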
- [previous listing lines 6010-6074 removed: the VmaList m_RawList member, the VMA_USE_STL_UNORDERED_MAP block, VmaPair, VmaMap, and VmaPairFirstLess at the old numbering; they reappear renumbered below]
+ [new in this version of the header:]

/*
Expected interface of ItemTypeTraits:
struct MyItemTypeTraits
{
    typedef MyItem ItemType;
    static ItemType* GetPrev(const ItemType* item) { return item->myPrevPtr; }
    static ItemType* GetNext(const ItemType* item) { return item->myNextPtr; }
    static ItemType*& AccessPrev(ItemType* item) { return item->myPrevPtr; }
    static ItemType*& AccessNext(ItemType* item) { return item->myNextPtr; }
};
*/
template<typename ItemTypeTraits>
class VmaIntrusiveLinkedList
{
public:
    typedef typename ItemTypeTraits::ItemType ItemType;
    static ItemType* GetPrev(const ItemType* item) { return ItemTypeTraits::GetPrev(item); }
    static ItemType* GetNext(const ItemType* item) { return ItemTypeTraits::GetNext(item); }
    // Movable, not copyable.
    VmaIntrusiveLinkedList() { }
    VmaIntrusiveLinkedList(const VmaIntrusiveLinkedList<ItemTypeTraits>& src) = delete;
    VmaIntrusiveLinkedList(VmaIntrusiveLinkedList<ItemTypeTraits>&& src) :
        m_Front(src.m_Front), m_Back(src.m_Back), m_Count(src.m_Count)
    {
        src.m_Front = src.m_Back = VMA_NULL;
        src.m_Count = 0;
    }
    ~VmaIntrusiveLinkedList()
    {
        VMA_HEAVY_ASSERT(IsEmpty());
    }
    VmaIntrusiveLinkedList<ItemTypeTraits>& operator=(const VmaIntrusiveLinkedList<ItemTypeTraits>& src) = delete;
    VmaIntrusiveLinkedList<ItemTypeTraits>& operator=(VmaIntrusiveLinkedList<ItemTypeTraits>&& src)
    {
        if(&src != this)
        {
            VMA_HEAVY_ASSERT(IsEmpty());
            m_Front = src.m_Front;
            m_Back = src.m_Back;
            m_Count = src.m_Count;
            src.m_Front = src.m_Back = VMA_NULL;
            src.m_Count = 0;
        }
        return *this;
    }
    void RemoveAll()
    {
        if(!IsEmpty())
        {
            ItemType* item = m_Back;
            while(item != VMA_NULL)
            {
                ItemType* const prevItem = ItemTypeTraits::AccessPrev(item);
                ItemTypeTraits::AccessPrev(item) = VMA_NULL;
                ItemTypeTraits::AccessNext(item) = VMA_NULL;
                item = prevItem;
            }
            m_Front = VMA_NULL;
            m_Back = VMA_NULL;
            m_Count = 0;
        }
    }
    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }
    ItemType* Front() { return m_Front; }
    const ItemType* Front() const { return m_Front; }
    ItemType* Back() { return m_Back; }
    const ItemType* Back() const { return m_Back; }
    void PushBack(ItemType* item)
    {
        VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL);
        if(IsEmpty())
        {
            m_Front = item;
            m_Back = item;
            m_Count = 1;
        }
        else
        {
            ItemTypeTraits::AccessPrev(item) = m_Back;
            ItemTypeTraits::AccessNext(m_Back) = item;
            m_Back = item;
            ++m_Count;
        }
    }
    void PushFront(ItemType* item)
    {
        VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL);
        if(IsEmpty())
        {
            m_Front = item;
            m_Back = item;
            m_Count = 1;
        }
        else
        {
            ItemTypeTraits::AccessNext(item) = m_Front;
            ItemTypeTraits::AccessPrev(m_Front) = item;
            m_Front = item;
            ++m_Count;
        }
    }
    ItemType* PopBack()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        ItemType* const backItem = m_Back;
        ItemType* const prevItem = ItemTypeTraits::GetPrev(backItem);
        if(prevItem != VMA_NULL)
        {
            ItemTypeTraits::AccessNext(prevItem) = VMA_NULL;
        }
        m_Back = prevItem;
        --m_Count;
        ItemTypeTraits::AccessPrev(backItem) = VMA_NULL;
        ItemTypeTraits::AccessNext(backItem) = VMA_NULL;
        return backItem;
    }
    ItemType* PopFront()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        ItemType* const frontItem = m_Front;
        ItemType* const nextItem = ItemTypeTraits::GetNext(frontItem);
        if(nextItem != VMA_NULL)
        {
            ItemTypeTraits::AccessPrev(nextItem) = VMA_NULL;
        }
        m_Front = nextItem;
        --m_Count;
        ItemTypeTraits::AccessPrev(frontItem) = VMA_NULL;
        ItemTypeTraits::AccessNext(frontItem) = VMA_NULL;
        return frontItem;
    }

    // MyItem can be null - it means PushBack.
    void InsertBefore(ItemType* existingItem, ItemType* newItem)
    {
        VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL);
        if(existingItem != VMA_NULL)
        {
            ItemType* const prevItem = ItemTypeTraits::GetPrev(existingItem);
            ItemTypeTraits::AccessPrev(newItem) = prevItem;
            ItemTypeTraits::AccessNext(newItem) = existingItem;
            ItemTypeTraits::AccessPrev(existingItem) = newItem;
            if(prevItem != VMA_NULL)
            {
                ItemTypeTraits::AccessNext(prevItem) = newItem;
            }
            else
            {
                VMA_HEAVY_ASSERT(m_Front == existingItem);
                m_Front = newItem;
            }
            ++m_Count;
        }
        else
            PushBack(newItem);
    }
    // MyItem can be null - it means PushFront.
    void InsertAfter(ItemType* existingItem, ItemType* newItem)
    {
        VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL);
        if(existingItem != VMA_NULL)
        {
            ItemType* const nextItem = ItemTypeTraits::GetNext(existingItem);
            ItemTypeTraits::AccessNext(newItem) = nextItem;
            ItemTypeTraits::AccessPrev(newItem) = existingItem;
            ItemTypeTraits::AccessNext(existingItem) = newItem;
            if(nextItem != VMA_NULL)
            {
                ItemTypeTraits::AccessPrev(nextItem) = newItem;
            }
            else
            {
                VMA_HEAVY_ASSERT(m_Back == existingItem);
                m_Back = newItem;
            }
            ++m_Count;
        }
        else
            return PushFront(newItem);
    }
    void Remove(ItemType* item)
    {
        VMA_HEAVY_ASSERT(item != VMA_NULL && m_Count > 0);
        if(ItemTypeTraits::GetPrev(item) != VMA_NULL)
        {
            ItemTypeTraits::AccessNext(ItemTypeTraits::AccessPrev(item)) = ItemTypeTraits::GetNext(item);
        }
        else
        {
            VMA_HEAVY_ASSERT(m_Front == item);
            m_Front = ItemTypeTraits::GetNext(item);
        }

        if(ItemTypeTraits::GetNext(item) != VMA_NULL)
        {
            ItemTypeTraits::AccessPrev(ItemTypeTraits::AccessNext(item)) = ItemTypeTraits::GetPrev(item);
        }
        else
        {
            VMA_HEAVY_ASSERT(m_Back == item);
            m_Back = ItemTypeTraits::GetPrev(item);
        }
        ItemTypeTraits::AccessPrev(item) = VMA_NULL;
        ItemTypeTraits::AccessNext(item) = VMA_NULL;
        --m_Count;
    }

private:
    ItemType* m_Front = VMA_NULL;
    ItemType* m_Back = VMA_NULL;
    size_t m_Count = 0;
};

- [previous listing lines 6078-6372 removed: the VmaMap implementation, VmaAllocation_T, and VmaSuballocation at the old numbering; they reappear renumbered below]
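VmaIntrusiveLinkedList stores no nodes of its own: the prev/next links live inside the items and are reached through the ItemTypeTraits accessors, so linking and unlinking never allocate and Remove() is O(1) given only the item pointer; the destructor asserts emptiness because the list does not own its items. A standalone sketch of the traits contract (MyItem and MyItemTraits are illustrative names, not from the source):

// Sketch only: any type exposing prev/next pointers can sit on a
// VmaIntrusiveLinkedList without per-node allocation.
struct MyItem
{
    int value = 0;
    MyItem* pPrev = nullptr;
    MyItem* pNext = nullptr;
};

struct MyItemTraits
{
    typedef MyItem ItemType;
    static ItemType* GetPrev(const ItemType* item) { return item->pPrev; }
    static ItemType* GetNext(const ItemType* item) { return item->pNext; }
    static ItemType*& AccessPrev(ItemType* item) { return item->pPrev; }
    static ItemType*& AccessNext(ItemType* item) { return item->pNext; }
};

void SketchIntrusiveListUsage()
{
    MyItem a, b, c; // items own their own storage; the list only links them
    VmaIntrusiveLinkedList<MyItemTraits> list;
    list.PushBack(&a);
    list.PushBack(&b);
    list.PushBack(&c);
    list.Remove(&b);  // O(1): no search, the links are inside the item
    list.RemoveAll(); // the list must be empty before it is destroyed
}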
+ [renumbered listing:]

#if VMA_USE_STL_UNORDERED_MAP

#define VmaPair std::pair

#define VMA_MAP_TYPE(KeyT, ValueT) \
    std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >

#else // #if VMA_USE_STL_UNORDERED_MAP

template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};

/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in union.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};

#define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>

template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}

template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}

#endif // #if VMA_USE_STL_UNORDERED_MAP
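VmaMap is a flat map: pairs are kept sorted by key in a VmaVector, so find() is a binary search in O(log n) while insert() and erase() shift elements in O(n), a good trade for the small maps it serves. A usage sketch, assuming the surrounding VMA definitions are in scope:

// Usage sketch only; VmaMap supports a subset of std::unordered_map's interface.
void SketchMapUsage(const VkAllocationCallbacks* pCallbacks)
{
    VmaMap<uint32_t, float> map{ VmaStlAllocator<VmaPair<uint32_t, float>>(pCallbacks) };

    map.insert(VmaPair<uint32_t, float>(7u, 1.5f));
    map.insert(VmaPair<uint32_t, float>(3u, 0.5f));

    VmaMap<uint32_t, float>::iterator it = map.find(3u); // binary search
    if(it != map.end())
    {
        it->second += 1.0f;
        map.erase(it); // shifts the tail of the vector left
    }
}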
+ 6331 class VmaDeviceMemoryBlock;
+
+ 6333 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
+
+ 6335 struct VmaAllocation_T
+
+
+ 6338 static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
+
+
+
+ 6342 FLAG_USER_DATA_STRING = 0x01,
+
+
+
+ 6346 enum ALLOCATION_TYPE
+
+ 6348 ALLOCATION_TYPE_NONE,
+ 6349 ALLOCATION_TYPE_BLOCK,
+ 6350 ALLOCATION_TYPE_DEDICATED,
+
+
+
+
+
+
+ 6357 VmaAllocation_T(uint32_t currentFrameIndex,
bool userDataString) :
+
+
+ 6360 m_pUserData{VMA_NULL},
+ 6361 m_LastUseFrameIndex{currentFrameIndex},
+ 6362 m_MemoryTypeIndex{0},
+ 6363 m_Type{(uint8_t)ALLOCATION_TYPE_NONE},
+ 6364 m_SuballocationType{(uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN},
+
+ 6366 m_Flags{userDataString ? (uint8_t)FLAG_USER_DATA_STRING : (uint8_t)0}
+
+ 6368 #if VMA_STATS_STRING_ENABLED
+ 6369 m_CreationFrameIndex = currentFrameIndex;
+ 6370 m_BufferImageUsage = 0;
+
+
-
- 6375 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
-
- 6377 enum class VmaAllocationRequestType
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- 6399 struct VmaAllocationRequest
-
- 6401 VkDeviceSize offset;
- 6402 VkDeviceSize sumFreeSize;
- 6403 VkDeviceSize sumItemSize;
- 6404 VmaSuballocationList::iterator item;
- 6405 size_t itemsToMakeLostCount;
-
- 6407 VmaAllocationRequestType type;
-
- 6409 VkDeviceSize CalcCost()
const
-
- 6411 return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
-
-
-
-
-
-
-
- 6419 class VmaBlockMetadata
-
-
-
- 6423 virtual ~VmaBlockMetadata() { }
- 6424 virtual void Init(VkDeviceSize size) { m_Size = size; }
-
-
- 6427 virtual bool Validate()
const = 0;
- 6428 VkDeviceSize GetSize()
const {
return m_Size; }
- 6429 virtual size_t GetAllocationCount()
const = 0;
- 6430 virtual VkDeviceSize GetSumFreeSize()
const = 0;
- 6431 virtual VkDeviceSize GetUnusedRangeSizeMax()
const = 0;
-
- 6433 virtual bool IsEmpty()
const = 0;
-
- 6435 virtual void CalcAllocationStatInfo(
VmaStatInfo& outInfo)
const = 0;
-
- 6437 virtual void AddPoolStats(
VmaPoolStats& inoutStats)
const = 0;
-
- 6439 #if VMA_STATS_STRING_ENABLED
- 6440 virtual void PrintDetailedMap(
class VmaJsonWriter& json)
const = 0;
-
-
-
-
-
- 6446 virtual bool CreateAllocationRequest(
- 6447 uint32_t currentFrameIndex,
- 6448 uint32_t frameInUseCount,
- 6449 VkDeviceSize bufferImageGranularity,
- 6450 VkDeviceSize allocSize,
- 6451 VkDeviceSize allocAlignment,
-
- 6453 VmaSuballocationType allocType,
- 6454 bool canMakeOtherLost,
-
-
- 6457 VmaAllocationRequest* pAllocationRequest) = 0;
-
- 6459 virtual bool MakeRequestedAllocationsLost(
- 6460 uint32_t currentFrameIndex,
- 6461 uint32_t frameInUseCount,
- 6462 VmaAllocationRequest* pAllocationRequest) = 0;
-
- 6464 virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
-
- 6466 virtual VkResult CheckCorruption(
const void* pBlockData) = 0;
-
-
-
- 6470 const VmaAllocationRequest& request,
- 6471 VmaSuballocationType type,
- 6472 VkDeviceSize allocSize,
-
-
-
-
- 6477 virtual void FreeAtOffset(VkDeviceSize offset) = 0;
-
-
- 6480 const VkAllocationCallbacks* GetAllocationCallbacks()
const {
return m_pAllocationCallbacks; }
-
- 6482 #if VMA_STATS_STRING_ENABLED
- 6483 void PrintDetailedMap_Begin(
class VmaJsonWriter& json,
- 6484 VkDeviceSize unusedBytes,
- 6485 size_t allocationCount,
- 6486 size_t unusedRangeCount)
const;
- 6487 void PrintDetailedMap_Allocation(
class VmaJsonWriter& json,
- 6488 VkDeviceSize offset,
-
- 6490 void PrintDetailedMap_UnusedRange(
class VmaJsonWriter& json,
- 6491 VkDeviceSize offset,
- 6492 VkDeviceSize size)
const;
- 6493 void PrintDetailedMap_End(
class VmaJsonWriter& json)
const;
-
+
+
+ 6376 VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 &&
"Allocation was not unmapped before destruction.");
+
+
+ 6379 VMA_ASSERT(m_pUserData == VMA_NULL);
+
+
+ 6382 void InitBlockAllocation(
+ 6383 VmaDeviceMemoryBlock* block,
+ 6384 VkDeviceSize offset,
+ 6385 VkDeviceSize alignment,
+
+ 6387 uint32_t memoryTypeIndex,
+ 6388 VmaSuballocationType suballocationType,
+
+
+
+ 6392 VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
+ 6393 VMA_ASSERT(block != VMA_NULL);
+ 6394 m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
+ 6395 m_Alignment = alignment;
+
+ 6397 m_MemoryTypeIndex = memoryTypeIndex;
+ 6398 m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
+ 6399 m_SuballocationType = (uint8_t)suballocationType;
+ 6400 m_BlockAllocation.m_Block = block;
+ 6401 m_BlockAllocation.m_Offset = offset;
+ 6402 m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
+
+
+
+
+ 6407 VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
+ 6408 VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
+ 6409 m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
+ 6410 m_MemoryTypeIndex = 0;
+ 6411 m_BlockAllocation.m_Block = VMA_NULL;
+ 6412 m_BlockAllocation.m_Offset = 0;
+ 6413 m_BlockAllocation.m_CanBecomeLost =
true;
+
+
+ 6416 void ChangeBlockAllocation(
+
+ 6418 VmaDeviceMemoryBlock* block,
+ 6419 VkDeviceSize offset);
+
+ 6421 void ChangeOffset(VkDeviceSize newOffset);
+
+
+ 6424 void InitDedicatedAllocation(
+ 6425 uint32_t memoryTypeIndex,
+ 6426 VkDeviceMemory hMemory,
+ 6427 VmaSuballocationType suballocationType,
+
+
+
+ 6431 VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
+ 6432 VMA_ASSERT(hMemory != VK_NULL_HANDLE);
+ 6433 m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
+
+
+ 6436 m_MemoryTypeIndex = memoryTypeIndex;
+ 6437 m_SuballocationType = (uint8_t)suballocationType;
+ 6438 m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
+ 6439 m_DedicatedAllocation.m_hMemory = hMemory;
+ 6440 m_DedicatedAllocation.m_pMappedData = pMappedData;
+ 6441 m_DedicatedAllocation.m_Prev = VMA_NULL;
+ 6442 m_DedicatedAllocation.m_Next = VMA_NULL;
+
+
+ 6445 ALLOCATION_TYPE GetType()
const {
return (ALLOCATION_TYPE)m_Type; }
+ 6446 VkDeviceSize GetAlignment()
const {
return m_Alignment; }
+ 6447 VkDeviceSize GetSize()
const {
return m_Size; }
+ 6448 bool IsUserDataString()
const {
return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
+ 6449 void* GetUserData()
const {
return m_pUserData; }
+ 6450 void SetUserData(
VmaAllocator hAllocator,
void* pUserData);
+ 6451 VmaSuballocationType GetSuballocationType()
const {
return (VmaSuballocationType)m_SuballocationType; }
+
+ 6453 VmaDeviceMemoryBlock* GetBlock()
const
+
+ 6455 VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
+ 6456 return m_BlockAllocation.m_Block;
+
+ 6458 VkDeviceSize GetOffset()
const;
+ 6459 VkDeviceMemory GetMemory()
const;
+ 6460 uint32_t GetMemoryTypeIndex()
const {
return m_MemoryTypeIndex; }
+ 6461 bool IsPersistentMap()
const {
return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
+ 6462 void* GetMappedData()
const;
+ 6463 bool CanBecomeLost()
const;
+
+ 6465 uint32_t GetLastUseFrameIndex()
const
+
+ 6467 return m_LastUseFrameIndex.load();
+
+ 6469 bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
+
+ 6471 return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
+
+
+
+
+
+
+
+
+
+ 6481 bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
+
+ 6483 void DedicatedAllocCalcStatsInfo(
VmaStatInfo& outInfo)
+
+ 6485 VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
+
+
+
+
+
+
+
+
+
-
- 6497 VkDeviceSize m_Size;
- 6498 const VkAllocationCallbacks* m_pAllocationCallbacks;
-
+ 6496 void BlockAllocMap();
+ 6497 void BlockAllocUnmap();
+ 6498 VkResult DedicatedAllocMap(
VmaAllocator hAllocator,
void** ppData);
+
- 6501 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
- 6502 VMA_ASSERT(0 && "Validation failed: " #cond); \
-
-
-
- 6506 class VmaBlockMetadata_Generic :
public VmaBlockMetadata
-
- 6508 VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
-
-
- 6511 virtual ~VmaBlockMetadata_Generic();
- 6512 virtual void Init(VkDeviceSize size);
+ 6501 #if VMA_STATS_STRING_ENABLED
+ 6502 uint32_t GetCreationFrameIndex()
const {
return m_CreationFrameIndex; }
+ 6503 uint32_t GetBufferImageUsage()
const {
return m_BufferImageUsage; }
+
+ 6505 void InitBufferImageUsage(uint32_t bufferImageUsage)
+
+ 6507 VMA_ASSERT(m_BufferImageUsage == 0);
+ 6508 m_BufferImageUsage = bufferImageUsage;
+
+
+ 6511 void PrintParameters(
class VmaJsonWriter& json)
const;
+
- 6514 virtual bool Validate()
const;
- 6515 virtual size_t GetAllocationCount()
const {
return m_Suballocations.size() - m_FreeCount; }
- 6516 virtual VkDeviceSize GetSumFreeSize()
const {
return m_SumFreeSize; }
- 6517 virtual VkDeviceSize GetUnusedRangeSizeMax()
const;
- 6518 virtual bool IsEmpty()
const;
-
- 6520 virtual void CalcAllocationStatInfo(
VmaStatInfo& outInfo)
const;
- 6521 virtual void AddPoolStats(
VmaPoolStats& inoutStats)
const;
-
- 6523 #if VMA_STATS_STRING_ENABLED
- 6524 virtual void PrintDetailedMap(
class VmaJsonWriter& json)
const;
-
+
+ 6515 VkDeviceSize m_Alignment;
+ 6516 VkDeviceSize m_Size;
+
+ 6518 VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
+ 6519 uint32_t m_MemoryTypeIndex;
+
+ 6521 uint8_t m_SuballocationType;
+
+
+
+
- 6527 virtual bool CreateAllocationRequest(
- 6528 uint32_t currentFrameIndex,
- 6529 uint32_t frameInUseCount,
- 6530 VkDeviceSize bufferImageGranularity,
- 6531 VkDeviceSize allocSize,
- 6532 VkDeviceSize allocAlignment,
-
- 6534 VmaSuballocationType allocType,
- 6535 bool canMakeOtherLost,
-
- 6537 VmaAllocationRequest* pAllocationRequest);
-
- 6539 virtual bool MakeRequestedAllocationsLost(
- 6540 uint32_t currentFrameIndex,
- 6541 uint32_t frameInUseCount,
- 6542 VmaAllocationRequest* pAllocationRequest);
+
+ 6528 struct BlockAllocation
+
+ 6530 VmaDeviceMemoryBlock* m_Block;
+ 6531 VkDeviceSize m_Offset;
+ 6532 bool m_CanBecomeLost;
+
+
+
+ 6536 struct DedicatedAllocation
+
+ 6538 VkDeviceMemory m_hMemory;
+ 6539 void* m_pMappedData;
+ 6540 VmaAllocation_T* m_Prev;
+ 6541 VmaAllocation_T* m_Next;
+
- 6544 virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
-
- 6546 virtual VkResult CheckCorruption(const void* pBlockData);
-
-
- 6549 const VmaAllocationRequest& request,
- 6550 VmaSuballocationType type,
- 6551 VkDeviceSize allocSize,
-
-
-
- 6555 virtual void FreeAtOffset(VkDeviceSize offset);
+
+
+
+ 6547 BlockAllocation m_BlockAllocation;
+
+ 6549 DedicatedAllocation m_DedicatedAllocation;
+
+
+ 6552 #if VMA_STATS_STRING_ENABLED
+ 6553 uint32_t m_CreationFrameIndex;
+ 6554 uint32_t m_BufferImageUsage;
+
-
-
- 6560 bool IsBufferImageGranularityConflictPossible(
- 6561 VkDeviceSize bufferImageGranularity,
- 6562 VmaSuballocationType& inOutPrevSuballocType) const;
-
-
- 6565 friend class VmaDefragmentationAlgorithm_Generic;
- 6566 friend class VmaDefragmentationAlgorithm_Fast;
-
- 6568 uint32_t m_FreeCount;
- 6569 VkDeviceSize m_SumFreeSize;
- 6570 VmaSuballocationList m_Suballocations;
-
-
- 6573 VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
-
- 6575 bool ValidateFreeSuballocationList() const;
-
-
-
- 6579 bool CheckAllocation(
- 6580 uint32_t currentFrameIndex,
- 6581 uint32_t frameInUseCount,
- 6582 VkDeviceSize bufferImageGranularity,
- 6583 VkDeviceSize allocSize,
- 6584 VkDeviceSize allocAlignment,
- 6585 VmaSuballocationType allocType,
- 6586 VmaSuballocationList::const_iterator suballocItem,
- 6587 bool canMakeOtherLost,
- 6588 VkDeviceSize* pOffset,
- 6589 size_t* itemsToMakeLostCount,
- 6590 VkDeviceSize* pSumFreeSize,
- 6591 VkDeviceSize* pSumItemSize) const;
-
- 6593 void MergeFreeWithNext(VmaSuballocationList::iterator item);
-
-
-
- 6597 VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
-
-
- 6600 void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
-
-
- 6603 void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+ 6559 friend struct VmaDedicatedAllocationListItemTraits;
+
+
+ 6562 struct VmaDedicatedAllocationListItemTraits
+
+ 6564 typedef VmaAllocation_T ItemType;
+ 6565 static ItemType* GetPrev(const ItemType* item)
+
+ 6567 VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
+ 6568 return item->m_DedicatedAllocation.m_Prev;
+
+ 6570 static ItemType* GetNext(const ItemType* item)
+
+ 6572 VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
+ 6573 return item->m_DedicatedAllocation.m_Next;
+
+ 6575 static ItemType*& AccessPrev(ItemType* item)
+
+ 6577 VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
+ 6578 return item->m_DedicatedAllocation.m_Prev;
+
+ 6580 static ItemType*& AccessNext(ItemType* item){
+ 6581 VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
+ 6582 return item->m_DedicatedAllocation.m_Next;
+
+
+
+
+
+
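VmaDedicatedAllocationListItemTraits above follows the usual intrusive-list traits shape: the list never allocates nodes, it chases prev/next pointers embedded in the items themselves via the traits type. A minimal sketch of a list driven by such a traits type (ToyIntrusiveList is hypothetical, not VMA's actual container):

    // Hypothetical minimal list driven by a traits type shaped like
    // VmaDedicatedAllocationListItemTraits above.
    template<typename Traits>
    struct ToyIntrusiveList
    {
        typedef typename Traits::ItemType Item;
        Item* front = nullptr;

        void PushFront(Item* item)
        {
            Traits::AccessPrev(item) = nullptr;
            Traits::AccessNext(item) = front;
            if(front != nullptr)
                Traits::AccessPrev(front) = item;
            front = item;
        }

        void Remove(Item* item)
        {
            Item* const prev = Traits::AccessPrev(item);
            Item* const next = Traits::AccessNext(item);
            if(prev != nullptr) Traits::AccessNext(prev) = next; else front = next;
            if(next != nullptr) Traits::AccessPrev(next) = prev;
        }
    };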
+
+ 6590 struct VmaSuballocation
+
+ 6592 VkDeviceSize offset;
+
+
+ 6595 VmaSuballocationType type;
+
+
+
+ 6599 struct VmaSuballocationOffsetLess
+
+ 6601 bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
+
+ 6603 return lhs.offset < rhs.offset;
+
+
+ 6606 struct VmaSuballocationOffsetGreater
+
+ 6608 bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
+
+ 6610 return lhs.offset > rhs.offset;
+
+
+
+ 6614 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
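VmaSuballocationOffsetLess / VmaSuballocationOffsetGreater order suballocations by their byte offset inside a block, which keeps per-block metadata searchable by address. A sketch of the idea on a plain std::vector (ToySuballoc and FindAtOffset are illustrative names, not VMA's):

    #include <algorithm>
    #include <vector>

    // ToySuballoc stands in for VmaSuballocation; the comparator mirrors
    // VmaSuballocationOffsetLess above.
    struct ToySuballoc { unsigned long long offset; unsigned long long size; };

    struct ToyOffsetLess
    {
        bool operator()(const ToySuballoc& lhs, const ToySuballoc& rhs) const
        {
            return lhs.offset < rhs.offset;
        }
    };

    // With the vector kept sorted by offset, a suballocation can be located
    // by address in O(log n):
    std::vector<ToySuballoc>::const_iterator FindAtOffset(
        const std::vector<ToySuballoc>& sorted, unsigned long long offset)
    {
        const ToySuballoc key{offset, 0};
        auto it = std::lower_bound(sorted.begin(), sorted.end(), key, ToyOffsetLess());
        return (it != sorted.end() && it->offset == offset) ? it : sorted.end();
    }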
+
+
+ 6617 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
+
+ 6619 enum class VmaAllocationRequestType
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- 6684 class VmaBlockMetadata_Linear : public VmaBlockMetadata
-
- 6686 VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
-
-
- 6689 virtual ~VmaBlockMetadata_Linear();
- 6690 virtual void Init(VkDeviceSize size);
-
- 6692 virtual bool Validate() const;
- 6693 virtual size_t GetAllocationCount() const;
- 6694 virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
- 6695 virtual VkDeviceSize GetUnusedRangeSizeMax() const;
- 6696 virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
-
- 6698 virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
- 6699 virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
+
+
+
+
+
+
+
+
+
+
+ 6641 struct VmaAllocationRequest
+
+ 6643 VkDeviceSize offset;
+ 6644 VkDeviceSize sumFreeSize;
+ 6645 VkDeviceSize sumItemSize;
+ 6646 VmaSuballocationList::iterator item;
+ 6647 size_t itemsToMakeLostCount;
+
+ 6649 VmaAllocationRequestType type;
+
+ 6651 VkDeviceSize CalcCost() const
+
+ 6653 return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
+
+
+
+
+
+
+
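CalcCost() above prices an allocation request: the bytes of allocations that would have to be made lost, plus a flat VMA_LOST_ALLOCATION_COST (1 MiB) per evicted allocation. A sketch of the same arithmetic, with illustrative names:

    #include <cstddef>

    typedef unsigned long long ToyDeviceSize;

    // Flat 1 MiB penalty per eviction, matching the constant above.
    const ToyDeviceSize TOY_LOST_ALLOCATION_COST = 1048576;

    ToyDeviceSize RequestCost(ToyDeviceSize sumItemSize, size_t itemsToMakeLostCount)
    {
        // Bytes displaced plus a large constant per allocation made lost, so
        // the allocator prefers genuinely free space over evicting neighbors.
        return sumItemSize + itemsToMakeLostCount * TOY_LOST_ALLOCATION_COST;
    }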
+ 6661 class VmaBlockMetadata
+
+
+
+ 6665 virtual ~VmaBlockMetadata() { }
+ 6666 virtual void Init(VkDeviceSize size) { m_Size = size; }
+
+
+ 6669 virtual bool Validate() const = 0;
+ 6670 VkDeviceSize GetSize() const { return m_Size; }
+ 6671 virtual size_t GetAllocationCount() const = 0;
+ 6672 virtual VkDeviceSize GetSumFreeSize() const = 0;
+ 6673 virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
+
+ 6675 virtual bool IsEmpty() const = 0;
+
+ 6677 virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
+
+ 6679 virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
+
+ 6681 #if VMA_STATS_STRING_ENABLED
+ 6682 virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
+
+
+
+
+
+ 6688 virtual bool CreateAllocationRequest(
+ 6689 uint32_t currentFrameIndex,
+ 6690 uint32_t frameInUseCount,
+ 6691 VkDeviceSize bufferImageGranularity,
+ 6692 VkDeviceSize allocSize,
+ 6693 VkDeviceSize allocAlignment,
+
+ 6695 VmaSuballocationType allocType,
+ 6696 bool canMakeOtherLost,
+
+
+ 6699 VmaAllocationRequest* pAllocationRequest) = 0;
- 6701 #if VMA_STATS_STRING_ENABLED
- 6702 virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
-
-
- 6705 virtual bool CreateAllocationRequest(
- 6706 uint32_t currentFrameIndex,
- 6707 uint32_t frameInUseCount,
- 6708 VkDeviceSize bufferImageGranularity,
- 6709 VkDeviceSize allocSize,
- 6710 VkDeviceSize allocAlignment,
-
- 6712 VmaSuballocationType allocType,
- 6713 bool canMakeOtherLost,
-
- 6715 VmaAllocationRequest* pAllocationRequest);
+ 6701 virtual bool MakeRequestedAllocationsLost(
+ 6702 uint32_t currentFrameIndex,
+ 6703 uint32_t frameInUseCount,
+ 6704 VmaAllocationRequest* pAllocationRequest) = 0;
+
+ 6706 virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
+
+ 6708 virtual VkResult CheckCorruption(const void* pBlockData) = 0;
+
+
+
+ 6712 const VmaAllocationRequest& request,
+ 6713 VmaSuballocationType type,
+ 6714 VkDeviceSize allocSize,
+
- 6717 virtual bool MakeRequestedAllocationsLost(
- 6718 uint32_t currentFrameIndex,
- 6719 uint32_t frameInUseCount,
- 6720 VmaAllocationRequest* pAllocationRequest);
-
- 6722 virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
+
+
+ 6719 virtual void FreeAtOffset(VkDeviceSize offset) = 0;
+
+
+ 6722 const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
- 6724 virtual VkResult CheckCorruption(const void* pBlockData);
-
-
- 6727 const VmaAllocationRequest& request,
- 6728 VmaSuballocationType type,
- 6729 VkDeviceSize allocSize,
-
-
-
- 6733 virtual void FreeAtOffset(VkDeviceSize offset);
-
-
-
-
-
-
-
-
-
- 6743 typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
-
- 6745 enum SECOND_VECTOR_MODE
-
- 6747 SECOND_VECTOR_EMPTY,
-
-
-
-
- 6752 SECOND_VECTOR_RING_BUFFER,
-
-
-
-
-
- 6758 SECOND_VECTOR_DOUBLE_STACK,
-
-
- 6761 VkDeviceSize m_SumFreeSize;
- 6762 SuballocationVectorType m_Suballocations0, m_Suballocations1;
- 6763 uint32_t m_1stVectorIndex;
- 6764 SECOND_VECTOR_MODE m_2ndVectorMode;
-
- 6766 SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
- 6767 SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
- 6768 const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
- 6769 const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
-
-
- 6772 size_t m_1stNullItemsBeginCount;
-
- 6774 size_t m_1stNullItemsMiddleCount;
-
- 6776 size_t m_2ndNullItemsCount;
-
- 6778 bool ShouldCompact1st() const;
- 6779 void CleanupAfterFree();
+ 6724 #if VMA_STATS_STRING_ENABLED
+ 6725 void PrintDetailedMap_Begin(class VmaJsonWriter& json,
+ 6726 VkDeviceSize unusedBytes,
+ 6727 size_t allocationCount,
+ 6728 size_t unusedRangeCount) const;
+ 6729 void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
+ 6730 VkDeviceSize offset,
+
+ 6732 void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
+ 6733 VkDeviceSize offset,
+ 6734 VkDeviceSize size) const;
+ 6735 void PrintDetailedMap_End(class VmaJsonWriter& json) const;
+
+
+
+ 6739 VkDeviceSize m_Size;
+ 6740 const VkAllocationCallbacks* m_pAllocationCallbacks;
+
+
+ 6743 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
+ 6744 VMA_ASSERT(0 && "Validation failed: " #cond); \
+
+
+
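The VMA_VALIDATE macro above asserts on a failed condition; its tail is elided in this listing, but presumably it also bails out of the surrounding Validate() method. A self-contained macro in the same spirit, under that assumption (TOY_VALIDATE and ValidateRange are illustrative):

    #include <cassert>

    // Assert in debug builds and report the failure to the caller, so a
    // Validate() method can stop instead of reading corrupt metadata.
    // The 'return false' tail is an assumption about the elided lines.
    #define TOY_VALIDATE(cond) \
        do { \
            if(!(cond)) \
            { \
                assert(0 && "Validation failed: " #cond); \
                return false; \
            } \
        } while(false)

    bool ValidateRange(unsigned long long offset, unsigned long long size,
        unsigned long long blockSize)
    {
        TOY_VALIDATE(size > 0);
        TOY_VALIDATE(size <= blockSize);
        TOY_VALIDATE(offset <= blockSize - size); // avoids unsigned overflow
        return true;
    }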
+ 6748 class VmaBlockMetadata_Generic : public VmaBlockMetadata
+
+ 6750 VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
+
+
+ 6753 virtual ~VmaBlockMetadata_Generic();
+ 6754 virtual void Init(VkDeviceSize size);
+
+ 6756 virtual bool Validate() const;
+ 6757 virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
+ 6758 virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
+ 6759 virtual VkDeviceSize GetUnusedRangeSizeMax() const;
+ 6760 virtual bool IsEmpty() const;
+
+ 6762 virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
+ 6763 virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
+
+ 6765 #if VMA_STATS_STRING_ENABLED
+ 6766 virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
+
+
+ 6769 virtual bool CreateAllocationRequest(
+ 6770 uint32_t currentFrameIndex,
+ 6771 uint32_t frameInUseCount,
+ 6772 VkDeviceSize bufferImageGranularity,
+ 6773 VkDeviceSize allocSize,
+ 6774 VkDeviceSize allocAlignment,
+
+ 6776 VmaSuballocationType allocType,
+ 6777 bool canMakeOtherLost,
+
+ 6779 VmaAllocationRequest* pAllocationRequest);
- 6781 bool CreateAllocationRequest_LowerAddress(
+ 6781 virtual bool MakeRequestedAllocationsLost(
6782 uint32_t currentFrameIndex,
6783 uint32_t frameInUseCount,
- 6784 VkDeviceSize bufferImageGranularity,
- 6785 VkDeviceSize allocSize,
- 6786 VkDeviceSize allocAlignment,
- 6787 VmaSuballocationType allocType,
- 6788 bool canMakeOtherLost,
-
- 6790 VmaAllocationRequest* pAllocationRequest);
- 6791 bool CreateAllocationRequest_UpperAddress(
- 6792 uint32_t currentFrameIndex,
- 6793 uint32_t frameInUseCount,
- 6794 VkDeviceSize bufferImageGranularity,
- 6795 VkDeviceSize allocSize,
- 6796 VkDeviceSize allocAlignment,
- 6797 VmaSuballocationType allocType,
- 6798 bool canMakeOtherLost,
-
- 6800 VmaAllocationRequest* pAllocationRequest);
-
-
-
-
-
-
-
-
-
-
-
-
-
- 6814 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
-
- 6816 VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
-
-
- 6819 virtual ~VmaBlockMetadata_Buddy();
- 6820 virtual void Init(VkDeviceSize size);
-
- 6822 virtual bool Validate() const;
- 6823 virtual size_t GetAllocationCount() const { return m_AllocationCount; }
- 6824 virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
- 6825 virtual VkDeviceSize GetUnusedRangeSizeMax() const;
- 6826 virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
-
- 6828 virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
- 6829 virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
-
- 6831 #if VMA_STATS_STRING_ENABLED
- 6832 virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
-
-
- 6835 virtual bool CreateAllocationRequest(
- 6836 uint32_t currentFrameIndex,
- 6837 uint32_t frameInUseCount,
- 6838 VkDeviceSize bufferImageGranularity,
- 6839 VkDeviceSize allocSize,
- 6840 VkDeviceSize allocAlignment,
-
- 6842 VmaSuballocationType allocType,
- 6843 bool canMakeOtherLost,
-
- 6845 VmaAllocationRequest* pAllocationRequest);
-
- 6847 virtual bool MakeRequestedAllocationsLost(
- 6848 uint32_t currentFrameIndex,
- 6849 uint32_t frameInUseCount,
- 6850 VmaAllocationRequest* pAllocationRequest);
-
- 6852 virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
-
- 6854 virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
-
-
- 6857 const VmaAllocationRequest& request,
- 6858 VmaSuballocationType type,
- 6859 VkDeviceSize allocSize,
-
-
- 6862 virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
- 6863 virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
-
-
- 6866 static const VkDeviceSize MIN_NODE_SIZE = 32;
- 6867 static const size_t MAX_LEVELS = 30;
-
- 6869 struct ValidationContext
-
- 6871 size_t calculatedAllocationCount;
- 6872 size_t calculatedFreeCount;
- 6873 VkDeviceSize calculatedSumFreeSize;
-
- 6875 ValidationContext() :
- 6876 calculatedAllocationCount(0),
- 6877 calculatedFreeCount(0),
- 6878 calculatedSumFreeSize(0) { }
-
-
-
-
- 6883 VkDeviceSize offset;
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- 6913 VkDeviceSize m_UsableSize;
- 6914 uint32_t m_LevelCount;
-
-
-
-
-
- 6920 } m_FreeList[MAX_LEVELS];
-
- 6922 size_t m_AllocationCount;
-
-
-
- 6926 VkDeviceSize m_SumFreeSize;
-
- 6928 VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
- 6929 void DeleteNode(Node* node);
- 6930 bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
- 6931 uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
- 6932 inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
-
- 6934 void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
- 6935 void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
-
-
-
- 6939 void AddToFreeListFront(uint32_t level, Node* node);
-
-
-
- 6943 void RemoveFromFreeList(uint32_t level, Node* node);
-
- 6945 #if VMA_STATS_STRING_ENABLED
- 6946 void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
-
-
-
-
-
-
-
-
-
- 6956 class VmaDeviceMemoryBlock
-
- 6958 VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
-
- 6960 VmaBlockMetadata* m_pMetadata;
-
-
+ 6784 VmaAllocationRequest* pAllocationRequest);
+
+ 6786 virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
+
+ 6788 virtual VkResult CheckCorruption(const void* pBlockData);
+
+
+ 6791 const VmaAllocationRequest& request,
+ 6792 VmaSuballocationType type,
+ 6793 VkDeviceSize allocSize,
+
+
+
+ 6797 virtual void FreeAtOffset(VkDeviceSize offset);
+
+
+
+ 6802 bool IsBufferImageGranularityConflictPossible(
+ 6803 VkDeviceSize bufferImageGranularity,
+ 6804 VmaSuballocationType& inOutPrevSuballocType) const;
+
+
+ 6807 friend class VmaDefragmentationAlgorithm_Generic;
+ 6808 friend class VmaDefragmentationAlgorithm_Fast;
+
+ 6810 uint32_t m_FreeCount;
+ 6811 VkDeviceSize m_SumFreeSize;
+ 6812 VmaSuballocationList m_Suballocations;
+
+
+ 6815 VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
+
+ 6817 bool ValidateFreeSuballocationList() const;
+
+
+
+ 6821 bool CheckAllocation(
+ 6822 uint32_t currentFrameIndex,
+ 6823 uint32_t frameInUseCount,
+ 6824 VkDeviceSize bufferImageGranularity,
+ 6825 VkDeviceSize allocSize,
+ 6826 VkDeviceSize allocAlignment,
+ 6827 VmaSuballocationType allocType,
+ 6828 VmaSuballocationList::const_iterator suballocItem,
+ 6829 bool canMakeOtherLost,
+ 6830 VkDeviceSize* pOffset,
+ 6831 size_t* itemsToMakeLostCount,
+ 6832 VkDeviceSize* pSumFreeSize,
+ 6833 VkDeviceSize* pSumItemSize) const;
+
+ 6835 void MergeFreeWithNext(VmaSuballocationList::iterator item);
+
+
+
+ 6839 VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
+
+
+ 6842 void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
+
+
+ 6845 void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 6926 class VmaBlockMetadata_Linear : public VmaBlockMetadata
+
+ 6928 VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
+
+
+ 6931 virtual ~VmaBlockMetadata_Linear();
+ 6932 virtual void Init(VkDeviceSize size);
+
+ 6934 virtual bool Validate() const;
+ 6935 virtual size_t GetAllocationCount() const;
+ 6936 virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
+ 6937 virtual VkDeviceSize GetUnusedRangeSizeMax() const;
+ 6938 virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
+
+ 6940 virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
+ 6941 virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
+
+ 6943 #if VMA_STATS_STRING_ENABLED
+ 6944 virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
+
+
+ 6947 virtual bool CreateAllocationRequest(
+ 6948 uint32_t currentFrameIndex,
+ 6949 uint32_t frameInUseCount,
+ 6950 VkDeviceSize bufferImageGranularity,
+ 6951 VkDeviceSize allocSize,
+ 6952 VkDeviceSize allocAlignment,
+
+ 6954 VmaSuballocationType allocType,
+ 6955 bool canMakeOtherLost,
+
+ 6957 VmaAllocationRequest* pAllocationRequest);
+
+ 6959 virtual bool MakeRequestedAllocationsLost(
+ 6960 uint32_t currentFrameIndex,
+ 6961 uint32_t frameInUseCount,
+ 6962 VmaAllocationRequest* pAllocationRequest);
- 6964 ~VmaDeviceMemoryBlock()
-
- 6966 VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
- 6967 VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
-
-
-
-
-
-
- 6974 uint32_t newMemoryTypeIndex,
- 6975 VkDeviceMemory newMemory,
- 6976 VkDeviceSize newSize,
-
- 6978 uint32_t algorithm);
-
-
-
- 6982 VmaPool GetParentPool() const { return m_hParentPool; }
- 6983 VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
- 6984 uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
- 6985 uint32_t GetId() const { return m_Id; }
- 6986 void* GetMappedData() const { return m_pMappedData; }
-
-
- 6989 bool Validate() const;
-
-
-
-
- 6994 VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
-
-
- 6997 VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
- 6998 VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
-
- 7000 VkResult BindBufferMemory(
-
-
- 7003 VkDeviceSize allocationLocalOffset,
-
-
- 7006 VkResult BindImageMemory(
-
-
- 7009 VkDeviceSize allocationLocalOffset,
-
-
+ 6964 virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
+
+ 6966 virtual VkResult CheckCorruption(
const void* pBlockData);
+
+
+ 6969 const VmaAllocationRequest& request,
+ 6970 VmaSuballocationType type,
+ 6971 VkDeviceSize allocSize,
+
+
+
+ 6975 virtual void FreeAtOffset(VkDeviceSize offset);
+
+
+
+
+
+
+
+
+
+ 6985 typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
+
+ 6987 enum SECOND_VECTOR_MODE
+
+ 6989 SECOND_VECTOR_EMPTY,
+
+
+
+
+ 6994 SECOND_VECTOR_RING_BUFFER,
+
+
+
+
+
+ 7000 SECOND_VECTOR_DOUBLE_STACK,
+
+
+ 7003 VkDeviceSize m_SumFreeSize;
+ 7004 SuballocationVectorType m_Suballocations0, m_Suballocations1;
+ 7005 uint32_t m_1stVectorIndex;
+ 7006 SECOND_VECTOR_MODE m_2ndVectorMode;
+
+ 7008 SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
+ 7009 SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
+ 7010 const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
+ 7011 const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
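The four accessors above implement a small trick: the linear metadata keeps two physical suballocation vectors and a single index saying which one is logically first, so swapping the roles of the 1st and 2nd vector is an O(1) index flip rather than a copy. A sketch with illustrative names:

    #include <vector>

    // Two physical vectors plus one index selecting which is logically "1st".
    struct ToyTwoVectors
    {
        std::vector<int> v0, v1;
        unsigned firstIndex = 0;

        std::vector<int>& First()  { return firstIndex ? v1 : v0; }
        std::vector<int>& Second() { return firstIndex ? v0 : v1; }

        // Promote the 2nd vector to 1st (e.g. when the 1st drains in the
        // linear allocator's ring-buffer mode) without moving any elements.
        void SwapRoles() { firstIndex ^= 1u; }
    };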
-
-
- 7015 uint32_t m_MemoryTypeIndex;
-
- 7017 VkDeviceMemory m_hMemory;
-
-
-
-
-
-
-
- 7025 uint32_t m_MapCount;
- 7026 void* m_pMappedData;
-
-
- 7029 struct VmaPointerLess
-
- 7031 bool operator()(const void* lhs, const void* rhs) const
-
-
-
-
-
- 7037 struct VmaDefragmentationMove
-
- 7039 size_t srcBlockIndex;
- 7040 size_t dstBlockIndex;
- 7041 VkDeviceSize srcOffset;
- 7042 VkDeviceSize dstOffset;
-
-
- 7045 VmaDeviceMemoryBlock* pSrcBlock;
- 7046 VmaDeviceMemoryBlock* pDstBlock;
-
-
- 7049 class VmaDefragmentationAlgorithm;
-
-
-
-
-
-
-
- 7057 struct VmaBlockVector
-
- 7059 VMA_CLASS_NO_COPY(VmaBlockVector)
-
-
-
-
- 7064 uint32_t memoryTypeIndex,
- 7065 VkDeviceSize preferredBlockSize,
- 7066 size_t minBlockCount,
- 7067 size_t maxBlockCount,
- 7068 VkDeviceSize bufferImageGranularity,
- 7069 uint32_t frameInUseCount,
- 7070 bool explicitBlockSize,
-
-
-
-
- 7075 VkResult CreateMinBlocks();
+
+ 7014 size_t m_1stNullItemsBeginCount;
+
+ 7016 size_t m_1stNullItemsMiddleCount;
+
+ 7018 size_t m_2ndNullItemsCount;
+
+ 7020 bool ShouldCompact1st() const;
+ 7021 void CleanupAfterFree();
+
+ 7023 bool CreateAllocationRequest_LowerAddress(
+ 7024 uint32_t currentFrameIndex,
+ 7025 uint32_t frameInUseCount,
+ 7026 VkDeviceSize bufferImageGranularity,
+ 7027 VkDeviceSize allocSize,
+ 7028 VkDeviceSize allocAlignment,
+ 7029 VmaSuballocationType allocType,
+ 7030 bool canMakeOtherLost,
+
+ 7032 VmaAllocationRequest* pAllocationRequest);
+ 7033 bool CreateAllocationRequest_UpperAddress(
+ 7034 uint32_t currentFrameIndex,
+ 7035 uint32_t frameInUseCount,
+ 7036 VkDeviceSize bufferImageGranularity,
+ 7037 VkDeviceSize allocSize,
+ 7038 VkDeviceSize allocAlignment,
+ 7039 VmaSuballocationType allocType,
+ 7040 bool canMakeOtherLost,
+
+ 7042 VmaAllocationRequest* pAllocationRequest);
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 7056 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
+
+ 7058 VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
+
+
+ 7061 virtual ~VmaBlockMetadata_Buddy();
+ 7062 virtual void Init(VkDeviceSize size);
+
+ 7064 virtual bool Validate() const;
+ 7065 virtual size_t GetAllocationCount() const { return m_AllocationCount; }
+ 7066 virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
+ 7067 virtual VkDeviceSize GetUnusedRangeSizeMax() const;
+ 7068 virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
+
+ 7070 virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
+ 7071 virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
+
+ 7073 #if VMA_STATS_STRING_ENABLED
+ 7074 virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
+
- 7077 VmaAllocator GetAllocator() const { return m_hAllocator; }
- 7078 VmaPool GetParentPool() const { return m_hParentPool; }
- 7079 bool IsCustomPool() const { return m_hParentPool != VMA_NULL; }
- 7080 uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
- 7081 VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
- 7082 VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
- 7083 uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
- 7084 uint32_t GetAlgorithm() const { return m_Algorithm; }
-
-
-
-
- 7089 bool IsCorruptionDetectionEnabled() const;
-
-
- 7092 uint32_t currentFrameIndex,
-
- 7094 VkDeviceSize alignment,
-
- 7096 VmaSuballocationType suballocType,
- 7097 size_t allocationCount,
-
-
-
-
-
-
-
- 7105 #if VMA_STATS_STRING_ENABLED
- 7106 void PrintDetailedMap(class VmaJsonWriter& json);
-
-
- 7109 void MakePoolAllocationsLost(
- 7110 uint32_t currentFrameIndex,
- 7111 size_t* pLostAllocationCount);
- 7112 VkResult CheckCorruption();
-
-
-
- 7116 class VmaBlockVectorDefragmentationContext* pCtx,
-
- 7118 VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
- 7119 VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
- 7120 VkCommandBuffer commandBuffer);
- 7121 void DefragmentationEnd(
- 7122 class VmaBlockVectorDefragmentationContext* pCtx,
-
-
-
- 7126 uint32_t ProcessDefragmentations(
- 7127 class VmaBlockVectorDefragmentationContext *pCtx,
-
-
- 7130 void CommitDefragmentations(
- 7131 class VmaBlockVectorDefragmentationContext *pCtx,
-
-
-
-
- 7137 size_t GetBlockCount() const { return m_Blocks.size(); }
- 7138 VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
- 7139 size_t CalcAllocationCount() const;
- 7140 bool IsBufferImageGranularityConflictPossible() const;
-
-
- 7143 friend class VmaDefragmentationAlgorithm_Generic;
-
-
-
- 7147 const uint32_t m_MemoryTypeIndex;
- 7148 const VkDeviceSize m_PreferredBlockSize;
- 7149 const size_t m_MinBlockCount;
- 7150 const size_t m_MaxBlockCount;
- 7151 const VkDeviceSize m_BufferImageGranularity;
- 7152 const uint32_t m_FrameInUseCount;
- 7153 const bool m_ExplicitBlockSize;
- 7154 const uint32_t m_Algorithm;
- 7155 const float m_Priority;
- 7156 VMA_RW_MUTEX m_Mutex;
+ 7077 virtual bool CreateAllocationRequest(
+ 7078 uint32_t currentFrameIndex,
+ 7079 uint32_t frameInUseCount,
+ 7080 VkDeviceSize bufferImageGranularity,
+ 7081 VkDeviceSize allocSize,
+ 7082 VkDeviceSize allocAlignment,
+
+ 7084 VmaSuballocationType allocType,
+ 7085 bool canMakeOtherLost,
+
+ 7087 VmaAllocationRequest* pAllocationRequest);
+
+ 7089 virtual bool MakeRequestedAllocationsLost(
+ 7090 uint32_t currentFrameIndex,
+ 7091 uint32_t frameInUseCount,
+ 7092 VmaAllocationRequest* pAllocationRequest);
+
+ 7094 virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
+
+ 7096 virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
+
+
+ 7099 const VmaAllocationRequest& request,
+ 7100 VmaSuballocationType type,
+ 7101 VkDeviceSize allocSize,
+
+
+ 7104 virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
+ 7105 virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
+
+
+ 7108 static const VkDeviceSize MIN_NODE_SIZE = 32;
+ 7109 static const size_t MAX_LEVELS = 30;
+
+ 7111 struct ValidationContext
+
+ 7113 size_t calculatedAllocationCount;
+ 7114 size_t calculatedFreeCount;
+ 7115 VkDeviceSize calculatedSumFreeSize;
+
+ 7117 ValidationContext() :
+ 7118 calculatedAllocationCount(0),
+ 7119 calculatedFreeCount(0),
+ 7120 calculatedSumFreeSize(0) { }
+
+
+
+
+ 7125 VkDeviceSize offset;
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 7155 VkDeviceSize m_UsableSize;
+ 7156 uint32_t m_LevelCount;
-
-
- 7160 bool m_HasEmptyBlock;
-
- 7162 VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
- 7163 uint32_t m_NextBlockId;
-
- 7165 VkDeviceSize CalcMaxBlockSize() const;
-
-
- 7168 void Remove(VmaDeviceMemoryBlock* pBlock);
+
+
+
+
+ 7162 } m_FreeList[MAX_LEVELS];
+
+ 7164 size_t m_AllocationCount;
+
+
+
+ 7168 VkDeviceSize m_SumFreeSize;
-
-
- 7172 void IncrementallySortBlocks();
-
- 7174 VkResult AllocatePage(
- 7175 uint32_t currentFrameIndex,
-
- 7177 VkDeviceSize alignment,
-
- 7179 VmaSuballocationType suballocType,
-
-
-
- 7183 VkResult AllocateFromBlock(
- 7184 VmaDeviceMemoryBlock* pBlock,
- 7185 uint32_t currentFrameIndex,
-
- 7187 VkDeviceSize alignment,
-
-
- 7190 VmaSuballocationType suballocType,
-
-
-
- 7194 VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
-
-
- 7197 void ApplyDefragmentationMovesCpu(
- 7198 class VmaBlockVectorDefragmentationContext* pDefragCtx,
- 7199 const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
-
- 7201 void ApplyDefragmentationMovesGpu(
- 7202 class VmaBlockVectorDefragmentationContext* pDefragCtx,
- 7203 VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
- 7204 VkCommandBuffer commandBuffer);
+ 7170 VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
+ 7171 void DeleteNode(Node* node);
+ 7172 bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
+ 7173 uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
+ 7174 inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
+
+ 7176 void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
+ 7177 void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
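LevelToNodeSize above encodes the buddy invariant: level 0 spans the whole usable block and every level halves the node size. AllocSizeToLevel's body is elided in this listing; conceptually it picks the deepest level whose node still fits the request. A sketch under that assumption (names are illustrative):

    #include <cstdint>

    typedef unsigned long long ToyDeviceSize;

    // Level 0 is the whole usable block; each level halves the node size.
    ToyDeviceSize LevelToNodeSize(ToyDeviceSize usableSize, uint32_t level)
    {
        return usableSize >> level;
    }

    // Descend to the deepest level whose node size still fits the request.
    uint32_t AllocSizeToLevel(ToyDeviceSize usableSize, uint32_t levelCount,
        ToyDeviceSize allocSize)
    {
        uint32_t level = 0;
        while(level + 1 < levelCount &&
            LevelToNodeSize(usableSize, level + 1) >= allocSize)
        {
            ++level;
        }
        return level;
    }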
+
+
+
+ 7181 void AddToFreeListFront(uint32_t level, Node* node);
+
+
+
+ 7185 void RemoveFromFreeList(uint32_t level, Node* node);
+
+ 7187 #if VMA_STATS_STRING_ENABLED
+ 7188 void PrintDetailedMapNode(
class VmaJsonWriter& json,
const Node* node, VkDeviceSize levelNodeSize)
const;
+
+
+
+
+
+
+
+
+
+ 7198 class VmaDeviceMemoryBlock
+
+ 7200 VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
+
+ 7202 VmaBlockMetadata* m_pMetadata;
+
+
-
-
-
-
-
+ 7206 ~VmaDeviceMemoryBlock()
+
+ 7208 VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
+ 7209 VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
+
- 7212 void UpdateHasEmptyBlock();
-
-
-
-
- 7217 VMA_CLASS_NO_COPY(VmaPool_T)
-
- 7219 VmaBlockVector m_BlockVector;
-
-
-
-
- 7224 VkDeviceSize preferredBlockSize);
-
-
+
+
+
+
+ 7216 uint32_t newMemoryTypeIndex,
+ 7217 VkDeviceMemory newMemory,
+ 7218 VkDeviceSize newSize,
+
+ 7220 uint32_t algorithm);
+
+
+
+ 7224 VmaPool GetParentPool() const { return m_hParentPool; }
+ 7225 VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
+ 7226 uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
7227 uint32_t GetId() const { return m_Id; }
- 7228 void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
+ 7228 void* GetMappedData() const { return m_pMappedData; }
- 7230 const char* GetName() const { return m_Name; }
- 7231 void SetName(const char* pName);
+
+ 7231 bool Validate() const;
- 7233 #if VMA_STATS_STRING_ENABLED
-
-
-
-
-
-
-
+
+
+
+ 7236 VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
+
+
+ 7239 VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
+ 7240 VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
-
-
-
-
-
-
-
- 7249 class VmaDefragmentationAlgorithm
-
- 7251 VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
-
- 7253 VmaDefragmentationAlgorithm(
-
- 7255 VmaBlockVector* pBlockVector,
- 7256 uint32_t currentFrameIndex) :
- 7257 m_hAllocator(hAllocator),
- 7258 m_pBlockVector(pBlockVector),
- 7259 m_CurrentFrameIndex(currentFrameIndex)
-
-
- 7262 virtual ~VmaDefragmentationAlgorithm()
-
-
-
- 7266 virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
- 7267 virtual void AddAll() = 0;
-
- 7269 virtual VkResult Defragment(
- 7270 VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
- 7271 VkDeviceSize maxBytesToMove,
- 7272 uint32_t maxAllocationsToMove,
-
-
- 7275 virtual VkDeviceSize GetBytesMoved() const = 0;
- 7276 virtual uint32_t GetAllocationsMoved() const = 0;
-
-
-
- 7280 VmaBlockVector* const m_pBlockVector;
- 7281 const uint32_t m_CurrentFrameIndex;
+ 7242 VkResult BindBufferMemory(
+
+
+ 7245 VkDeviceSize allocationLocalOffset,
+
+
+ 7248 VkResult BindImageMemory(
+
+
+ 7251 VkDeviceSize allocationLocalOffset,
+
+
+
+
+
+ 7257 uint32_t m_MemoryTypeIndex;
+
+ 7259 VkDeviceMemory m_hMemory;
+
+
+
+
+
+
+
+ 7267 uint32_t m_MapCount;
+ 7268 void* m_pMappedData;
+
+
+ 7271 struct VmaDefragmentationMove
+
+ 7273 size_t srcBlockIndex;
+ 7274 size_t dstBlockIndex;
+ 7275 VkDeviceSize srcOffset;
+ 7276 VkDeviceSize dstOffset;
+
+
+ 7279 VmaDeviceMemoryBlock* pSrcBlock;
+ 7280 VmaDeviceMemoryBlock* pDstBlock;
+
- 7283 struct AllocationInfo
-
-
- 7286 VkBool32* m_pChanged;
-
-
- 7289 m_hAllocation(VK_NULL_HANDLE),
- 7290 m_pChanged(VMA_NULL)
-
-
-
- 7294 m_hAllocation(hAlloc),
- 7295 m_pChanged(pChanged)
-
-
-
-
-
- 7301 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
-
- 7303 VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
-
- 7305 VmaDefragmentationAlgorithm_Generic(
-
- 7307 VmaBlockVector* pBlockVector,
- 7308 uint32_t currentFrameIndex,
- 7309 bool overlappingMoveSupported);
- 7310 virtual ~VmaDefragmentationAlgorithm_Generic();
-
- 7312 virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
- 7313 virtual void AddAll() { m_AllAllocations = true; }
-
- 7315 virtual VkResult Defragment(
- 7316 VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
- 7317 VkDeviceSize maxBytesToMove,
- 7318 uint32_t maxAllocationsToMove,
-
-
- 7321 virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
- 7322 virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
-
-
- 7325 uint32_t m_AllocationCount;
- 7326 bool m_AllAllocations;
-
- 7328 VkDeviceSize m_BytesMoved;
- 7329 uint32_t m_AllocationsMoved;
-
- 7331 struct AllocationInfoSizeGreater
-
- 7333 bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
-
- 7335 return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
-
-
+ 7283 class VmaDefragmentationAlgorithm;
+
+
+
+
+
+
+
+ 7291 struct VmaBlockVector
+
+ 7293 VMA_CLASS_NO_COPY(VmaBlockVector)
+
+
+
+
+ 7298 uint32_t memoryTypeIndex,
+ 7299 VkDeviceSize preferredBlockSize,
+ 7300 size_t minBlockCount,
+ 7301 size_t maxBlockCount,
+ 7302 VkDeviceSize bufferImageGranularity,
+ 7303 uint32_t frameInUseCount,
+ 7304 bool explicitBlockSize,
+
+
+
+
+ 7309 VkResult CreateMinBlocks();
+
+ 7311 VmaAllocator GetAllocator() const { return m_hAllocator; }
+ 7312 VmaPool GetParentPool() const { return m_hParentPool; }
+ 7313 bool IsCustomPool() const { return m_hParentPool != VMA_NULL; }
+ 7314 uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
+ 7315 VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
+ 7316 VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
+ 7317 uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
+ 7318 uint32_t GetAlgorithm() const { return m_Algorithm; }
+
+
+
+
+ 7323 bool IsCorruptionDetectionEnabled() const;
+
+
+ 7326 uint32_t currentFrameIndex,
+
+ 7328 VkDeviceSize alignment,
+
+ 7330 VmaSuballocationType suballocType,
+ 7331 size_t allocationCount,
+
+
+
+
+
+
- 7339 struct AllocationInfoOffsetGreater
-
- 7341 bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
-
- 7343 return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
-
-
-
-
-
- 7349 size_t m_OriginalBlockIndex;
- 7350 VmaDeviceMemoryBlock* m_pBlock;
- 7351 bool m_HasNonMovableAllocations;
- 7352 VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
-
- 7354 BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
- 7355 m_OriginalBlockIndex(SIZE_MAX),
-
- 7357 m_HasNonMovableAllocations(true),
- 7358 m_Allocations(pAllocationCallbacks)
-
-
-
- 7362 void CalcHasNonMovableAllocations()
-
- 7364 const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
- 7365 const size_t defragmentAllocCount = m_Allocations.size();
- 7366 m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
-
-
- 7369 void SortAllocationsBySizeDescending()
-
- 7371 VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
-
-
- 7374 void SortAllocationsByOffsetDescending()
-
- 7376 VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
-
-
-
- 7380 struct BlockPointerLess
-
- 7382 bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
-
- 7384 return pLhsBlockInfo->m_pBlock < pRhsBlock;
-
- 7386 bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
-
- 7388 return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
-
-
+ 7339 #if VMA_STATS_STRING_ENABLED
+ 7340 void PrintDetailedMap(class VmaJsonWriter& json);
+
+
+ 7343 void MakePoolAllocationsLost(
+ 7344 uint32_t currentFrameIndex,
+ 7345 size_t* pLostAllocationCount);
+ 7346 VkResult CheckCorruption();
+
+
+
+ 7350 class VmaBlockVectorDefragmentationContext* pCtx,
+
+ 7352 VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
+ 7353 VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
+ 7354 VkCommandBuffer commandBuffer);
+ 7355 void DefragmentationEnd(
+ 7356 class VmaBlockVectorDefragmentationContext* pCtx,
+
+
+
+ 7360 uint32_t ProcessDefragmentations(
+ 7361 class VmaBlockVectorDefragmentationContext *pCtx,
+
+
+ 7364 void CommitDefragmentations(
+ 7365 class VmaBlockVectorDefragmentationContext *pCtx,
+
+
+
+
+ 7371 size_t GetBlockCount() const { return m_Blocks.size(); }
+ 7372 VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
+ 7373 size_t CalcAllocationCount() const;
+ 7374 bool IsBufferImageGranularityConflictPossible() const;
+
+
+ 7377 friend class VmaDefragmentationAlgorithm_Generic;
+
+
+
+ 7381 const uint32_t m_MemoryTypeIndex;
+ 7382 const VkDeviceSize m_PreferredBlockSize;
+ 7383 const size_t m_MinBlockCount;
+ 7384 const size_t m_MaxBlockCount;
+ 7385 const VkDeviceSize m_BufferImageGranularity;
+ 7386 const uint32_t m_FrameInUseCount;
+ 7387 const bool m_ExplicitBlockSize;
+ 7388 const uint32_t m_Algorithm;
+ 7389 const float m_Priority;
+ 7390 VMA_RW_MUTEX m_Mutex;
-
-
- 7394 struct BlockInfoCompareMoveDestination
-
- 7396 bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
-
- 7398 if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
-
-
-
- 7402 if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
-
-
-
- 7406 if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
-
-
-
-
-
-
-
- 7414 typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
- 7415 BlockInfoVector m_Blocks;
-
- 7417 VkResult DefragmentRound(
- 7418 VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
- 7419 VkDeviceSize maxBytesToMove,
- 7420 uint32_t maxAllocationsToMove,
- 7421 bool freeOldAllocations);
-
- 7423 size_t CalcBlocksWithNonMovableCount() const;
-
- 7425 static bool MoveMakesSense(
- 7426 size_t dstBlockIndex, VkDeviceSize dstOffset,
- 7427 size_t srcBlockIndex, VkDeviceSize srcOffset);
-
+
+
+ 7394 bool m_HasEmptyBlock;
+
+ 7396 VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
+ 7397 uint32_t m_NextBlockId;
+
+ 7399 VkDeviceSize CalcMaxBlockSize() const;
+
+
+ 7402 void Remove(VmaDeviceMemoryBlock* pBlock);
+
+
+
+ 7406 void IncrementallySortBlocks();
+
+ 7408 VkResult AllocatePage(
+ 7409 uint32_t currentFrameIndex,
+
+ 7411 VkDeviceSize alignment,
+
+ 7413 VmaSuballocationType suballocType,
+
+
+
+ 7417 VkResult AllocateFromBlock(
+ 7418 VmaDeviceMemoryBlock* pBlock,
+ 7419 uint32_t currentFrameIndex,
+
+ 7421 VkDeviceSize alignment,
+
+
+ 7424 VmaSuballocationType suballocType,
+
+
+
+ 7428 VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
- 7430 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
-
- 7432 VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
-
- 7434 VmaDefragmentationAlgorithm_Fast(
-
- 7436 VmaBlockVector* pBlockVector,
- 7437 uint32_t currentFrameIndex,
- 7438 bool overlappingMoveSupported);
- 7439 virtual ~VmaDefragmentationAlgorithm_Fast();
-
- 7441 virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
- 7442 virtual void AddAll() { m_AllAllocations = true; }
-
- 7444 virtual VkResult Defragment(
- 7445 VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
- 7446 VkDeviceSize maxBytesToMove,
- 7447 uint32_t maxAllocationsToMove,
-
-
- 7450 virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
- 7451 virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
-
-
-
-
- 7456 size_t origBlockIndex;
-
-
- 7459 class FreeSpaceDatabase
-
-
-
-
-
- 7465 s.blockInfoIndex = SIZE_MAX;
- 7466 for(size_t i = 0; i < MAX_COUNT; ++i)
-
- 7468 m_FreeSpaces[i] = s;
-
-
-
- 7472 void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
-
- 7474 if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
-
-
-
+
+ 7431 void ApplyDefragmentationMovesCpu(
+ 7432 class VmaBlockVectorDefragmentationContext* pDefragCtx,
+ 7433 const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
+
+ 7435 void ApplyDefragmentationMovesGpu(
+ 7436 class VmaBlockVectorDefragmentationContext* pDefragCtx,
+ 7437 VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
+ 7438 VkCommandBuffer commandBuffer);
+
+
+
+
+
+
+
+ 7446 void UpdateHasEmptyBlock();
+
+
+
+
+ 7451 VMA_CLASS_NO_COPY(VmaPool_T)
+
+ 7453 VmaBlockVector m_BlockVector;
+
+
+
+
+ 7458 VkDeviceSize preferredBlockSize);
+
+
+ 7461 uint32_t GetId() const { return m_Id; }
+ 7462 void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
+
+ 7464 const char* GetName() const { return m_Name; }
+ 7465 void SetName(const char* pName);
+
+ 7467 #if VMA_STATS_STRING_ENABLED
+
+
+
+
+
+
+ 7474 VmaPool_T* m_PrevPool = VMA_NULL;
+ 7475 VmaPool_T* m_NextPool = VMA_NULL;
+ 7476 friend struct VmaPoolListItemTraits;
+
-
- 7480 size_t bestIndex = SIZE_MAX;
- 7481 for(size_t i = 0; i < MAX_COUNT; ++i)
-
-
- 7484 if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
-
-
-
-
- 7489 if(m_FreeSpaces[i].size < size &&
- 7490 (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
-
-
-
-
-
- 7496 if(bestIndex != SIZE_MAX)
-
- 7498 m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
- 7499 m_FreeSpaces[bestIndex].offset = offset;
- 7500 m_FreeSpaces[bestIndex].size = size;
-
-
-
- 7504 bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
- 7505 size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
-
- 7507 size_t bestIndex = SIZE_MAX;
- 7508 VkDeviceSize bestFreeSpaceAfter = 0;
- 7509 for(size_t i = 0; i < MAX_COUNT; ++i)
-
-
- 7512 if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
-
- 7514 const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
-
- 7516 if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
-
- 7518 const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
-
- 7520 if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
-
-
- 7523 bestFreeSpaceAfter = freeSpaceAfter;
-
-
-
-
+ 7479 struct VmaPoolListItemTraits
+
+ 7481 typedef VmaPool_T ItemType;
+ 7482 static ItemType* GetPrev(const ItemType* item) { return item->m_PrevPool; }
+ 7483 static ItemType* GetNext(const ItemType* item) { return item->m_NextPool; }
+ 7484 static ItemType*& AccessPrev(ItemType* item) { return item->m_PrevPool; }
+ 7485 static ItemType*& AccessNext(ItemType* item) { return item->m_NextPool; }
+
+
+
+
+
+
+
+
+
+ 7495 class VmaDefragmentationAlgorithm
+
+ 7497 VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
+
+ 7499 VmaDefragmentationAlgorithm(
+
+ 7501 VmaBlockVector* pBlockVector,
+ 7502 uint32_t currentFrameIndex) :
+ 7503 m_hAllocator(hAllocator),
+ 7504 m_pBlockVector(pBlockVector),
+ 7505 m_CurrentFrameIndex(currentFrameIndex)
+
+
+ 7508 virtual ~VmaDefragmentationAlgorithm()
+
+
+
+ 7512 virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
+ 7513 virtual void AddAll() = 0;
+
+ 7515 virtual VkResult Defragment(
+ 7516 VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
+ 7517 VkDeviceSize maxBytesToMove,
+ 7518 uint32_t maxAllocationsToMove,
+
+
+ 7521 virtual VkDeviceSize GetBytesMoved() const = 0;
+ 7522 virtual uint32_t GetAllocationsMoved() const = 0;
+
+
+
+ 7526 VmaBlockVector* const m_pBlockVector;
+ 7527 const uint32_t m_CurrentFrameIndex;
- 7529 if(bestIndex != SIZE_MAX)
-
- 7531 outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
- 7532 outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
+ 7529 struct AllocationInfo
+
+
+ 7532 VkBool32* m_pChanged;
- 7534 if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
-
-
- 7537 const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
- 7538 m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
- 7539 m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
-
-
-
-
- 7544 m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
-
+
+ 7535 m_hAllocation(VK_NULL_HANDLE),
+ 7536 m_pChanged(VMA_NULL)
+
+
+
+ 7540 m_hAllocation(hAlloc),
+ 7541 m_pChanged(pChanged)
+
+
+
+
-
-
-
-
-
-
-
- 7554 static const size_t MAX_COUNT = 4;
-
-
-
- 7558 size_t blockInfoIndex;
- 7559 VkDeviceSize offset;
-
- 7561 } m_FreeSpaces[MAX_COUNT];
-
-
- 7564 const bool m_OverlappingMoveSupported;
-
- 7566 uint32_t m_AllocationCount;
- 7567 bool m_AllAllocations;
-
- 7569 VkDeviceSize m_BytesMoved;
- 7570 uint32_t m_AllocationsMoved;
-
- 7572 VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
+ 7547 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
+
+ 7549 VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
+
+ 7551 VmaDefragmentationAlgorithm_Generic(
+
+ 7553 VmaBlockVector* pBlockVector,
+ 7554 uint32_t currentFrameIndex,
+ 7555 bool overlappingMoveSupported);
+ 7556 virtual ~VmaDefragmentationAlgorithm_Generic();
+
+ 7558 virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
+ 7559 virtual void AddAll() { m_AllAllocations = true; }
+
+ 7561 virtual VkResult Defragment(
+ 7562 VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
+ 7563 VkDeviceSize maxBytesToMove,
+ 7564 uint32_t maxAllocationsToMove,
+
+
+ 7567 virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
+ 7568 virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
+
+
+ 7571 uint32_t m_AllocationCount;
+ 7572 bool m_AllAllocations;
- 7574 void PreprocessMetadata();
- 7575 void PostprocessMetadata();
- 7576 void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
-
-
- 7579 struct VmaBlockDefragmentationContext
-
-
-
- 7583 BLOCK_FLAG_USED = 0x00000001,
-
-
-
-
-
- 7589 class VmaBlockVectorDefragmentationContext
-
- 7591 VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
-
-
-
- 7595 VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
- 7596 VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > defragmentationMoves;
- 7597 uint32_t defragmentationMovesProcessed;
- 7598 uint32_t defragmentationMovesCommitted;
- 7599 bool hasDefragmentationPlan;
-
- 7601 VmaBlockVectorDefragmentationContext(
-
-
- 7604 VmaBlockVector* pBlockVector,
- 7605 uint32_t currFrameIndex);
- 7606 ~VmaBlockVectorDefragmentationContext();
+ 7574 VkDeviceSize m_BytesMoved;
+ 7575 uint32_t m_AllocationsMoved;
+
+ 7577 struct AllocationInfoSizeGreater
+
+ 7579 bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
+
+ 7581 return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
+
+
+
+ 7585 struct AllocationInfoOffsetGreater
+
+ 7587 bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
+
+ 7589 return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
+
+
+
+
+
+ 7595 size_t m_OriginalBlockIndex;
+ 7596 VmaDeviceMemoryBlock* m_pBlock;
+ 7597 bool m_HasNonMovableAllocations;
+ 7598 VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
+
+ 7600 BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
+ 7601 m_OriginalBlockIndex(SIZE_MAX),
+
+ 7603 m_HasNonMovableAllocations(true),
+ 7604 m_Allocations(pAllocationCallbacks)
+
+
- 7608 VmaPool GetCustomPool() const { return m_hCustomPool; }
- 7609 VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
- 7610 VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
-
- 7612 void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
- 7613 void AddAll() { m_AllAllocations = true; }
+ 7608 void CalcHasNonMovableAllocations()
+
+ 7610 const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
+ 7611 const size_t defragmentAllocCount = m_Allocations.size();
+ 7612 m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
+
-
-
-
-
-
-
-
- 7622 VmaBlockVector* const m_pBlockVector;
- 7623 const uint32_t m_CurrFrameIndex;
-
- 7625 VmaDefragmentationAlgorithm* m_pAlgorithm;
-
-
-
-
-
-
-
- 7633 VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
- 7634 bool m_AllAllocations;
-
-
- 7637 struct VmaDefragmentationContext_T
-
-
- 7640 VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
-
- 7642 VmaDefragmentationContext_T(
-
- 7644 uint32_t currFrameIndex,
-
-
- 7647 ~VmaDefragmentationContext_T();
-
- 7649 void AddPools(uint32_t poolCount, const VmaPool* pPools);
- 7650 void AddAllocations(
- 7651 uint32_t allocationCount,
-
- 7653 VkBool32* pAllocationsChanged);
-
-
-
-
-
-
-
- 7661 VkResult Defragment(
- 7662 VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
- 7663 VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
-
-
-
- 7667 VkResult DefragmentPassEnd();
+ 7615 void SortAllocationsBySizeDescending()
+
+ 7617 VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
+
+
+ 7620 void SortAllocationsByOffsetDescending()
+
+ 7622 VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
+
+
+
+ 7626 struct BlockPointerLess
+
+ 7628 bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
+
+ 7630 return pLhsBlockInfo->m_pBlock < pRhsBlock;
+
+ 7632 bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
+
+ 7634 return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
+
+
+
+
+
+ 7640 struct BlockInfoCompareMoveDestination
+
+ 7642 bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
+
+ 7644 if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
+
+
+
+ 7648 if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
+
+
+
+ 7652 if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
+
+
+
+
+
+
+
+ 7660 typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
+ 7661 BlockInfoVector m_Blocks;
+
+ 7663 VkResult DefragmentRound(
+ 7664 VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
+ 7665 VkDeviceSize maxBytesToMove,
+ 7666 uint32_t maxAllocationsToMove,
+ 7667 bool freeOldAllocations);
-
-
- 7671 const uint32_t m_CurrFrameIndex;
- 7672 const uint32_t m_Flags;
-
-
- 7675 VkDeviceSize m_MaxCpuBytesToMove;
- 7676 uint32_t m_MaxCpuAllocationsToMove;
- 7677 VkDeviceSize m_MaxGpuBytesToMove;
- 7678 uint32_t m_MaxGpuAllocationsToMove;
-
-
- 7681 VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
-
- 7683 VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
-
-
- 7686 #if VMA_RECORDING_ENABLED
-
-
-
-
-
-
- 7693 void WriteConfiguration(
- 7694 const VkPhysicalDeviceProperties& devProps,
- 7695 const VkPhysicalDeviceMemoryProperties& memProps,
- 7696 uint32_t vulkanApiVersion,
- 7697 bool dedicatedAllocationExtensionEnabled,
- 7698 bool bindMemory2ExtensionEnabled,
- 7699 bool memoryBudgetExtensionEnabled,
- 7700 bool deviceCoherentMemoryExtensionEnabled);
-
-
- 7703 void RecordCreateAllocator(uint32_t frameIndex);
- 7704 void RecordDestroyAllocator(uint32_t frameIndex);
- 7705 void RecordCreatePool(uint32_t frameIndex,
-
-
- 7708 void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
- 7709 void RecordAllocateMemory(uint32_t frameIndex,
- 7710 const VkMemoryRequirements& vkMemReq,
-
-
- 7713 void RecordAllocateMemoryPages(uint32_t frameIndex,
- 7714 const VkMemoryRequirements& vkMemReq,
-
- 7716 uint64_t allocationCount,
-
- 7718 void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
- 7719 const VkMemoryRequirements& vkMemReq,
- 7720 bool requiresDedicatedAllocation,
- 7721 bool prefersDedicatedAllocation,
-
-
- 7724 void RecordAllocateMemoryForImage(uint32_t frameIndex,
- 7725 const VkMemoryRequirements& vkMemReq,
- 7726 bool requiresDedicatedAllocation,
- 7727 bool prefersDedicatedAllocation,
-
-
- 7730 void RecordFreeMemory(uint32_t frameIndex,
-
- 7732 void RecordFreeMemoryPages(uint32_t frameIndex,
- 7733 uint64_t allocationCount,
-
- 7735 void RecordSetAllocationUserData(uint32_t frameIndex,
-
- 7737 const void* pUserData);
- 7738 void RecordCreateLostAllocation(uint32_t frameIndex,
-
- 7740 void RecordMapMemory(uint32_t frameIndex,
-
- 7742 void RecordUnmapMemory(uint32_t frameIndex,
-
- 7744 void RecordFlushAllocation(uint32_t frameIndex,
- 7745 VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
- 7746 void RecordInvalidateAllocation(uint32_t frameIndex,
- 7747 VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
- 7748 void RecordCreateBuffer(uint32_t frameIndex,
- 7749 const VkBufferCreateInfo& bufCreateInfo,
-
-
- 7752 void RecordCreateImage(uint32_t frameIndex,
- 7753 const VkImageCreateInfo& imageCreateInfo,
-
-
- 7756 void RecordDestroyBuffer(uint32_t frameIndex,
-
- 7758 void RecordDestroyImage(uint32_t frameIndex,
-
- 7760 void RecordTouchAllocation(uint32_t frameIndex,
-
- 7762 void RecordGetAllocationInfo(uint32_t frameIndex,
-
- 7764 void RecordMakePoolAllocationsLost(uint32_t frameIndex,
-
- 7766 void RecordDefragmentationBegin(uint32_t frameIndex,
-
-
- 7769 void RecordDefragmentationEnd(uint32_t frameIndex,
-
- 7771 void RecordSetPoolName(uint32_t frameIndex,
-
-
+ 7669 size_t CalcBlocksWithNonMovableCount() const;
+
+ 7671 static bool MoveMakesSense(
+ 7672 size_t dstBlockIndex, VkDeviceSize dstOffset,
+ 7673 size_t srcBlockIndex, VkDeviceSize srcOffset);
+
+
+ 7676 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
+
+ 7678 VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
+
+ 7680 VmaDefragmentationAlgorithm_Fast(
+
+ 7682 VmaBlockVector* pBlockVector,
+ 7683 uint32_t currentFrameIndex,
+ 7684 bool overlappingMoveSupported);
+ 7685 virtual ~VmaDefragmentationAlgorithm_Fast();
+
+ 7687 virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
+ 7688 virtual void AddAll() { m_AllAllocations = true; }
+
+ 7690 virtual VkResult Defragment(
+ 7691 VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
+ 7692 VkDeviceSize maxBytesToMove,
+ 7693 uint32_t maxAllocationsToMove,
+
+
+ 7696 virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
+ 7697 virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
+
+
+
+
+ 7702 size_t origBlockIndex;
+
+
+ 7705 class FreeSpaceDatabase
+
+
+
+
+
+ 7711 s.blockInfoIndex = SIZE_MAX;
+ 7712 for(size_t i = 0; i < MAX_COUNT; ++i)
+
+ 7714 m_FreeSpaces[i] = s;
+
+
+
+ 7718 void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
+
+ 7720 if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
+
+
+
+
+
+ 7726 size_t bestIndex = SIZE_MAX;
+ 7727 for(size_t i = 0; i < MAX_COUNT; ++i)
+
+
+ 7730 if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
+
+
+
+
+ 7735 if(m_FreeSpaces[i].size < size &&
+ 7736 (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
+
+
+
+
+
+ 7742 if(bestIndex != SIZE_MAX)
+
+ 7744 m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
+ 7745 m_FreeSpaces[bestIndex].offset = offset;
+ 7746 m_FreeSpaces[bestIndex].size = size;
+
+
+
+ 7750 bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
+ 7751 size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
+
+ 7753 size_t bestIndex = SIZE_MAX;
+ 7754 VkDeviceSize bestFreeSpaceAfter = 0;
+ 7755 for(size_t i = 0; i < MAX_COUNT; ++i)
+
+
+ 7758 if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
+
+ 7760 const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
+
+ 7762 if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
+
+ 7764 const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
+
+ 7766 if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
+
+
+ 7769 bestFreeSpaceAfter = freeSpaceAfter;
+
+
+
+
[generated listing, removed hunk: the VmaRecorder::UserDataString helper class, old lines 7777-7790; re-listed verbatim at lines 8028-8036 below]
+             if(bestIndex != SIZE_MAX)
+             {
+                 outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
+                 outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
+
+                 // Leave this structure for remaining empty space.
+                 if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
+                 {
+                     const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
+                     m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
+                     m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
+                 }
+                 else
+                 {
+                     // This structure becomes invalid.
+                     m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
+                 }
+
+                 return true;
+             }
+
+             return false;
+         }
[generated listing, removed hunk: VmaRecorder private members (among them m_FileMutex and m_RecordingStartTime) plus GetBasicParams() and the PrintPointerList() template with its fprintf loop, old lines 7793-7812; re-listed verbatim at lines 8039-8058 below]
+     private:
+         static const size_t MAX_COUNT = 4;
+
+         struct FreeSpace
+         {
+             size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
+             VkDeviceSize offset;
+             VkDeviceSize size;
+         } m_FreeSpaces[MAX_COUNT];
+     };
+
+     const bool m_OverlappingMoveSupported;
+
+     uint32_t m_AllocationCount;
+     bool m_AllAllocations;
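The FreeSpaceDatabase above is small enough to restate outside the header. The following self-contained sketch reproduces the idea: a fixed-size cache of the few largest free ranges produced while compacting, so lookups cost O(MAX_COUNT) and need no dynamic allocation. DeviceSize, alignUp(), FreeSpaceCache and its constants are stand-ins of mine for VkDeviceSize, VmaAlignUp() and VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER, not VMA names.

#include <cstddef>
#include <cstdint>

using DeviceSize = uint64_t;
static DeviceSize alignUp(DeviceSize v, DeviceSize a) { return (v + a - 1) / a * a; }

class FreeSpaceCache
{
public:
    FreeSpaceCache() { for(size_t i = 0; i < MAX_COUNT; ++i) m_Spaces[i].block = SIZE_MAX; }

    // Remember a free range, evicting the smallest cached range if full.
    void registerSpace(size_t block, DeviceSize offset, DeviceSize size)
    {
        if(size < MIN_SIZE) return; // too small to be worth tracking
        size_t best = SIZE_MAX;
        for(size_t i = 0; i < MAX_COUNT; ++i)
        {
            if(m_Spaces[i].block == SIZE_MAX) { best = i; break; } // empty slot
            if(m_Spaces[i].size < size &&
                (best == SIZE_MAX || m_Spaces[i].size < m_Spaces[best].size))
                best = i; // weakest entry beaten by the new range
        }
        if(best != SIZE_MAX) m_Spaces[best] = { block, offset, size };
    }

    // Carve an aligned sub-range out of the cached range that leaves the most
    // space behind; shrink or invalidate that cache entry accordingly.
    bool fetch(DeviceSize alignment, DeviceSize size, size_t& outBlock, DeviceSize& outOffset)
    {
        size_t best = SIZE_MAX;
        DeviceSize bestAfter = 0;
        for(size_t i = 0; i < MAX_COUNT; ++i)
        {
            if(m_Spaces[i].block == SIZE_MAX) continue;
            const DeviceSize dst = alignUp(m_Spaces[i].offset, alignment);
            if(dst + size > m_Spaces[i].offset + m_Spaces[i].size) continue; // does not fit
            const DeviceSize after = m_Spaces[i].offset + m_Spaces[i].size - (dst + size);
            if(best == SIZE_MAX || after > bestAfter) { best = i; bestAfter = after; }
        }
        if(best == SIZE_MAX) return false;
        outBlock = m_Spaces[best].block;
        outOffset = alignUp(m_Spaces[best].offset, alignment);
        const DeviceSize used = (outOffset - m_Spaces[best].offset) + size;
        if(bestAfter >= MIN_SIZE) { m_Spaces[best].offset += used; m_Spaces[best].size -= used; }
        else m_Spaces[best].block = SIZE_MAX; // remainder too small, drop the entry
        return true;
    }

private:
    static const size_t MAX_COUNT = 4;
    static const DeviceSize MIN_SIZE = 16;
    struct Space { size_t block; DeviceSize offset; DeviceSize size; };
    Space m_Spaces[MAX_COUNT];
};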
[generated listing, removed hunk: old lines 7815-7919 covering the non-template PrintPointerList() overload, class VmaAllocationObjectAllocator, struct VmaCurrentBudgetData, and the head of struct VmaAllocator_T; all re-listed at lines 8061-8164 below, unchanged except the dedicated-allocation bookkeeping, which previously read:]
-     typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
-     AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
-     VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
[and which the new listing replaces with an intrusive linked list; see lines 8162-8164 below]
+     VkDeviceSize m_BytesMoved;
+     uint32_t m_AllocationsMoved;
+
+     VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
+
+     void PreprocessMetadata();
+     void PostprocessMetadata();
+     void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
+ };
+
+ struct VmaBlockDefragmentationContext
+ {
+     enum BLOCK_FLAG
+     {
+         BLOCK_FLAG_USED = 0x00000001,
+     };
+     uint32_t flags;
+     VkBuffer hBuffer;
+ };
+
+ class VmaBlockVectorDefragmentationContext
+ {
+     VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
+ public:
+     VkResult res;
+     bool mutexLocked;
+     VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
+     VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > defragmentationMoves;
+     uint32_t defragmentationMovesProcessed;
+     uint32_t defragmentationMovesCommitted;
+     bool hasDefragmentationPlan;
+
+     VmaBlockVectorDefragmentationContext(
+         VmaAllocator hAllocator,
+         VmaPool hCustomPool, // Optional.
+         VmaBlockVector* pBlockVector,
+         uint32_t currFrameIndex);
+     ~VmaBlockVectorDefragmentationContext();
+
+     VmaPool GetCustomPool() const { return m_hCustomPool; }
+     VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
+     VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
+
+     void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
+     void AddAll() { m_AllAllocations = true; }
+
+ private:
+     const VmaAllocator m_hAllocator;
+     // Null if not from custom pool.
+     const VmaPool m_hCustomPool;
+     VmaBlockVector* const m_pBlockVector;
+     const uint32_t m_CurrFrameIndex;
+     // Owner of this object.
+     VmaDefragmentationAlgorithm* m_pAlgorithm;
+
+     struct AllocInfo
+     {
+         VmaAllocation hAlloc;
+         VkBool32* pChanged;
+     };
+     // Used between constructor and Begin.
+     VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
+     bool m_AllAllocations;
+ };
+
+ struct VmaDefragmentationContext_T
+ {
+ private:
+     VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
+ public:
+     VmaDefragmentationContext_T(
+         VmaAllocator hAllocator,
+         uint32_t currFrameIndex,
+         uint32_t flags,
+         VmaDefragmentationStats* pStats);
+     ~VmaDefragmentationContext_T();
+
+     void AddPools(uint32_t poolCount, const VmaPool* pPools);
+     void AddAllocations(
+         uint32_t allocationCount,
+         const VmaAllocation* pAllocations,
+         VkBool32* pAllocationsChanged);
+
+     // Performs the defragmentation, limited separately for CPU- and GPU-side moves.
+     VkResult Defragment(
+         VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
+         VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
+         VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags);
+
+     VkResult DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo);
+     VkResult DefragmentPassEnd();
+
+ private:
+     const VmaAllocator m_hAllocator;
+     const uint32_t m_CurrFrameIndex;
+     const uint32_t m_Flags;
+     VmaDefragmentationStats* const m_pStats;
[generated listing, removed hunk: remainder of the old struct VmaAllocator_T, lines 7921-8118: the budget member, accessor helpers from GetAllocationCallbacks() through GetGlobalMemoryTypeBits(), the recorder accessor, memory-requirement queries, AllocateMemory()/FreeMemory(), CalculateStats()/GetBudget(), the defragmentation and pool entry points, bind/flush/fill helpers, and private members including the VmaVector-based pool list. Everything reappears at lines 8166-8361 below except:]
-     VkResult ResizeAllocation(
-         const VmaAllocation alloc,
-         VkDeviceSize newSize);
[which is dropped for good, consistent with the removal of the deprecated vmaResizeAllocation() from the public interface]
+     VkDeviceSize m_MaxCpuBytesToMove;
+     uint32_t m_MaxCpuAllocationsToMove;
+     VkDeviceSize m_MaxGpuBytesToMove;
+     uint32_t m_MaxGpuAllocationsToMove;
+
+     VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
+     VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
+ };
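The context above exposes defragmentation as bounded passes (DefragmentPassBegin()/DefragmentPassEnd()) rather than one blocking call, with separate CPU and GPU byte/allocation limits per invocation. A hedged sketch of the driving loop follows; Move, beginPass(), recordAndSubmitCopies() and endPass() are hypothetical stand-ins that mirror the pass methods for illustration only, not the public VMA entry points.

#include <cstdint>
#include <vector>

struct Move { uint32_t memoryTypeIndex; uint64_t srcOffset, dstOffset, size; };

bool beginPass(std::vector<Move>& outMoves);           // fills outMoves; false when nothing is left to do
void recordAndSubmitCopies(const std::vector<Move>&);  // e.g. one vkCmdCopyBuffer per move, then submit and wait
void endPass();                                        // commits the pass: allocations now point at their new places

void defragmentIncrementally()
{
    std::vector<Move> moves;
    while(beginPass(moves))    // each iteration is one bounded pass
    {
        recordAndSubmitCopies(moves);
        endPass();
        moves.clear();
    }
}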
[generated listing, added hunk: lines 7932-8121 re-listing class VmaRecorder (WriteConfiguration(), the full set of Record*() methods, the UserDataString helper, and the private file/mutex/timing members with GetBasicParams() and PrintPointerList()), class VmaAllocationObjectAllocator, and struct VmaCurrentBudgetData with its per-heap atomic counters and AddAllocation(); identical to the removed old-numbered text above, re-listed here only because the listing shifted]
- #if VMA_STATIC_VULKAN_FUNCTIONS == 1
-     void ImportVulkanFunctions_Static();
- #endif
-
- #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
-     void ImportVulkanFunctions_Dynamic();
- #endif
+     void RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
+     {
+         VMA_ASSERT(m_AllocationBytes[heapIndex] >= allocationSize);
+         m_AllocationBytes[heapIndex] -= allocationSize;
+ #if VMA_MEMORY_BUDGET
+         ++m_OperationsSinceBudgetFetch;
+ #endif
+     }
+ };
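VmaCurrentBudgetData keeps plain per-heap atomics rather than a locked structure: RemoveAllocation() mirrors AddAllocation(), and both bump m_OperationsSinceBudgetFetch so the owner knows when its cached VK_EXT_memory_budget numbers have gone stale. A minimal sketch of the same pattern; MAX_HEAPS and the refetch threshold are assumptions of this sketch, not VMA's constants.

#include <atomic>
#include <cstdint>

struct HeapBudget
{
    static const uint32_t MAX_HEAPS = 16;

    std::atomic<uint64_t> allocationBytes[MAX_HEAPS] = {};  // bytes currently allocated per heap
    std::atomic<uint32_t> opsSinceFetch{0};                 // operations since the budget was last queried

    void addAllocation(uint32_t heap, uint64_t size)
    {
        allocationBytes[heap] += size;  // atomic fetch_add
        ++opsSinceFetch;                // the cached budget is now one step staler
    }

    void removeAllocation(uint32_t heap, uint64_t size)
    {
        allocationBytes[heap] -= size;  // caller guarantees no underflow, as VMA_ASSERT does above
        ++opsSinceFetch;
    }

    bool shouldRefetchBudget() const { return opsSinceFetch.load() >= 30; } // threshold is an assumption
};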
[generated listing, removed hunk: old lines 8133-8246 covering ValidateVulkanFunctions(), CalcPreferredBlockSize(), AllocateMemoryOfType(), AllocateDedicatedMemoryPage(), AllocateDedicatedMemory(), the memory-type-bits calculators, GetFlushOrInvalidateRange(), UpdateVulkanBudget(), and the VmaMalloc/VmaFree/vma_new/vma_delete/vma_delete_array helper templates; all re-listed verbatim at lines 8366-8489 below]
+ struct VmaAllocator_T
+ {
+     VMA_CLASS_NO_COPY(VmaAllocator_T)
+ public:
+     uint32_t m_VulkanApiVersion;
+     bool m_UseKhrDedicatedAllocation;
+     bool m_UseKhrBindMemory2;
+     bool m_UseExtMemoryBudget;
+     bool m_UseAmdDeviceCoherentMemory;
+     bool m_UseKhrBufferDeviceAddress;
+     bool m_UseExtMemoryPriority;
+
+     VkInstance m_hInstance;
+     bool m_AllocationCallbacksSpecified;
+     VkAllocationCallbacks m_AllocationCallbacks;
+
+     VmaAllocationObjectAllocator m_AllocationObjectAllocator;
+
+     uint32_t m_HeapSizeLimitMask;
+
+     VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
+     VkPhysicalDeviceMemoryProperties m_MemProps;
+
+     // Default pools.
+     VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
+
+     typedef VmaIntrusiveLinkedList<VmaDedicatedAllocationListItemTraits> DedicatedAllocationLinkedList;
+     DedicatedAllocationLinkedList m_DedicatedAllocations[VK_MAX_MEMORY_TYPES];
+     VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
+
+     VmaCurrentBudgetData m_Budget;
+     VMA_ATOMIC_UINT32 m_DeviceMemoryCount;
[accessors and allocation entry points, lines 8168-8245, from GetAllocationCallbacks() through AllocateMemory(), re-listed unchanged from the removed old-numbered text above]
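The visible change in this hunk is the bookkeeping for dedicated allocations: the old listing stored them in a heap-allocated VmaVector per memory type, the new one threads them onto a VmaIntrusiveLinkedList, so unregistering an allocation is O(1) pointer surgery instead of an O(n) vector search and erase. A simplified sketch of the idea; the item type and list below are mine, not VMA's traits-based template.

struct DedicatedItem
{
    DedicatedItem* prev = nullptr;  // links live inside the item itself,
    DedicatedItem* next = nullptr;  // no separate container node is allocated
};

class IntrusiveList
{
public:
    void pushBack(DedicatedItem* it)
    {
        it->prev = m_Back;
        it->next = nullptr;
        if(m_Back) m_Back->next = it; else m_Front = it;
        m_Back = it;
    }

    void remove(DedicatedItem* it)  // O(1): the item already knows its neighbors
    {
        if(it->prev) it->prev->next = it->next; else m_Front = it->next;
        if(it->next) it->next->prev = it->prev; else m_Back = it->prev;
        it->prev = it->next = nullptr;
    }

private:
    DedicatedItem* m_Front = nullptr;
    DedicatedItem* m_Back = nullptr;
};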
[generated listing, interleaved hunks. Removed: old lines 8248-8311, class VmaStringBuilder with its Add()/AddNumber()/AddPointer() implementations, and old lines 8317-8368, the declaration of class VmaJsonWriter; both re-listed at lines 8491-8611 below. Added: the rest of the new struct VmaAllocator_T, lines 8248-8436: FreeMemory(), CalculateStats(), GetBudget(), PrintDetailedMap(), DefragmentationBegin()/End()/PassBegin()/PassEnd(), pool management, frame-index tracking, corruption checks, AllocateVulkanMemory()/FreeVulkanMemory(), the bind and flush/invalidate helpers, FillAllocation(), GetGpuDefragmentationMemoryTypeBits(), the Vulkan-function import/validation helpers, and private members. Unchanged from the old text except the pool list, which also becomes intrusive:]
+     VMA_RW_MUTEX m_PoolsMutex;
+     typedef VmaIntrusiveLinkedList<VmaPoolListItemTraits> PoolList;
+     // Protected by m_PoolsMutex.
+     PoolList m_Pools;
+     uint32_t m_NextPoolId;
[where the old listing had a VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools]
[generated listing, interleaved hunks. Removed: old lines 8443-8601, the VmaJsonWriter implementation (BeginString(), ContinueString() with its character-escaping switch, EndString(), WriteNumber()/WriteBool()/WriteNull(), BeginValue(), and the head of WriteIndent()); re-listed at lines 8682-8843 below. Added: lines 8443-8487, the VmaMalloc/VmaFree/vma_new/vma_delete/vma_delete_array helpers, unchanged apart from renumbering]
[generated listing, added hunk: lines 8489-8585 re-listing class VmaStringBuilder with its Add()/AddNumber()/AddPointer() implementations and the public declaration of class VmaJsonWriter; identical to the removed old-numbered text, re-listed after the shift]
[generated listing, interleaved hunks. Added: lines 8588-8843, the private part and implementation of VmaJsonWriter (the INDENT constant, the StackItem stack, constructor and destructor, BeginObject()/EndObject(), BeginArray()/EndArray(), the string/number/bool/null writers, BeginValue() with its key/value-parity assertion, and WriteIndent()). Removed: the old-numbered copies of VmaJsonWriter::WriteIndent() and of the VmaAllocation_T methods (SetUserData, ChangeBlockAllocation, ChangeOffset, GetOffset, GetMemory, GetMappedData, CanBecomeLost, MakeLost, PrintParameters, FreeUserDataString, and the block/dedicated map and unmap helpers), old lines 8588-8905; these reappear below at lines 8847-9186]
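The JSON writer listed above is consumed only by the stats-printing code, but its contract is easy to misread from the declarations alone, so here is an illustrative use. The snippet is mine (not lifted from the header); it assumes a valid VmaAllocator handle named hAllocator and relies only on members shown in the listing: inside an object, WriteString() alternates between key and value, which BeginValue() asserts.

#if VMA_STATS_STRING_ENABLED
void PrintExampleJson(VmaAllocator hAllocator, VkDeviceSize totalBytes)
{
    VmaStringBuilder sb(hAllocator);
    {
        VmaJsonWriter json(hAllocator->GetAllocationCallbacks(), sb);
        json.BeginObject();
        json.WriteString("TotalBytes");  // key position: a string is required here
        json.WriteNumber(totalBytes);    // value (uint64_t overload, VkDeviceSize is 64-bit)
        json.WriteString("Empty");
        json.WriteBool(totalBytes == 0);
        json.EndObject();                // destructor-time asserts check the stack balances
    }
    // sb.GetData()/sb.GetLength() now hold the serialized JSON text.
}
#endif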
[generated listing, interleaved hunks. Added: lines 8847-8958, the VmaAllocation_T method implementations re-listed after the shift (SetUserData, ChangeBlockAllocation, ChangeOffset, GetOffset, GetMemory, GetMappedData, CanBecomeLost). Removed: the old-numbered copies of VmaPrintStatInfo() and the VmaSuballocationItemSizeLess comparator, old lines 8908-8957, which reappear at lines 9138-9201 below. The one implementation worth reading in full here is MakeLost(), the lock-free transition of an allocation to the lost state:]
+ bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
+ {
+     VMA_ASSERT(CanBecomeLost());
+
+     /*
+     Warning: This is a carefully designed algorithm.
+     Do not modify unless you really know what you're doing :)
+     */
+     uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
+     for(;;)
+     {
+         if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
+         {
+             VMA_ASSERT(0);
+             return false;
+         }
+         else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
+         {
+             return false;
+         }
+         else // Last use time earlier than current time.
+         {
+             if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
+             {
+                 // Setting the atomic to VMA_FRAME_INDEX_LOST is enough to mark the allocation as lost.
+                 return true;
+             }
+         }
+     }
+ }
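MakeLost() never takes a lock: the last-use frame index is an atomic that gets flipped to a sentinel with compare-exchange, so a concurrent touch either sees the old frame index or the LOST sentinel, never a torn value. A standalone sketch of that protocol; the constant and the function names are mine.

#include <atomic>
#include <cstdint>

static const uint32_t FRAME_INDEX_LOST = UINT32_MAX;

bool makeLost(std::atomic<uint32_t>& lastUseFrame,
              uint32_t currentFrame, uint32_t frameInUseCount)
{
    uint32_t localLastUse = lastUseFrame.load();
    for(;;)
    {
        if(localLastUse == FRAME_INDEX_LOST)
            return false;                                // already lost
        if(localLastUse + frameInUseCount >= currentFrame)
            return false;                                // still potentially in use by the GPU
        if(lastUseFrame.compare_exchange_weak(localLastUse, FRAME_INDEX_LOST))
            return true;                                 // we won the race; allocation is now lost
        // CAS failed: localLastUse was reloaded with the fresh value, retry.
    }
}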
+ 8993 #if VMA_STATS_STRING_ENABLED
+
+
+ 8996 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
+
+
+
+
+
+
+
- 9005 hAllocation->PrintParameters(json);
-
-
-
+ 9005 void VmaAllocation_T::PrintParameters(
class VmaJsonWriter& json)
const
+
+ 9007 json.WriteString(
"Type");
+ 9008 json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
- 9010 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(
class VmaJsonWriter& json,
- 9011 VkDeviceSize offset,
- 9012 VkDeviceSize size)
const
-
- 9014 json.BeginObject(
true);
-
- 9016 json.WriteString(
"Offset");
- 9017 json.WriteNumber(offset);
-
- 9019 json.WriteString(
"Type");
- 9020 json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
-
- 9022 json.WriteString(
"Size");
- 9023 json.WriteNumber(size);
-
-
-
+ 9010 json.WriteString(
"Size");
+ 9011 json.WriteNumber(m_Size);
+
+ 9013 if(m_pUserData != VMA_NULL)
+
+ 9015 json.WriteString(
"UserData");
+ 9016 if(IsUserDataString())
+
+ 9018 json.WriteString((
const char*)m_pUserData);
+
+
+
+
+ 9023 json.ContinueString_Pointer(m_pUserData);
+
+
+
- 9028 void VmaBlockMetadata::PrintDetailedMap_End(
class VmaJsonWriter& json)
const
-
-
-
-
+ 9028 json.WriteString(
"CreationFrameIndex");
+ 9029 json.WriteNumber(m_CreationFrameIndex);
+
+ 9031 json.WriteString(
"LastUseFrameIndex");
+ 9032 json.WriteNumber(GetLastUseFrameIndex());
-
-
-
-
- 9039 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(
VmaAllocator hAllocator) :
- 9040 VmaBlockMetadata(hAllocator),
-
-
- 9043 m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
- 9044 m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
-
-
-
- 9048 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
-
-
-
- 9052 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
-
- 9054 VmaBlockMetadata::Init(size);
-
-
- 9057 m_SumFreeSize = size;
-
- 9059 VmaSuballocation suballoc = {};
- 9060 suballoc.offset = 0;
- 9061 suballoc.size = size;
- 9062 suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
- 9063 suballoc.hAllocation = VK_NULL_HANDLE;
-
- 9065 VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
- 9066 m_Suballocations.push_back(suballoc);
- 9067 VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
-
- 9069 m_FreeSuballocationsBySize.push_back(suballocItem);
-
-
- 9072 bool VmaBlockMetadata_Generic::Validate()
const
-
- 9074 VMA_VALIDATE(!m_Suballocations.empty());
-
-
- 9077 VkDeviceSize calculatedOffset = 0;
-
- 9079 uint32_t calculatedFreeCount = 0;
-
- 9081 VkDeviceSize calculatedSumFreeSize = 0;
-
-
- 9084 size_t freeSuballocationsToRegister = 0;
-
- 9086 bool prevFree =
false;
-
- 9088 for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
- 9089 suballocItem != m_Suballocations.cend();
-
-
- 9092 const VmaSuballocation& subAlloc = *suballocItem;
-
-
- 9095 VMA_VALIDATE(subAlloc.offset == calculatedOffset);
-
- 9097 const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
-
- 9099 VMA_VALIDATE(!prevFree || !currFree);
-
- 9101 VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
-
-
-
- 9105 calculatedSumFreeSize += subAlloc.size;
- 9106 ++calculatedFreeCount;
- 9107 if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
-
- 9109 ++freeSuballocationsToRegister;
-
-
-
- 9113 VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
-
-
-
- 9117 VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
- 9118 VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
-
-
- 9121 VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
-
-
- 9124 calculatedOffset += subAlloc.size;
- 9125 prevFree = currFree;
-
-
-
-
- 9130 VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
-
- 9132 VkDeviceSize lastSize = 0;
- 9133 for(
size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
-
- 9135 VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
-
-
- 9138 VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
-
- 9140 VMA_VALIDATE(suballocItem->size >= lastSize);
+ 9034 if(m_BufferImageUsage != 0)
+
+ 9036 json.WriteString(
"Usage");
+ 9037 json.WriteNumber(m_BufferImageUsage);
+
+
+
+
+
+ 9043 void VmaAllocation_T::FreeUserDataString(
VmaAllocator hAllocator)
+
+ 9045 VMA_ASSERT(IsUserDataString());
+ 9046 VmaFreeString(hAllocator->GetAllocationCallbacks(), (
char*)m_pUserData);
+ 9047 m_pUserData = VMA_NULL;
+
+
+ 9050 void VmaAllocation_T::BlockAllocMap()
+
+ 9052 VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
+
+ 9054 if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
+
+
+
+
+
+ 9060 VMA_ASSERT(0 &&
"Allocation mapped too many times simultaneously.");
+
+
+
+ 9064 void VmaAllocation_T::BlockAllocUnmap()
+
+ 9066 VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
+
+ 9068 if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
+
+
+
+
+
+ 9074 VMA_ASSERT(0 &&
"Unmapping allocation not previously mapped.");
+
+
+
+ 9078 VkResult VmaAllocation_T::DedicatedAllocMap(
VmaAllocator hAllocator,
void** ppData)
+
+ 9080 VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
+
+
+
+ 9084 if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
+
+ 9086 VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
+ 9087 *ppData = m_DedicatedAllocation.m_pMappedData;
+
+
+
+
+
+ 9093 VMA_ASSERT(0 &&
"Dedicated allocation mapped too many times simultaneously.");
+ 9094 return VK_ERROR_MEMORY_MAP_FAILED;
+
+
+
+
+ 9099 VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
+ 9100 hAllocator->m_hDevice,
+ 9101 m_DedicatedAllocation.m_hMemory,
+
+
+
+
+ 9106 if(result == VK_SUCCESS)
+
+ 9108 m_DedicatedAllocation.m_pMappedData = *ppData;
+
+
+
+
+
+
+ 9115 void VmaAllocation_T::DedicatedAllocUnmap(
VmaAllocator hAllocator)
+
+ 9117 VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
+
+ 9119 if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
+
+
+
+
+ 9124 m_DedicatedAllocation.m_pMappedData = VMA_NULL;
+ 9125 (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
+ 9126 hAllocator->m_hDevice,
+ 9127 m_DedicatedAllocation.m_hMemory);
+
+
+
+
+ 9132 VMA_ASSERT(0 &&
"Unmapping dedicated allocation not previously mapped.");
+
+
+
+ 
+ #if VMA_STATS_STRING_ENABLED
+ 
+ static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
+ {
+     json.BeginObject();
+ 
+     json.WriteString("Blocks");
+     json.WriteNumber(stat.blockCount);
+ 
+     json.WriteString("Allocations");
+     json.WriteNumber(stat.allocationCount);
+ 
+     json.WriteString("UnusedRanges");
+     json.WriteNumber(stat.unusedRangeCount);
+ 
+     json.WriteString("UsedBytes");
+     json.WriteNumber(stat.usedBytes);
+ 
+     json.WriteString("UnusedBytes");
+     json.WriteNumber(stat.unusedBytes);
+ 
+     if(stat.allocationCount > 1)
+     {
+         json.WriteString("AllocationSize");
+         json.BeginObject(true);
+         json.WriteString("Min");
+         json.WriteNumber(stat.allocationSizeMin);
+         json.WriteString("Avg");
+         json.WriteNumber(stat.allocationSizeAvg);
+         json.WriteString("Max");
+         json.WriteNumber(stat.allocationSizeMax);
+         json.EndObject();
+     }
+ 
+     if(stat.unusedRangeCount > 1)
+     {
+         json.WriteString("UnusedRangeSize");
+         json.BeginObject(true);
+         json.WriteString("Min");
+         json.WriteNumber(stat.unusedRangeSizeMin);
+         json.WriteString("Avg");
+         json.WriteNumber(stat.unusedRangeSizeAvg);
+         json.WriteString("Max");
+         json.WriteNumber(stat.unusedRangeSizeMax);
+         json.EndObject();
+     }
+ 
+     json.EndObject();
+ }
+ 
+ #endif // #if VMA_STATS_STRING_ENABLED
+ 
+ struct VmaSuballocationItemSizeLess
+ {
+     bool operator()(
+         const VmaSuballocationList::iterator lhs,
+         const VmaSuballocationList::iterator rhs) const
+     {
+         return lhs->size < rhs->size;
+     }
+     bool operator()(
+         const VmaSuballocationList::iterator lhs,
+         VkDeviceSize rhsSize) const
+     {
+         return lhs->size < rhsSize;
+     }
+ };
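Because the comparator above is overloaded for both iterator/iterator and iterator/size comparisons, the size-sorted vector of list iterators can be binary-searched directly by a byte count, which is what VmaBinaryFindFirstNotLess relies on. A hedged sketch of the same idea using the standard library (simplified types, not the library's own code):

    #include <algorithm>
    #include <cstdint>
    #include <list>
    #include <vector>

    struct Suballoc { uint64_t size; };
    using SuballocList = std::list<Suballoc>;

    struct ItemSizeLess
    {
        bool operator()(SuballocList::iterator lhs, SuballocList::iterator rhs) const
            { return lhs->size < rhs->size; }
        bool operator()(SuballocList::iterator lhs, uint64_t rhsSize) const
            { return lhs->size < rhsSize; }
    };

    // First registered free range whose size is >= requestedSize, or end().
    std::vector<SuballocList::iterator>::iterator
    FindFirstNotLess(std::vector<SuballocList::iterator>& bySize, uint64_t requestedSize)
    {
        return std::lower_bound(bySize.begin(), bySize.end(), requestedSize, ItemSizeLess());
    }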
+ 
+ ////////////////////////////////////////////////////////////////////////////////
+ // class VmaBlockMetadata
+ 
+ VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
+     m_Size(0),
+     m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
+ {
+ }
+ 
+ #if VMA_STATS_STRING_ENABLED
+ 
+ void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
+     VkDeviceSize unusedBytes,
+     size_t allocationCount,
+     size_t unusedRangeCount) const
+ {
+     json.BeginObject();
+ 
+     json.WriteString("TotalBytes");
+     json.WriteNumber(GetSize());
+ 
+     json.WriteString("UnusedBytes");
+     json.WriteNumber(unusedBytes);
+ 
+     json.WriteString("Allocations");
+     json.WriteNumber((uint64_t)allocationCount);
+ 
+     json.WriteString("UnusedRanges");
+     json.WriteNumber((uint64_t)unusedRangeCount);
+ 
+     json.WriteString("Suballocations");
+     json.BeginArray();
+ }
+ 
+ void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
+     VkDeviceSize offset,
+     VmaAllocation hAllocation) const
+ {
+     json.BeginObject(true);
+ 
+     json.WriteString("Offset");
+     json.WriteNumber(offset);
+ 
+     hAllocation->PrintParameters(json);
+ 
+     json.EndObject();
+ }
+ 
+ void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
+     VkDeviceSize offset,
+     VkDeviceSize size) const
+ {
+     json.BeginObject(true);
+ 
+     json.WriteString("Offset");
+     json.WriteNumber(offset);
+ 
+     json.WriteString("Type");
+     json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
+ 
+     json.WriteString("Size");
+     json.WriteNumber(size);
+ 
+     json.EndObject();
+ }
+ 
+ void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
+ {
+     json.EndArray();
+     json.EndObject();
+ }
+ 
+ #endif // #if VMA_STATS_STRING_ENABLED
+ 
+ ////////////////////////////////////////////////////////////////////////////////
+ // class VmaBlockMetadata_Generic
+ 
+ VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
+     VmaBlockMetadata(hAllocator),
+     m_FreeCount(0),
+     m_SumFreeSize(0),
+     m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
+     m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
+ {
+ }
+ 
+ VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
+ {
+ }
+ 
+ void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
+ {
+     VmaBlockMetadata::Init(size);
+ 
+     m_FreeCount = 1;
+     m_SumFreeSize = size;
+ 
+     VmaSuballocation suballoc = {};
+     suballoc.offset = 0;
+     suballoc.size = size;
+     suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
+     suballoc.hAllocation = VK_NULL_HANDLE;
+ 
+     VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
+     m_Suballocations.push_back(suballoc);
+     VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
+     --suballocItem;
+     m_FreeSuballocationsBySize.push_back(suballocItem);
+ }
+ 
+ bool VmaBlockMetadata_Generic::Validate() const
+ {
+     VMA_VALIDATE(!m_Suballocations.empty());
+ 
+     // Expected offset of new suballocation as calculated from previous ones.
+     VkDeviceSize calculatedOffset = 0;
+     // Expected number of free suballocations as calculated from traversing their list.
+     uint32_t calculatedFreeCount = 0;
+     // Expected sum size of free suballocations as calculated from traversing their list.
+     VkDeviceSize calculatedSumFreeSize = 0;
+     // Expected number of free suballocations that should be registered in
+     // m_FreeSuballocationsBySize calculated from traversing their list.
+     size_t freeSuballocationsToRegister = 0;
+     // True if previous visited suballocation was free.
+     bool prevFree = false;
+ 
+     for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
+         suballocItem != m_Suballocations.cend();
+         ++suballocItem)
+     {
+         const VmaSuballocation& subAlloc = *suballocItem;
+ 
+         // Actual offset of this suballocation doesn't match expected one.
+         VMA_VALIDATE(subAlloc.offset == calculatedOffset);
+ 
+         const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
+         // Two adjacent free suballocations are invalid. They should be merged.
+         VMA_VALIDATE(!prevFree || !currFree);
+ 
+         VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
+ 
+         if(currFree)
+         {
+             calculatedSumFreeSize += subAlloc.size;
+             ++calculatedFreeCount;
+             if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
+             {
+                 ++freeSuballocationsToRegister;
+             }
+ 
+             // Margin required between allocations - every free space must be at least that large.
+             VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
+         }
+         else
+         {
+             VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
+             VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
+ 
+             // Margin required between allocations - previous allocation must be free.
+             VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
+         }
+ 
+         calculatedOffset += subAlloc.size;
+         prevFree = currFree;
+     }
+ 
+     // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
+     // match expected one.
+     VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
+ 
+     VkDeviceSize lastSize = 0;
+     for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
+     {
+         VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
+ 
+         // Only free suballocations can be registered in m_FreeSuballocationsBySize.
+         VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
+         // They must be sorted by size ascending.
+         VMA_VALIDATE(suballocItem->size >= lastSize);
+ 
+         lastSize = suballocItem->size;
+     }
+ 
+     // Check if totals match calculated values.
+     VMA_VALIDATE(ValidateFreeSuballocationList());
+     VMA_VALIDATE(calculatedOffset == GetSize());
+     VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
+     VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
+ 
+     return true;
+ }
+ 
+ VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
+ {
+     if(!m_FreeSuballocationsBySize.empty())
+     {
+         return m_FreeSuballocationsBySize.back()->size;
+     }
+     else
+     {
+         return 0;
+     }
+ }
+ 
+ bool VmaBlockMetadata_Generic::IsEmpty() const
+ {
+     return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
+ }
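Validate() leans on the VMA_VALIDATE macro, which returns false from the enclosing function when a check fails instead of asserting, so VMA_HEAVY_ASSERT(Validate()) can surface the failing invariant. A minimal sketch of the same pattern, assuming only that the enclosing function returns bool (names are illustrative):

    #include <cstdio>

    // Report the failing condition and bail out with `false`.
    #define MY_VALIDATE(cond) do { \
            if(!(cond)) { \
                std::fprintf(stderr, "Validation failed: %s\n", #cond); \
                return false; \
            } \
        } while(false)

    // Example invariant from the code above: suballocations must tile the block
    // with no gaps, i.e. each offset equals the running sum of previous sizes.
    bool CheckContiguous(const unsigned long long* offsets, const unsigned long long* sizes, int count)
    {
        unsigned long long expected = 0;
        for(int i = 0; i < count; ++i)
        {
            MY_VALIDATE(offsets[i] == expected);
            expected += sizes[i];
        }
        return true;
    }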
+ 
+ void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
+ {
+     outInfo.blockCount = 1;
+ 
+     const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
+     outInfo.allocationCount = rangeCount - m_FreeCount;
+     outInfo.unusedRangeCount = m_FreeCount;
+ 
+     outInfo.unusedBytes = m_SumFreeSize;
+     outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
+ 
+     outInfo.allocationSizeMin = UINT64_MAX;
+     outInfo.allocationSizeMax = 0;
+     outInfo.unusedRangeSizeMin = UINT64_MAX;
+     outInfo.unusedRangeSizeMax = 0;
+ 
+     for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
+         suballocItem != m_Suballocations.cend();
+         ++suballocItem)
+     {
+         const VmaSuballocation& suballoc = *suballocItem;
+         if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
+         {
+             outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
+             outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
+         }
+         else
+         {
+             outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
+             outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
+         }
+     }
+ }
+ 
+ void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
+ {
+     const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
+ 
+     inoutStats.size += GetSize();
+     inoutStats.unusedSize += m_SumFreeSize;
+     inoutStats.allocationCount += rangeCount - m_FreeCount;
+     inoutStats.unusedRangeCount += m_FreeCount;
+     inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
+ }
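The stat-gathering above follows a common pattern: track min/max while summing, and derive the average only at print time (as VmaPrintStatInfo does with allocationSizeAvg). A compact sketch of that pattern, with illustrative names only:

    #include <algorithm>
    #include <cstdint>

    struct RangeStats
    {
        uint32_t count = 0;
        uint64_t totalBytes = 0;
        uint64_t sizeMin = UINT64_MAX; // sentinel until the first Add()
        uint64_t sizeMax = 0;

        void Add(uint64_t size)
        {
            ++count;
            totalBytes += size;
            sizeMin = std::min(sizeMin, size);
            sizeMax = std::max(sizeMax, size);
        }
        // Average derived on demand, never stored.
        uint64_t SizeAvg() const { return count != 0 ? totalBytes / count : 0; }
    };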
+ 
+ #if VMA_STATS_STRING_ENABLED
+ 
+ void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
+ {
+     PrintDetailedMap_Begin(json,
+         m_SumFreeSize, // unusedBytes
+         m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
+         m_FreeCount); // unusedRangeCount
+ 
+     size_t i = 0;
+     for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
+         suballocItem != m_Suballocations.cend();
+         ++suballocItem, ++i)
+     {
+         if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
+         {
+             PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
+         }
+         else
+         {
+             PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
+         }
+     }
+ 
+     PrintDetailedMap_End(json);
+ }
+ 
+ #endif // #if VMA_STATS_STRING_ENABLED
+ 
+ bool VmaBlockMetadata_Generic::CreateAllocationRequest(
+     uint32_t currentFrameIndex,
+     uint32_t frameInUseCount,
+     VkDeviceSize bufferImageGranularity,
+     VkDeviceSize allocSize,
+     VkDeviceSize allocAlignment,
+     bool upperAddress,
+     VmaSuballocationType allocType,
+     bool canMakeOtherLost,
+     uint32_t strategy,
+     VmaAllocationRequest* pAllocationRequest)
+ {
+     VMA_ASSERT(allocSize > 0);
+     VMA_ASSERT(!upperAddress);
+     VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
+     VMA_ASSERT(pAllocationRequest != VMA_NULL);
+     VMA_HEAVY_ASSERT(Validate());
+ 
+     pAllocationRequest->type = VmaAllocationRequestType::Normal;
+ 
+     // There is not enough total free space in this block to fulfill the request:
+     // Early return.
+     if(canMakeOtherLost == false &&
+         m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
+     {
+         return false;
+     }
+ 
+     // New algorithm, efficiently searching freeSuballocationsBySize.
+     const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
+     if(freeSuballocCount > 0)
+     {
+         if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
+         {
+             // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
+             VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
+                 m_FreeSuballocationsBySize.data(),
+                 m_FreeSuballocationsBySize.data() + freeSuballocCount,
+                 allocSize + 2 * VMA_DEBUG_MARGIN,
+                 VmaSuballocationItemSizeLess());
+             size_t index = it - m_FreeSuballocationsBySize.data();
+             for(; index < freeSuballocCount; ++index)
+             {
+                 if(CheckAllocation(
+                     currentFrameIndex,
+                     frameInUseCount,
+                     bufferImageGranularity,
+                     allocSize,
+                     allocAlignment,
+                     allocType,
+                     m_FreeSuballocationsBySize[index],
+                     false, // canMakeOtherLost
+                     &pAllocationRequest->offset,
+                     &pAllocationRequest->itemsToMakeLostCount,
+                     &pAllocationRequest->sumFreeSize,
+                     &pAllocationRequest->sumItemSize))
+                 {
+                     pAllocationRequest->item = m_FreeSuballocationsBySize[index];
+                     return true;
+                 }
+             }
+         }
+         else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
+         {
+             for(VmaSuballocationList::iterator it = m_Suballocations.begin();
+                 it != m_Suballocations.end();
+                 ++it)
+             {
+                 if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
+                     currentFrameIndex,
+                     frameInUseCount,
+                     bufferImageGranularity,
+                     allocSize,
+                     allocAlignment,
+                     allocType,
+                     it,
+                     false, // canMakeOtherLost
+                     &pAllocationRequest->offset,
+                     &pAllocationRequest->itemsToMakeLostCount,
+                     &pAllocationRequest->sumFreeSize,
+                     &pAllocationRequest->sumItemSize))
+                 {
+                     pAllocationRequest->item = it;
+                     return true;
+                 }
+             }
+         }
+         else // WORST_FIT, FIRST_FIT
+         {
+             // Search starting from biggest suballocations.
+             for(size_t index = freeSuballocCount; index--; )
+             {
+                 if(CheckAllocation(
+                     currentFrameIndex,
+                     frameInUseCount,
+                     bufferImageGranularity,
+                     allocSize,
+                     allocAlignment,
+                     allocType,
+                     m_FreeSuballocationsBySize[index],
+                     false, // canMakeOtherLost
+                     &pAllocationRequest->offset,
+                     &pAllocationRequest->itemsToMakeLostCount,
+                     &pAllocationRequest->sumFreeSize,
+                     &pAllocationRequest->sumItemSize))
+                 {
+                     pAllocationRequest->item = m_FreeSuballocationsBySize[index];
+                     return true;
+                 }
+             }
+         }
+     }
+ 
+     if(canMakeOtherLost)
+     {
+         // Brute-force algorithm. TODO: Come up with something better.
+ 
+         bool found = false;
+         VmaAllocationRequest tmpAllocRequest = {};
+         tmpAllocRequest.type = VmaAllocationRequestType::Normal;
+         for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
+             suballocIt != m_Suballocations.end();
+             ++suballocIt)
+         {
+             if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
+                 suballocIt->hAllocation->CanBecomeLost())
+             {
+                 if(CheckAllocation(
+                     currentFrameIndex,
+                     frameInUseCount,
+                     bufferImageGranularity,
+                     allocSize,
+                     allocAlignment,
+                     allocType,
+                     suballocIt,
+                     canMakeOtherLost,
+                     &tmpAllocRequest.offset,
+                     &tmpAllocRequest.itemsToMakeLostCount,
+                     &tmpAllocRequest.sumFreeSize,
+                     &tmpAllocRequest.sumItemSize))
+                 {
+                     if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
+                     {
+                         *pAllocationRequest = tmpAllocRequest;
+                         pAllocationRequest->item = suballocIt;
+                         break;
+                     }
+                     if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
+                     {
+                         *pAllocationRequest = tmpAllocRequest;
+                         pAllocationRequest->item = suballocIt;
+                         found = true;
+                     }
+                 }
+             }
+         }
+ 
+         return found;
+     }
+ 
+     return false;
+ }
+ 
+ bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
+     uint32_t currentFrameIndex,
+     uint32_t frameInUseCount,
+     VmaAllocationRequest* pAllocationRequest)
+ {
+     VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);
+ 
+     while(pAllocationRequest->itemsToMakeLostCount > 0)
+     {
+         if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
+         {
+             ++pAllocationRequest->item;
+         }
+         VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
+         VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
+         VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
+         if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
+         {
+             pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
+             --pAllocationRequest->itemsToMakeLostCount;
+         }
+         else
+         {
+             return false;
+         }
+     }
+ 
+     VMA_HEAVY_ASSERT(Validate());
+     VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
+     VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
+ 
+     return true;
+ }
+ 
+ uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
+ {
+     uint32_t lostAllocationCount = 0;
+     for(VmaSuballocationList::iterator it = m_Suballocations.begin();
+         it != m_Suballocations.end();
+         ++it)
+     {
+         if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
+             it->hAllocation->CanBecomeLost() &&
+             it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
+         {
+             it = FreeSuballocation(it);
+             ++lostAllocationCount;
+         }
+     }
+     return lostAllocationCount;
+ }
+ 
+ VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
+ {
+     for(VmaSuballocationList::iterator it = m_Suballocations.begin();
+         it != m_Suballocations.end();
+         ++it)
+     {
+         if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
+         {
+             if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
+             {
+                 VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
+                 return VK_ERROR_VALIDATION_FAILED_EXT;
+             }
+             if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
+             {
+                 VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
+                 return VK_ERROR_VALIDATION_FAILED_EXT;
+             }
+         }
+     }
+ 
+     return VK_SUCCESS;
+ }
+ 
+ void VmaBlockMetadata_Generic::Alloc(
+     const VmaAllocationRequest& request,
+     VmaSuballocationType type,
+     VkDeviceSize allocSize,
+     VmaAllocation hAllocation)
+ {
+     VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
+     VMA_ASSERT(request.item != m_Suballocations.end());
+     VmaSuballocation& suballoc = *request.item;
+     // Given suballocation is a free block.
+     VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
+     // Given offset is inside this suballocation.
+     VMA_ASSERT(request.offset >= suballoc.offset);
+     const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
+     VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
+     const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
+ 
+     // Unregister this free suballocation from m_FreeSuballocationsBySize and update
+     // it to become used.
+     UnregisterFreeSuballocation(request.item);
+ 
+     suballoc.offset = request.offset;
+     suballoc.size = allocSize;
+     suballoc.type = type;
+     suballoc.hAllocation = hAllocation;
+ 
+     // If there are any free bytes remaining at the end, insert new free suballocation after current one.
+     if(paddingEnd)
+     {
+         VmaSuballocation paddingSuballoc = {};
+         paddingSuballoc.offset = request.offset + allocSize;
+         paddingSuballoc.size = paddingEnd;
+         paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
+         VmaSuballocationList::iterator next = request.item;
+         ++next;
+         const VmaSuballocationList::iterator paddingEndItem =
+             m_Suballocations.insert(next, paddingSuballoc);
+         RegisterFreeSuballocation(paddingEndItem);
+     }
+ 
+     // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
+     if(paddingBegin)
+     {
+         VmaSuballocation paddingSuballoc = {};
+         paddingSuballoc.offset = request.offset - paddingBegin;
+         paddingSuballoc.size = paddingBegin;
+         paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
+         const VmaSuballocationList::iterator paddingBeginItem =
+             m_Suballocations.insert(request.item, paddingSuballoc);
+         RegisterFreeSuballocation(paddingBeginItem);
+     }
+ 
+     // Update totals.
+     m_FreeCount = m_FreeCount - 1;
+     if(paddingBegin > 0)
+     {
+         ++m_FreeCount;
+     }
+     if(paddingEnd > 0)
+     {
+         ++m_FreeCount;
+     }
+     m_SumFreeSize -= allocSize;
+ }
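Alloc() above carves one free range into up to three pieces: optional begin padding (alignment slack), the allocation itself, and optional end padding. A minimal sketch of that split over plain offsets (illustrative types, not the library's code):

    #include <cassert>
    #include <cstdint>

    struct Range { uint64_t offset, size; };
    struct SplitResult { Range begin, alloc, end; };

    // One free range [offset, offset+size) becomes begin padding, the
    // allocation, and end padding; empty paddings are simply not registered.
    SplitResult SplitFreeRange(Range freeRange, uint64_t allocOffset, uint64_t allocSize)
    {
        assert(allocOffset >= freeRange.offset);
        const uint64_t paddingBegin = allocOffset - freeRange.offset;
        assert(freeRange.size >= paddingBegin + allocSize);
        const uint64_t paddingEnd = freeRange.size - paddingBegin - allocSize;
        return {
            { freeRange.offset, paddingBegin },      // stays free if non-empty
            { allocOffset, allocSize },              // becomes the used suballocation
            { allocOffset + allocSize, paddingEnd }  // stays free if non-empty
        };
    }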
+ 
+ void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
+ {
+     for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
+         suballocItem != m_Suballocations.end();
+         ++suballocItem)
+     {
+         VmaSuballocation& suballoc = *suballocItem;
+         if(suballoc.hAllocation == allocation)
+         {
+             FreeSuballocation(suballocItem);
+             VMA_HEAVY_ASSERT(Validate());
+             return;
+         }
+     }
+     VMA_ASSERT(0 && "Not found!");
+ }
+ 
+ void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
+ {
+     for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
+         suballocItem != m_Suballocations.end();
+         ++suballocItem)
+     {
+         VmaSuballocation& suballoc = *suballocItem;
+         if(suballoc.offset == offset)
+         {
+             FreeSuballocation(suballocItem);
+             return;
+         }
+     }
+     VMA_ASSERT(0 && "Not found!");
+ }
+ 
+ bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
+ {
+     VkDeviceSize lastSize = 0;
+     for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
+     {
+         const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
+ 
+         VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
+         VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
+         VMA_VALIDATE(it->size >= lastSize);
+         lastSize = it->size;
+     }
+     return true;
+ }
+ 
+ bool VmaBlockMetadata_Generic::CheckAllocation(
+     uint32_t currentFrameIndex,
+     uint32_t frameInUseCount,
+     VkDeviceSize bufferImageGranularity,
+     VkDeviceSize allocSize,
+     VkDeviceSize allocAlignment,
+     VmaSuballocationType allocType,
+     VmaSuballocationList::const_iterator suballocItem,
+     bool canMakeOtherLost,
+     VkDeviceSize* pOffset,
+     size_t* itemsToMakeLostCount,
+     VkDeviceSize* pSumFreeSize,
+     VkDeviceSize* pSumItemSize) const
+ {
+     VMA_ASSERT(allocSize > 0);
+     VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
+     VMA_ASSERT(suballocItem != m_Suballocations.cend());
+     VMA_ASSERT(pOffset != VMA_NULL);
+ 
+     *itemsToMakeLostCount = 0;
+     *pSumFreeSize = 0;
+     *pSumItemSize = 0;
+ 
+     if(canMakeOtherLost)
+     {
+         if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
+         {
+             *pSumFreeSize = suballocItem->size;
+         }
+         else
+         {
+             if(suballocItem->hAllocation->CanBecomeLost() &&
+                 suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
+             {
+                 ++*itemsToMakeLostCount;
+                 *pSumItemSize = suballocItem->size;
+             }
+             else
+             {
+                 return false;
+             }
+         }
+ 
+         // Remaining size is too small for this request: Early return.
+         if(GetSize() - suballocItem->offset < allocSize)
+         {
+             return false;
+         }
+ 
+         // Start from offset equal to beginning of this suballocation.
+         *pOffset = suballocItem->offset;
+ 
+         // Apply VMA_DEBUG_MARGIN at the beginning.
+         if(VMA_DEBUG_MARGIN > 0)
+         {
+             *pOffset += VMA_DEBUG_MARGIN;
+         }
+ 
+         // Apply alignment.
+         *pOffset = VmaAlignUp(*pOffset, allocAlignment);
+ 
+         // Check previous suballocations for BufferImageGranularity conflicts.
+         // Make bigger alignment if necessary.
+         if(bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment)
+         {
+             bool bufferImageGranularityConflict = false;
+             VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
+             while(prevSuballocItem != m_Suballocations.cbegin())
+             {
+                 --prevSuballocItem;
+                 const VmaSuballocation& prevSuballoc = *prevSuballocItem;
+                 if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
+                 {
+                     if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
+                     {
+                         bufferImageGranularityConflict = true;
+                         break;
+                     }
+                 }
+                 else
+                     // Already on previous page.
+                     break;
+             }
+             if(bufferImageGranularityConflict)
+             {
+                 *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
+             }
+         }
+ 
+         // Now that we have final *pOffset, check if we are past suballocItem.
+         // If yes, return false - this function should recalculate from next suballocation.
+         if(*pOffset >= suballocItem->offset + suballocItem->size)
+         {
+             return false;
+         }
+ 
+         // Calculate padding at the beginning based on current offset.
+         const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
+ 
+         // Calculate required margin at the end.
+         const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
+ 
+         const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
+         // Another early return check.
+         if(suballocItem->offset + totalSize > GetSize())
+         {
+             return false;
+         }
+ 
+         // Advance lastSuballocItem until desired size is reached.
+         // Update itemsToMakeLostCount.
+         VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
+         if(totalSize > suballocItem->size)
+         {
+             VkDeviceSize remainingSize = totalSize - suballocItem->size;
+             while(remainingSize > 0)
+             {
+                 ++lastSuballocItem;
+                 if(lastSuballocItem == m_Suballocations.cend())
+                 {
+                     return false;
+                 }
+                 if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
+                 {
+                     *pSumFreeSize += lastSuballocItem->size;
+                 }
+                 else
+                 {
+                     VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
+                     if(lastSuballocItem->hAllocation->CanBecomeLost() &&
+                         lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
+                     {
+                         ++*itemsToMakeLostCount;
+                         *pSumItemSize += lastSuballocItem->size;
+                     }
+                     else
+                     {
+                         return false;
+                     }
+                 }
+                 remainingSize = (lastSuballocItem->size < remainingSize) ?
+                     remainingSize - lastSuballocItem->size : 0;
+             }
+         }
+ 
+         // Check next suballocations for BufferImageGranularity conflicts.
+         // If conflict exists, we must mark more allocations lost or fail.
+         if(allocSize % bufferImageGranularity || *pOffset % bufferImageGranularity)
+         {
+             VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
+             ++nextSuballocItem;
+             while(nextSuballocItem != m_Suballocations.cend())
+             {
+                 const VmaSuballocation& nextSuballoc = *nextSuballocItem;
+                 if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
+                 {
+                     if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
+                     {
+                         VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
+                         if(nextSuballoc.hAllocation->CanBecomeLost() &&
+                             nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
+                         {
+                             ++*itemsToMakeLostCount;
+                         }
+                         else
+                         {
+                             return false;
+                         }
+                     }
+                 }
+                 else
+                 {
+                     // Already on next page.
+                     break;
+                 }
+                 ++nextSuballocItem;
+             }
+         }
+     }
+     else
+     {
+         const VmaSuballocation& suballoc = *suballocItem;
+         VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
+ 
+         *pSumFreeSize = suballoc.size;
+ 
+         // Size of this suballocation is too small for this request: Early return.
+         if(suballoc.size < allocSize)
+         {
+             return false;
+         }
+ 
+         // Start from offset equal to beginning of this suballocation.
+         *pOffset = suballoc.offset;
+ 
+         // Apply VMA_DEBUG_MARGIN at the beginning.
+         if(VMA_DEBUG_MARGIN > 0)
+         {
+             *pOffset += VMA_DEBUG_MARGIN;
+         }
+ 
+         // Apply alignment.
+         *pOffset = VmaAlignUp(*pOffset, allocAlignment);
+ 
+         // Check previous suballocations for BufferImageGranularity conflicts.
+         // Make bigger alignment if necessary.
+         if(bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment)
+         {
+             bool bufferImageGranularityConflict = false;
+             VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
+             while(prevSuballocItem != m_Suballocations.cbegin())
+             {
+                 --prevSuballocItem;
+                 const VmaSuballocation& prevSuballoc = *prevSuballocItem;
+                 if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
+                 {
+                     if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
+                     {
+                         bufferImageGranularityConflict = true;
+                         break;
+                     }
+                 }
+                 else
+                     // Already on previous page.
+                     break;
+             }
+             if(bufferImageGranularityConflict)
+             {
+                 *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
+             }
+         }
+ 
+         // Calculate padding at the beginning based on current offset.
+         const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
+ 
+         // Calculate required margin at the end.
+         const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
+ 
+         // Fail if requested size plus margin before and after is bigger than size of this suballocation.
+         if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
+         {
+             return false;
+         }
+ 
+         // Check next suballocations for BufferImageGranularity conflicts.
+         // If conflict exists, allocation cannot be made here.
+         if(allocSize % bufferImageGranularity || *pOffset % bufferImageGranularity)
+         {
+             VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
+             ++nextSuballocItem;
+             while(nextSuballocItem != m_Suballocations.cend())
+             {
+                 const VmaSuballocation& nextSuballoc = *nextSuballocItem;
+                 if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
+                 {
+                     if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
+                     {
+                         return false;
+                     }
+                 }
+                 else
+                 {
+                     // Already on next page.
+                     break;
+                 }
+                 ++nextSuballocItem;
+             }
+         }
+     }
+ 
+     // All tests passed: Success. pOffset is already filled.
+     return true;
+ }
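CheckAllocation's granularity logic hinges on a page test: two resources conflict only if they land on the same bufferImageGranularity-sized page and have incompatible types. A sketch restating the documented math of the VmaBlocksOnSamePage helper, assuming a power-of-two page size:

    #include <cstdint>

    // True if the last byte of resource A and the first byte of resource B fall
    // on the same "page" of size pageSize (pageSize assumed to be a power of two).
    static bool BlocksOnSamePage(uint64_t resourceAOffset, uint64_t resourceASize,
                                 uint64_t resourceBOffset, uint64_t pageSize)
    {
        const uint64_t resourceAEnd       = resourceAOffset + resourceASize - 1;
        const uint64_t resourceAEndPage   = resourceAEnd & ~(pageSize - 1);
        const uint64_t resourceBStartPage = resourceBOffset & ~(pageSize - 1);
        return resourceAEndPage == resourceBStartPage;
    }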
+ 
+ void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
+ {
+     VMA_ASSERT(item != m_Suballocations.end());
+     VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
+ 
+     VmaSuballocationList::iterator nextItem = item;
+     ++nextItem;
+     VMA_ASSERT(nextItem != m_Suballocations.end());
+     VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
+ 
+     item->size += nextItem->size;
+     --m_FreeCount;
+     m_Suballocations.erase(nextItem);
+ }
+ 
+ VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
+ {
+     // Change this suballocation to be marked as free.
+     VmaSuballocation& suballoc = *suballocItem;
+     suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
+     suballoc.hAllocation = VK_NULL_HANDLE;
+ 
+     // Update totals.
+     ++m_FreeCount;
+     m_SumFreeSize += suballoc.size;
+ 
+     // Merge with previous and/or next suballocation if it's also free.
+     bool mergeWithNext = false;
+     bool mergeWithPrev = false;
+ 
+     VmaSuballocationList::iterator nextItem = suballocItem;
+     ++nextItem;
+     if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
+     {
+         mergeWithNext = true;
+     }
+ 
+     VmaSuballocationList::iterator prevItem = suballocItem;
+     if(suballocItem != m_Suballocations.begin())
+     {
+         --prevItem;
+         if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
+         {
+             mergeWithPrev = true;
+         }
+     }
+ 
+     if(mergeWithNext)
+     {
+         UnregisterFreeSuballocation(nextItem);
+         MergeFreeWithNext(suballocItem);
+     }
-             offset = suballoc.offset + suballoc.size;
-         }
- 
-         VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
-     }
- 
-     VMA_VALIDATE(offset <= GetSize());
-     VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
- 
-     return true;
- }
- 
- size_t VmaBlockMetadata_Linear::GetAllocationCount() const
- {
-     return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
-         AccessSuballocations2nd().size() - m_2ndNullItemsCount;
- }
- 
- VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
- {
-     const VkDeviceSize size = GetSize();
- 
-     /*
-     We don't consider gaps inside allocation vectors with freed allocations because
-     they are not suitable for reuse in linear allocator. We consider only space that
-     is available for new allocations.
-     */
-     if(IsEmpty())
-     {
-         return size;
-     }
- 
-     const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
- 
-     switch(m_2ndVectorMode)
-     {
-     case SECOND_VECTOR_EMPTY:
-         /*
-         Available space is after end of 1st, as well as before beginning of 1st (which
-         would make it a ring buffer).
-         */
-         {
-             const size_t suballocations1stCount = suballocations1st.size();
-             VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
-             const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
-             const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
-             return VMA_MAX(
-                 firstSuballoc.offset,
-                 size - (lastSuballoc.offset + lastSuballoc.size));
-         }
- 
-     case SECOND_VECTOR_RING_BUFFER:
-         /*
-         Available space is only between end of 2nd and beginning of 1st.
-         */
-         {
-             const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-             const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
-             const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
-             return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
-         }
- 
-     case SECOND_VECTOR_DOUBLE_STACK:
-         /*
-         Available space is only between end of 1st and top of 2nd.
-         */
-         {
-             const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-             const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
-             const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
-             return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
-         }
-     }
- 
-     VMA_ASSERT(0);
-     return 0;
- }
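GetUnusedRangeSizeMax above distinguishes the three states of the linear allocator's second suballocation vector. A compact sketch of the same arithmetic over plain offsets (illustrative types; the mode names mirror the source):

    #include <algorithm>
    #include <cstdint>

    enum class SecondVectorMode { Empty, RingBuffer, DoubleStack };

    // Largest contiguous free range in a linear block, given the extents of the
    // 1st and 2nd vectors. Mirrors the switch in GetUnusedRangeSizeMax().
    uint64_t MaxFreeRange(SecondVectorMode mode, uint64_t blockSize,
                          uint64_t first1stOffset, uint64_t last1stEnd,
                          uint64_t last2ndEnd /* ring buffer */,
                          uint64_t top2ndOffset /* double stack */)
    {
        switch(mode)
        {
        case SecondVectorMode::Empty:       // space before the 1st vector or after its end
            return std::max(first1stOffset, blockSize - last1stEnd);
        case SecondVectorMode::RingBuffer:  // only the gap between end of 2nd and start of 1st
            return first1stOffset - last2ndEnd;
        case SecondVectorMode::DoubleStack: // only the gap between end of 1st and top of 2nd
            return top2ndOffset - last1stEnd;
        }
        return 0;
    }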
+ 
+     if(mergeWithPrev)
+     {
+         UnregisterFreeSuballocation(prevItem);
+         MergeFreeWithNext(prevItem);
+         RegisterFreeSuballocation(prevItem);
+         return prevItem;
+     }
+     else
+     {
+         RegisterFreeSuballocation(suballocItem);
+         return suballocItem;
+     }
+ }
+ 
+ void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
+ {
+     VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
+     VMA_ASSERT(item->size > 0);
+ 
+     // You may want to enable this validation at the beginning or at the end of
+     // this function, depending on what you want to check.
+     VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
+ 
+     if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
+     {
+         if(m_FreeSuballocationsBySize.empty())
+         {
+             m_FreeSuballocationsBySize.push_back(item);
+         }
+         else
+         {
+             VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
+         }
+     }
+ }
+ 
+ void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
+ {
+     VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
+     VMA_ASSERT(item->size > 0);
+ 
+     VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
+ 
+     if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
+     {
+         VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
+             m_FreeSuballocationsBySize.data(),
+             m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
+             item,
+             VmaSuballocationItemSizeLess());
+         for(size_t index = it - m_FreeSuballocationsBySize.data();
+             index < m_FreeSuballocationsBySize.size();
+             ++index)
+         {
+             if(m_FreeSuballocationsBySize[index] == item)
+             {
+                 VmaVectorRemove(m_FreeSuballocationsBySize, index);
+                 return;
+             }
+             VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
+         }
+         VMA_ASSERT(0 && "Not found.");
+     }
+ }
+ 
+ bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
+     VkDeviceSize bufferImageGranularity,
+     VmaSuballocationType& inOutPrevSuballocType) const
+ {
+     if(bufferImageGranularity == 1 || IsEmpty())
+     {
+         return false;
+     }
- void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
- {
-     const VkDeviceSize size = GetSize();
-     const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-     const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-     const size_t suballoc1stCount = suballocations1st.size();
-     const size_t suballoc2ndCount = suballocations2nd.size();
- 
-     outInfo.blockCount = 1;
-     outInfo.allocationCount = (uint32_t)GetAllocationCount();
-     outInfo.unusedRangeCount = 0;
-     outInfo.usedBytes = 0;
-     outInfo.allocationSizeMin = UINT64_MAX;
-     outInfo.allocationSizeMax = 0;
-     outInfo.unusedRangeSizeMin = UINT64_MAX;
-     outInfo.unusedRangeSizeMax = 0;
+ 
+     VkDeviceSize minAlignment = VK_WHOLE_SIZE;
+     bool typeConflictFound = false;
+     for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
+         it != m_Suballocations.cend();
+         ++it)
+     {
+         const VmaSuballocationType suballocType = it->type;
+         if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
+         {
+             minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
+             if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
+             {
+                 typeConflictFound = true;
+             }
+             inOutPrevSuballocType = suballocType;
+         }
+     }
-     VkDeviceSize lastOffset = 0;
- 
-     if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
-     {
-         const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
-         size_t nextAlloc2ndIndex = 0;
-         while(lastOffset < freeSpace2ndTo1stEnd)
-         {
-             // Find next non-null allocation or move nextAllocIndex to the end.
-             while(nextAlloc2ndIndex < suballoc2ndCount &&
-                 suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
-             {
-                 ++nextAlloc2ndIndex;
-             }
- 
-             // Found non-null allocation.
-             if(nextAlloc2ndIndex < suballoc2ndCount)
-             {
-                 const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
- 
-                 // 1. Process free space before this allocation.
-                 if(lastOffset < suballoc.offset)
-                 {
-                     // There is free space from lastOffset to suballoc.offset.
-                     const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
-                     ++outInfo.unusedRangeCount;
-                     outInfo.unusedBytes += unusedRangeSize;
-                     outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
-                     outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
-                 }
- 
-                 // 2. Process this allocation.
-                 // There is allocation with suballoc.offset, suballoc.size.
-                 outInfo.usedBytes += suballoc.size;
-                 outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
-                 outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
- 
-                 // 3. Prepare for next iteration.
-                 lastOffset = suballoc.offset + suballoc.size;
-                 ++nextAlloc2ndIndex;
-             }
-             // We are at the end.
-             else
-             {
-                 // There is free space from lastOffset to freeSpace2ndTo1stEnd.
-                 if(lastOffset < freeSpace2ndTo1stEnd)
-                 {
-                     const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
-                     ++outInfo.unusedRangeCount;
-                     outInfo.unusedBytes += unusedRangeSize;
-                     outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
-                     outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
-                 }
- 
-                 // End of loop.
-                 lastOffset = freeSpace2ndTo1stEnd;
-             }
-         }
-     }
- 
-     size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
-     const VkDeviceSize freeSpace1stTo2ndEnd =
-         m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
-     while(lastOffset < freeSpace1stTo2ndEnd)
-     {
-         // Find next non-null allocation or move nextAllocIndex to the end.
-         while(nextAlloc1stIndex < suballoc1stCount &&
-             suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
-         {
-             ++nextAlloc1stIndex;
-         }
- 
-         // Found non-null allocation.
-         if(nextAlloc1stIndex < suballoc1stCount)
-         {
-             const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
- 
-             // 1. Process free space before this allocation.
-             if(lastOffset < suballoc.offset)
-             {
-                 // There is free space from lastOffset to suballoc.offset.
-                 const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
-                 ++outInfo.unusedRangeCount;
-                 outInfo.unusedBytes += unusedRangeSize;
-                 outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
-                 outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
-             }
+ 
+     return typeConflictFound || minAlignment >= bufferImageGranularity;
+ }
+ 
+ ////////////////////////////////////////////////////////////////////////////////
+ // class VmaBlockMetadata_Linear
+ 
+ VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
+     VmaBlockMetadata(hAllocator),
+     m_SumFreeSize(0),
+     m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
+     m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
+     m_1stVectorIndex(0),
+     m_2ndVectorMode(SECOND_VECTOR_EMPTY),
+     m_1stNullItemsBeginCount(0),
+     m_1stNullItemsMiddleCount(0),
+     m_2ndNullItemsCount(0)
+ {
+ }
+ 
+ VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
+ {
+ }
+ 
+ void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
+ {
+     VmaBlockMetadata::Init(size);
+     m_SumFreeSize = size;
+ }
+ 
+ bool VmaBlockMetadata_Linear::Validate() const
+ {
+     const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
+     const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+ 
+     VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
+     VMA_VALIDATE(!suballocations1st.empty() ||
+         suballocations2nd.empty() ||
+         m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
+ 
+     if(!suballocations1st.empty())
+     {
+         // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
+         VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
+         // Null item at the end should be just pop_back().
+         VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
+     }
+     if(!suballocations2nd.empty())
+     {
+         // Null item at the end should be just pop_back().
+         VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
+     }
+ 
+     VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
+     VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
+ 
+     VkDeviceSize sumUsedSize = 0;
+     const size_t suballoc1stCount = suballocations1st.size();
+     VkDeviceSize offset = VMA_DEBUG_MARGIN;
+ 
+     if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
+     {
+         const size_t suballoc2ndCount = suballocations2nd.size();
+         size_t nullItem2ndCount = 0;
+         for(size_t i = 0; i < suballoc2ndCount; ++i)
+         {
+             const VmaSuballocation& suballoc = suballocations2nd[i];
+             const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
+ 
+             VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
+             VMA_VALIDATE(suballoc.offset >= offset);
+ 
+             if(!currFree)
+             {
+                 VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
+                 VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
+                 sumUsedSize += suballoc.size;
+             }
+             else
+             {
+                 ++nullItem2ndCount;
+             }
+ 
+             offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
+         }
+ 
+         VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
+     }
-
-
-
-
-
-
-
-10360 lastOffset = suballoc.offset + suballoc.size;
-10361 ++nextAlloc1stIndex;
-
-
-
-
-
-10367 if(lastOffset < freeSpace1stTo2ndEnd)
-
-10369 const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
-
-
-
-
-
-
-
-10377 lastOffset = freeSpace1stTo2ndEnd;
-
-
-
-10381 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
-
-10383 size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
-10384 while(lastOffset < size)
-
-
-10387 while(nextAlloc2ndIndex != SIZE_MAX &&
-10388 suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
-
-10390 --nextAlloc2ndIndex;
-
-
-
-10394 if(nextAlloc2ndIndex != SIZE_MAX)
-
-10396 const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
+10353 for(
size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
+
+10355 const VmaSuballocation& suballoc = suballocations1st[i];
+10356 VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
+10357 suballoc.hAllocation == VK_NULL_HANDLE);
+
+
+10360 size_t nullItem1stCount = m_1stNullItemsBeginCount;
+
+10362 for(
size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
+
+10364 const VmaSuballocation& suballoc = suballocations1st[i];
+10365 const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
+
+10367 VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
+10368 VMA_VALIDATE(suballoc.offset >= offset);
+10369 VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
+
+
+
+10373 VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
+10374 VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
+10375 sumUsedSize += suballoc.size;
+
+
+
+10379 ++nullItem1stCount;
+
+
+10382 offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
+
+10384 VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
+
+10386 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
+
+10388 const size_t suballoc2ndCount = suballocations2nd.size();
+10389 size_t nullItem2ndCount = 0;
+10390 for(
size_t i = suballoc2ndCount; i--; )
+
+10392 const VmaSuballocation& suballoc = suballocations2nd[i];
+10393 const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
+
+10395 VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
+10396 VMA_VALIDATE(suballoc.offset >= offset);
-
-10399 if(lastOffset < suballoc.offset)
-
-
-10402 const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
-
-
-
-
-
+
+
+10400 VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
+10401 VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
+10402 sumUsedSize += suballoc.size;
+
+
+
+10406 ++nullItem2ndCount;
+
-
-
-
-
-
+10409 offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
+
+
+10412 VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
+
-
-10416 lastOffset = suballoc.offset + suballoc.size;
-10417 --nextAlloc2ndIndex;
-
-
-
-
-
-10423 if(lastOffset < size)
-
-10425 const VkDeviceSize unusedRangeSize = size - lastOffset;
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+10415 VMA_VALIDATE(offset <= GetSize());
+10416 VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
+
+
+
+
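Validate() is only invoked through VMA_HEAVY_ASSERT, which compiles to nothing by default. A sketch of the documented configuration macros that turn it on, assuming the usual pattern of overriding VMA macros before including the header; the margin value is illustrative:

    // Enable heavy self-validation and a debug margin before including VMA.
    #include <cassert>
    #define VMA_HEAVY_ASSERT(expr) assert(expr) // runs Validate() on hot paths
    #define VMA_DEBUG_MARGIN 16                 // bytes of empty space around allocations
    #define VMA_IMPLEMENTATION
    #include "vk_mem_alloc.h"
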
size_t VmaBlockMetadata_Linear::GetAllocationCount() const
{
    return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
        AccessSuballocations2nd().size() - m_2ndNullItemsCount;
}

VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
{
    const VkDeviceSize size = GetSize();

    /*
    We don't consider gaps inside allocation vectors with freed allocations because
    they are not suitable for reuse in linear allocator. We consider only space that
    is available for new allocations.
    */
    if(IsEmpty())
    {
        return size;
    }

    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();

    switch(m_2ndVectorMode)
    {
    case SECOND_VECTOR_EMPTY:
        /*
        Available space is after end of 1st, as well as before beginning of 1st (which
        would make it a ring buffer).
        */
        {
            const size_t suballocations1stCount = suballocations1st.size();
            VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
            const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
            const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
            return VMA_MAX(
                firstSuballoc.offset,
                size - (lastSuballoc.offset + lastSuballoc.size));
        }

    case SECOND_VECTOR_RING_BUFFER:
        /*
        Available space is only between end of 2nd and beginning of 1st.
        */
        {
            const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
            const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
            return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
        }

    case SECOND_VECTOR_DOUBLE_STACK:
        /*
        Available space is only between end of 1st and top of 2nd.
        */
        {
            const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
            const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
            return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
        }

    default:
        VMA_ASSERT(0);
        return 0;
    }
}

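A picture helps with the three cases above. The diagram below is added for orientation only (offsets grow left to right; "null" marks freed slots counted in m_1stNullItemsBeginCount):

    // SECOND_VECTOR_EMPTY (plain stack):
    //   | null... | 1st allocations --->            free            |
    //
    // SECOND_VECTOR_RING_BUFFER (2nd grows behind the freed start of 1st):
    //   | 2nd ---> free | 1st allocations --->          free        |
    //
    // SECOND_VECTOR_DOUBLE_STACK (2nd grows down from the end of the block):
    //   | 1st --->              free              <--- 2nd          |
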
void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
{
    const VkDeviceSize size = GetSize();
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    const size_t suballoc1stCount = suballocations1st.size();
    const size_t suballoc2ndCount = suballocations2nd.size();

    outInfo.blockCount = 1;
    outInfo.allocationCount = (uint32_t)GetAllocationCount();
    outInfo.unusedRangeCount = 0;
    outInfo.usedBytes = 0;
    outInfo.unusedBytes = 0;
    outInfo.allocationSizeMin = UINT64_MAX;
    outInfo.allocationSizeMax = 0;
    outInfo.unusedRangeSizeMin = UINT64_MAX;
    outInfo.unusedRangeSizeMax = 0;

    VkDeviceSize lastOffset = 0;

    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
        size_t nextAlloc2ndIndex = 0;
        while(lastOffset < freeSpace2ndTo1stEnd)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex < suballoc2ndCount &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                ++nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex < suballoc2ndCount)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    ++outInfo.unusedRangeCount;
                    outInfo.unusedBytes += unusedRangeSize;
                    outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
                    outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
                }

                // 2. Process this allocation.
                outInfo.usedBytes += suballoc.size;
                outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
                outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                ++nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < freeSpace2ndTo1stEnd)
                {
                    const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
                    ++outInfo.unusedRangeCount;
                    outInfo.unusedBytes += unusedRangeSize;
                    outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
                    outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
                }

                // End of loop.
                lastOffset = freeSpace2ndTo1stEnd;
            }
        }
    }

    size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    const VkDeviceSize freeSpace1stTo2ndEnd =
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    while(lastOffset < freeSpace1stTo2ndEnd)
    {
        // Find next non-null allocation or move nextAlloc1stIndex to the end.
        while(nextAlloc1stIndex < suballoc1stCount &&
            suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
        {
            ++nextAlloc1stIndex;
        }

        // Found non-null allocation.
        if(nextAlloc1stIndex < suballoc1stCount)
        {
            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];

            // 1. Process free space before this allocation.
            if(lastOffset < suballoc.offset)
            {
                const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                ++outInfo.unusedRangeCount;
                outInfo.unusedBytes += unusedRangeSize;
                outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
                outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
            }

            // 2. Process this allocation.
            outInfo.usedBytes += suballoc.size;
            outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
            outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);

            // 3. Prepare for next iteration.
            lastOffset = suballoc.offset + suballoc.size;
            ++nextAlloc1stIndex;
        }
        // We are at the end.
        else
        {
            if(lastOffset < freeSpace1stTo2ndEnd)
            {
                const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
                ++outInfo.unusedRangeCount;
                outInfo.unusedBytes += unusedRangeSize;
                outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
                outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
            }

            // End of loop.
            lastOffset = freeSpace1stTo2ndEnd;
        }
    }

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
        while(lastOffset < size)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex != SIZE_MAX &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                --nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex != SIZE_MAX)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    ++outInfo.unusedRangeCount;
                    outInfo.unusedBytes += unusedRangeSize;
                    outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
                    outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
                }

                // 2. Process this allocation.
                outInfo.usedBytes += suballoc.size;
                outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
                outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                --nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < size)
                {
                    const VkDeviceSize unusedRangeSize = size - lastOffset;
                    ++outInfo.unusedRangeCount;
                    outInfo.unusedBytes += unusedRangeSize;
                    outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
                    outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
                }

                // End of loop.
                lastOffset = size;
            }
        }
    }
}

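CalcAllocationStatInfo feeds the allocator-wide statistics API. A usage sketch under the VMA 2.x naming, assuming `allocator` is valid:

    // Sketch: aggregate statistics; internally this visits every block's
    // metadata, including the function above for linear blocks.
    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    printf("Used bytes: %llu, unused bytes: %llu\n",
        (unsigned long long)stats.total.usedBytes,
        (unsigned long long)stats.total.unusedBytes);
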
void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    const VkDeviceSize size = GetSize();
    const size_t suballoc1stCount = suballocations1st.size();
    const size_t suballoc2ndCount = suballocations2nd.size();

    inoutStats.size += size;

    VkDeviceSize lastOffset = 0;

    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
        size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
        while(lastOffset < freeSpace2ndTo1stEnd)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex < suballoc2ndCount &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                ++nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex < suballoc2ndCount)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    inoutStats.unusedSize += unusedRangeSize;
                    ++inoutStats.unusedRangeCount;
                    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
                }

                // 2. Process this allocation.
                ++inoutStats.allocationCount;

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                ++nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < freeSpace2ndTo1stEnd)
                {
                    const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
                    inoutStats.unusedSize += unusedRangeSize;
                    ++inoutStats.unusedRangeCount;
                    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
                }

                // End of loop.
                lastOffset = freeSpace2ndTo1stEnd;
            }
        }
    }

    size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    const VkDeviceSize freeSpace1stTo2ndEnd =
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    while(lastOffset < freeSpace1stTo2ndEnd)
    {
        // Find next non-null allocation or move nextAlloc1stIndex to the end.
        while(nextAlloc1stIndex < suballoc1stCount &&
            suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
        {
            ++nextAlloc1stIndex;
        }

        // Found non-null allocation.
        if(nextAlloc1stIndex < suballoc1stCount)
        {
            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];

            // 1. Process free space before this allocation.
            if(lastOffset < suballoc.offset)
            {
                const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                inoutStats.unusedSize += unusedRangeSize;
                ++inoutStats.unusedRangeCount;
                inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
            }

            // 2. Process this allocation.
            ++inoutStats.allocationCount;

            // 3. Prepare for next iteration.
            lastOffset = suballoc.offset + suballoc.size;
            ++nextAlloc1stIndex;
        }
        // We are at the end.
        else
        {
            if(lastOffset < freeSpace1stTo2ndEnd)
            {
                const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
                inoutStats.unusedSize += unusedRangeSize;
                ++inoutStats.unusedRangeCount;
                inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
            }

            // End of loop.
            lastOffset = freeSpace1stTo2ndEnd;
        }
    }

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
        while(lastOffset < size)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex != SIZE_MAX &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                --nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex != SIZE_MAX)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    inoutStats.unusedSize += unusedRangeSize;
                    ++inoutStats.unusedRangeCount;
                    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
                }

                // 2. Process this allocation.
                ++inoutStats.allocationCount;

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                --nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < size)
                {
                    const VkDeviceSize unusedRangeSize = size - lastOffset;
                    inoutStats.unusedSize += unusedRangeSize;
                    ++inoutStats.unusedRangeCount;
                    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
                }

                // End of loop.
                lastOffset = size;
            }
        }
    }
}

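AddPoolStats accumulates into the VmaPoolStats fields read by the per-pool query. A usage sketch (VMA 2.x API), assuming the `pool` from the earlier sketch:

    // Sketch: per-pool statistics.
    VmaPoolStats poolStats = {};
    vmaGetPoolStats(allocator, pool, &poolStats);
    printf("Pool: %zu allocations, largest free range %llu bytes\n",
        poolStats.allocationCount,
        (unsigned long long)poolStats.unusedRangeSizeMax);
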
#if VMA_STATS_STRING_ENABLED
void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
{
    const VkDeviceSize size = GetSize();
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    const size_t suballoc1stCount = suballocations1st.size();
    const size_t suballoc2ndCount = suballocations2nd.size();

    // FIRST PASS: calculate totals.

    size_t unusedRangeCount = 0;
    VkDeviceSize usedBytes = 0;

    VkDeviceSize lastOffset = 0;

    size_t alloc2ndCount = 0;
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
        size_t nextAlloc2ndIndex = 0;
        while(lastOffset < freeSpace2ndTo1stEnd)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex < suballoc2ndCount &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                ++nextAlloc2ndIndex;
            }

            if(nextAlloc2ndIndex < suballoc2ndCount)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                if(lastOffset < suballoc.offset)
                {
                    ++unusedRangeCount;
                }

                ++alloc2ndCount;
                usedBytes += suballoc.size;

                lastOffset = suballoc.offset + suballoc.size;
                ++nextAlloc2ndIndex;
            }
            else
            {
                if(lastOffset < freeSpace2ndTo1stEnd)
                {
                    ++unusedRangeCount;
                }
                lastOffset = freeSpace2ndTo1stEnd;
            }
        }
    }

    size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    size_t alloc1stCount = 0;
    const VkDeviceSize freeSpace1stTo2ndEnd =
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    while(lastOffset < freeSpace1stTo2ndEnd)
    {
        // Find next non-null allocation or move nextAlloc1stIndex to the end.
        while(nextAlloc1stIndex < suballoc1stCount &&
            suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
        {
            ++nextAlloc1stIndex;
        }

        if(nextAlloc1stIndex < suballoc1stCount)
        {
            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];

            if(lastOffset < suballoc.offset)
            {
                ++unusedRangeCount;
            }

            ++alloc1stCount;
            usedBytes += suballoc.size;

            lastOffset = suballoc.offset + suballoc.size;
            ++nextAlloc1stIndex;
        }
        else
        {
            if(lastOffset < size)
            {
                ++unusedRangeCount;
            }
            lastOffset = freeSpace1stTo2ndEnd;
        }
    }

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
        while(lastOffset < size)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex != SIZE_MAX &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                --nextAlloc2ndIndex;
            }

            if(nextAlloc2ndIndex != SIZE_MAX)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                if(lastOffset < suballoc.offset)
                {
                    ++unusedRangeCount;
                }

                ++alloc2ndCount;
                usedBytes += suballoc.size;

                lastOffset = suballoc.offset + suballoc.size;
                --nextAlloc2ndIndex;
            }
            else
            {
                if(lastOffset < size)
                {
                    ++unusedRangeCount;
                }
                lastOffset = size;
            }
        }
    }

    const VkDeviceSize unusedBytes = size - usedBytes;
    PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);

    // SECOND PASS: print the map.
    lastOffset = 0;

    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
        size_t nextAlloc2ndIndex = 0;
        while(lastOffset < freeSpace2ndTo1stEnd)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex < suballoc2ndCount &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                ++nextAlloc2ndIndex;
            }

            if(nextAlloc2ndIndex < suballoc2ndCount)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                if(lastOffset < suballoc.offset)
                {
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);

                lastOffset = suballoc.offset + suballoc.size;
                ++nextAlloc2ndIndex;
            }
            else
            {
                if(lastOffset < freeSpace2ndTo1stEnd)
                {
                    const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }
                lastOffset = freeSpace2ndTo1stEnd;
            }
        }
    }

    nextAlloc1stIndex = m_1stNullItemsBeginCount;
    while(lastOffset < freeSpace1stTo2ndEnd)
    {
        // Find next non-null allocation or move nextAlloc1stIndex to the end.
        while(nextAlloc1stIndex < suballoc1stCount &&
            suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
        {
            ++nextAlloc1stIndex;
        }

        if(nextAlloc1stIndex < suballoc1stCount)
        {
            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];

            if(lastOffset < suballoc.offset)
            {
                const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
            }

            PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);

            lastOffset = suballoc.offset + suballoc.size;
            ++nextAlloc1stIndex;
        }
        else
        {
            if(lastOffset < freeSpace1stTo2ndEnd)
            {
                const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
                PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
            }
            lastOffset = freeSpace1stTo2ndEnd;
        }
    }

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
        while(lastOffset < size)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex != SIZE_MAX &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                --nextAlloc2ndIndex;
            }

            if(nextAlloc2ndIndex != SIZE_MAX)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                if(lastOffset < suballoc.offset)
                {
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);

                lastOffset = suballoc.offset + suballoc.size;
                --nextAlloc2ndIndex;
            }
            else
            {
                if(lastOffset < size)
                {
                    const VkDeviceSize unusedRangeSize = size - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }
                lastOffset = size;
            }
        }
    }

    PrintDetailedMap_End(json);
}
#endif // VMA_STATS_STRING_ENABLED

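PrintDetailedMap is reached through the public JSON dump API. A sketch, assuming a valid `allocator`; the second argument enables the detailed per-allocation map produced above:

    // Sketch: dump the allocator state as JSON.
    char* statsString = nullptr;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE);
    // ... write statsString to a file, feed it to the VmaDumpVis tool, etc. ...
    vmaFreeStatsString(allocator, statsString);
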
bool VmaBlockMetadata_Linear::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(pAllocationRequest != VMA_NULL);
    VMA_HEAVY_ASSERT(Validate());
    return upperAddress ?
        CreateAllocationRequest_UpperAddress(
            currentFrameIndex, frameInUseCount, bufferImageGranularity,
            allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) :
        CreateAllocationRequest_LowerAddress(
            currentFrameIndex, frameInUseCount, bufferImageGranularity,
            allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest);
}

bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    const VkDeviceSize size = GetSize();
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
        return false;
    }

    // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
    if(allocSize > size)
    {
        return false;
    }
    VkDeviceSize resultBaseOffset = size - allocSize;
    if(!suballocations2nd.empty())
    {
        const VmaSuballocation& lastSuballoc = suballocations2nd.back();
        resultBaseOffset = lastSuballoc.offset - allocSize;
        if(allocSize > lastSuballoc.offset)
        {
            return false;
        }
    }

    // Start from offset equal to end of free space.
    VkDeviceSize resultOffset = resultBaseOffset;

    // Apply VMA_DEBUG_MARGIN at the end.
    if(VMA_DEBUG_MARGIN > 0)
    {
        if(resultOffset < VMA_DEBUG_MARGIN)
        {
            return false;
        }
        resultOffset -= VMA_DEBUG_MARGIN;
    }

    // Apply alignment.
    resultOffset = VmaAlignDown(resultOffset, allocAlignment);

    // Check next suballocations from 2nd for BufferImageGranularity conflicts.
    // Make bigger alignment if necessary.
    if(bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty())
    {
        bool bufferImageGranularityConflict = false;
        for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
        {
            const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
            if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
            {
                if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
                {
                    bufferImageGranularityConflict = true;
                    break;
                }
            }
            else
            {
                // Already on previous page.
                break;
            }
        }
        if(bufferImageGranularityConflict)
        {
            resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
        }
    }

    // There is enough free space.
    const VkDeviceSize endOf1st = !suballocations1st.empty() ?
        suballocations1st.back().offset + suballocations1st.back().size :
        0;
    if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
    {
        // Check previous suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
            {
                const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
            }
        }

        // All tests passed: Success.
        pAllocationRequest->offset = resultOffset;
        pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
        pAllocationRequest->sumItemSize = 0;
        pAllocationRequest->itemsToMakeLostCount = 0;
        pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
        return true;
    }

    return false;
}

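The upper-address path above is reached through a public allocation flag. A sketch, assuming `allocator` and the linear `pool` from the earlier sketch; buffer parameters are illustrative:

    // Sketch: allocate from the top of a linear pool (double-stack mode).
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.pool = pool; // linear pool, assumed created earlier
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VkBuffer buf;
    VmaAllocation alloc;
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
        &buf, &alloc, nullptr);
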
+11318 bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
+11319 uint32_t currentFrameIndex,
+11320 uint32_t frameInUseCount,
+11321 VkDeviceSize bufferImageGranularity,
+11322 VkDeviceSize allocSize,
+11323 VkDeviceSize allocAlignment,
+11324 VmaSuballocationType allocType,
+11325 bool canMakeOtherLost,
+
+11327 VmaAllocationRequest* pAllocationRequest)
+
+11329 const VkDeviceSize size = GetSize();
+11330 SuballocationVectorType& suballocations1st = AccessSuballocations1st();
+11331 SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+
+11333 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
+
+
+
+11337 VkDeviceSize resultBaseOffset = 0;
+11338 if(!suballocations1st.empty())
+
+11340 const VmaSuballocation& lastSuballoc = suballocations1st.back();
+11341 resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
+
+
+
+11345 VkDeviceSize resultOffset = resultBaseOffset;
+
+
+11348 if(VMA_DEBUG_MARGIN > 0)
+
+11350 resultOffset += VMA_DEBUG_MARGIN;
+
+
+
+11354 resultOffset = VmaAlignUp(resultOffset, allocAlignment);
-11356 VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
-
-
-11359 SuballocationVectorType* suballocations = &AccessSuballocations1st();
-11360 size_t index = m_1stNullItemsBeginCount;
-11361 size_t madeLostCount = 0;
-11362 while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
-
-11364 if(index == suballocations->size())
-
-
-
-11368 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
-
-11370 suballocations = &AccessSuballocations2nd();
-
-
-
-11374 VMA_ASSERT(!suballocations->empty());
-
-11376 VmaSuballocation& suballoc = (*suballocations)[index];
-11377 if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
-
-11379 VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
-11380 VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
-11381 if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
-
-11383 suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
-11384 suballoc.hAllocation = VK_NULL_HANDLE;
-11385 m_SumFreeSize += suballoc.size;
-11386 if(suballocations == &AccessSuballocations1st())
-
-11388 ++m_1stNullItemsMiddleCount;
-
-
-
-11392 ++m_2ndNullItemsCount;
-
-
-
-
-
-
-
-
-
-
-
-11404 CleanupAfterFree();
-
-
-
-
+
+
+11358 if(bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations1st.empty())
+
+11360 bool bufferImageGranularityConflict =
false;
+11361 for(
size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
+
+11363 const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
+11364 if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
+
+11366 if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
+
+11368 bufferImageGranularityConflict =
true;
+
+
+
+
+
+
+
+11376 if(bufferImageGranularityConflict)
+
+11378 resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
+
+
+
+11382 const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
+11383 suballocations2nd.back().offset : size;
+
+
+11386 if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
+
+
+
+11390 if((allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity) && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
+
+11392 for(
size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
+
+11394 const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
+11395 if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
+
+11397 if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
+
+
+
+
+
+
+
+
+
+
+
-11410 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
-
-11412 uint32_t lostAllocationCount = 0;
-
-11414 SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-11415 for(
size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
-
-11417 VmaSuballocation& suballoc = suballocations1st[i];
-11418 if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
-11419 suballoc.hAllocation->CanBecomeLost() &&
-11420 suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
-
-11422 suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
-11423 suballoc.hAllocation = VK_NULL_HANDLE;
-11424 ++m_1stNullItemsMiddleCount;
-11425 m_SumFreeSize += suballoc.size;
-11426 ++lostAllocationCount;
-
-
-
-11430 SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-11431 for(
size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
-
-11433 VmaSuballocation& suballoc = suballocations2nd[i];
-11434 if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
-11435 suballoc.hAllocation->CanBecomeLost() &&
-11436 suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
-
-11438 suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
-11439 suballoc.hAllocation = VK_NULL_HANDLE;
-11440 ++m_2ndNullItemsCount;
-11441 m_SumFreeSize += suballoc.size;
-11442 ++lostAllocationCount;
-
-
+
+11411 pAllocationRequest->offset = resultOffset;
+11412 pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
+11413 pAllocationRequest->sumItemSize = 0;
+
+11415 pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
+11416 pAllocationRequest->itemsToMakeLostCount = 0;
+
+
+
+
+
+
+11423 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
+
+11425 VMA_ASSERT(!suballocations1st.empty());
+
+11427 VkDeviceSize resultBaseOffset = 0;
+11428 if(!suballocations2nd.empty())
+
+11430 const VmaSuballocation& lastSuballoc = suballocations2nd.back();
+11431 resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
+
+
+
+11435 VkDeviceSize resultOffset = resultBaseOffset;
+
+
+11438 if(VMA_DEBUG_MARGIN > 0)
+
+11440 resultOffset += VMA_DEBUG_MARGIN;
+
+
+
+11444 resultOffset = VmaAlignUp(resultOffset, allocAlignment);
-11446 if(lostAllocationCount)
-
-11448 CleanupAfterFree();
-
-
-11451 return lostAllocationCount;
-
-
-11454 VkResult VmaBlockMetadata_Linear::CheckCorruption(
const void* pBlockData)
-
-11456 SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-11457 for(
size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
-
-11459 const VmaSuballocation& suballoc = suballocations1st[i];
-11460 if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
-
-11462 if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
-
-11464 VMA_ASSERT(0 &&
"MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
-11465 return VK_ERROR_VALIDATION_FAILED_EXT;
-
-11467 if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
-
-11469 VMA_ASSERT(0 &&
"MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
-11470 return VK_ERROR_VALIDATION_FAILED_EXT;
-
-
-
-
-11475 SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-11476 for(
size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
-
-11478 const VmaSuballocation& suballoc = suballocations2nd[i];
-11479 if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
-
-11481 if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
-
-11483 VMA_ASSERT(0 &&
"MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
-11484 return VK_ERROR_VALIDATION_FAILED_EXT;
-
-11486 if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
-
-11488 VMA_ASSERT(0 &&
"MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
-11489 return VK_ERROR_VALIDATION_FAILED_EXT;
-
-
-
-
-
-
-
-11497 void VmaBlockMetadata_Linear::Alloc(
-11498 const VmaAllocationRequest& request,
-11499 VmaSuballocationType type,
-11500 VkDeviceSize allocSize,
-
-
-11503 const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
-
-11505 switch(request.type)
-
-11507 case VmaAllocationRequestType::UpperAddress:
-
-11509 VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
-11510 "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
-11511 SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-11512 suballocations2nd.push_back(newSuballoc);
-11513 m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
-
-
-11516 case VmaAllocationRequestType::EndOf1st:
-
-11518 SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-
-11520 VMA_ASSERT(suballocations1st.empty() ||
-11521 request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
-
-11523 VMA_ASSERT(request.offset + allocSize <= GetSize());
-
-11525 suballocations1st.push_back(newSuballoc);
-
-
-11528 case VmaAllocationRequestType::EndOf2nd:
-
-11530 SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-
-11532 VMA_ASSERT(!suballocations1st.empty() &&
-11533 request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
-11534 SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-
-11536 switch(m_2ndVectorMode)
-
-11538 case SECOND_VECTOR_EMPTY:
-
-11540 VMA_ASSERT(suballocations2nd.empty());
-11541 m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
-
-11543 case SECOND_VECTOR_RING_BUFFER:
-
-11545 VMA_ASSERT(!suballocations2nd.empty());
-
-11547 case SECOND_VECTOR_DOUBLE_STACK:
-11548 VMA_ASSERT(0 &&
"CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
-
-
-
-
-
-11554 suballocations2nd.push_back(newSuballoc);
-
-
-
-11558 VMA_ASSERT(0 &&
"CRITICAL INTERNAL ERROR.");
-
-
-11561 m_SumFreeSize -= newSuballoc.size;
-
-
-11564 void VmaBlockMetadata_Linear::Free(
const VmaAllocation allocation)
-
-11566 FreeAtOffset(allocation->GetOffset());
-
-
-11569 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
-
-11571 SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-11572 SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+
+
+11448 if(bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty())
+
+11450 bool bufferImageGranularityConflict =
false;
+11451 for(
size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
+
+11453 const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
+11454 if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
+
+11456 if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
+
+11458 bufferImageGranularityConflict =
true;
+
+
+
+
+
+
+
+11466 if(bufferImageGranularityConflict)
+
+11468 resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
+
+
+
+11472 pAllocationRequest->itemsToMakeLostCount = 0;
+11473 pAllocationRequest->sumItemSize = 0;
+11474 size_t index1st = m_1stNullItemsBeginCount;
+
+11476 if(canMakeOtherLost)
+
+11478 while(index1st < suballocations1st.size() &&
+11479 resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
+
+
+11482 const VmaSuballocation& suballoc = suballocations1st[index1st];
+11483 if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
+
+
+
+
+
+11489 VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
+11490 if(suballoc.hAllocation->CanBecomeLost() &&
+11491 suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
+
+11493 ++pAllocationRequest->itemsToMakeLostCount;
+11494 pAllocationRequest->sumItemSize += suballoc.size;
+
+
+
+
+
+
+
+
+
+
+
+11506 if(allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity)
+
+11508 while(index1st < suballocations1st.size())
+
+11510 const VmaSuballocation& suballoc = suballocations1st[index1st];
+11511 if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
+
+11513 if(suballoc.hAllocation != VK_NULL_HANDLE)
+
+
+11516 if(suballoc.hAllocation->CanBecomeLost() &&
+11517 suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
+
+11519 ++pAllocationRequest->itemsToMakeLostCount;
+11520 pAllocationRequest->sumItemSize += suballoc.size;
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+11538 if(index1st == suballocations1st.size() &&
+11539 resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
+
+
+11542 VMA_DEBUG_LOG(
"Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
+
+
+
+
+11547 if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
+11548 (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
+
+
+
+11552 if(allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity)
+
+11554 for(
size_t nextSuballocIndex = index1st;
+11555 nextSuballocIndex < suballocations1st.size();
+11556 nextSuballocIndex++)
+
+11558 const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
+11559 if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
+
+11561 if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
+
+
+
+
+
+
+
+
+
+
+
    // All tests passed: Success.
    pAllocationRequest->offset = resultOffset;
    pAllocationRequest->sumFreeSize =
        (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
        - resultBaseOffset
        - pAllocationRequest->sumItemSize;
    pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
    // pAllocationRequest->item unused.
    return true;
}

return false;
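// Worked example (illustrative, not part of vk_mem_alloc.h) of the
// bufferImageGranularity check used above. It assumes the granularity is a
// power of two, as Vulkan requires, and mirrors the page math of
// VmaBlocksOnSamePage(): two resources conflict only if they share a "page".
#include <cassert>
#include <cstdint>

static bool OnSamePage(uint64_t endAOffset, uint64_t startBOffset, uint64_t pageSize)
{
    const uint64_t lastPageOfA = (endAOffset - 1) & ~(pageSize - 1); // page of A's last byte
    const uint64_t firstPageOfB = startBOffset & ~(pageSize - 1);    // page of B's first byte
    return lastPageOfA == firstPageOfB;
}

int main()
{
    // With granularity 1024: a resource ending at byte 1000 and another starting
    // at offset 1024 sit on different pages, so no extra alignment is needed...
    assert(!OnSamePage(1000, 1024, 1024));
    // ...but one starting at offset 1000 shares page 0 with the first resource.
    // That is exactly the case where the code above aligns resultOffset up.
    assert(OnSamePage(1000, 1000, 1024));
    return 0;
}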
bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    if(pAllocationRequest->itemsToMakeLostCount == 0)
    {
        return true;
    }

    VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);

    // We always start from 1st.
    SuballocationVectorType* suballocations = &AccessSuballocations1st();
    size_t index = m_1stNullItemsBeginCount;
    size_t madeLostCount = 0;
    while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    {
        if(index == suballocations->size())
        {
            index = 0;
            // If we get to the end of 1st, we wrap around to beginning of 2nd.
            if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                suballocations = &AccessSuballocations2nd();
            }
            // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
            // suballocations continues pointing at AccessSuballocations1st().
            VMA_ASSERT(!suballocations->empty());
        }
        VmaSuballocation& suballoc = (*suballocations)[index];
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
            VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
            if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
            {
                suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                suballoc.hAllocation = VK_NULL_HANDLE;
                m_SumFreeSize += suballoc.size;
                if(suballocations == &AccessSuballocations1st())
                {
                    ++m_1stNullItemsMiddleCount;
                }
                else
                {
                    ++m_2ndNullItemsCount;
                }
                ++madeLostCount;
            }
            else
            {
                return false;
            }
        }
        ++index;
    }

    CleanupAfterFree();

    return true;
}
uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    uint32_t lostAllocationCount = 0;

    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    {
        VmaSuballocation& suballoc = suballocations1st[i];
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
            suballoc.hAllocation->CanBecomeLost() &&
            suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            suballoc.hAllocation = VK_NULL_HANDLE;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += suballoc.size;
            ++lostAllocationCount;
        }
    }

    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    {
        VmaSuballocation& suballoc = suballocations2nd[i];
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
            suballoc.hAllocation->CanBecomeLost() &&
            suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            suballoc.hAllocation = VK_NULL_HANDLE;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += suballoc.size;
            ++lostAllocationCount;
        }
    }

    if(lostAllocationCount)
    {
        CleanupAfterFree();
    }
    return lostAllocationCount;
}
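// Example (sketch): how an application opts into the "lost allocations" machinery
// implemented above, using only public VMA 2.x API. `allocator`, `bufCreateInfo`
// and `frameIndex` are assumed to exist in the surrounding application code.
static void UseLostAllocation(VmaAllocator allocator, const VkBufferCreateInfo& bufCreateInfo, uint32_t frameIndex)
{
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT |
        VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;

    VkBuffer buf = VK_NULL_HANDLE;
    VmaAllocation alloc = VK_NULL_HANDLE;
    if(vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr) != VK_SUCCESS)
        return;

    // Each frame: advance the frame index, then "touch" the allocation before use.
    vmaSetCurrentFrameIndex(allocator, frameIndex);
    if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
    {
        // The allocation was made lost by MakeLost()/MakeAllocationsLost():
        // destroy the stale buffer/allocation pair and recreate it.
    }
}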
VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
            {
                VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
                return VK_ERROR_VALIDATION_FAILED_EXT;
            }
            if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
            {
                VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
                return VK_ERROR_VALIDATION_FAILED_EXT;
            }
        }
    }
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    {
        const VmaSuballocation& suballoc = suballocations2nd[i];
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
            {
                VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
                return VK_ERROR_VALIDATION_FAILED_EXT;
            }
            if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
            {
                VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
                return VK_ERROR_VALIDATION_FAILED_EXT;
            }
        }
    }

    return VK_SUCCESS;
}
void VmaBlockMetadata_Linear::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    VmaAllocation hAllocation)
{
    const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };

    switch(request.type)
    {
    case VmaAllocationRequestType::UpperAddress:
        {
            VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
                "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
            SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            suballocations2nd.push_back(newSuballoc);
            m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
        }
        break;
    case VmaAllocationRequestType::EndOf1st:
        {
            SuballocationVectorType& suballocations1st = AccessSuballocations1st();

            VMA_ASSERT(suballocations1st.empty() ||
                request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
            // Check if it fits before the end of the block.
            VMA_ASSERT(request.offset + allocSize <= GetSize());
            suballocations1st.push_back(newSuballoc);
        }
        break;
    case VmaAllocationRequestType::EndOf2nd:
        {
            SuballocationVectorType& suballocations1st = AccessSuballocations1st();
            // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
            VMA_ASSERT(!suballocations1st.empty() &&
                request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
            SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

            switch(m_2ndVectorMode)
            {
            case SECOND_VECTOR_EMPTY:
                // First allocation from second part ring buffer.
                VMA_ASSERT(suballocations2nd.empty());
                m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
                break;
            case SECOND_VECTOR_RING_BUFFER:
                // 2-part ring buffer is already started.
                VMA_ASSERT(!suballocations2nd.empty());
                break;
            case SECOND_VECTOR_DOUBLE_STACK:
                VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
                break;
            default:
                VMA_ASSERT(0);
            }
            suballocations2nd.push_back(newSuballoc);
        }
        break;
    default:
        VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
    }

    m_SumFreeSize -= newSuballoc.size;
}

void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
{
    FreeAtOffset(allocation->GetOffset());
}
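// Example (sketch): the request types handled by Alloc() above map to how an
// application drives the linear algorithm through the public API. Assumes an
// initialized VmaAllocator `allocator` and a valid `memoryTypeIndex`.
static void CreateLinearPool(VmaAllocator allocator, uint32_t memoryTypeIndex)
{
    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memoryTypeIndex;
    poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
    poolCreateInfo.blockSize = 64ull * 1024 * 1024;
    poolCreateInfo.maxBlockCount = 1; // linear algorithm lives in a single block

    VmaPool pool = VK_NULL_HANDLE;
    if(vmaCreatePool(allocator, &poolCreateInfo, &pool) != VK_SUCCESS)
        return;

    // Plain allocations from this pool append to the 1st vector (EndOf1st).
    // Adding VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT turns the pool into a
    // double stack: such allocations go to the 2nd vector (UpperAddress).
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.pool = pool;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
    (void)allocCreateInfo; // to be used with vmaCreateBuffer()/vmaCreateImage()
}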
void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(!suballocations1st.empty())
    {
        // First allocation: Mark it as next empty at the beginning.
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if(firstSuballoc.offset == offset)
        {
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.hAllocation = VK_NULL_HANDLE;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Item from the middle of 1st vector.
    {
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        SuballocationVectorType::iterator it = VmaBinaryFindSorted(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc,
            VmaSuballocationOffsetLess());
        if(it != suballocations1st.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }
    if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
            VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
        if(it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}
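// VmaBinaryFindSorted, used above, returns an iterator to the element equal to
// the key, or end() if absent. A minimal equivalent (illustrative sketch, not
// the library's implementation) in terms of std::lower_bound:
#include <algorithm>

template<typename It, typename T, typename Cmp>
It BinaryFindSorted(It beg, It end, const T& value, Cmp cmp)
{
    It it = std::lower_bound(beg, end, value, cmp); // first element not less than value
    if(it != end && !cmp(value, *it))               // equivalent: neither compares less
        return it;
    return end;                                     // exact offset not found
}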
bool VmaBlockMetadata_Linear::ShouldCompact1st() const
{
    const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    const size_t suballocCount = AccessSuballocations1st().size();
    return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
}

void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(IsEmpty())
    {
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        while(m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
        {
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector.
        while(m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        // Find more null items at the beginning of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            VmaVectorRemove(suballocations2nd, 0);
        }

        if(ShouldCompact1st())
        {
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
                {
                    ++srcIndex;
                }
                if(dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if(suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }
        // 1st vector became empty.
        if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                m_1stVectorIndex ^= 1;
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}
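// Worked example of the ShouldCompact1st() heuristic above: with
// suballocCount = 100 and nullItemCount = 60,
//   nullItemCount * 2 = 120  >=  (100 - 60) * 3 = 120,
// so compaction triggers once free (null) items reach 60% of the vector
// (and only when it holds more than 32 items). Below that ratio, the cheaper
// trim loops above are considered sufficient and the O(n) copy is skipped.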
////////////////////////////////////////////////////////////////////////////////
// class VmaBlockMetadata_Buddy

VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_Root(VMA_NULL),
    m_AllocationCount(0),
    m_FreeCount(1),
    m_SumFreeSize(0)
{
    memset(m_FreeList, 0, sizeof(m_FreeList));
}

VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
{
    DeleteNode(m_Root);
}

void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);

    m_UsableSize = VmaPrevPow2(size);
    m_SumFreeSize = m_UsableSize;

    // Calculate m_LevelCount.
    m_LevelCount = 1;
    while(m_LevelCount < MAX_LEVELS &&
        LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
    {
        ++m_LevelCount;
    }

    Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
    rootNode->offset = 0;
    rootNode->type = Node::TYPE_FREE;
    rootNode->parent = VMA_NULL;
    rootNode->buddy = VMA_NULL;

    m_Root = rootNode;
    AddToFreeListFront(0, rootNode);
}

bool VmaBlockMetadata_Buddy::Validate() const
{
    // Validate tree.
    ValidationContext ctx;
    if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    {
        VMA_VALIDATE(false && "ValidateNode failed.");
    }
    VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);

    // Validate free node lists.
    for(uint32_t level = 0; level < m_LevelCount; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
            m_FreeList[level].front->free.prev == VMA_NULL);

        for(Node* node = m_FreeList[level].front;
            node != VMA_NULL;
            node = node->free.next)
        {
            VMA_VALIDATE(node->type == Node::TYPE_FREE);

            if(node->free.next == VMA_NULL)
            {
                VMA_VALIDATE(m_FreeList[level].back == node);
            }
            else
            {
                VMA_VALIDATE(node->free.next->free.prev == node);
            }
        }
    }

    // Validate that free lists at higher levels are empty.
    for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    }

    return true;
}

VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
{
    for(uint32_t level = 0; level < m_LevelCount; ++level)
    {
        if(m_FreeList[level].front != VMA_NULL)
        {
            return LevelToNodeSize(level);
        }
    }
    return 0;
}

void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
{
    const VkDeviceSize unusableSize = GetUnusableSize();

    outInfo.blockCount = 1;

    outInfo.allocationCount = outInfo.unusedRangeCount = 0;
    outInfo.usedBytes = outInfo.unusedBytes = 0;

    outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
    outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
    outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.

    CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));

    if(unusableSize > 0)
    {
        ++outInfo.unusedRangeCount;
        outInfo.unusedBytes += unusableSize;
        outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
        outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
    }
}
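// Example (sketch, with illustrative constants) of the size/level bookkeeping
// used by Init() above: for a 100 MB block, VmaPrevPow2 yields a 64 MB usable
// root node, and halving per level gives node sizes 64, 32, 16, ... MB until
// MIN_NODE_SIZE stops the loop; the remaining 36 MB is the "unusable" tail.
#include <cstdint>

static uint64_t PrevPow2(uint64_t v) // round down to a power of 2, like VmaPrevPow2
{
    v |= v >> 1; v |= v >> 2; v |= v >> 4;
    v |= v >> 8; v |= v >> 16; v |= v >> 32;
    return v - (v >> 1);
}

static uint64_t LevelToNodeSizeSketch(uint64_t usableSize, uint32_t level)
{
    return usableSize >> level; // each level halves the node size
}
// PrevPow2(100ull << 20) == 64ull << 20;
// LevelToNodeSizeSketch(64ull << 20, 2) == 16ull << 20.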
void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
{
    const VkDeviceSize unusableSize = GetUnusableSize();
    inoutStats.size += GetSize();
    inoutStats.unusedSize += m_SumFreeSize + unusableSize;
    inoutStats.allocationCount += m_AllocationCount;
    inoutStats.unusedRangeCount += m_FreeCount;
    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());

    if(unusableSize > 0)
    {
        ++inoutStats.unusedRangeCount;
        // Not updating unusedRangeSizeMax with unusableSize because this space is not available for allocations.
    }
}

#if VMA_STATS_STRING_ENABLED

void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
{
    VmaStatInfo stat;
    CalcAllocationStatInfo(stat);

    PrintDetailedMap_Begin(
        json,
        stat.unusedBytes,
        stat.allocationCount,
        stat.unusedRangeCount);

    PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));

    const VkDeviceSize unusableSize = GetUnusableSize();
    if(unusableSize > 0)
    {
        PrintDetailedMap_UnusedRange(json,
            m_UsableSize, // offset
            unusableSize); // size
    }

    PrintDetailedMap_End(json);
}

#endif // #if VMA_STATS_STRING_ENABLED

bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
    // Simple way to respect bufferImageGranularity: whenever the allocation
    // might be an image, inflate both alignment and size to the granularity.
    if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
    {
        allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
        allocSize = VMA_MAX(allocSize, bufferImageGranularity);
    }
    if(allocSize > m_UsableSize)
    {
        return false;
    }

    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    for(uint32_t level = targetLevel + 1; level--; )
    {
        for(Node* freeNode = m_FreeList[level].front;
            freeNode != VMA_NULL;
            freeNode = freeNode->free.next)
        {
            if(freeNode->offset % allocAlignment == 0)
            {
                pAllocationRequest->type = VmaAllocationRequestType::Normal;
                pAllocationRequest->offset = freeNode->offset;
                pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
                pAllocationRequest->sumItemSize = 0;
                pAllocationRequest->itemsToMakeLostCount = 0;
                pAllocationRequest->customData = (void*)(uintptr_t)level;
                return true;
            }
        }
    }

    return false;
}

bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    /*
    Lost allocations are not supported in buddy allocator at the moment.
    Support might be added in the future.
    */
    return pAllocationRequest->itemsToMakeLostCount == 0;
}

uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    /*
    Lost allocations are not supported in buddy allocator at the moment.
    Support might be added in the future.
    */
    return 0;
}
void VmaBlockMetadata_Buddy::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    VmaAllocation hAllocation)
{
    VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);

    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;

    Node* currNode = m_FreeList[currLevel].front;
    VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    while(currNode->offset != request.offset)
    {
        currNode = currNode->free.next;
        VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    }

    // Go down, splitting free nodes.
    while(currLevel < targetLevel)
    {
        // currNode is already the first free node at currLevel.
        // Remove it from the list of free nodes at this level.
        RemoveFromFreeList(currLevel, currNode);

        const uint32_t childrenLevel = currLevel + 1;

        // Create two free sub-nodes.
        Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
        Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();

        leftChild->offset = currNode->offset;
        leftChild->type = Node::TYPE_FREE;
        leftChild->parent = currNode;
        leftChild->buddy = rightChild;

        rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
        rightChild->type = Node::TYPE_FREE;
        rightChild->parent = currNode;
        rightChild->buddy = leftChild;

        // Convert current node to split type.
        currNode->type = Node::TYPE_SPLIT;
        currNode->split.leftChild = leftChild;

        // Add child nodes to free list. Order is important!
        AddToFreeListFront(childrenLevel, rightChild);
        AddToFreeListFront(childrenLevel, leftChild);

        ++m_FreeCount;
        ++currLevel;
        currNode = m_FreeList[currLevel].front;

        /*
        We can be sure that currNode, as left child of a node previously split,
        also fulfills the alignment requirement.
        */
    }

    // Remove from free list.
    VMA_ASSERT(currLevel == targetLevel &&
        currNode != VMA_NULL &&
        currNode->type == Node::TYPE_FREE);
    RemoveFromFreeList(currLevel, currNode);

    // Convert to allocation node.
    currNode->type = Node::TYPE_ALLOCATION;
    currNode->allocation.alloc = hAllocation;

    ++m_AllocationCount;
    --m_FreeCount;
    m_SumFreeSize -= allocSize;
}
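// Walkthrough of the splitting loop above, assuming a 64-unit usable size and a
// 10-unit request: AllocSizeToLevel(10) gives the 16-unit level (the smallest
// node size still >= 10). If the chosen free node is the 64-unit root (level 0),
// the loop splits it into two 32-unit buddies, then the left 32-unit node into
// two 16-unit buddies, and finally marks the left 16-unit node TYPE_ALLOCATION.
// The 6 unused units inside that node are internal fragmentation, which
// CalcAllocationStatInfoNode() below reports as an unused range.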
void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
{
    if(node->type == Node::TYPE_SPLIT)
    {
        DeleteNode(node->split.leftChild->buddy);
        DeleteNode(node->split.leftChild);
    }

    vma_delete(GetAllocationCallbacks(), node);
}

bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
{
    VMA_VALIDATE(level < m_LevelCount);
    VMA_VALIDATE(curr->parent == parent);
    VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
    VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
    switch(curr->type)
    {
    case Node::TYPE_FREE:
        // curr->free.prev, next are validated separately.
        ctx.calculatedSumFreeSize += levelNodeSize;
        ++ctx.calculatedFreeCount;
        break;
    case Node::TYPE_ALLOCATION:
        ++ctx.calculatedAllocationCount;
        ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
        VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
        break;
    case Node::TYPE_SPLIT:
        {
            const uint32_t childrenLevel = level + 1;
            const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
            const Node* const leftChild = curr->split.leftChild;
            VMA_VALIDATE(leftChild != VMA_NULL);
            VMA_VALIDATE(leftChild->offset == curr->offset);
            if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
            {
                VMA_VALIDATE(false && "ValidateNode for left child failed.");
            }
            const Node* const rightChild = leftChild->buddy;
            VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
            if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
            {
                VMA_VALIDATE(false && "ValidateNode for right child failed.");
            }
        }
        break;
    default:
        return false;
    }

    return true;
}

uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
{
    uint32_t level = 0;
    VkDeviceSize currLevelNodeSize = m_UsableSize;
    VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
    while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
    {
        ++level;
        currLevelNodeSize = nextLevelNodeSize;
        nextLevelNodeSize = currLevelNodeSize >> 1;
    }
    return level;
}
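// Numeric check of AllocSizeToLevel() above with m_UsableSize = 64 MB and
// m_LevelCount = 4 (node sizes 64/32/16/8 MB): a 10 MB request walks
// 10 <= 32 -> level 1, 10 <= 16 -> level 2, 10 <= 8 fails -> returns 2,
// i.e. the deepest level whose node size (16 MB) still fits the allocation.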
void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
{
    // Find node and level.
    Node* node = m_Root;
    VkDeviceSize nodeOffset = 0;
    uint32_t level = 0;
    VkDeviceSize levelNodeSize = LevelToNodeSize(0);
    while(node->type == Node::TYPE_SPLIT)
    {
        const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
        if(offset < nodeOffset + nextLevelSize)
        {
            node = node->split.leftChild;
        }
        else
        {
            node = node->split.leftChild->buddy;
            nodeOffset += nextLevelSize;
        }
        ++level;
        levelNodeSize = nextLevelSize;
    }

    VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
    VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);

    ++m_FreeCount;
    --m_AllocationCount;
    m_SumFreeSize += alloc->GetSize();

    node->type = Node::TYPE_FREE;

    // Join free nodes with buddies until we reach root or cannot join anymore.
    while(level > 0 && node->buddy->type == Node::TYPE_FREE)
    {
        RemoveFromFreeList(level, node->buddy);
        Node* const parent = node->parent;

        vma_delete(GetAllocationCallbacks(), node->buddy);
        vma_delete(GetAllocationCallbacks(), node);
        parent->type = Node::TYPE_FREE;

        node = parent;
        --level;
        // m_SumFreeSize is not updated, because this operation doesn't change the amount of free space.
        --m_FreeCount; // Two nodes become one.
    }

    AddToFreeListFront(level, node);
}
void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
{
    switch(node->type)
    {
    case Node::TYPE_FREE:
        ++outInfo.unusedRangeCount;
        outInfo.unusedBytes += levelNodeSize;
        outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
        outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
        break;
    case Node::TYPE_ALLOCATION:
        {
            const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
            ++outInfo.allocationCount;
            outInfo.usedBytes += allocSize;
            outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
            outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);

            const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
            if(unusedRangeSize > 0)
            {
                ++outInfo.unusedRangeCount;
                outInfo.unusedBytes += unusedRangeSize;
                outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
                outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
            }
        }
        break;
    case Node::TYPE_SPLIT:
        {
            const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
            const Node* const leftChild = node->split.leftChild;
            CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
            const Node* const rightChild = leftChild->buddy;
            CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
        }
        break;
    default:
        VMA_ASSERT(0);
    }
}
void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
{
    VMA_ASSERT(node->type == Node::TYPE_FREE);

    // List is empty.
    Node* const frontNode = m_FreeList[level].front;
    if(frontNode == VMA_NULL)
    {
        VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
        node->free.prev = node->free.next = VMA_NULL;
        m_FreeList[level].front = m_FreeList[level].back = node;
    }
    else
    {
        VMA_ASSERT(frontNode->free.prev == VMA_NULL);
        node->free.prev = VMA_NULL;
        node->free.next = frontNode;
        frontNode->free.prev = node;
        m_FreeList[level].front = node;
    }
}
void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
{
    VMA_ASSERT(m_FreeList[level].front != VMA_NULL);

    // It is at the front.
    if(node->free.prev == VMA_NULL)
    {
        VMA_ASSERT(m_FreeList[level].front == node);
        m_FreeList[level].front = node->free.next;
    }
    else
    {
        Node* const prevFreeNode = node->free.prev;
        VMA_ASSERT(prevFreeNode->free.next == node);
        prevFreeNode->free.next = node->free.next;
    }

    // It is at the back.
    if(node->free.next == VMA_NULL)
    {
        VMA_ASSERT(m_FreeList[level].back == node);
        m_FreeList[level].back = node->free.prev;
    }
    else
    {
        Node* const nextFreeNode = node->free.next;
        VMA_ASSERT(nextFreeNode->free.prev == node);
        nextFreeNode->free.prev = node->free.prev;
    }
}
#if VMA_STATS_STRING_ENABLED
void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
{
    switch(node->type)
    {
    case Node::TYPE_FREE:
        PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
        break;
    case Node::TYPE_ALLOCATION:
        {
            PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
            const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
            if(allocSize < levelNodeSize)
            {
                PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
            }
        }
        break;
    case Node::TYPE_SPLIT:
        {
            const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
            const Node* const leftChild = node->split.leftChild;
            PrintDetailedMapNode(json, leftChild, childrenNodeSize);
            const Node* const rightChild = leftChild->buddy;
            PrintDetailedMapNode(json, rightChild, childrenNodeSize);
        }
        break;
    default:
        VMA_ASSERT(0);
    }
}
#endif // #if VMA_STATS_STRING_ENABLED
////////////////////////////////////////////////////////////////////////////////
// class VmaDeviceMemoryBlock

VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX),
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0),
    m_pMappedData(VMA_NULL)
{
}

void VmaDeviceMemoryBlock::Init(
    VmaAllocator hAllocator,
    VmaPool hParentPool,
    uint32_t newMemoryTypeIndex,
    VkDeviceMemory newMemory,
    VkDeviceSize newSize,
    uint32_t id,
    uint32_t algorithm)
{
    VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);

    m_hParentPool = hParentPool;
    m_MemoryTypeIndex = newMemoryTypeIndex;
    m_Id = id;
    m_hMemory = newMemory;

    switch(algorithm)
    {
    case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
        m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
        break;
    case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
        m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
        break;
    default:
        VMA_ASSERT(0);
        // Fall-through.
    case 0:
        m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
    }
    m_pMetadata->Init(newSize);
}

void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
{
    // Hitting this assert means you have a memory leak: some VmaAllocation
    // objects from this block were not freed before the block is destroyed.
    VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
    VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    m_hMemory = VK_NULL_HANDLE;
    vma_delete(allocator, m_pMetadata);
    m_pMetadata = VMA_NULL;
}

bool VmaDeviceMemoryBlock::Validate() const
{
    VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
        (m_pMetadata->GetSize() != 0));

    return m_pMetadata->Validate();
}

VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
{
    void* pData = nullptr;
    VkResult res = Map(hAllocator, 1, &pData);
    if(res != VK_SUCCESS)
    {
        return res;
    }

    res = m_pMetadata->CheckCorruption(pData);

    Unmap(hAllocator, 1);

    return res;
}

VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
{
    if(count == 0)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    if(m_MapCount != 0)
    {
        m_MapCount += count;
        VMA_ASSERT(m_pMappedData != VMA_NULL);
        if(ppData != VMA_NULL)
        {
            *ppData = m_pMappedData;
        }
        return VK_SUCCESS;
    }
    else
    {
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            &m_pMappedData);
        if(result == VK_SUCCESS)
        {
            if(ppData != VMA_NULL)
            {
                *ppData = m_pMappedData;
            }
            m_MapCount = count;
        }
        return result;
    }
}

void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
{
    if(count == 0)
    {
        return;
    }

    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    if(m_MapCount >= count)
    {
        m_MapCount -= count;
        if(m_MapCount == 0)
        {
            m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
        }
    }
    else
    {
        VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    }
}
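// Example (sketch): the reference-counted Map()/Unmap() above is what makes nested
// mapping of one block legal through the public API. Assumes `alloc` was created
// in HOST_VISIBLE memory with an initialized VmaAllocator `allocator`.
static void NestedMap(VmaAllocator allocator, VmaAllocation alloc)
{
    void* p1 = nullptr;
    void* p2 = nullptr;
    vmaMapMemory(allocator, alloc, &p1); // block m_MapCount: 0 -> 1, calls vkMapMemory
    vmaMapMemory(allocator, alloc, &p2); // block m_MapCount: 1 -> 2, returns cached pointer
    // p1 == p2: both point into the same persistently mapped VkDeviceMemory.
    vmaUnmapMemory(allocator, alloc);    // 2 -> 1
    vmaUnmapMemory(allocator, alloc);    // 1 -> 0, calls vkUnmapMemory
}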
VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
{
    VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);

    void* pData;
    VkResult res = Map(hAllocator, 1, &pData);
    if(res != VK_SUCCESS)
    {
        return res;
    }
-12740 if(IsCorruptionDetectionEnabled())
-
-12742 size = VmaAlignUp<VkDeviceSize>(size,
sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
-12743 alignment = VmaAlignUp<VkDeviceSize>(alignment,
sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
-
-
-
-12747 VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
-12748 for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
-
-12750 res = AllocatePage(
-
-
-
-
-
-12756 pAllocations + allocIndex);
-12757 if(res != VK_SUCCESS)
-
-
-
-
-
-
-12764 if(res != VK_SUCCESS)
+12740 VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
+12741 VmaWriteMagicValue(pData, allocOffset + allocSize);
+
+12743 Unmap(hAllocator, 1);
+
+
+
+
+12748 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(
VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
+
+12750 VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
+12751 VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
+
+
+12754 VkResult res = Map(hAllocator, 1, &pData);
+12755 if(res != VK_SUCCESS)
+
+
+
+
+12760 if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
+
+12762 VMA_ASSERT(0 &&
"MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
+
+12764 else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
-
-12767 while(allocIndex--)
-
-12769 Free(pAllocations[allocIndex]);
-
-12771 memset(pAllocations, 0,
sizeof(
VmaAllocation) * allocationCount);
-
+12766 VMA_ASSERT(0 &&
"MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
+
+
+12769 Unmap(hAllocator, 1);
+
+
+
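A standalone sketch of the guard-margin layout these two functions maintain, using a plain byte buffer in place of mapped device memory; the magic constant and margin size below are placeholders, not necessarily VMA's actual values.

    #include <cassert>
    #include <cstdint>
    #include <vector>

    static const uint32_t MAGIC = 0x7F84E666u; // placeholder magic word
    static const size_t   DEBUG_MARGIN = 16;   // stands in for VMA_DEBUG_MARGIN

    static void WriteMagic(void* pData, size_t offset)
    {
        // Fill the whole margin with repeated 32-bit magic words.
        uint32_t* p = reinterpret_cast<uint32_t*>(static_cast<char*>(pData) + offset);
        for(size_t i = 0; i < DEBUG_MARGIN / sizeof(uint32_t); ++i)
            p[i] = MAGIC;
    }

    static bool ValidateMagic(const void* pData, size_t offset)
    {
        const uint32_t* p = reinterpret_cast<const uint32_t*>(static_cast<const char*>(pData) + offset);
        for(size_t i = 0; i < DEBUG_MARGIN / sizeof(uint32_t); ++i)
            if(p[i] != MAGIC)
                return false; // something wrote past an allocation boundary
        return true;
    }

    int main()
    {
        // Layout: [margin][allocation bytes][margin], mirroring an allocation
        // at allocOffset with DEBUG_MARGIN bytes of guard space on both sides.
        const size_t allocOffset = DEBUG_MARGIN, allocSize = 64;
        std::vector<char> block(DEBUG_MARGIN + allocSize + DEBUG_MARGIN);
        WriteMagic(block.data(), allocOffset - DEBUG_MARGIN);
        WriteMagic(block.data(), allocOffset + allocSize);

        block[allocOffset + allocSize] = 0; // simulate a one-byte overrun
        assert(ValidateMagic(block.data(), allocOffset - DEBUG_MARGIN));
        assert(!ValidateMagic(block.data(), allocOffset + allocSize)); // detected
    }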
VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    const VmaAllocator hAllocator,
    const VmaAllocation hAllocation,
    VkDeviceSize allocationLocalOffset,
    VkBuffer hBuffer,
    const void* pNext)
{
    VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
        hAllocation->GetBlock() == this);
    VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
        "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
    const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
    // This lock is important so that we don't call vkBind... and/or vkMap...
    // simultaneously on the same VkDeviceMemory from multiple threads.
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext);
}

VkResult VmaDeviceMemoryBlock::BindImageMemory(
    const VmaAllocator hAllocator,
    const VmaAllocation hAllocation,
    VkDeviceSize allocationLocalOffset,
    VkImage hImage,
    const void* pNext)
{
    VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
        hAllocation->GetBlock() == this);
    VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
        "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
    const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
    // This lock is important so that we don't call vkBind... and/or vkMap...
    // simultaneously on the same VkDeviceMemory from multiple threads.
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext);
}

static void VmaInitStatInfo(VmaStatInfo& outInfo)
{
    memset(&outInfo, 0, sizeof(outInfo));
    outInfo.allocationSizeMin = UINT64_MAX;
    outInfo.unusedRangeSizeMin = UINT64_MAX;
}

// Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
{
    inoutInfo.blockCount += srcInfo.blockCount;
    inoutInfo.allocationCount += srcInfo.allocationCount;
    inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    inoutInfo.usedBytes += srcInfo.usedBytes;
    inoutInfo.unusedBytes += srcInfo.unusedBytes;
    inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
}

static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
{
    inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
        VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
        VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
}
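At the application level, the same merged numbers are exposed through the public vmaCalculateStats() entry point. A short usage sketch (assumes an already initialized VmaAllocator named allocator):

    #include "vk_mem_alloc.h"
    #include <cstdio>

    void ReportMemoryUsage(VmaAllocator allocator)
    {
        VmaStats stats;
        vmaCalculateStats(allocator, &stats);

        // `total` aggregates the per-memory-type and per-heap VmaStatInfo
        // entries using the same min/max/sum merging shown above.
        printf("Used bytes: %llu in %u allocations\n",
            (unsigned long long)stats.total.usedBytes,
            stats.total.allocationCount);
        printf("Unused bytes: %llu across %u free ranges\n",
            (unsigned long long)stats.total.unusedBytes,
            stats.total.unusedRangeCount);
    }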
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo,
    VkDeviceSize preferredBlockSize) :
    m_BlockVector(
        hAllocator,
        this, // hParentPool
        createInfo.memoryTypeIndex,
        createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        createInfo.blockSize != 0, // explicitBlockSize
        createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK, // algorithm
        createInfo.priority),
    m_Id(0),
    m_Name(VMA_NULL)
{
}

VmaPool_T::~VmaPool_T()
{
    VMA_ASSERT(m_PrevPool == VMA_NULL && m_NextPool == VMA_NULL);
}

void VmaPool_T::SetName(const char* pName)
{
    const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks();
    VmaFreeString(allocs, m_Name);

    if(pName != VMA_NULL)
    {
        m_Name = VmaCreateStringCopy(allocs, pName);
    }
    else
    {
        m_Name = VMA_NULL;
    }
}

#if VMA_STATS_STRING_ENABLED

#endif // #if VMA_STATS_STRING_ENABLED

VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    VmaPool hParentPool,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool explicitBlockSize,
    uint32_t algorithm,
    float priority) :
    m_hAllocator(hAllocator),
    m_hParentPool(hParentPool),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_ExplicitBlockSize(explicitBlockSize),
    m_Algorithm(algorithm),
    m_Priority(priority),
    m_HasEmptyBlock(false),
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_NextBlockId(0)
{
}
VmaBlockVector::~VmaBlockVector()
{
    for(size_t i = m_Blocks.size(); i--; )
    {
        m_Blocks[i]->Destroy(m_hAllocator);
        vma_delete(m_hAllocator, m_Blocks[i]);
    }
}

VkResult VmaBlockVector::CreateMinBlocks()
{
    for(size_t i = 0; i < m_MinBlockCount; ++i)
    {
        VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
        if(res != VK_SUCCESS)
        {
            return res;
        }
    }
    return VK_SUCCESS;
}

void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
{
    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);

    const size_t blockCount = m_Blocks.size();

    pStats->size = 0;
    pStats->unusedSize = 0;
    pStats->allocationCount = 0;
    pStats->unusedRangeCount = 0;
    pStats->unusedRangeSizeMax = 0;
    pStats->blockCount = blockCount;

    for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
        VMA_ASSERT(pBlock);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        pBlock->m_pMetadata->AddPoolStats(*pStats);
    }
}

bool VmaBlockVector::IsEmpty()
{
    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    return m_Blocks.empty();
}

bool VmaBlockVector::IsCorruptionDetectionEnabled() const
{
    const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
        (VMA_DEBUG_MARGIN > 0) &&
        (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
        (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
}

static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
VkResult VmaBlockVector::Allocate(
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    const VmaAllocationCreateInfo& createInfo,
    VmaSuballocationType suballocType,
    size_t allocationCount,
    VmaAllocation* pAllocations)
{
    size_t allocIndex;
    VkResult res = VK_SUCCESS;

    if(IsCorruptionDetectionEnabled())
    {
        size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
        alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
    }

    {
        VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
        for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
        {
            res = AllocatePage(
                currentFrameIndex,
                size,
                alignment,
                createInfo,
                suballocType,
                pAllocations + allocIndex);
            if(res != VK_SUCCESS)
            {
                break;
            }
        }
    }

    if(res != VK_SUCCESS)
    {
        // Free all already created allocations.
-       while(allocIndex--)
-       {
-           Free(pAllocations[allocIndex]);
-       }
+       const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
+       while(allocIndex--)
+       {
+           VmaAllocation_T* const alloc = pAllocations[allocIndex];
+           const VkDeviceSize allocSize = alloc->GetSize();
+           Free(alloc);
+           m_hAllocator->m_Budget.RemoveAllocation(heapIndex, allocSize);
+       }
        memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    }

    return res;
}
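The public entry point for this batched path is vmaAllocateMemoryPages(): on failure of any page, everything already created is freed and, after the change above, its budget contribution is rolled back too. A short usage sketch (memory requirements assumed provided by the caller):

    #include "vk_mem_alloc.h"

    VkResult AllocateManyPages(VmaAllocator allocator, const VkMemoryRequirements& memReq,
        size_t count, VmaAllocation* outAllocs)
    {
        VmaAllocationCreateInfo allocInfo = {};
        allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

        // Either all `count` allocations succeed, or the function cleans up
        // and returns an error with all elements of outAllocs nulled out.
        return vmaAllocateMemoryPages(allocator, &memReq, &allocInfo, count, outAllocs, nullptr);
    }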
VkResult VmaBlockVector::AllocatePage(
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    const VmaAllocationCreateInfo& createInfo,
    VmaSuballocationType suballocType,
    VmaAllocation* pAllocation)
{
    const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;

    VkDeviceSize freeMemory;
    {
        const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
        VmaBudget heapBudget = {};
        m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
        freeMemory = (heapBudget.usage < heapBudget.budget) ? (heapBudget.budget - heapBudget.usage) : 0;
    }

    const bool canFallbackToDedicated = !IsCustomPool();
    const bool canCreateNewBlock =
        ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
        (m_Blocks.size() < m_MaxBlockCount) &&
        (freeMemory >= size || !canFallbackToDedicated);
    uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;

    // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
    // Which in turn is available only when maxBlockCount = 1.
    if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
    {
        canMakeOtherLost = false;
    }

    // Upper address can only be used with linear allocator and within single memory block.
    if(isUpperAddress &&
        (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
    {
        return VK_ERROR_FEATURE_NOT_PRESENT;
    }

    // Validate strategy.
    switch(strategy)
    {
    case 0:
        strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
        break;
    case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
    case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
    case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
        break;
    default:
        return VK_ERROR_FEATURE_NOT_PRESENT;
    }

    // Early reject: requested allocation size is larger that maximum block size for this block vector.
    if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
    {
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }

    /*
    Under certain condition, this whole section can be skipped for optimization, so
    we move on directly to trying to allocate with canMakeOtherLost. That's the case
    e.g. for custom pools with linear algorithm.
    */
    if(!canMakeOtherLost || canCreateNewBlock)
    {
        // 1. Search existing allocations. Try to allocate without making other allocations lost.
        VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
        allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;

        if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
        {
            // Use only last block.
            if(!m_Blocks.empty())
            {
                VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
                VMA_ASSERT(pCurrBlock);
                VkResult res = AllocateFromBlock(
                    pCurrBlock, currentFrameIndex, size, alignment, allocFlagsCopy,
                    createInfo.pUserData, suballocType, strategy, pAllocation);
                if(res == VK_SUCCESS)
                {
                    VMA_DEBUG_LOG("    Returned from last block #%u", pCurrBlock->GetId());
                    return VK_SUCCESS;
                }
            }
        }
        else
        {
            if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
            {
                // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
                for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
                {
                    VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
                    VMA_ASSERT(pCurrBlock);
                    VkResult res = AllocateFromBlock(
                        pCurrBlock, currentFrameIndex, size, alignment, allocFlagsCopy,
                        createInfo.pUserData, suballocType, strategy, pAllocation);
                    if(res == VK_SUCCESS)
                    {
                        VMA_DEBUG_LOG("    Returned from existing block #%u", pCurrBlock->GetId());
                        return VK_SUCCESS;
                    }
                }
            }
            else
            {
                // Backward order in m_Blocks - prefer blocks with largest amount of free space.
                for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
                {
                    VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
                    VMA_ASSERT(pCurrBlock);
                    VkResult res = AllocateFromBlock(
                        pCurrBlock, currentFrameIndex, size, alignment, allocFlagsCopy,
                        createInfo.pUserData, suballocType, strategy, pAllocation);
                    if(res == VK_SUCCESS)
                    {
                        VMA_DEBUG_LOG("    Returned from existing block #%u", pCurrBlock->GetId());
                        return VK_SUCCESS;
                    }
                }
            }
        }

        // 2. Try to create new block.
        if(canCreateNewBlock)
        {
            // Calculate optimal size for new block.
            VkDeviceSize newBlockSize = m_PreferredBlockSize;
            uint32_t newBlockSizeShift = 0;
            const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;

            if(!m_ExplicitBlockSize)
            {
                // Allocate 1/8, 1/4, 1/2 as first blocks.
                const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
                for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
                {
                    const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
                    if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
                    {
                        newBlockSize = smallerNewBlockSize;
                        ++newBlockSizeShift;
                    }
                    else
                    {
                        break;
                    }
                }
            }

            size_t newBlockIndex = 0;
            VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
                CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
            // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
            if(!m_ExplicitBlockSize)
            {
                while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
                {
                    const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
                    if(smallerNewBlockSize >= size)
                    {
                        newBlockSize = smallerNewBlockSize;
                        ++newBlockSizeShift;
                        res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
                            CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
                    }
                    else
                    {
                        break;
                    }
                }
            }

            if(res == VK_SUCCESS)
            {
                VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
                VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);

                res = AllocateFromBlock(
                    pBlock, currentFrameIndex, size, alignment, allocFlagsCopy,
                    createInfo.pUserData, suballocType, strategy, pAllocation);
                if(res == VK_SUCCESS)
                {
                    VMA_DEBUG_LOG("    Created new block #%u Size=%llu", pBlock->GetId(), newBlockSize);
                    return VK_SUCCESS;
                }
                else
                {
                    // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
                    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
                }
            }
        }
    }

    // 3. Try to allocate from existing blocks with making other allocations lost.
    if(canMakeOtherLost)
    {
        uint32_t tryIndex = 0;
        for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
        {
            VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
            VmaAllocationRequest bestRequest = {};
            VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;

            // 1. Search existing allocations.
            if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
            {
                // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
                for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
                {
                    VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
                    VMA_ASSERT(pCurrBlock);
                    VmaAllocationRequest currRequest = {};
                    if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
                        currentFrameIndex,
                        m_FrameInUseCount,
                        m_BufferImageGranularity,
                        size,
                        alignment,
                        isUpperAddress,
                        suballocType,
                        canMakeOtherLost,
                        strategy,
                        &currRequest))
                    {
                        const VkDeviceSize currRequestCost = currRequest.CalcCost();
                        if(pBestRequestBlock == VMA_NULL ||
                            currRequestCost < bestRequestCost)
                        {
                            pBestRequestBlock = pCurrBlock;
                            bestRequest = currRequest;
                            bestRequestCost = currRequestCost;
                            if(bestRequestCost == 0)
                            {
                                break;
                            }
                        }
                    }
                }
            }
            else // WORST_FIT, FIRST_FIT
            {
                // Backward order in m_Blocks - prefer blocks with largest amount of free space.
                for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
                {
                    VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
                    VMA_ASSERT(pCurrBlock);
                    VmaAllocationRequest currRequest = {};
                    if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
                        currentFrameIndex,
                        m_FrameInUseCount,
                        m_BufferImageGranularity,
                        size,
                        alignment,
                        isUpperAddress,
                        suballocType,
                        canMakeOtherLost,
                        strategy,
                        &currRequest))
                    {
                        const VkDeviceSize currRequestCost = currRequest.CalcCost();
                        if(pBestRequestBlock == VMA_NULL ||
                            currRequestCost < bestRequestCost ||
                            strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
                        {
                            pBestRequestBlock = pCurrBlock;
                            bestRequest = currRequest;
                            bestRequestCost = currRequestCost;
                            if(bestRequestCost == 0 ||
                                strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
                            {
                                break;
                            }
                        }
                    }
                }
            }

            if(pBestRequestBlock != VMA_NULL)
            {
                if(mapped)
                {
                    VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
                    if(res != VK_SUCCESS)
                    {
                        return res;
                    }
                }

                if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
                    currentFrameIndex,
                    m_FrameInUseCount,
                    &bestRequest))
                {
                    // Allocate from this block.
                    *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(currentFrameIndex, isUserDataString);
                    pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
                    UpdateHasEmptyBlock();
                    (*pAllocation)->InitBlockAllocation(
                        pBestRequestBlock,
                        bestRequest.offset,
                        alignment,
                        size,
                        m_MemoryTypeIndex,
                        suballocType,
                        mapped,
                        (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
                    VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
                    VMA_DEBUG_LOG("    Returned from existing block");
                    (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
                    m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
                    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
                    {
                        m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
                    }
                    if(IsCorruptionDetectionEnabled())
                    {
                        VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
                        VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
                    }
                    return VK_SUCCESS;
                }
                // else: Some allocations must have been touched while we are here. Next try.
            }
            else
            {
                // Could not find place in any of the blocks - break outer loop.
                break;
            }
        }
        /* Maximum number of tries exceeded - a very unlikely event when many other
        threads are simultaneously touching allocations making it impossible to make
        lost at the same time as we try to allocate. */
        if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
        {
            return VK_ERROR_TOO_MANY_OBJECTS;
        }
    }

    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
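A standalone paraphrase of the block-sizing heuristic above (not a VMA API): start from the preferred block size and take up to three halvings, so the first blocks of a heap come out at 1/8, 1/4, 1/2 of the preferred size before full-size blocks are committed.

    #include <cstdint>

    uint64_t ChooseNewBlockSize(uint64_t preferredBlockSize, uint64_t maxExistingBlockSize, uint64_t allocSize)
    {
        const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
        uint64_t newBlockSize = preferredBlockSize;
        for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
        {
            const uint64_t smaller = newBlockSize / 2;
            // Shrink only while the candidate still dwarfs what already exists
            // and leaves the requested allocation at most half of the new block.
            if(smaller > maxExistingBlockSize && smaller >= allocSize * 2)
                newBlockSize = smaller;
            else
                break;
        }
        return newBlockSize;
    }

The same shifting counter is reused on the retry path: if device memory allocation fails, the size is halved again (up to the same limit) before giving up.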
void VmaBlockVector::Free(
    const VmaAllocation hAllocation)
{
    VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;

    bool budgetExceeded = false;
    {
        const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
        VmaBudget heapBudget = {};
        m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
        budgetExceeded = heapBudget.usage >= heapBudget.budget;
    }

    // Scope for lock.
    {
        VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);

        VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();

        if(IsCorruptionDetectionEnabled())
        {
            VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
        }

        if(hAllocation->IsPersistentMap())
        {
            pBlock->Unmap(m_hAllocator, 1);
        }

        pBlock->m_pMetadata->Free(hAllocation);
        VMA_HEAVY_ASSERT(pBlock->Validate());

        VMA_DEBUG_LOG("  Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);

        const bool canDeleteBlock = m_Blocks.size() > m_MinBlockCount;
        // pBlock became empty after this deallocation.
        if(pBlock->m_pMetadata->IsEmpty())
        {
            // Already has empty block. We don't want to have two, so delete this one.
            if((m_HasEmptyBlock || budgetExceeded) && canDeleteBlock)
            {
                pBlockToDelete = pBlock;
                Remove(pBlock);
            }
            // else: We now have an empty block - leave it.
        }
        // pBlock didn't become empty, but we have another empty block - find and free that one.
        // (This is optional, heuristics.)
        else if(m_HasEmptyBlock && canDeleteBlock)
        {
            VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
            if(pLastBlock->m_pMetadata->IsEmpty())
            {
                pBlockToDelete = pLastBlock;
                m_Blocks.pop_back();
            }
        }

        UpdateHasEmptyBlock();
        IncrementallySortBlocks();
    }

    // Destruction of a free block. Deferred until this point, outside of mutex
    // lock, for performance reason.
    if(pBlockToDelete != VMA_NULL)
    {
        VMA_DEBUG_LOG("    Deleted empty block");
        pBlockToDelete->Destroy(m_hAllocator);
        vma_delete(m_hAllocator, pBlockToDelete);
    }
}

VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
{
    VkDeviceSize result = 0;
    for(size_t i = m_Blocks.size(); i--; )
    {
        result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
        if(result >= m_PreferredBlockSize)
        {
            break;
        }
    }
    return result;
}

void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
{
    for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    {
        if(m_Blocks[blockIndex] == pBlock)
        {
            VmaVectorRemove(m_Blocks, blockIndex);
            return;
        }
    }
    VMA_ASSERT(0);
}
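The budget numbers Free() consults are also available to applications through vmaGetBudget(), which fills one VmaBudget per memory heap. A short usage sketch:

    #include "vk_mem_alloc.h"

    bool IsHeapOverBudget(VmaAllocator allocator, uint32_t heapIndex)
    {
        VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
        vmaGetBudget(allocator, budgets);
        // Mirrors `budgetExceeded = heapBudget.usage >= heapBudget.budget` above.
        return budgets[heapIndex].usage >= budgets[heapIndex].budget;
    }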
void VmaBlockVector::IncrementallySortBlocks()
{
    if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    {
        // Bubble sort only until first swap.
        for(size_t i = 1; i < m_Blocks.size(); ++i)
        {
            if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
            {
                VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
                return;
            }
        }
    }
}

VkResult VmaBlockVector::AllocateFromBlock(
    VmaDeviceMemoryBlock* pBlock,
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    VmaAllocationCreateFlags allocFlags,
    void* pUserData,
    VmaSuballocationType suballocType,
    uint32_t strategy,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;

    VmaAllocationRequest currRequest = {};
    if(pBlock->m_pMetadata->CreateAllocationRequest(
        currentFrameIndex,
        m_FrameInUseCount,
        m_BufferImageGranularity,
        size,
        alignment,
        isUpperAddress,
        suballocType,
        false, // canMakeOtherLost
        strategy,
        &currRequest))
    {
        // Allocate from pCurrBlock.
        VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);

        if(mapped)
        {
            VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
            if(res != VK_SUCCESS)
            {
                return res;
            }
        }

        *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(currentFrameIndex, isUserDataString);
        pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
        UpdateHasEmptyBlock();
        (*pAllocation)->InitBlockAllocation(
            pBlock,
            currRequest.offset,
            alignment,
            size,
            m_MemoryTypeIndex,
            suballocType,
            mapped,
            (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        (*pAllocation)->SetUserData(m_hAllocator, pUserData);
        m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
        if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
        {
            m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
        }
        if(IsCorruptionDetectionEnabled())
        {
            VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
        }
        return VK_SUCCESS;
    }
    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}

VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
{
    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    allocInfo.allocationSize = blockSize;

#if VMA_BUFFER_DEVICE_ADDRESS
    // Every standalone block can potentially contain a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT - always enable the feature.
    VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
    if(m_hAllocator->m_UseKhrBufferDeviceAddress)
    {
        allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
        VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
    }
#endif // #if VMA_BUFFER_DEVICE_ADDRESS

#if VMA_MEMORY_PRIORITY
    VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT };
    if(m_hAllocator->m_UseExtMemoryPriority)
    {
        priorityInfo.priority = m_Priority;
        VmaPnextChainPushFront(&allocInfo, &priorityInfo);
    }
#endif // #if VMA_MEMORY_PRIORITY

    VkDeviceMemory mem = VK_NULL_HANDLE;
    VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    if(res < 0)
    {
        return res;
    }

    // New VkDeviceMemory successfully created.

    // Create new Allocation for it.
    VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    pBlock->Init(
        m_hAllocator,
        m_hParentPool,
        m_MemoryTypeIndex,
        mem,
        allocInfo.allocationSize,
        m_NextBlockId++,
        m_Algorithm);

    m_Blocks.push_back(pBlock);
    if(pNewBlockIndex != VMA_NULL)
    {
        *pNewBlockIndex = m_Blocks.size() - 1;
    }

    return VK_SUCCESS;
}
void VmaBlockVector::ApplyDefragmentationMovesCpu(
    class VmaBlockVectorDefragmentationContext* pDefragCtx,
    const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
{
    const size_t blockCount = m_Blocks.size();
    const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);

    enum BLOCK_FLAG
    {
        BLOCK_FLAG_USED = 0x00000001,
        BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
    };

    struct BlockInfo
    {
        uint32_t flags;
        void* pMappedData;
    };
    VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
        blockInfo(blockCount, BlockInfo(), VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
    memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));

    // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
    const size_t moveCount = moves.size();
    for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    {
        const VmaDefragmentationMove& move = moves[moveIndex];
        blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
        blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
    }

    VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);

    // Go over all blocks. Get mapped pointer or map if necessary.
    for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo& currBlockInfo = blockInfo[blockIndex];
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
        {
            currBlockInfo.pMappedData = pBlock->GetMappedData();
            // It is not originally mapped - map it.
            if(currBlockInfo.pMappedData == VMA_NULL)
            {
                pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
                if(pDefragCtx->res == VK_SUCCESS)
                {
                    currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
                }
            }
        }
    }

    // Go over all moves. Do actual data transfer.
    if(pDefragCtx->res == VK_SUCCESS)
    {
        const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };

        for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
        {
            const VmaDefragmentationMove& move = moves[moveIndex];

            const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
            const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];

            VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);

            // Invalidate source.
            if(isNonCoherent)
            {
                VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
                memRange.memory = pSrcBlock->GetDeviceMemory();
                memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
                    pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
                (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
            }

            // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
            memmove(
                reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
                reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
                static_cast<size_t>(move.size));

            if(IsCorruptionDetectionEnabled())
            {
                VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
                VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
            }

            // Flush destination.
            if(isNonCoherent)
            {
                VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
                memRange.memory = pDstBlock->GetDeviceMemory();
                memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
                    pDstBlock->m_pMetadata->GetSize() - memRange.offset);
                (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
            }
        }
    }

    // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
    // Regardless of pCtx->res == VK_SUCCESS.
    for(size_t blockIndex = blockCount; blockIndex--; )
    {
        const BlockInfo& currBlockInfo = blockInfo[blockIndex];
        if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
        {
            VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
            pBlock->Unmap(m_hAllocator, 1);
        }
    }
}
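A standalone paraphrase of the nonCoherentAtomSize clamping used above: a flush/invalidate range must start on an atom boundary with an atom-aligned size, but it may not run past the end of the block.

    #include <algorithm>
    #include <cstdint>

    struct Range { uint64_t offset, size; };

    Range AlignRangeToAtom(uint64_t offset, uint64_t size, uint64_t atom, uint64_t blockSize)
    {
        Range r;
        r.offset = (offset / atom) * atom; // align down, as VmaAlignDown does
        // Align the size up, accounting for the bytes gained by moving the start back.
        const uint64_t alignedSize = ((size + (offset - r.offset) + atom - 1) / atom) * atom;
        r.size = std::min(alignedSize, blockSize - r.offset); // don't overrun the block
        return r;
    }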
void VmaBlockVector::ApplyDefragmentationMovesGpu(
    class VmaBlockVectorDefragmentationContext* pDefragCtx,
    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkCommandBuffer commandBuffer)
{
    const size_t blockCount = m_Blocks.size();

    pDefragCtx->blockContexts.resize(blockCount);
    memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));

    // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
    const size_t moveCount = moves.size();
    for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    {
        const VmaDefragmentationMove& move = moves[moveIndex];
        pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
        pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
    }

    VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);

    // Go over all blocks. Create and bind buffer for whole block if necessary.
    {
        VkBufferCreateInfo bufCreateInfo;
        VmaFillGpuDefragmentationBufferCreateInfo(bufCreateInfo);

        for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
        {
            VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
            VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
            if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
            {
                bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
                pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
                    m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
                if(pDefragCtx->res == VK_SUCCESS)
                {
                    pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
                        m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
                }
            }
        }
    }

    // Go over all moves. Post data transfer commands to command buffer.
    if(pDefragCtx->res == VK_SUCCESS)
    {
        for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
        {
            const VmaDefragmentationMove& move = moves[moveIndex];

            const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
            const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];

            VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);

            VkBufferCopy region = {
                move.srcOffset,
                move.dstOffset,
                move.size };
            (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
                commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
        }
    }

    // Save buffers to defrag context for later destruction.
    if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
    {
        pDefragCtx->res = VK_NOT_READY;
    }
}
void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
{
    for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    {
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if(pBlock->m_pMetadata->IsEmpty())
        {
            if(m_Blocks.size() > m_MinBlockCount)
            {
                if(pDefragmentationStats != VMA_NULL)
                {
                    ++pDefragmentationStats->deviceMemoryBlocksFreed;
                    pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
                }

                VmaVectorRemove(m_Blocks, blockIndex);
                pBlock->Destroy(m_hAllocator);
                vma_delete(m_hAllocator, pBlock);
            }
            else
            {
                break;
            }
        }
    }
    UpdateHasEmptyBlock();
}

void VmaBlockVector::UpdateHasEmptyBlock()
{
    m_HasEmptyBlock = false;
    for(size_t index = 0, count = m_Blocks.size(); index < count; ++index)
    {
        VmaDeviceMemoryBlock* const pBlock = m_Blocks[index];
        if(pBlock->m_pMetadata->IsEmpty())
        {
            m_HasEmptyBlock = true;
            break;
        }
    }
}

#if VMA_STATS_STRING_ENABLED
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();

    if(IsCustomPool())
    {
        const char* poolName = m_hParentPool->GetName();
        if(poolName != VMA_NULL && poolName[0] != '\0')
        {
            json.WriteString("Name");
            json.WriteString(poolName);
        }

        json.WriteString("MemoryTypeIndex");
        json.WriteNumber(m_MemoryTypeIndex);

        json.WriteString("BlockSize");
        json.WriteNumber(m_PreferredBlockSize);

        json.WriteString("BlockCount");
        json.BeginObject(true);
        if(m_MinBlockCount > 0)
        {
            json.WriteString("Min");
            json.WriteNumber((uint64_t)m_MinBlockCount);
        }
        if(m_MaxBlockCount < SIZE_MAX)
        {
            json.WriteString("Max");
            json.WriteNumber((uint64_t)m_MaxBlockCount);
        }
        json.WriteString("Cur");
        json.WriteNumber((uint64_t)m_Blocks.size());
        json.EndObject();

        if(m_FrameInUseCount > 0)
        {
            json.WriteString("FrameInUseCount");
            json.WriteNumber(m_FrameInUseCount);
        }

        if(m_Algorithm != 0)
        {
            json.WriteString("Algorithm");
            json.WriteString(VmaAlgorithmToStr(m_Algorithm));
        }
    }
    else
    {
        json.WriteString("PreferredBlockSize");
        json.WriteNumber(m_PreferredBlockSize);
    }

    json.WriteString("Blocks");
    json.BeginObject();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        json.BeginString();
        json.ContinueString(m_Blocks[i]->GetId());
        json.EndString();

        m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    }
    json.EndObject();

    json.EndObject();
}

#endif // #if VMA_STATS_STRING_ENABLED
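The JSON emitted by PrintDetailedMap() is exposed through the public vmaBuildStatsString(); with detailedMap enabled, the "Blocks" object contains one entry per VkDeviceMemory block keyed by its id. A short usage sketch:

    #include "vk_mem_alloc.h"
    #include <cstdio>

    void DumpAllocatorState(VmaAllocator allocator)
    {
        char* statsString = nullptr;
        vmaBuildStatsString(allocator, &statsString, VK_TRUE /*detailedMap*/);
        printf("%s\n", statsString);
        vmaFreeStatsString(allocator, statsString);
    }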
void VmaBlockVector::Defragment(
    class VmaBlockVectorDefragmentationContext* pCtx,
    VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags,
    VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
    VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
    VkCommandBuffer commandBuffer)
{
    pCtx->res = VK_SUCCESS;

    const VkMemoryPropertyFlags memPropFlags =
        m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
    const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;

    const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
        isHostVisible;
    const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
        !IsCorruptionDetectionEnabled() &&
        ((1u << m_MemoryTypeIndex) & m_hAllocator->GetGpuDefragmentationMemoryTypeBits()) != 0;
    // There are options to defragment this memory type.
    if(canDefragmentOnCpu || canDefragmentOnGpu)
    {
        bool defragmentOnGpu;
        // There is only one option to defragment this memory type.
        if(canDefragmentOnGpu != canDefragmentOnCpu)
        {
            defragmentOnGpu = canDefragmentOnGpu;
        }
        // Both options are available: Heuristics to choose the best one.
        else
        {
            defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
                m_hAllocator->IsIntegratedGpu();
        }
        bool overlappingMoveSupported = !defragmentOnGpu;

        if(m_hAllocator->m_UseMutex)
        {
            if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
            {
                // Incremental defragmentation must not block other threads: bail out if the lock is taken.
                if(!m_Mutex.TryLockWrite())
                {
                    pCtx->res = VK_ERROR_INITIALIZATION_FAILED;
                    return;
                }
            }
            else
            {
                m_Mutex.LockWrite();
                pCtx->mutexLocked = true;
            }
        }

        pCtx->Begin(overlappingMoveSupported, flags);

        // Defragment.
        const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
        const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
        pCtx->res = pCtx->GetAlgorithm()->Defragment(pCtx->defragmentationMoves, maxBytesToMove, maxAllocationsToMove, flags);

        // Accumulate statistics.
        if(pStats != VMA_NULL)
        {
            const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
            const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
            pStats->bytesMoved += bytesMoved;
            pStats->allocationsMoved += allocationsMoved;
            VMA_ASSERT(bytesMoved <= maxBytesToMove);
            VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
            if(defragmentOnGpu)
            {
                maxGpuBytesToMove -= bytesMoved;
                maxGpuAllocationsToMove -= allocationsMoved;
            }
            else
            {
                maxCpuBytesToMove -= bytesMoved;
                maxCpuAllocationsToMove -= allocationsMoved;
            }
        }

        if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
        {
            if(m_hAllocator->m_UseMutex)
                m_Mutex.UnlockWrite();

            if(pCtx->res >= VK_SUCCESS && !pCtx->defragmentationMoves.empty())
                pCtx->res = VK_NOT_READY;

            return;
        }

        if(pCtx->res >= VK_SUCCESS)
        {
            if(defragmentOnGpu)
            {
                ApplyDefragmentationMovesGpu(pCtx, pCtx->defragmentationMoves, commandBuffer);
            }
            else
            {
                ApplyDefragmentationMovesCpu(pCtx, pCtx->defragmentationMoves);
            }
        }
    }
}
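The CPU/GPU split above is controlled entirely by the limits and the command buffer that the caller supplies in VmaDefragmentationInfo2. A minimal sketch of the blocking path through vmaDefragmentationBegin/vmaDefragmentationEnd, assuming cmdBuf is in the recording state when the call is made and that the caller submits it and waits for completion before ending defragmentation (DefragmentAll is an illustrative helper):

// Sketch: whole-set defragmentation with both CPU- and GPU-side moves enabled.
VkResult DefragmentAll(VmaAllocator allocator, VkCommandBuffer cmdBuf,
    uint32_t allocCount, VmaAllocation* allocs, VkBool32* changed)
{
    VmaDefragmentationInfo2 info = {};
    info.allocationCount = allocCount;
    info.pAllocations = allocs;
    info.pAllocationsChanged = changed;
    info.maxCpuBytesToMove = VK_WHOLE_SIZE;
    info.maxCpuAllocationsToMove = UINT32_MAX;
    info.maxGpuBytesToMove = VK_WHOLE_SIZE;
    info.maxGpuAllocationsToMove = UINT32_MAX;
    info.commandBuffer = cmdBuf; // non-null enables the ApplyDefragmentationMovesGpu path

    VmaDefragmentationContext ctx = VK_NULL_HANDLE;
    VkResult res = vmaDefragmentationBegin(allocator, &info, VMA_NULL, &ctx);
    // ... submit cmdBuf here and wait for it to complete ...
    if(res >= VK_SUCCESS)
        res = vmaDefragmentationEnd(allocator, ctx);
    return res;
}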
void VmaBlockVector::DefragmentationEnd(
    class VmaBlockVectorDefragmentationContext* pCtx,
    uint32_t flags,
    VmaDefragmentationStats* pStats)
{
    if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL && m_hAllocator->m_UseMutex)
    {
        VMA_ASSERT(pCtx->mutexLocked == false);

        // Incremental defragmentation doesn't hold the mutex while the user performs the moves,
        // so the lock has to be taken here before the block vector is mutated.
        m_Mutex.LockWrite();
        pCtx->mutexLocked = true;
    }

    // If the mutex isn't locked, no work was done and there is nothing to clean up.
    if(pCtx->mutexLocked || !m_hAllocator->m_UseMutex)
    {
        // Destroy buffers that were created for GPU-side copies.
        for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--;)
        {
            VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
            if(blockCtx.hBuffer)
            {
                (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
                    m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
            }
        }

        if(pCtx->res >= VK_SUCCESS)
        {
            FreeEmptyBlocks(pStats);
        }
    }

    if(pCtx->mutexLocked)
    {
        VMA_ASSERT(m_hAllocator->m_UseMutex);
        m_Mutex.UnlockWrite();
    }
}

uint32_t VmaBlockVector::ProcessDefragmentations(
    class VmaBlockVectorDefragmentationContext *pCtx,
    VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves)
{
    VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);

    const uint32_t moveCount = VMA_MIN(uint32_t(pCtx->defragmentationMoves.size()) - pCtx->defragmentationMovesProcessed, maxMoves);

    for(uint32_t i = 0; i < moveCount; ++i)
    {
        VmaDefragmentationMove& move = pCtx->defragmentationMoves[pCtx->defragmentationMovesProcessed + i];

        pMove->allocation = move.hAllocation;
        pMove->memory = move.pDstBlock->GetDeviceMemory();
        pMove->offset = move.dstOffset;

        ++pMove;
    }

    pCtx->defragmentationMovesProcessed += moveCount;

    return moveCount;
}
void VmaBlockVector::CommitDefragmentations(
    class VmaBlockVectorDefragmentationContext *pCtx,
    VmaDefragmentationStats* pStats)
{
    VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
    for(uint32_t i = pCtx->defragmentationMovesCommitted; i < pCtx->defragmentationMovesProcessed; ++i)
    {
        const VmaDefragmentationMove& move = pCtx->defragmentationMoves[i];

        move.pSrcBlock->m_pMetadata->FreeAtOffset(move.srcOffset);
        move.hAllocation->ChangeBlockAllocation(m_hAllocator, move.pDstBlock, move.dstOffset);
    }

    pCtx->defragmentationMovesCommitted = pCtx->defragmentationMovesProcessed;
    FreeEmptyBlocks(pStats);
}
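ProcessDefragmentations hands planned moves out to the caller in batches and CommitDefragmentations applies the metadata updates afterwards; together they back the incremental pass API. A minimal sketch of the driving loop, assuming the context was created with VMA_DEFRAGMENTATION_FLAG_INCREMENTAL and that PerformMoves (a hypothetical caller-side function) copies each allocation's contents to pMoves[i].memory at pMoves[i].offset and waits for the copies to finish:

// Hypothetical caller-side copy routine: performs and synchronizes the data moves.
void PerformMoves(VmaDefragmentationPassMoveInfo* pMoves, uint32_t moveCount);

// Sketch: drive incremental defragmentation one pass at a time.
void DefragmentIncrementally(VmaAllocator allocator, VmaDefragmentationContext ctx)
{
    VkResult res;
    do
    {
        VmaDefragmentationPassMoveInfo moves[64];
        VmaDefragmentationPassInfo passInfo = {};
        passInfo.moveCount = 64; // updated by the call to the number of moves returned
        passInfo.pMoves = moves;

        res = vmaBeginDefragmentationPass(allocator, ctx, &passInfo);
        if(res != VK_SUCCESS && res != VK_NOT_READY)
            break; // hard error

        PerformMoves(passInfo.pMoves, passInfo.moveCount);

        res = vmaEndDefragmentationPass(allocator, ctx);
    } while(res == VK_NOT_READY); // VK_NOT_READY: more moves remain
}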
size_t VmaBlockVector::CalcAllocationCount() const
{
    size_t result = 0;
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
    }
    return result;
}

bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
{
    if(m_BufferImageGranularity == 1)
    {
        return false;
    }
    VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
    for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
    {
        VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
        VMA_ASSERT(m_Algorithm == 0);
        VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
        if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
        {
            return true;
        }
    }
    return false;
}

void VmaBlockVector::MakePoolAllocationsLost(
    uint32_t currentFrameIndex,
    size_t* pLostAllocationCount)
{
    VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
    size_t lostAllocationCount = 0;
    for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    {
        VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
        VMA_ASSERT(pBlock);
        lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    }
    if(pLostAllocationCount != VMA_NULL)
    {
        *pLostAllocationCount = lostAllocationCount;
    }
}

VkResult VmaBlockVector::CheckCorruption()
{
    if(!IsCorruptionDetectionEnabled())
    {
        return VK_ERROR_FEATURE_NOT_PRESENT;
    }

    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    {
        VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
        VMA_ASSERT(pBlock);
        VkResult res = pBlock->CheckCorruption(m_hAllocator);
        if(res != VK_SUCCESS)
        {
            return res;
        }
    }
    return VK_SUCCESS;
}

void VmaBlockVector::AddStats(VmaStats* pStats)
{
    const uint32_t memTypeIndex = m_MemoryTypeIndex;
    const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);

    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);

    for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    {
        const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
        VMA_ASSERT(pBlock);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        VmaStatInfo allocationStatInfo;
        pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
        VmaAddStatInfo(pStats->total, allocationStatInfo);
        VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
        VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    }
}
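AddStats folds each block's VmaStatInfo into the per-memory-type, per-heap and grand-total figures of VmaStats, which is the aggregation behind the public vmaCalculateStats call. A minimal sketch of reading the totals (the printf formatting is illustrative):

// Sketch: read aggregate statistics as produced by VmaBlockVector::AddStats.
#include <cstdio>
void PrintTotalUsage(VmaAllocator allocator)
{
    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    printf("blocks: %u, allocations: %u, used: %llu B, unused: %llu B\n",
        stats.total.blockCount,
        stats.total.allocationCount,
        (unsigned long long)stats.total.usedBytes,
        (unsigned long long)stats.total.unusedBytes);
}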
VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex,
    bool overlappingMoveSupported) :
    VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
    m_AllocationCount(0),
    m_AllAllocations(false),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
{
    // Create block info for each block.
    const size_t blockCount = m_pBlockVector->m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
        pBlockInfo->m_OriginalBlockIndex = blockIndex;
        pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
        m_Blocks.push_back(pBlockInfo);
    }

    // Sort them by m_pBlock pointer value.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
}

VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
{
    for(size_t i = m_Blocks.size(); i--; )
    {
        vma_delete(m_hAllocator, m_Blocks[i]);
    }
}

void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
{
    // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
    if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
    {
        VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
        BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
        if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
        {
            AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
            (*it)->m_Allocations.push_back(allocInfo);
        }

        ++m_AllocationCount;
    }
}

VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove,
    bool freeOldAllocations)
{
    if(m_Blocks.empty())
    {
        return VK_SUCCESS;
    }
    // This is a choice based on research.
    size_t srcBlockMinIndex = 0;

    size_t srcBlockIndex = m_Blocks.size() - 1;
    size_t srcAllocIndex = SIZE_MAX;
    for(;;)
    {
        // 1. Find next allocation to move.
        // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
        // 1.2. Then start from last to first m_Allocations.
        while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
        {
            if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
            {
                // Finished: no more allocations to process.
                if(srcBlockIndex == srcBlockMinIndex)
                {
                    return VK_SUCCESS;
                }
                else
                {
                    --srcBlockIndex;
                    srcAllocIndex = SIZE_MAX;
                }
            }
            else
            {
                srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
            }
        }

        BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
        AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];

        const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
        const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
        const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
        const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();

        // 2. Try to find new place for this allocation in preceding or current block.
        for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
        {
            BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
            VmaAllocationRequest dstAllocRequest;
            if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
                m_CurrentFrameIndex,
                m_pBlockVector->GetFrameInUseCount(),
                m_pBlockVector->GetBufferImageGranularity(),
                size,
                alignment,
                false, // upperAddress
                suballocType,
                false, // canMakeOtherLost
                VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT, // strategy (reconstructed)
                &dstAllocRequest) &&
            MoveMakesSense(
                dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
            {
                VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);

                // Reached limit on number of allocations or bytes to move.
                if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
                    (m_BytesMoved + size > maxBytesToMove))
                {
                    return VK_SUCCESS;
                }
                VmaDefragmentationMove move = {};
                move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
                move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
                move.srcOffset = srcOffset;
                move.dstOffset = dstAllocRequest.offset;
                move.size = size;
                move.hAllocation = allocInfo.m_hAllocation;
                move.pSrcBlock = pSrcBlockInfo->m_pBlock;
                move.pDstBlock = pDstBlockInfo->m_pBlock;

                moves.push_back(move);
                pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
                    dstAllocRequest,
                    suballocType,
                    size,
                    allocInfo.m_hAllocation);

                if(freeOldAllocations)
                {
                    pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
                    allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
                }

                if(allocInfo.m_pChanged != VMA_NULL)
                {
                    *allocInfo.m_pChanged = VK_TRUE;
                }
                ++m_AllocationsMoved;
                m_BytesMoved += size;

                VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);

                break;
            }
        }

        // If the allocation was not processed here, it stays in m_Allocations for the next round.
        if(srcAllocIndex > 0)
        {
            --srcAllocIndex;
        }
        else
        {
            if(srcBlockIndex > 0)
            {
                --srcBlockIndex;
                srcAllocIndex = SIZE_MAX;
            }
            else
            {
                return VK_SUCCESS;
            }
        }
    }
}
size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
{
    size_t result = 0;
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        if(m_Blocks[i]->m_HasNonMovableAllocations)
        {
            ++result;
        }
    }
    return result;
}

VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove,
    VmaDefragmentationFlags flags)
{
    if(!m_AllAllocations && m_AllocationCount == 0)
    {
        return VK_SUCCESS;
    }

    const size_t blockCount = m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = m_Blocks[blockIndex];

        if(m_AllAllocations)
        {
            VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
            for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
                it != pMetadata->m_Suballocations.end();
                ++it)
            {
                if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
                {
                    AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
                    pBlockInfo->m_Allocations.push_back(allocInfo);
                }
            }
        }
        pBlockInfo->CalcHasNonMovableAllocations();

        // This is a choice based on research.
        pBlockInfo->SortAllocationsByOffsetDescending();
    }

    // Sort m_Blocks from most "destination" to most "source" blocks.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());

    // This is a choice based on research.
    const uint32_t roundCount = 2;

    // Execute defragmentation rounds (the main part).
    VkResult result = VK_SUCCESS;
    for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
    {
        result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove, !(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL));
    }

    return result;
}

bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
    size_t dstBlockIndex, VkDeviceSize dstOffset,
    size_t srcBlockIndex, VkDeviceSize srcOffset)
{
    if(dstBlockIndex < srcBlockIndex)
    {
        return true;
    }
    if(dstBlockIndex > srcBlockIndex)
    {
        return false;
    }
    if(dstOffset < srcOffset)
    {
        return true;
    }
    return false;
}
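MoveMakesSense accepts a move only when the destination is strictly earlier than the source, comparing block index first and offset second, so every accepted move compacts data toward the front of the block list. The same rule restated as a standalone predicate, for illustration only:

// Sketch: the acceptance rule is a strict lexicographic "less than" on
// (blockIndex, offset) pairs - equivalent to MoveMakesSense above.
#include <utility>
bool DstIsEarlier(size_t dstBlockIndex, VkDeviceSize dstOffset,
                  size_t srcBlockIndex, VkDeviceSize srcOffset)
{
    return std::make_pair(dstBlockIndex, dstOffset) <
           std::make_pair(srcBlockIndex, srcOffset);
}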
VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex,
    bool overlappingMoveSupported) :
    VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
    m_OverlappingMoveSupported(overlappingMoveSupported),
    m_AllocationCount(0),
    m_AllAllocations(false),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
{
    VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
}

VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
{
}

VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove,
    VmaDefragmentationFlags flags)
{
    VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);

    const size_t blockCount = m_pBlockVector->GetBlockCount();
    if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
    {
        return VK_SUCCESS;
    }

    PreprocessMetadata();

    // Sort blocks in order from most destination.
    m_BlockInfos.resize(blockCount);
    for(size_t i = 0; i < blockCount; ++i)
    {
        m_BlockInfos[i].origBlockIndex = i;
    }

    VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
        return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
            m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
    });

    // THE MAIN ALGORITHM

    FreeSpaceDatabase freeSpaceDb;

    size_t dstBlockInfoIndex = 0;
    size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
    VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
    VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
    VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
    VkDeviceSize dstOffset = 0;

    bool end = false;
    for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
    {
        const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
        VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
        VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
        for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
            !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
        {
            VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
            const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
            const VkDeviceSize srcAllocSize = srcSuballocIt->size;
            if(m_AllocationsMoved == maxAllocationsToMove ||
                m_BytesMoved + srcAllocSize > maxBytesToMove)
            {
                end = true;
                break;
            }
            const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;

            VmaDefragmentationMove move = {};
            // Try to place it in one of free spaces from the database.
            size_t freeSpaceInfoIndex;
            VkDeviceSize dstAllocOffset;
            if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
                freeSpaceInfoIndex, dstAllocOffset))
            {
                size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
                VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
                VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;

                // Same block
                if(freeSpaceInfoIndex == srcBlockInfoIndex)
                {
                    VMA_ASSERT(dstAllocOffset <= srcAllocOffset);

                    // MOVE OPTION 1: Move the allocation inside the same block by changing its offset.
                    VmaSuballocation suballoc = *srcSuballocIt;
                    suballoc.offset = dstAllocOffset;
                    suballoc.hAllocation->ChangeOffset(dstAllocOffset);
                    m_BytesMoved += srcAllocSize;
                    ++m_AllocationsMoved;

                    VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
                    ++nextSuballocIt;
                    pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
                    srcSuballocIt = nextSuballocIt;
                    InsertSuballoc(pFreeSpaceMetadata, suballoc);

                    move.srcBlockIndex = srcOrigBlockIndex;
                    move.dstBlockIndex = freeSpaceOrigBlockIndex;
                    move.srcOffset = srcAllocOffset;
                    move.dstOffset = dstAllocOffset;
                    move.size = srcAllocSize;

                    moves.push_back(move);
                }
                // Different block
                else
                {
                    // MOVE OPTION 2: Move the allocation to a different block.
                    VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);

                    VmaSuballocation suballoc = *srcSuballocIt;
                    suballoc.offset = dstAllocOffset;
                    suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
                    m_BytesMoved += srcAllocSize;
                    ++m_AllocationsMoved;

                    VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
                    ++nextSuballocIt;
                    pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
                    srcSuballocIt = nextSuballocIt;

                    InsertSuballoc(pFreeSpaceMetadata, suballoc);

                    move.srcBlockIndex = srcOrigBlockIndex;
                    move.dstBlockIndex = freeSpaceOrigBlockIndex;
                    move.srcOffset = srcAllocOffset;
                    move.dstOffset = dstAllocOffset;
                    move.size = srcAllocSize;

                    moves.push_back(move);
                }
            }
            else
            {
                dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
                // If the allocation doesn't fit before the end of dstBlock, forward to next block.
                while(dstBlockInfoIndex < srcBlockInfoIndex &&
                    dstAllocOffset + srcAllocSize > dstBlockSize)
                {
                    // But before that, register remaining free space at the end of dst block.
                    freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);

                    ++dstBlockInfoIndex;
                    dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
                    pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
                    pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
                    dstBlockSize = pDstMetadata->GetSize();
                    dstOffset = 0;
                    dstAllocOffset = 0;
                }
                // Same block
                if(dstBlockInfoIndex == srcBlockInfoIndex)
                {
                    VMA_ASSERT(dstAllocOffset <= srcAllocOffset);

                    const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;

                    bool skipOver = overlap;
                    if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
                    {
                        // If destination and source place overlap, skip if it would move it
                        // by only < 1/64 of its size.
                        skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
                    }

                    if(skipOver)
                    {
                        freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);

                        dstOffset = srcAllocOffset + srcAllocSize;
                        ++srcSuballocIt;
                    }
                    // MOVE OPTION 1: Move the allocation inside the same block by changing its offset.
                    else
                    {
                        srcSuballocIt->offset = dstAllocOffset;
                        srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
                        dstOffset = dstAllocOffset + srcAllocSize;
                        m_BytesMoved += srcAllocSize;
                        ++m_AllocationsMoved;
                        ++srcSuballocIt;

                        move.srcBlockIndex = srcOrigBlockIndex;
                        move.dstBlockIndex = dstOrigBlockIndex;
                        move.srcOffset = srcAllocOffset;
                        move.dstOffset = dstAllocOffset;
                        move.size = srcAllocSize;
                        moves.push_back(move);
                    }
                }
                // Different block
                else
                {
                    // MOVE OPTION 2: Move the allocation to a different block.
                    VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
                    VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);

                    VmaSuballocation suballoc = *srcSuballocIt;
                    suballoc.offset = dstAllocOffset;
                    suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
                    dstOffset = dstAllocOffset + srcAllocSize;
                    m_BytesMoved += srcAllocSize;
                    ++m_AllocationsMoved;

                    VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
                    ++nextSuballocIt;
                    pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
                    srcSuballocIt = nextSuballocIt;

                    pDstMetadata->m_Suballocations.push_back(suballoc);

                    move.srcBlockIndex = srcOrigBlockIndex;
                    move.dstBlockIndex = dstOrigBlockIndex;
                    move.srcOffset = srcAllocOffset;
                    move.dstOffset = dstAllocOffset;
                    move.size = srcAllocSize;

                    moves.push_back(move);
                }
            }
        }
    }

    m_BlockInfos.clear();

    PostprocessMetadata();

    return VK_SUCCESS;
}
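The fast algorithm packs allocations front to back, so each candidate destination offset is just the running dstOffset rounded up to the source allocation's alignment via VmaAlignUp. A small self-contained equivalent for power-of-two alignments (the helper name is illustrative; the library's own VmaAlignUp is the general version):

// Sketch: round val up to a multiple of alignment (alignment must be a power of two),
// matching how dstAllocOffset is computed from dstOffset in the fast algorithm.
#include <cstdint>
static inline uint64_t AlignUpPow2(uint64_t val, uint64_t alignment)
{
    return (val + alignment - 1) & ~(alignment - 1);
}
// Example: AlignUpPow2(13, 8) == 16, AlignUpPow2(16, 8) == 16.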
void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
{
    const size_t blockCount = m_pBlockVector->GetBlockCount();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        VmaBlockMetadata_Generic* const pMetadata =
            (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
        pMetadata->m_FreeCount = 0;
        pMetadata->m_SumFreeSize = pMetadata->GetSize();
        pMetadata->m_FreeSuballocationsBySize.clear();
        for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
            it != pMetadata->m_Suballocations.end(); )
        {
            if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
            {
                VmaSuballocationList::iterator nextIt = it;
                ++nextIt;
                pMetadata->m_Suballocations.erase(it);
                it = nextIt;
            }
            else
            {
                ++it;
            }
        }
    }
}

void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
{
    const size_t blockCount = m_pBlockVector->GetBlockCount();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        VmaBlockMetadata_Generic* const pMetadata =
            (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
        const VkDeviceSize blockSize = pMetadata->GetSize();

        // No allocations in this block - entire area is free.
        if(pMetadata->m_Suballocations.empty())
        {
            pMetadata->m_FreeCount = 1;
            VmaSuballocation suballoc = {
                0, // offset
                blockSize, // size
                VMA_NULL, // hAllocation
                VMA_SUBALLOCATION_TYPE_FREE };
            pMetadata->m_Suballocations.push_back(suballoc);
            pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
        }
        // There are some allocations in this block.
        else
        {
            VkDeviceSize offset = 0;
            VmaSuballocationList::iterator it;
            for(it = pMetadata->m_Suballocations.begin();
                it != pMetadata->m_Suballocations.end();
                ++it)
            {
                VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
                VMA_ASSERT(it->offset >= offset);

                // Need to insert preceding free space.
                if(it->offset > offset)
                {
                    ++pMetadata->m_FreeCount;
                    const VkDeviceSize freeSize = it->offset - offset;
                    VmaSuballocation suballoc = {
                        offset, // offset
                        freeSize, // size
                        VMA_NULL, // hAllocation
                        VMA_SUBALLOCATION_TYPE_FREE };
                    VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
                    if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
                    {
                        pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
                    }
                }

                pMetadata->m_SumFreeSize -= it->size;
                offset = it->offset + it->size;
            }

            // Need to insert trailing free space.
            if(offset < blockSize)
            {
                ++pMetadata->m_FreeCount;
                const VkDeviceSize freeSize = blockSize - offset;
                VmaSuballocation suballoc = {
                    offset, // offset
                    freeSize, // size
                    VMA_NULL, // hAllocation
                    VMA_SUBALLOCATION_TYPE_FREE };
                VMA_ASSERT(it == pMetadata->m_Suballocations.end());
                VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
                if(freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
                {
                    pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
                }
            }

            VMA_SORT(
                pMetadata->m_FreeSuballocationsBySize.begin(),
                pMetadata->m_FreeSuballocationsBySize.end(),
                VmaSuballocationItemSizeLess());
        }

        VMA_HEAVY_ASSERT(pMetadata->Validate());
    }
}

void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
{
    // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
    VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
    while(it != pMetadata->m_Suballocations.end())
    {
        if(it->offset < suballoc.offset)
        {
            ++it;
        }
        else
        {
            break;
        }
    }
    pMetadata->m_Suballocations.insert(it, suballoc);
}
+14918 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
+
+
+14921 VmaBlockVector* pBlockVector,
+14922 uint32_t currFrameIndex) :
+
+14924 mutexLocked(false),
+14925 blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
+14926 defragmentationMoves(VmaStlAllocator<VmaDefragmentationMove>(hAllocator->GetAllocationCallbacks())),
+14927 defragmentationMovesProcessed(0),
+14928 defragmentationMovesCommitted(0),
+14929 hasDefragmentationPlan(0),
+14930 m_hAllocator(hAllocator),
+14931 m_hCustomPool(hCustomPool),
+14932 m_pBlockVector(pBlockVector),
+14933 m_CurrFrameIndex(currFrameIndex),
+14934 m_pAlgorithm(VMA_NULL),
+14935 m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
+14936 m_AllAllocations(false)
+
+
+
+14940 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
+
+14942 vma_delete(m_hAllocator, m_pAlgorithm);
+
+
+14945 void VmaBlockVectorDefragmentationContext::AddAllocation(
VmaAllocation hAlloc, VkBool32* pChanged)
+
+14947 AllocInfo info = { hAlloc, pChanged };
+14948 m_Allocations.push_back(info);
+
+
+14951 void VmaBlockVectorDefragmentationContext::Begin(
bool overlappingMoveSupported,
VmaDefragmentationFlags flags)
+
+14953 const bool allAllocations = m_AllAllocations ||
+14954 m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
+
+
+
+
+
+
+
+
+
+
+
+
+14967 if(VMA_DEBUG_MARGIN == 0 &&
+
+14969 !m_pBlockVector->IsBufferImageGranularityConflictPossible() &&
+
+
+14972 m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
+14973 m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
+
+
+
+14977 m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
+14978 m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
+
+
+
+
+14983 m_pAlgorithm->AddAll();
+
+
+
+14987 for(
size_t i = 0, count = m_Allocations.size(); i < count; ++i)
+
+14989 m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
+
+
+
+
+
+
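Editor's note: `Begin()` above is where the library commits to either the fast or the generic defragmentation algorithm, and whole-pool requests reach it via `AddAll()`, so `allAllocations` is true and the fast path is eligible. A minimal sketch of driving that path through the public v2 API (names such as `vmaDefragmentationBegin`/`vmaDefragmentationEnd` and `VmaDefragmentationInfo2` come from the library's documented API, not from this patch; `allocator` and `myPool` are assumed to exist):

```cpp
// Defragment every allocation in one custom pool.
VmaDefragmentationInfo2 defragInfo = {};
defragInfo.poolCount = 1;
defragInfo.pPools = &myPool;                       // routed through AddPools() -> AddAll()
defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;      // no CPU-side limit
defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
// ... when res signals completion, release the context:
vmaDefragmentationEnd(allocator, defragCtx);
```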
+////////////////////////////////////////////////////////////////////////////////
+// VmaDefragmentationContext
+
+VmaDefragmentationContext_T::VmaDefragmentationContext_T(
+    VmaAllocator hAllocator,
+    uint32_t currFrameIndex,
+    uint32_t flags,
+    VmaDefragmentationStats* pStats) :
+    m_hAllocator(hAllocator),
+    m_CurrFrameIndex(currFrameIndex),
+    m_Flags(flags),
+    m_pStats(pStats),
+    m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
+{
+    memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
+}
+
+VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
+{
+    for(size_t i = m_CustomPoolContexts.size(); i--; )
+    {
+        VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
+        pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_Flags, m_pStats);
+        vma_delete(m_hAllocator, pBlockVectorCtx);
+    }
+    for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
+    {
+        VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
+        if(pBlockVectorCtx)
+        {
+            pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_Flags, m_pStats);
+            vma_delete(m_hAllocator, pBlockVectorCtx);
+        }
+    }
+}
+
+void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, const VmaPool* pPools)
+{
+    for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
+    {
+        VmaPool pool = pPools[poolIndex];
+        VMA_ASSERT(pool);
+        // Pools with algorithm other than default are not defragmented.
+        if(pool->m_BlockVector.GetAlgorithm() == 0)
+        {
+            VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
+
+            for(size_t i = m_CustomPoolContexts.size(); i--; )
+            {
+                if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
+                {
+                    pBlockVectorDefragCtx = m_CustomPoolContexts[i];
+                    break;
+                }
+            }
+
+            if(!pBlockVectorDefragCtx)
+            {
+                pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
+                    m_hAllocator,
+                    pool,
+                    &pool->m_BlockVector,
+                    m_CurrFrameIndex);
+                m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
+            }
+
+            pBlockVectorDefragCtx->AddAll();
+        }
+    }
+}
+
+void VmaDefragmentationContext_T::AddAllocations(
+    uint32_t allocationCount,
+    const VmaAllocation* pAllocations,
+    VkBool32* pAllocationsChanged)
+{
+    // Dispatch pAllocations among defragmentators. Create them when necessary.
+    for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
+    {
+        const VmaAllocation hAlloc = pAllocations[allocIndex];
+        VMA_ASSERT(hAlloc);
+        // DedicatedAlloc cannot be defragmented.
+        if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
+            // Lost allocation cannot be defragmented.
+            (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
+        {
+            VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
+
+            const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
+            // This allocation belongs to custom pool.
+            if(hAllocPool != VK_NULL_HANDLE)
+            {
+                // Pools with algorithm other than default are not defragmented.
+                if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
+                {
+                    for(size_t i = m_CustomPoolContexts.size(); i--; )
+                    {
+                        if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
+                        {
+                            pBlockVectorDefragCtx = m_CustomPoolContexts[i];
+                            break;
+                        }
+                    }
+                    if(!pBlockVectorDefragCtx)
+                    {
+                        pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
+                            m_hAllocator,
+                            hAllocPool,
+                            &hAllocPool->m_BlockVector,
+                            m_CurrFrameIndex);
+                        m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
+                    }
+                }
+            }
+            // This allocation belongs to default pool.
+            else
+            {
+                const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
+                pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
+                if(!pBlockVectorDefragCtx)
+                {
+                    pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
+                        m_hAllocator,
+                        VMA_NULL, // hCustomPool
+                        m_hAllocator->m_pBlockVectors[memTypeIndex],
+                        m_CurrFrameIndex);
+                    m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
+                }
+            }
+
+            if(pBlockVectorDefragCtx)
+            {
+                VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
+                    &pAllocationsChanged[allocIndex] : VMA_NULL;
+                pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
+            }
+        }
+    }
+}
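Editor's note: `AddAllocations()` above is the explicit-allocation path: each handle is routed to a per-custom-pool or per-memory-type context, and the optional `pAllocationsChanged` array reports which allocations actually moved. A hedged usage sketch (the API names are the documented v2 entry points; `allocator` and the allocation list are assumed; per the defragmentation docs in this repo, CPU-side moves only apply to `HOST_VISIBLE` + `HOST_COHERENT` memory):

```cpp
#include <vector>

std::vector<VmaAllocation> allocs = { /* allocations to compact */ };
std::vector<VkBool32> changed(allocs.size());

VmaDefragmentationInfo2 defragInfo = {};
defragInfo.allocationCount = (uint32_t)allocs.size();
defragInfo.pAllocations = allocs.data();
defragInfo.pAllocationsChanged = changed.data();
defragInfo.commandBuffer = VK_NULL_HANDLE; // CPU-side moves only; GPU limits are zeroed below in Defragment().

VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
vmaDefragmentationEnd(allocator, defragCtx);
// Any allocation with changed[i] == VK_TRUE must have its buffer/image recreated and rebound.
```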
-VmaRecorder::~VmaRecorder()
-{
-    if(m_File != VMA_NULL)
-    {
-        fclose(m_File);
-    }
-}
-
-void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
-{
-    CallParams callParams;
-    GetBasicParams(callParams);
-
-    VmaMutexLock lock(m_FileMutex, m_UseMutex);
-    fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
-    Flush();
-}
-
-void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
-{
-    CallParams callParams;
-    GetBasicParams(callParams);
-
-    VmaMutexLock lock(m_FileMutex, m_UseMutex);
-    fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
-    Flush();
-}
-
-void VmaRecorder::RecordCreatePool(uint32_t frameIndex,
-    const VmaPoolCreateInfo& createInfo,
-    VmaPool pool)
-{
-    CallParams callParams;
-    GetBasicParams(callParams);
+VkResult VmaDefragmentationContext_T::Defragment(
+    VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
+    VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
+    VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags)
+{
+    if(pStats)
+    {
+        memset(pStats, 0, sizeof(VmaDefragmentationStats));
+    }
+
+    if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
+    {
+        // For incremental defragmentation, just earmark how much can be moved.
+        // The real work happens in the defragmentation passes.
+        m_MaxCpuBytesToMove = maxCpuBytesToMove;
+        m_MaxCpuAllocationsToMove = maxCpuAllocationsToMove;
+
+        m_MaxGpuBytesToMove = maxGpuBytesToMove;
+        m_MaxGpuAllocationsToMove = maxGpuAllocationsToMove;
+
+        if(m_MaxCpuBytesToMove == 0 && m_MaxCpuAllocationsToMove == 0 &&
+            m_MaxGpuBytesToMove == 0 && m_MaxGpuAllocationsToMove == 0)
+            return VK_SUCCESS;
+
+        return VK_NOT_READY;
+    }
+
+    if(commandBuffer == VK_NULL_HANDLE)
+    {
+        maxGpuBytesToMove = 0;
+        maxGpuAllocationsToMove = 0;
+    }
-    VmaMutexLock lock(m_FileMutex, m_UseMutex);
-    fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
-        createInfo.memoryTypeIndex,
-        createInfo.flags,
-        createInfo.blockSize,
-        (uint64_t)createInfo.minBlockCount,
-        (uint64_t)createInfo.maxBlockCount,
-        createInfo.frameInUseCount,
-        pool);
-    Flush();
-}
-
-void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
-{
-    CallParams callParams;
-    GetBasicParams(callParams);
-
-    VmaMutexLock lock(m_FileMutex, m_UseMutex);
-    fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
-        pool);
-    Flush();
-}
-
-void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
-    const VkMemoryRequirements& vkMemReq,
-    const VmaAllocationCreateInfo& createInfo,
-    VmaAllocation allocation)
-{
-    CallParams callParams;
-    GetBasicParams(callParams);
-
-    VmaMutexLock lock(m_FileMutex, m_UseMutex);
-    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
-    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
-        vkMemReq.size,
-        vkMemReq.alignment,
-        vkMemReq.memoryTypeBits,
-        createInfo.flags,
-        createInfo.usage,
-        createInfo.requiredFlags,
-        createInfo.preferredFlags,
-        createInfo.memoryTypeBits,
-        createInfo.pool,
-        allocation,
-        userDataStr.GetString());
-    Flush();
-}
-
-void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
-    const VkMemoryRequirements& vkMemReq,
-    const VmaAllocationCreateInfo& createInfo,
-    uint64_t allocationCount,
-    const VmaAllocation* pAllocations)
-{
-    CallParams callParams;
-    GetBasicParams(callParams);
-
-    VmaMutexLock lock(m_FileMutex, m_UseMutex);
-    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
-    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
-        vkMemReq.size,
-        vkMemReq.alignment,
-        vkMemReq.memoryTypeBits,
-        createInfo.flags,
-        createInfo.usage,
-        createInfo.requiredFlags,
-        createInfo.preferredFlags,
-        createInfo.memoryTypeBits,
-        createInfo.pool);
-    PrintPointerList(allocationCount, pAllocations);
-    fprintf(m_File, ",%s\n", userDataStr.GetString());
-    Flush();
-}
-
-void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
-    const VkMemoryRequirements& vkMemReq,
-    bool requiresDedicatedAllocation,
-    bool prefersDedicatedAllocation,
-    const VmaAllocationCreateInfo& createInfo,
-    VmaAllocation allocation)
-{
-    CallParams callParams;
-    GetBasicParams(callParams);
-
-    VmaMutexLock lock(m_FileMutex, m_UseMutex);
-    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
-    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
-        vkMemReq.size,
-        vkMemReq.alignment,
-        vkMemReq.memoryTypeBits,
-        requiresDedicatedAllocation ? 1 : 0,
-        prefersDedicatedAllocation ? 1 : 0,
-        createInfo.flags,
-        createInfo.usage,
-        createInfo.requiredFlags,
-        createInfo.preferredFlags,
-        createInfo.memoryTypeBits,
-        createInfo.pool,
-        allocation,
-        userDataStr.GetString());
-    Flush();
-}
-
-void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
-    const VkMemoryRequirements& vkMemReq,
-    bool requiresDedicatedAllocation,
-    bool prefersDedicatedAllocation,
-    const VmaAllocationCreateInfo& createInfo,
-    VmaAllocation allocation)
-{
-    CallParams callParams;
-    GetBasicParams(callParams);
-
-    VmaMutexLock lock(m_FileMutex, m_UseMutex);
-    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
-    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
-        vkMemReq.size,
-        vkMemReq.alignment,
-        vkMemReq.memoryTypeBits,
-        requiresDedicatedAllocation ? 1 : 0,
-        prefersDedicatedAllocation ? 1 : 0,
-        createInfo.flags,
-        createInfo.usage,
-        createInfo.requiredFlags,
-        createInfo.preferredFlags,
-        createInfo.memoryTypeBits,
-        createInfo.pool,
-        allocation,
-        userDataStr.GetString());
-    Flush();
-}
-
-void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
-    VmaAllocation allocation)
-{
-    CallParams callParams;
-    GetBasicParams(callParams);
-
-    VmaMutexLock lock(m_FileMutex, m_UseMutex);
-    fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
-        allocation);
-    Flush();
-}
-
-void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
-    uint64_t allocationCount,
-    const VmaAllocation* pAllocations)
-{
-    CallParams callParams;
-    GetBasicParams(callParams);
-
-    VmaMutexLock lock(m_FileMutex, m_UseMutex);
-    fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
-    PrintPointerList(allocationCount, pAllocations);
-    fprintf(m_File, "\n");
-    Flush();
-}
-
-void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
-    VmaAllocation allocation,
-    const void* pUserData)
-{
-    CallParams callParams;
-    GetBasicParams(callParams);
+    VkResult res = VK_SUCCESS;
+
+    // Process default pools.
+    for(uint32_t memTypeIndex = 0;
+        memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
+        ++memTypeIndex)
+    {
+        VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
+        if(pBlockVectorCtx)
+        {
+            VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
+            pBlockVectorCtx->GetBlockVector()->Defragment(
+                pBlockVectorCtx,
+                pStats, flags,
+                maxCpuBytesToMove, maxCpuAllocationsToMove,
+                maxGpuBytesToMove, maxGpuAllocationsToMove,
+                commandBuffer);
+            if(pBlockVectorCtx->res != VK_SUCCESS)
+            {
+                res = pBlockVectorCtx->res;
+            }
+        }
+    }
+
+    // Process custom pools.
+    for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
+        customCtxIndex < customCtxCount && res >= VK_SUCCESS;
+        ++customCtxIndex)
+    {
+        VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
+        VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
+        pBlockVectorCtx->GetBlockVector()->Defragment(
+            pBlockVectorCtx,
+            pStats, flags,
+            maxCpuBytesToMove, maxCpuAllocationsToMove,
+            maxGpuBytesToMove, maxGpuAllocationsToMove,
+            commandBuffer);
+        if(pBlockVectorCtx->res != VK_SUCCESS)
+        {
+            res = pBlockVectorCtx->res;
+        }
+    }
+
+    return res;
+}
+
+VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo)
+{
+    VmaDefragmentationPassMoveInfo* pCurrentMove = pInfo->pMoves;
+    uint32_t movesLeft = pInfo->moveCount;
+
+    // Process default pools.
+    for(uint32_t memTypeIndex = 0;
+        memTypeIndex < m_hAllocator->GetMemoryTypeCount();
+        ++memTypeIndex)
+    {
+        VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
+        if(pBlockVectorCtx)
+        {
+            VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
+
+            if(!pBlockVectorCtx->hasDefragmentationPlan)
+            {
+                pBlockVectorCtx->GetBlockVector()->Defragment(
+                    pBlockVectorCtx,
+                    m_pStats, m_Flags,
+                    m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove,
+                    m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove,
+                    VK_NULL_HANDLE);
+
+                if(pBlockVectorCtx->res < VK_SUCCESS)
+                    continue;
+
+                pBlockVectorCtx->hasDefragmentationPlan = true;
+            }
+
+            const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations(
+                pBlockVectorCtx,
+                pCurrentMove, movesLeft);
+
+            movesLeft -= processed;
+            pCurrentMove += processed;
+        }
+    }
+
+    // Process custom pools.
+    for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
+        customCtxIndex < customCtxCount;
+        ++customCtxIndex)
+    {
+        VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
+        VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
+
+        if(!pBlockVectorCtx->hasDefragmentationPlan)
+        {
+            pBlockVectorCtx->GetBlockVector()->Defragment(
+                pBlockVectorCtx,
+                m_pStats, m_Flags,
+                m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove,
+                m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove,
+                VK_NULL_HANDLE);
+
+            if(pBlockVectorCtx->res < VK_SUCCESS)
+                continue;
+
+            pBlockVectorCtx->hasDefragmentationPlan = true;
+        }
+
+        const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations(
+            pBlockVectorCtx,
+            pCurrentMove, movesLeft);
+
+        movesLeft -= processed;
+        pCurrentMove += processed;
+    }
+
+    pInfo->moveCount = pInfo->moveCount - movesLeft;
+
+    return VK_SUCCESS;
+}
+
+VkResult VmaDefragmentationContext_T::DefragmentPassEnd()
+{
+    VkResult res = VK_SUCCESS;
+
+    // Process default pools.
+    for(uint32_t memTypeIndex = 0;
+        memTypeIndex < m_hAllocator->GetMemoryTypeCount();
+        ++memTypeIndex)
+    {
+        VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
+        if(pBlockVectorCtx)
+        {
+            VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
+
+            if(!pBlockVectorCtx->hasDefragmentationPlan)
+            {
+                res = VK_NOT_READY;
+                continue;
+            }
+
+            pBlockVectorCtx->GetBlockVector()->CommitDefragmentations(
+                pBlockVectorCtx, m_pStats);
+
+            if(pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted)
+                res = VK_NOT_READY;
+        }
+    }
+
+    // Process custom pools.
+    for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
+        customCtxIndex < customCtxCount;
+        ++customCtxIndex)
+    {
+        VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
+        VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
+
+        if(!pBlockVectorCtx->hasDefragmentationPlan)
+        {
+            res = VK_NOT_READY;
+            continue;
+        }
+
+        pBlockVectorCtx->GetBlockVector()->CommitDefragmentations(
+            pBlockVectorCtx, m_pStats);
-    VmaMutexLock lock(m_FileMutex, m_UseMutex);
-    UserDataString userDataStr(
-        allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
-        pUserData);
-    fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
-        allocation,
-        userDataStr.GetString());
-    Flush();
-}
+        if(pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted)
+            res = VK_NOT_READY;
+    }
+
+    return res;
+}
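Editor's note: `DefragmentPassBegin`/`DefragmentPassEnd` above implement the incremental mode selected by `VMA_DEFRAGMENTATION_FLAG_INCREMENTAL`. A minimal sketch of the corresponding pass loop on the application side, assuming the documented v2 pass API (`vmaBeginDefragmentationPass`, `vmaEndDefragmentationPass`, `VmaDefragmentationPassInfo`); the return-code convention stated in the comments should be verified against the header for this exact version:

```cpp
VmaDefragmentationPassMoveInfo moves[64];
for(;;)
{
    VmaDefragmentationPassInfo passInfo = {};
    passInfo.moveCount = 64;   // capacity of the moves array
    passInfo.pMoves = moves;
    VkResult res = vmaBeginDefragmentationPass(allocator, defragCtx, &passInfo);
    if(res == VK_SUCCESS)      // reportedly: nothing left to move
        break;

    for(uint32_t i = 0; i < passInfo.moveCount; ++i)
    {
        // Recreate each resource at moves[i].memory / moves[i].offset,
        // copy its contents, and rebind (application-specific work).
    }

    res = vmaEndDefragmentationPass(allocator, defragCtx);
    if(res == VK_SUCCESS)      // reportedly: defragmentation complete
        break;
}
```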
-void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
-    VmaAllocation allocation)
-{
-    CallParams callParams;
-    GetBasicParams(callParams);
-
-    VmaMutexLock lock(m_FileMutex, m_UseMutex);
-    fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
-        allocation);
-    Flush();
-}
-
-void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
-    VmaAllocation allocation)
-{
-    CallParams callParams;
-    GetBasicParams(callParams);
-
-    VmaMutexLock lock(m_FileMutex, m_UseMutex);
-    fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
-        allocation);
-    Flush();
-}
-
-void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
-    VmaAllocation allocation)
-{
-    CallParams callParams;
-    GetBasicParams(callParams);
-
-    VmaMutexLock lock(m_FileMutex, m_UseMutex);
-    fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
-        allocation);
-    Flush();
-}
-
-void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
-    VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
-{
-    CallParams callParams;
-    GetBasicParams(callParams);
-
-    VmaMutexLock lock(m_FileMutex, m_UseMutex);
-    fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
-        allocation,
-        offset,
-        size);
-    Flush();
-}
-
-void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
-    VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
-{
-    CallParams callParams;
-    GetBasicParams(callParams);
-
-    VmaMutexLock lock(m_FileMutex, m_UseMutex);
-    fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
-        allocation,
-        offset,
-        size);
-    Flush();
-}
-
-void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
-    const VkBufferCreateInfo& bufCreateInfo,
-    const VmaAllocationCreateInfo& allocCreateInfo,
-    VmaAllocation allocation)
-{
-    CallParams callParams;
-    GetBasicParams(callParams);
-
-    VmaMutexLock lock(m_FileMutex, m_UseMutex);
-    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
-    fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
-        bufCreateInfo.flags,
-        bufCreateInfo.size,
-        bufCreateInfo.usage,
-        bufCreateInfo.sharingMode,
-        allocCreateInfo.flags,
-        allocCreateInfo.usage,
-        allocCreateInfo.requiredFlags,
-        allocCreateInfo.preferredFlags,
-        allocCreateInfo.memoryTypeBits,
-        allocCreateInfo.pool,
-        allocation,
-        userDataStr.GetString());
-    Flush();
-}
+#if VMA_RECORDING_ENABLED
+
+VmaRecorder::VmaRecorder() :
+    m_UseMutex(true),
+    m_Flags(0),
+    m_File(VMA_NULL),
+    m_RecordingStartTime(std::chrono::high_resolution_clock::now())
+{
+}
+
+VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
+{
+    m_UseMutex = useMutex;
+    m_Flags = settings.flags;
+
+#if defined(_WIN32)
+    // Open file for writing.
+    errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
+    if(err != 0)
+    {
+        return VK_ERROR_INITIALIZATION_FAILED;
+    }
+#else
+    // Open file for writing.
+    m_File = fopen(settings.pFilePath, "wb");
+    if(m_File == 0)
+    {
+        return VK_ERROR_INITIALIZATION_FAILED;
+    }
+#endif
+
+    // Write header.
+    fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
+    fprintf(m_File, "%s\n", "1,8");
+
+    return VK_SUCCESS;
+}
+
+VmaRecorder::~VmaRecorder()
+{
+    if(m_File != VMA_NULL)
+    {
+        fclose(m_File);
+    }
+}
+
+void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
+{
+    CallParams callParams;
+    GetBasicParams(callParams);
+
+    VmaMutexLock lock(m_FileMutex, m_UseMutex);
+    fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
+    Flush();
+}
+
+void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
+{
+    CallParams callParams;
+    GetBasicParams(callParams);
+
+    VmaMutexLock lock(m_FileMutex, m_UseMutex);
+    fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
+    Flush();
+}
+
+void VmaRecorder::RecordCreatePool(uint32_t frameIndex,
+    const VmaPoolCreateInfo& createInfo,
+    VmaPool pool)
+{
+    CallParams callParams;
+    GetBasicParams(callParams);
+
+    VmaMutexLock lock(m_FileMutex, m_UseMutex);
+    fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
+        createInfo.memoryTypeIndex,
+        createInfo.flags,
+        createInfo.blockSize,
+        (uint64_t)createInfo.minBlockCount,
+        (uint64_t)createInfo.maxBlockCount,
+        createInfo.frameInUseCount,
+        pool);
+    Flush();
+}
+
+void VmaRecorder::RecordDestroyPool(uint32_t frameIndex,
+    VmaPool pool)
+{
+    CallParams callParams;
+    GetBasicParams(callParams);
-void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
-    const VkImageCreateInfo& imageCreateInfo,
-    const VmaAllocationCreateInfo& allocCreateInfo,
-    VmaAllocation allocation)
-{
-    CallParams callParams;
-    GetBasicParams(callParams);
-
-    VmaMutexLock lock(m_FileMutex, m_UseMutex);
-    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
-    fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
-        imageCreateInfo.flags,
-        imageCreateInfo.imageType,
-        imageCreateInfo.format,
-        imageCreateInfo.extent.width,
-        imageCreateInfo.extent.height,
-        imageCreateInfo.extent.depth,
-        imageCreateInfo.mipLevels,
-        imageCreateInfo.arrayLayers,
-        imageCreateInfo.samples,
-        imageCreateInfo.tiling,
-        imageCreateInfo.usage,
-        imageCreateInfo.sharingMode,
-        imageCreateInfo.initialLayout,
-        allocCreateInfo.flags,
-        allocCreateInfo.usage,
-        allocCreateInfo.requiredFlags,
-        allocCreateInfo.preferredFlags,
-        allocCreateInfo.memoryTypeBits,
-        allocCreateInfo.pool,
-        allocation,
-        userDataStr.GetString());
-    Flush();
-}
-
-void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
-    VmaAllocation allocation)
-{
-    CallParams callParams;
-    GetBasicParams(callParams);
-
-    VmaMutexLock lock(m_FileMutex, m_UseMutex);
-    fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
-        allocation);
-    Flush();
-}
-
-void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
-    VmaAllocation allocation)
-{
-    CallParams callParams;
-    GetBasicParams(callParams);
-
-    VmaMutexLock lock(m_FileMutex, m_UseMutex);
-    fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
-        allocation);
-    Flush();
-}
-
-void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
-    VmaAllocation allocation)
-{
-    CallParams callParams;
-    GetBasicParams(callParams);
-
-    VmaMutexLock lock(m_FileMutex, m_UseMutex);
-    fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
-        allocation);
-    Flush();
-}
-
-void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
-    VmaAllocation allocation)
-{
-    CallParams callParams;
-    GetBasicParams(callParams);
-
-    VmaMutexLock lock(m_FileMutex, m_UseMutex);
-    fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
-        allocation);
-    Flush();
-}
-
-void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
-    VmaPool pool)
-{
-    CallParams callParams;
-    GetBasicParams(callParams);
-
-    VmaMutexLock lock(m_FileMutex, m_UseMutex);
-    fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
-        pool);
-    Flush();
-}
-
-void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
-    const VmaDefragmentationInfo2& info,
-    VmaDefragmentationContext ctx)
-{
-    CallParams callParams;
-    GetBasicParams(callParams);
-
-    VmaMutexLock lock(m_FileMutex, m_UseMutex);
-    fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
-        info.flags);
-    PrintPointerList(info.allocationCount, info.pAllocations);
-    fprintf(m_File, ",");
-    PrintPointerList(info.poolCount, info.pPools);
-    fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
-        info.maxCpuBytesToMove,
-        info.maxCpuAllocationsToMove,
-        info.maxGpuBytesToMove,
-        info.maxGpuAllocationsToMove,
-        info.commandBuffer,
-        ctx);
-    Flush();
-}
-
-void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
-    VmaDefragmentationContext ctx)
-{
-    CallParams callParams;
-    GetBasicParams(callParams);
-
-    VmaMutexLock lock(m_FileMutex, m_UseMutex);
-    fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
-        ctx);
-    Flush();
-}
-
-void VmaRecorder::RecordSetPoolName(uint32_t frameIndex,
-    VmaPool pool,
-    const char* name)
-{
-    CallParams callParams;
-    GetBasicParams(callParams);
-
-    VmaMutexLock lock(m_FileMutex, m_UseMutex);
-    fprintf(m_File, "%u,%.3f,%u,vmaSetPoolName,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
-        pool, name != VMA_NULL ? name : "");
-    Flush();
-}
-
-VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
+    VmaMutexLock lock(m_FileMutex, m_UseMutex);
+    fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
+        pool);
+    Flush();
+}
+
+void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
+    const VkMemoryRequirements& vkMemReq,
+    const VmaAllocationCreateInfo& createInfo,
+    VmaAllocation allocation)
+{
+    CallParams callParams;
+    GetBasicParams(callParams);
+
+    VmaMutexLock lock(m_FileMutex, m_UseMutex);
+    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
+    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
+        vkMemReq.size,
+        vkMemReq.alignment,
+        vkMemReq.memoryTypeBits,
+        createInfo.flags,
+        createInfo.usage,
+        createInfo.requiredFlags,
+        createInfo.preferredFlags,
+        createInfo.memoryTypeBits,
+        createInfo.pool,
+        allocation,
+        userDataStr.GetString());
+    Flush();
+}
+
+void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
+    const VkMemoryRequirements& vkMemReq,
+    const VmaAllocationCreateInfo& createInfo,
+    uint64_t allocationCount,
+    const VmaAllocation* pAllocations)
+{
+    CallParams callParams;
+    GetBasicParams(callParams);
+
+    VmaMutexLock lock(m_FileMutex, m_UseMutex);
+    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
+    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
+        vkMemReq.size,
+        vkMemReq.alignment,
+        vkMemReq.memoryTypeBits,
+        createInfo.flags,
+        createInfo.usage,
+        createInfo.requiredFlags,
+        createInfo.preferredFlags,
+        createInfo.memoryTypeBits,
+        createInfo.pool);
+    PrintPointerList(allocationCount, pAllocations);
+    fprintf(m_File, ",%s\n", userDataStr.GetString());
+    Flush();
+}
+
+void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
+    const VkMemoryRequirements& vkMemReq,
+    bool requiresDedicatedAllocation,
+    bool prefersDedicatedAllocation,
+    const VmaAllocationCreateInfo& createInfo,
+    VmaAllocation allocation)
+{
+    CallParams callParams;
+    GetBasicParams(callParams);
+
+    VmaMutexLock lock(m_FileMutex, m_UseMutex);
+    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
+    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
+        vkMemReq.size,
+        vkMemReq.alignment,
+        vkMemReq.memoryTypeBits,
+        requiresDedicatedAllocation ? 1 : 0,
+        prefersDedicatedAllocation ? 1 : 0,
+        createInfo.flags,
+        createInfo.usage,
+        createInfo.requiredFlags,
+        createInfo.preferredFlags,
+        createInfo.memoryTypeBits,
+        createInfo.pool,
+        allocation,
+        userDataStr.GetString());
+    Flush();
+}
+
+void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
+    const VkMemoryRequirements& vkMemReq,
+    bool requiresDedicatedAllocation,
+    bool prefersDedicatedAllocation,
+    const VmaAllocationCreateInfo& createInfo,
+    VmaAllocation allocation)
+{
+    CallParams callParams;
+    GetBasicParams(callParams);
+
+    VmaMutexLock lock(m_FileMutex, m_UseMutex);
+    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
+    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
+        vkMemReq.size,
+        vkMemReq.alignment,
+        vkMemReq.memoryTypeBits,
+        requiresDedicatedAllocation ? 1 : 0,
+        prefersDedicatedAllocation ? 1 : 0,
+        createInfo.flags,
+        createInfo.usage,
+        createInfo.requiredFlags,
+        createInfo.preferredFlags,
+        createInfo.memoryTypeBits,
+        createInfo.pool,
+        allocation,
+        userDataStr.GetString());
+    Flush();
+}
+
+void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
+    VmaAllocation allocation)
+{
+    CallParams callParams;
+    GetBasicParams(callParams);
+
+    VmaMutexLock lock(m_FileMutex, m_UseMutex);
+    fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
+        allocation);
+    Flush();
+}
+
+void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
+    uint64_t allocationCount,
+    const VmaAllocation* pAllocations)
+{
+    CallParams callParams;
+    GetBasicParams(callParams);
+
+    VmaMutexLock lock(m_FileMutex, m_UseMutex);
+    fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
+    PrintPointerList(allocationCount, pAllocations);
+    fprintf(m_File, "\n");
+    Flush();
+}
+
+void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
+    VmaAllocation allocation,
+    const void* pUserData)
+{
-{
-    if(pUserData != VMA_NULL)
-    {
-        if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
-        {
-            m_Str = (const char*)pUserData;
-        }
-        else
-        {
-            snprintf(m_PtrStr, 17, "%p", pUserData);
-            m_Str = m_PtrStr;
-        }
-    }
-    else
-    {
-        m_Str = "";
-    }
-}
+    CallParams callParams;
+    GetBasicParams(callParams);
+
+    VmaMutexLock lock(m_FileMutex, m_UseMutex);
+    UserDataString userDataStr(
+        allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
+        pUserData);
+    fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
+        allocation,
+        userDataStr.GetString());
+    Flush();
+}
+
+void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
+    VmaAllocation allocation)
+{
+    CallParams callParams;
+    GetBasicParams(callParams);
-void VmaRecorder::WriteConfiguration(
-    const VkPhysicalDeviceProperties& devProps,
-    const VkPhysicalDeviceMemoryProperties& memProps,
-    uint32_t vulkanApiVersion,
-    bool dedicatedAllocationExtensionEnabled,
-    bool bindMemory2ExtensionEnabled,
-    bool memoryBudgetExtensionEnabled,
-    bool deviceCoherentMemoryExtensionEnabled)
-{
+    VmaMutexLock lock(m_FileMutex, m_UseMutex);
+    fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
+        allocation);
+    Flush();
+}
+
+void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
+    VmaAllocation allocation)
+{
-    fprintf(m_File, "Config,Begin\n");
-
-    fprintf(m_File, "VulkanApiVersion,%u,%u\n", VK_VERSION_MAJOR(vulkanApiVersion), VK_VERSION_MINOR(vulkanApiVersion));
-
-    fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
-    fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
-    fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
-    fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
-    fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
-    fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
-
-    fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
-    fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
-    fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
+    CallParams callParams;
+    GetBasicParams(callParams);
+
+    VmaMutexLock lock(m_FileMutex, m_UseMutex);
+    fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
+        allocation);
+    Flush();
+}
+
+void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
+    VmaAllocation allocation)
+{
+    CallParams callParams;
+    GetBasicParams(callParams);
-    fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
-    for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
-    {
-        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
-        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
-    }
-    fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
-    for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
-    {
-        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
-        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
-    }
-
-    fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
-    fprintf(m_File, "Extension,VK_KHR_bind_memory2,%u\n", bindMemory2ExtensionEnabled ? 1 : 0);
-    fprintf(m_File, "Extension,VK_EXT_memory_budget,%u\n", memoryBudgetExtensionEnabled ? 1 : 0);
-    fprintf(m_File, "Extension,VK_AMD_device_coherent_memory,%u\n", deviceCoherentMemoryExtensionEnabled ? 1 : 0);
-
-    fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
-    fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
-    fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
-    fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
-    fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
-    fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
-    fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
-    fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
-    fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
-
-    fprintf(m_File, "Config,End\n");
-}
-
-void VmaRecorder::GetBasicParams(CallParams& outParams)
-{
-#if defined(_WIN32)
-    outParams.threadId = GetCurrentThreadId();
-#else
-    // Use C++11 features to get thread id and convert it to uint32_t.
-    // There is room for optimization since sstream is quite slow.
-    std::thread::id thread_id = std::this_thread::get_id();
-    std::stringstream thread_id_to_string_converter;
-    thread_id_to_string_converter << thread_id;
-    std::string thread_id_as_string = thread_id_to_string_converter.str();
-    outParams.threadId = static_cast<uint32_t>(std::stoi(thread_id_as_string.c_str()));
-#endif
-
-    auto current_time = std::chrono::high_resolution_clock::now();
-
-    outParams.time = std::chrono::duration<double, std::chrono::seconds::period>(current_time - m_RecordingStartTime).count();
-}
-
-void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
-{
-    if(count)
-    {
-        fprintf(m_File, "%p", pItems[0]);
-        for(uint64_t i = 1; i < count; ++i)
-        {
-            fprintf(m_File, " %p", pItems[i]);
-        }
-    }
-}
-
-void VmaRecorder::Flush()
+    VmaMutexLock lock(m_FileMutex, m_UseMutex);
+    fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
+        allocation);
+    Flush();
+}
+
+void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
+    VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
+{
+    CallParams callParams;
+    GetBasicParams(callParams);
+
+    VmaMutexLock lock(m_FileMutex, m_UseMutex);
+    fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
+        allocation,
+        offset,
+        size);
+    Flush();
+}
+
+void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
+    VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
+{
+    CallParams callParams;
+    GetBasicParams(callParams);
+
+    VmaMutexLock lock(m_FileMutex, m_UseMutex);
+    fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
+        allocation,
+        offset,
+        size);
+    Flush();
+}
+
+void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
+    const VkBufferCreateInfo& bufCreateInfo,
+    const VmaAllocationCreateInfo& allocCreateInfo,
+    VmaAllocation allocation)
+{
+    CallParams callParams;
+    GetBasicParams(callParams);
+
+    VmaMutexLock lock(m_FileMutex, m_UseMutex);
+    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
+    fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
+        bufCreateInfo.flags,
+        bufCreateInfo.size,
+        bufCreateInfo.usage,
+        bufCreateInfo.sharingMode,
+        allocCreateInfo.flags,
+        allocCreateInfo.usage,
+        allocCreateInfo.requiredFlags,
+        allocCreateInfo.preferredFlags,
+        allocCreateInfo.memoryTypeBits,
+        allocCreateInfo.pool,
+        allocation,
+        userDataStr.GetString());
+    Flush();
+}
+
+void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
+    const VkImageCreateInfo& imageCreateInfo,
+    const VmaAllocationCreateInfo& allocCreateInfo,
+    VmaAllocation allocation)
+{
-{
-    if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
-    {
-        fflush(m_File);
-    }
-}
-
-#endif // #if VMA_RECORDING_ENABLED
-
-////////////////////////////////////////////////////////////////////////////////
-// VmaAllocationObjectAllocator
-
-VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
-    m_Allocator(pAllocationCallbacks, 1024)
-{
-}
-
-template<typename... Types> VmaAllocation VmaAllocationObjectAllocator::Allocate(Types... args)
-{
-    VmaMutexLock mutexLock(m_Mutex);
-    return m_Allocator.Alloc<Types...>(std::forward<Types>(args)...);
-}
-
-void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
-{
-    VmaMutexLock mutexLock(m_Mutex);
-    m_Allocator.Free(hAlloc);
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// VmaAllocator_T
-
-VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
-    m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
-    m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0),
-    m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
-    m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0),
-    m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0),
-    m_UseAmdDeviceCoherentMemory((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT) != 0),
-    m_UseKhrBufferDeviceAddress((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT) != 0),
-    m_UseExtMemoryPriority((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT) != 0),
-    m_hDevice(pCreateInfo->device),
-    m_hInstance(pCreateInfo->instance),
-    m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
-    m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
-        *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
-    m_AllocationObjectAllocator(&m_AllocationCallbacks),
-    m_HeapSizeLimitMask(0),
-    m_PreferredLargeHeapBlockSize(0),
-    m_PhysicalDevice(pCreateInfo->physicalDevice),
-    m_CurrentFrameIndex(0),
-    m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
-    m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
-    m_NextPoolId(0),
-    m_GlobalMemoryTypeBits(UINT32_MAX)
-#if VMA_RECORDING_ENABLED
-    ,m_pRecorder(VMA_NULL)
-#endif
-{
-    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-    {
-        m_UseKhrDedicatedAllocation = false;
-        m_UseKhrBindMemory2 = false;
-    }
-    if(VMA_DEBUG_DETECT_CORRUPTION)
-    {
-        // Needs to be multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
-        VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
-    }
+    CallParams callParams;
+    GetBasicParams(callParams);
+
+    VmaMutexLock lock(m_FileMutex, m_UseMutex);
+    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
+    fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
+        imageCreateInfo.flags,
+        imageCreateInfo.imageType,
+        imageCreateInfo.format,
+        imageCreateInfo.extent.width,
+        imageCreateInfo.extent.height,
+        imageCreateInfo.extent.depth,
+        imageCreateInfo.mipLevels,
+        imageCreateInfo.arrayLayers,
+        imageCreateInfo.samples,
+        imageCreateInfo.tiling,
+        imageCreateInfo.usage,
+        imageCreateInfo.sharingMode,
+        imageCreateInfo.initialLayout,
+        allocCreateInfo.flags,
+        allocCreateInfo.usage,
+        allocCreateInfo.requiredFlags,
+        allocCreateInfo.preferredFlags,
+        allocCreateInfo.memoryTypeBits,
+        allocCreateInfo.pool,
+        allocation,
+        userDataStr.GetString());
+    Flush();
+}
+
+void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
+    VmaAllocation allocation)
+{
+    CallParams callParams;
+    GetBasicParams(callParams);
+
+    VmaMutexLock lock(m_FileMutex, m_UseMutex);
+    fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
+        allocation);
+    Flush();
+}
+
+void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
+    VmaAllocation allocation)
+{
+    CallParams callParams;
+    GetBasicParams(callParams);
+
+    VmaMutexLock lock(m_FileMutex, m_UseMutex);
+    fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
+        allocation);
+    Flush();
+}
+
+void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
+    VmaAllocation allocation)
+{
+    CallParams callParams;
+    GetBasicParams(callParams);
+
+    VmaMutexLock lock(m_FileMutex, m_UseMutex);
+    fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
+        allocation);
+    Flush();
+}
+
+void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
+    VmaAllocation allocation)
+{
+    CallParams callParams;
+    GetBasicParams(callParams);
-    if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
-    {
-#if !(VMA_DEDICATED_ALLOCATION)
-        if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
-        {
-            VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
-        }
-#endif
-#if !(VMA_BIND_MEMORY2)
-        if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0)
-        {
-            VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros.");
-        }
-#endif
-    }
-#if !(VMA_MEMORY_BUDGET)
-    if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0)
-    {
-        VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros.");
-    }
-#endif
-#if !(VMA_BUFFER_DEVICE_ADDRESS)
-    if(m_UseKhrBufferDeviceAddress)
-    {
-        VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT is set but required extension or Vulkan 1.2 is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
-    }
-#endif
-#if VMA_VULKAN_VERSION < 1002000
-    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 2, 0))
-    {
-        VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_2 but required Vulkan version is disabled by preprocessor macros.");
-    }
-#endif
-#if VMA_VULKAN_VERSION < 1001000
-    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-    {
-        VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros.");
-    }
-#endif
-#if !(VMA_MEMORY_PRIORITY)
-    if(m_UseExtMemoryPriority)
-    {
-        VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
-    }
-#endif
-
-    memset(&m_DeviceMemoryCallbacks, 0 , sizeof(m_DeviceMemoryCallbacks));
-    memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
-    memset(&m_MemProps, 0, sizeof(m_MemProps));
-
-    memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
-    memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
-    memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions));
-
-    if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
-    {
-        m_DeviceMemoryCallbacks.pUserData = pCreateInfo->pDeviceMemoryCallbacks->pUserData;
-        m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
-        m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
-    }
-
-    ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
-
-    (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
-    (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
+    VmaMutexLock lock(m_FileMutex, m_UseMutex);
+    fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
+        allocation);
+    Flush();
+}
+
+void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
+    VmaPool pool)
+{
+    CallParams callParams;
+    GetBasicParams(callParams);
+
+    VmaMutexLock lock(m_FileMutex, m_UseMutex);
+    fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
+        pool);
+    Flush();
+}
+
+void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
+    const VmaDefragmentationInfo2& info,
+    VmaDefragmentationContext ctx)
+{
+    CallParams callParams;
+    GetBasicParams(callParams);
+
+    VmaMutexLock lock(m_FileMutex, m_UseMutex);
+    fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
+        info.flags);
+    PrintPointerList(info.allocationCount, info.pAllocations);
+    fprintf(m_File, ",");
+    PrintPointerList(info.poolCount, info.pPools);
+    fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
+        info.maxCpuBytesToMove,
+        info.maxCpuAllocationsToMove,
+        info.maxGpuBytesToMove,
+        info.maxGpuAllocationsToMove,
+        info.commandBuffer,
+        ctx);
+    Flush();
+}
+
+void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
+    VmaDefragmentationContext ctx)
+{
+    CallParams callParams;
+    GetBasicParams(callParams);
+
+    VmaMutexLock lock(m_FileMutex, m_UseMutex);
+    fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
+        ctx);
+    Flush();
+}
+
+void VmaRecorder::RecordSetPoolName(uint32_t frameIndex,
+    VmaPool pool,
+    const char* name)
+{
+    CallParams callParams;
+    GetBasicParams(callParams);
+
+    VmaMutexLock lock(m_FileMutex, m_UseMutex);
+    fprintf(m_File, "%u,%.3f,%u,vmaSetPoolName,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
+        pool, name != VMA_NULL ? name : "");
+    Flush();
+}
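Editor's note: taken together, the format strings above define the recording file layout: a two-line header, a `Config` block, then one CSV row per intercepted call (`threadId,time,frameIndex,functionName,args...`). An illustrative fragment with invented values, derived only from those format strings:

```
Vulkan Memory Allocator,Calls recording
1,8
Config,Begin
PhysicalDeviceMemory,HeapCount,2
Config,End
4376,0.002,0,vmaCreateAllocator
4376,0.051,0,vmaDestroyPool,000001C8D4A2F010
```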
-    VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
-    VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
-    VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
-    VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
-
-    m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
-        pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
-
-    m_GlobalMemoryTypeBits = CalculateGlobalMemoryTypeBits();
-
-    if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
-    {
-        for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
-        {
-            const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
-            if(limit != VK_WHOLE_SIZE)
-            {
-                m_HeapSizeLimitMask |= 1u << heapIndex;
-                if(limit < m_MemProps.memoryHeaps[heapIndex].size)
-                {
-                    m_MemProps.memoryHeaps[heapIndex].size = limit;
-                }
-            }
-        }
-    }
-
-    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
-    {
-        const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
-
-        m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
-            this,
-            VK_NULL_HANDLE, // hParentPool
-            memTypeIndex,
-            preferredBlockSize,
-            0,
-            SIZE_MAX,
-            GetBufferImageGranularity(),
-            pCreateInfo->frameInUseCount,
-            false, // explicitBlockSize
-            false); // linearAlgorithm
-        // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here, because minBlockCount is 0.
-        m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
-    }
-}
-
-VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
-{
-    VkResult res = VK_SUCCESS;
-
-    if(pCreateInfo->pRecordSettings != VMA_NULL &&
-        !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
-    {
-#if VMA_RECORDING_ENABLED
-        m_pRecorder = vma_new(this, VmaRecorder)();
-        res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
-        if(res != VK_SUCCESS)
-        {
-            return res;
-        }
-        m_pRecorder->WriteConfiguration(
-            m_PhysicalDeviceProperties,
-            m_MemProps,
-            m_VulkanApiVersion,
-            m_UseKhrDedicatedAllocation,
-            m_UseKhrBindMemory2,
-            m_UseExtMemoryBudget,
-            m_UseAmdDeviceCoherentMemory);
-        m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
-#else
-        VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
-        return VK_ERROR_FEATURE_NOT_PRESENT;
-#endif
-    }
-
-#if VMA_MEMORY_BUDGET
-    if(m_UseExtMemoryBudget)
-    {
-        UpdateVulkanBudget();
-    }
-#endif // #if VMA_MEMORY_BUDGET
-
-    return res;
-}
-
-VmaAllocator_T::~VmaAllocator_T()
-{
-#if VMA_RECORDING_ENABLED
-    if(m_pRecorder != VMA_NULL)
-    {
-        m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
-        vma_delete(this, m_pRecorder);
-    }
-#endif
-
-    VMA_ASSERT(m_Pools.empty());
-
-    for(size_t i = GetMemoryTypeCount(); i--; )
-    {
-        if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty())
-        {
-            VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
-        }
-        vma_delete(this, m_pDedicatedAllocations[i]);
-        vma_delete(this, m_pBlockVectors[i]);
-    }
-}
-
-void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
-{
-#if VMA_STATIC_VULKAN_FUNCTIONS == 1
-    ImportVulkanFunctions_Static();
-#endif
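Editor's note: the heap-limit loop in the constructor above is what `VmaAllocatorCreateInfo::pHeapSizeLimit` feeds into: any entry other than `VK_WHOLE_SIZE` caps the reported heap size and sets the corresponding bit in `m_HeapSizeLimitMask`. A hedged usage sketch (handles `physicalDevice`, `device`, `instance` assumed to exist):

```cpp
// Cap heap 0 at 512 MiB; leave all other heaps unlimited.
VkDeviceSize heapLimits[VK_MAX_MEMORY_HEAPS];
for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    heapLimits[i] = VK_WHOLE_SIZE; // VK_WHOLE_SIZE means "no limit" per the loop above.
heapLimits[0] = 512ull * 1024 * 1024;

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;
allocatorInfo.instance = instance;
allocatorInfo.pHeapSizeLimit = heapLimits;
```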
+VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
+{
+    if(pUserData != VMA_NULL)
+    {
+        if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
+        {
+            m_Str = (const char*)pUserData;
+        }
+        else
+        {
+            snprintf(m_PtrStr, 17, "%p", pUserData);
+            m_Str = m_PtrStr;
+        }
+    }
+    else
+    {
+        m_Str = "";
+    }
+}
+
+void VmaRecorder::WriteConfiguration(
+    const VkPhysicalDeviceProperties& devProps,
+    const VkPhysicalDeviceMemoryProperties& memProps,
+    uint32_t vulkanApiVersion,
+    bool dedicatedAllocationExtensionEnabled,
+    bool bindMemory2ExtensionEnabled,
+    bool memoryBudgetExtensionEnabled,
+    bool deviceCoherentMemoryExtensionEnabled)
+{
+    fprintf(m_File, "Config,Begin\n");
+
+    fprintf(m_File, "VulkanApiVersion,%u,%u\n", VK_VERSION_MAJOR(vulkanApiVersion), VK_VERSION_MINOR(vulkanApiVersion));
+
+    fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
+    fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
+    fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
+    fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
+    fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
+    fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
+
+    fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
+    fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
+    fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
+
+    fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
+    for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
+    {
+        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
+        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
+    }
+    fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
+    for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
+    {
+        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
+        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
+    }
+
+    fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
+    fprintf(m_File, "Extension,VK_KHR_bind_memory2,%u\n", bindMemory2ExtensionEnabled ? 1 : 0);
+    fprintf(m_File, "Extension,VK_EXT_memory_budget,%u\n", memoryBudgetExtensionEnabled ? 1 : 0);
+    fprintf(m_File, "Extension,VK_AMD_device_coherent_memory,%u\n", deviceCoherentMemoryExtensionEnabled ? 1 : 0);
+
+    fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
+    fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
+    fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
+    fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
+    fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
+    fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
+    fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
+    fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
+    fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
+
+    fprintf(m_File, "Config,End\n");
+}
+
+void VmaRecorder::GetBasicParams(CallParams& outParams)
+{
+#if defined(_WIN32)
+    outParams.threadId = GetCurrentThreadId();
+#else
+    // Use C++11 features to get thread id and convert it to uint32_t.
+    // There is room for optimization since sstream is quite slow.
+    std::thread::id thread_id = std::this_thread::get_id();
+    std::stringstream thread_id_to_string_converter;
+    thread_id_to_string_converter << thread_id;
+    std::string thread_id_as_string = thread_id_to_string_converter.str();
+    outParams.threadId = static_cast<uint32_t>(std::stoi(thread_id_as_string.c_str()));
+#endif
+
+    auto current_time = std::chrono::high_resolution_clock::now();
+
+    outParams.time = std::chrono::duration<double, std::chrono::seconds::period>(current_time - m_RecordingStartTime).count();
+}
+
+void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
+{
+    if(count)
+    {
+        fprintf(m_File, "%p", pItems[0]);
+        for(uint64_t i = 1; i < count; ++i)
+        {
+            fprintf(m_File, " %p", pItems[i]);
+        }
+    }
+}
+
+void VmaRecorder::Flush()
+{
+    if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
+    {
+        fflush(m_File);
+    }
+}
+
+#endif // #if VMA_RECORDING_ENABLED
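Editor's note: the whole `VmaRecorder` block above is only compiled when `VMA_RECORDING_ENABLED` is defined to 1, and is activated at allocator creation. A hedged sketch of enabling it (field names follow the `VmaRecordSettings` usage in `Init()` above; `vma_capture.csv` is an invented path):

```cpp
// Requires building with: #define VMA_RECORDING_ENABLED 1
VmaRecordSettings recordSettings = {};
recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT; // triggers fflush() in Flush() after every call
recordSettings.pFilePath = "vma_capture.csv";

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.pRecordSettings = &recordSettings;
// ... other allocator fields, then vmaCreateAllocator(&allocatorInfo, &allocator);
```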
-    if(pVulkanFunctions != VMA_NULL)
-    {
-        ImportVulkanFunctions_Custom(pVulkanFunctions);
-    }
-
-#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
-    ImportVulkanFunctions_Dynamic();
-#endif
-
-    ValidateVulkanFunctions();
-}
-
-#if VMA_STATIC_VULKAN_FUNCTIONS == 1
+
+////////////////////////////////////////////////////////////////////////////////
+// VmaAllocationObjectAllocator
+
+VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
+    m_Allocator(pAllocationCallbacks, 1024)
+{
+}
+
+template<typename... Types> VmaAllocation VmaAllocationObjectAllocator::Allocate(Types... args)
+{
+    VmaMutexLock mutexLock(m_Mutex);
+    return m_Allocator.Alloc<Types...>(std::forward<Types>(args)...);
+}
-void VmaAllocator_T::ImportVulkanFunctions_Static()
+void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
-{
-    // Vulkan 1.0
-    m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
-    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
-    m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
-    m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
-    m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
-    m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
-    m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
-    m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
-    m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
-    m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
-    m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
-    m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
-    m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
-    m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
-    m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
-    m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
-    m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
-
-    // Vulkan 1.1
-#if VMA_VULKAN_VERSION >= 1001000
-    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-    {
-        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2)vkGetBufferMemoryRequirements2;
-        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2)vkGetImageMemoryRequirements2;
-        m_VulkanFunctions.vkBindBufferMemory2KHR = (PFN_vkBindBufferMemory2)vkBindBufferMemory2;
-        m_VulkanFunctions.vkBindImageMemory2KHR = (PFN_vkBindImageMemory2)vkBindImageMemory2;
-        m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR = (PFN_vkGetPhysicalDeviceMemoryProperties2)vkGetPhysicalDeviceMemoryProperties2;
-    }
-#endif
-}
-
-#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
-
-void VmaAllocator_T::ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions)
-{
-    VMA_ASSERT(pVulkanFunctions != VMA_NULL);
-
-#define VMA_COPY_IF_NOT_NULL(funcName) \
-    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
-
-    VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
-    VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
-    VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
-    VMA_COPY_IF_NOT_NULL(vkFreeMemory);
-    VMA_COPY_IF_NOT_NULL(vkMapMemory);
-    VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
-    VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
-    VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
-    VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
-    VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
-    VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
-    VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
-    VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
-    VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
-    VMA_COPY_IF_NOT_NULL(vkCreateImage);
-    VMA_COPY_IF_NOT_NULL(vkDestroyImage);
-    VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
-
-#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
-    VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
-    VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
-#endif
-
-#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
-    VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR);
-    VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR);
-#endif
-
-#if VMA_MEMORY_BUDGET
-    VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR);
-#endif
-
-#undef VMA_COPY_IF_NOT_NULL
-}
-
-#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
-
-void VmaAllocator_T::ImportVulkanFunctions_Dynamic()
-{
-#define VMA_FETCH_INSTANCE_FUNC(memberName, functionPointerType, functionNameString) \
-    if(m_VulkanFunctions.memberName == VMA_NULL) \
-        m_VulkanFunctions.memberName = \
-            (functionPointerType)vkGetInstanceProcAddr(m_hInstance, functionNameString);
-#define VMA_FETCH_DEVICE_FUNC(memberName, functionPointerType, functionNameString) \
-    if(m_VulkanFunctions.memberName == VMA_NULL) \
-        m_VulkanFunctions.memberName = \
-            (functionPointerType)vkGetDeviceProcAddr(m_hDevice, functionNameString);
-
-    VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceProperties, PFN_vkGetPhysicalDeviceProperties, "vkGetPhysicalDeviceProperties");
-    VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties, PFN_vkGetPhysicalDeviceMemoryProperties, "vkGetPhysicalDeviceMemoryProperties");
-    VMA_FETCH_DEVICE_FUNC(vkAllocateMemory, PFN_vkAllocateMemory, "vkAllocateMemory");
-    VMA_FETCH_DEVICE_FUNC(vkFreeMemory, PFN_vkFreeMemory, "vkFreeMemory");
-    VMA_FETCH_DEVICE_FUNC(vkMapMemory, PFN_vkMapMemory, "vkMapMemory");
-    VMA_FETCH_DEVICE_FUNC(vkUnmapMemory, PFN_vkUnmapMemory, "vkUnmapMemory");
-    VMA_FETCH_DEVICE_FUNC(vkFlushMappedMemoryRanges, PFN_vkFlushMappedMemoryRanges, "vkFlushMappedMemoryRanges");
-    VMA_FETCH_DEVICE_FUNC(vkInvalidateMappedMemoryRanges, PFN_vkInvalidateMappedMemoryRanges, "vkInvalidateMappedMemoryRanges");
-    VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory, PFN_vkBindBufferMemory, "vkBindBufferMemory");
-    VMA_FETCH_DEVICE_FUNC(vkBindImageMemory, PFN_vkBindImageMemory, "vkBindImageMemory");
-    VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements, PFN_vkGetBufferMemoryRequirements, "vkGetBufferMemoryRequirements");
-    VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements, PFN_vkGetImageMemoryRequirements, "vkGetImageMemoryRequirements");
-    VMA_FETCH_DEVICE_FUNC(vkCreateBuffer, PFN_vkCreateBuffer, "vkCreateBuffer");
-    VMA_FETCH_DEVICE_FUNC(vkDestroyBuffer, PFN_vkDestroyBuffer, "vkDestroyBuffer");
-    VMA_FETCH_DEVICE_FUNC(vkCreateImage, PFN_vkCreateImage, "vkCreateImage");
-    VMA_FETCH_DEVICE_FUNC(vkDestroyImage, PFN_vkDestroyImage, "vkDestroyImage");
-    VMA_FETCH_DEVICE_FUNC(vkCmdCopyBuffer, PFN_vkCmdCopyBuffer, "vkCmdCopyBuffer");
-
-#if VMA_VULKAN_VERSION >= 1001000
-    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-    {
-        VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2, "vkGetBufferMemoryRequirements2");
-        VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2, "vkGetImageMemoryRequirements2");
-        VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2, "vkBindBufferMemory2");
-        VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2, "vkBindImageMemory2");
-        VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2, "vkGetPhysicalDeviceMemoryProperties2");
-    }
-#endif
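Editor's note: `ImportVulkanFunctions_Custom` above copies only the caller-provided, non-null entry points, so an application can supply its own function pointers instead of relying on static or dynamic import. A hedged sketch of that path (member names match the `VMA_COPY_IF_NOT_NULL` list above; assumes statically linked Vulkan symbols are available to take addresses of):

```cpp
VmaVulkanFunctions vulkanFunctions = {};
vulkanFunctions.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
vulkanFunctions.vkGetPhysicalDeviceMemoryProperties = vkGetPhysicalDeviceMemoryProperties;
vulkanFunctions.vkAllocateMemory = vkAllocateMemory;
vulkanFunctions.vkFreeMemory = vkFreeMemory;
// ... fill the remaining members copied by VMA_COPY_IF_NOT_NULL above;
// any left null can still be resolved by ImportVulkanFunctions_Dynamic().

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.pVulkanFunctions = &vulkanFunctions;
```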
+15957 VmaMutexLock mutexLock(m_Mutex);
+15958 m_Allocator.Free(hAlloc);
+
+
+
+
+
+
+15966 m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0),
+
+
+
+
+
+
+15973 m_hDevice(pCreateInfo->device),
+15974 m_hInstance(pCreateInfo->instance),
+15975 m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
+15976 m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
+15977 *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
+15978 m_AllocationObjectAllocator(&m_AllocationCallbacks),
+15979 m_HeapSizeLimitMask(0),
+15980 m_DeviceMemoryCount(0),
+15981 m_PreferredLargeHeapBlockSize(0),
+15982 m_PhysicalDevice(pCreateInfo->physicalDevice),
+15983 m_CurrentFrameIndex(0),
+15984 m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
+
+15986 m_GlobalMemoryTypeBits(UINT32_MAX)
+
+15988 ,m_pRecorder(VMA_NULL)
+
+
+15991 if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
+
+15993 m_UseKhrDedicatedAllocation = false;
+15994 m_UseKhrBindMemory2 = false;
+
+
+15997 if(VMA_DEBUG_DETECT_CORRUPTION)
+
+
+16000 VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
+
+
+
+
+16005 if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
+
+16007 #if !(VMA_DEDICATED_ALLOCATION)
+
+
+16010 VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
+
+
+16013 #if !(VMA_BIND_MEMORY2)
+
+
+16016 VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros.");
+
+
+
+16020 #if !(VMA_MEMORY_BUDGET)
+
+
+16023 VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros.");
+
+
+16026 #if !(VMA_BUFFER_DEVICE_ADDRESS)
+16027 if(m_UseKhrBufferDeviceAddress)
+
+16029 VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT is set but required extension or Vulkan 1.2 is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
+
+
+16032 #if VMA_VULKAN_VERSION < 1002000
+16033 if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 2, 0))
+
+16035 VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_2 but required Vulkan version is disabled by preprocessor macros.");
+
+
+16038 #if VMA_VULKAN_VERSION < 1001000
+16039 if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
+
+16041 VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros.");
+
+
+16044 #if !(VMA_MEMORY_PRIORITY)
+16045 if(m_UseExtMemoryPriority)
+
+16047 VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
+
+
+
+16051 memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
+16052 memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
+16053 memset(&m_MemProps, 0, sizeof(m_MemProps));
+
+16055 memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
+16056 memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions));
+
+
+
+
+
+
+
+
+
+
+16067 (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
+16068 (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
+
+16070 VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
+16071 VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
+16072 VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
+16073 VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
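The VmaIsPow2 asserts above check values that the Vulkan specification already requires to be powers of two (bufferImageGranularity, nonCoherentAtomSize), so later align-up arithmetic can rely on it. A minimal sketch of the underlying bit trick (IsPow2 is an illustrative stand-in for VmaIsPow2):

    #include <cstdint>
    #include <cassert>

    // x is a power of two iff it is nonzero and has exactly one set bit;
    // for such an x, clearing the lowest set bit (x & (x - 1)) yields zero.
    inline bool IsPow2(uint64_t x)
    {
        return x != 0 && (x & (x - 1)) == 0;
    }

    int main()
    {
        assert(IsPow2(64));   // e.g. a typical nonCoherentAtomSize
        assert(!IsPow2(48));
        return 0;
    }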
-16075 #if VMA_DEDICATED_ALLOCATION
-16076 if(m_UseKhrDedicatedAllocation)
-
-16078 VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2KHR,
"vkGetBufferMemoryRequirements2KHR");
-16079 VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2KHR,
"vkGetImageMemoryRequirements2KHR");
-
-
-
-16083 #if VMA_BIND_MEMORY2
-16084 if(m_UseKhrBindMemory2)
-
-16086 VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2KHR,
"vkBindBufferMemory2KHR");
-16087 VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2KHR,
"vkBindImageMemory2KHR");
-
-
-
-16091 #if VMA_MEMORY_BUDGET
-16092 if(m_UseExtMemoryBudget)
-
-16094 VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR,
"vkGetPhysicalDeviceMemoryProperties2KHR");
-
-
-
-16098 #undef VMA_FETCH_DEVICE_FUNC
-16099 #undef VMA_FETCH_INSTANCE_FUNC
-
-
-
-
-16104 void VmaAllocator_T::ValidateVulkanFunctions()
-
-16106 VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
-16107 VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
-16108 VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
-16109 VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
-16110 VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
-16111 VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
-16112 VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
-16113 VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
-16114 VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
-16115 VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
-16116 VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
-16117 VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
-16118 VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
-16119 VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
-16120 VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
-16121 VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
-16122 VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
-
-16124 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
-16125 if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation)
-
-16127 VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
-16128 VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
-
-
-
-16132 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
-16133 if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2)
-
-16135 VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL);
-16136 VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL);
-
-
-
-16140 #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
-16141 if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-
-16143 VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL);
+
+
+
+16078 m_GlobalMemoryTypeBits = CalculateGlobalMemoryTypeBits();
+
+
+
+16082 for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
+
+16084 const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
+16085 if(limit != VK_WHOLE_SIZE)
+
+16087 m_HeapSizeLimitMask |= 1u << heapIndex;
+16088 if(limit < m_MemProps.memoryHeaps[heapIndex].size)
+
+16090 m_MemProps.memoryHeaps[heapIndex].size = limit;
+
+
+
+
+
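The loop above implements VmaAllocatorCreateInfo::pHeapSizeLimit: an entry of VK_WHOLE_SIZE leaves the heap untouched, while a smaller value marks the heap in m_HeapSizeLimitMask and clamps the size the allocator believes the heap has. A caller-side sketch, assuming instance, physicalDevice, and device were created elsewhere:

    #include <vulkan/vulkan.h>
    #include "vk_mem_alloc.h"

    VmaAllocator CreateAllocatorWithHeapLimit(VkInstance instance,
        VkPhysicalDevice physicalDevice, VkDevice device)
    {
        VkDeviceSize heapLimits[VK_MAX_MEMORY_HEAPS];
        for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
            heapLimits[i] = VK_WHOLE_SIZE;        // VK_WHOLE_SIZE = no limit
        heapLimits[0] = 256ull * 1024 * 1024;     // cap heap 0 at 256 MiB

        VmaAllocatorCreateInfo createInfo = {};
        createInfo.instance = instance;
        createInfo.physicalDevice = physicalDevice;
        createInfo.device = device;
        createInfo.pHeapSizeLimit = heapLimits;

        VmaAllocator allocator = VK_NULL_HANDLE;
        vmaCreateAllocator(&createInfo, &allocator);
        return allocator;
    }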
+16096 for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
+
+16098 const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
+
+16100 m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
+
+
+
+16104 preferredBlockSize,
+
+
+16107 GetBufferImageGranularity(),
+
+
+
+
+
+
+
+
+
+
+
+16119 VkResult res = VK_SUCCESS;
+
+
+
+
+16124 #if VMA_RECORDING_ENABLED
+16125 m_pRecorder = vma_new(this, VmaRecorder)();
+
+16127 if(res != VK_SUCCESS)
+
+
+
+16131 m_pRecorder->WriteConfiguration(
+16132 m_PhysicalDeviceProperties,
+
+16134 m_VulkanApiVersion,
+16135 m_UseKhrDedicatedAllocation,
+16136 m_UseKhrBindMemory2,
+16137 m_UseExtMemoryBudget,
+16138 m_UseAmdDeviceCoherentMemory);
+16139 m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
+
+16141 VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
+16142 return VK_ERROR_FEATURE_NOT_PRESENT;
+
-
-
-
-16148 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
-
-16150 const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
-16151 const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
-16152 const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
-16153 return VmaAlignUp(isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32);
+
+16146 #if VMA_MEMORY_BUDGET
+16147 if(m_UseExtMemoryBudget)
+
+16149 UpdateVulkanBudget();
+
+
+
+
-16156 VkResult VmaAllocator_T::AllocateMemoryOfType(
-
-16158 VkDeviceSize alignment,
-16159 bool dedicatedAllocation,
-16160 VkBuffer dedicatedBuffer,
-16161 VkBufferUsageFlags dedicatedBufferUsage,
-16162 VkImage dedicatedImage,
-
-16164 uint32_t memTypeIndex,
-16165 VmaSuballocationType suballocType,
-16166 size_t allocationCount,
-
-
-16169 VMA_ASSERT(pAllocations != VMA_NULL);
-16170 VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
-
-
-
-
-
-16176 (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
-
-
-
-
-
-
-
-
-
-16186 VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
-16187 VMA_ASSERT(blockVector);
-
-16189 const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
-16190 bool preferDedicatedMemory =
-16191 VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
-16192 dedicatedAllocation ||
-
-16194 size > preferredBlockSize / 2;
-
-16196 if(preferDedicatedMemory &&
-
-16198 finalCreateInfo.pool == VK_NULL_HANDLE)
-
-
-
-
-
-
-
-
-16207 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-
-
-
-16211 return AllocateDedicatedMemory(
-
-
-
-
-
-
-
-
-
-16221 dedicatedBufferUsage,
-
-
-
-
-
-
-
-16229 VkResult res = blockVector->Allocate(
-16230 m_CurrentFrameIndex.load(),
-
-
-
-
-
-
-16237 if(res == VK_SUCCESS)
-
-
-
+16156 VmaAllocator_T::~VmaAllocator_T()
+
+16158 #if VMA_RECORDING_ENABLED
+16159 if(m_pRecorder != VMA_NULL)
+
+16161 m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
+16162 vma_delete(this, m_pRecorder);
+
+
+
+16166 VMA_ASSERT(m_Pools.IsEmpty());
+
+16168 for(size_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
+
+16170 if(!m_DedicatedAllocations[memTypeIndex].IsEmpty())
+
+16172 VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
+
+
+16175 vma_delete(this, m_pBlockVectors[memTypeIndex]);
+
+
+
+16179 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
+
+16181 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
+16182 ImportVulkanFunctions_Static();
+
+
+16185 if(pVulkanFunctions != VMA_NULL)
+
+16187 ImportVulkanFunctions_Custom(pVulkanFunctions);
+
+
+16190 #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
+16191 ImportVulkanFunctions_Dynamic();
+
+
+16194 ValidateVulkanFunctions();
+
+
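ImportVulkanFunctions() above layers its sources: statically linked entry points first (when VMA_STATIC_VULKAN_FUNCTIONS == 1), then whatever the user passed in VmaVulkanFunctions, then dynamic fetching for anything still null, and finally validation. A caller-side sketch of supplying a partial VmaVulkanFunctions and letting the dynamic stage fill the rest (the surrounding handles are assumed to exist):

    #include <vulkan/vulkan.h>
    #include "vk_mem_alloc.h"

    void FillCreateInfo(VmaAllocatorCreateInfo& createInfo,
                        VmaVulkanFunctions& vulkanFunctions)
    {
        // Provide only a couple of pointers explicitly; members left null are
        // picked up by ImportVulkanFunctions_Dynamic when
        // VMA_DYNAMIC_VULKAN_FUNCTIONS == 1.
        vulkanFunctions = {};
        vulkanFunctions.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
        vulkanFunctions.vkGetPhysicalDeviceMemoryProperties = vkGetPhysicalDeviceMemoryProperties;
        createInfo.pVulkanFunctions = &vulkanFunctions;
    }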
+16197 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
+
+16199 void VmaAllocator_T::ImportVulkanFunctions_Static()
+
+
+16202 m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
+16203 m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
+16204 m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
+16205 m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
+16206 m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
+16207 m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
+16208 m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
+16209 m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
+16210 m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
+16211 m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
+16212 m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
+16213 m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
+16214 m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
+16215 m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
+16216 m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
+16217 m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
+16218 m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
+
+
+16221 #if VMA_VULKAN_VERSION >= 1001000
+16222 if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
+
+16224 m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2)vkGetBufferMemoryRequirements2;
+16225 m_VulkanFunctions.vkGetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2)vkGetImageMemoryRequirements2;
+16226 m_VulkanFunctions.vkBindBufferMemory2KHR = (PFN_vkBindBufferMemory2)vkBindBufferMemory2;
+16227 m_VulkanFunctions.vkBindImageMemory2KHR = (PFN_vkBindImageMemory2)vkBindImageMemory2;
+16228 m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR = (PFN_vkGetPhysicalDeviceMemoryProperties2)vkGetPhysicalDeviceMemoryProperties2;
+
+
+
+
+
+
+16235 void VmaAllocator_T::ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions)
+
+16237 VMA_ASSERT(pVulkanFunctions != VMA_NULL);
+
+16239 #define VMA_COPY_IF_NOT_NULL(funcName) \
+16240 if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
-
-
-
-16245 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-
-
-
-16249 res = AllocateDedicatedMemory(
-
-
-
-
-
-
-
-
-
-16259 dedicatedBufferUsage,
-
-
-
-16263 if(res == VK_SUCCESS)
-
-
-16266 VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
-
-
-
-
-
-16272 VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
-
-
-
-
-
+16242 VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
+16243 VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
+16244 VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
+16245 VMA_COPY_IF_NOT_NULL(vkFreeMemory);
+16246 VMA_COPY_IF_NOT_NULL(vkMapMemory);
+16247 VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
+16248 VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
+16249 VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
+16250 VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
+16251 VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
+16252 VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
+16253 VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
+16254 VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
+16255 VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
+16256 VMA_COPY_IF_NOT_NULL(vkCreateImage);
+16257 VMA_COPY_IF_NOT_NULL(vkDestroyImage);
+16258 VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
+
+16260 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
+16261 VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
+16262 VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
+
+
+16265 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
+16266 VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR);
+16267 VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR);
+
+
+16270 #if VMA_MEMORY_BUDGET
+16271 VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR);
+
+
+16274 #undef VMA_COPY_IF_NOT_NULL
+
+
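ImportVulkanFunctions_Custom() above relies on a copy-if-not-null macro so user-supplied pointers override defaults only where one was actually provided. A generic sketch of the same idea (Callbacks and MergeCallbacks are illustrative, not library types):

    struct Callbacks
    {
        void (*onAlloc)() = nullptr;
        void (*onFree)() = nullptr;
    };

    #define COPY_IF_NOT_NULL(dst, src, member) \
        if((src).member != nullptr) (dst).member = (src).member;

    // Only the members the user filled in replace the existing ones.
    void MergeCallbacks(Callbacks& active, const Callbacks& user)
    {
        COPY_IF_NOT_NULL(active, user, onAlloc)
        COPY_IF_NOT_NULL(active, user, onFree)
    }

    #undef COPY_IF_NOT_NULL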
+16277 #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
-16279 VkResult VmaAllocator_T::AllocateDedicatedMemory(
-
-16281 VmaSuballocationType suballocType,
-16282 uint32_t memTypeIndex,
-
-
-16285 bool isUserDataString,
-
-
-16288 VkBuffer dedicatedBuffer,
-16289 VkBufferUsageFlags dedicatedBufferUsage,
-16290 VkImage dedicatedImage,
-16291 size_t allocationCount,
-
-
-16294 VMA_ASSERT(allocationCount > 0 && pAllocations);
-
-
-
-16298 const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
-
-16300 GetBudget(&heapBudget, heapIndex, 1);
-16301 if(heapBudget.usage + size * allocationCount > heapBudget.budget)
-
-16303 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-
-
-
-16307 VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
-16308 allocInfo.memoryTypeIndex = memTypeIndex;
-16309 allocInfo.allocationSize = size;
-
-16311 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
-16312 VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
-16313 if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-
-16315 if(dedicatedBuffer != VK_NULL_HANDLE)
-
-16317 VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
-16318 dedicatedAllocInfo.buffer = dedicatedBuffer;
-16319 VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
-
-16321 else if(dedicatedImage != VK_NULL_HANDLE)
-
-16323 dedicatedAllocInfo.image = dedicatedImage;
-16324 VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
-
-
-
-
-16329 #if VMA_BUFFER_DEVICE_ADDRESS
-16330 VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
-16331 if(m_UseKhrBufferDeviceAddress)
-
-16333 bool canContainBufferWithDeviceAddress = true;
-16334 if(dedicatedBuffer != VK_NULL_HANDLE)
-
-16336 canContainBufferWithDeviceAddress = dedicatedBufferUsage == UINT32_MAX ||
-16337 (dedicatedBufferUsage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT) != 0;
-
-16339 else if(dedicatedImage != VK_NULL_HANDLE)
-
-16341 canContainBufferWithDeviceAddress = false;
-
-16343 if(canContainBufferWithDeviceAddress)
-
-16345 allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
-16346 VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
-
-
-
-
-16351 #if VMA_MEMORY_PRIORITY
-16352 VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT };
-16353 if(m_UseExtMemoryPriority)
-
-16355 priorityInfo.priority = priority;
-16356 VmaPnextChainPushFront(&allocInfo, &priorityInfo);
-
-
-
-
-16361 VkResult res = VK_SUCCESS;
-16362 for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
-
-16364 res = AllocateDedicatedMemoryPage(
-
-
-
-
-
-
-
-16372 pAllocations + allocIndex);
-16373 if(res != VK_SUCCESS)
-
-
-
-
-
-16379 if(res == VK_SUCCESS)
-
-
-
-16383 VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
-16384 AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
-16385 VMA_ASSERT(pDedicatedAllocations);
-16386 for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
-
-16388 VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
-
-
+16279 void VmaAllocator_T::ImportVulkanFunctions_Dynamic()
+
+16281 #define VMA_FETCH_INSTANCE_FUNC(memberName, functionPointerType, functionNameString) \
+16282 if(m_VulkanFunctions.memberName == VMA_NULL) \
+16283 m_VulkanFunctions.memberName = \
+16284 (functionPointerType)vkGetInstanceProcAddr(m_hInstance, functionNameString);
+16285 #define VMA_FETCH_DEVICE_FUNC(memberName, functionPointerType, functionNameString) \
+16286 if(m_VulkanFunctions.memberName == VMA_NULL) \
+16287 m_VulkanFunctions.memberName = \
+16288 (functionPointerType)vkGetDeviceProcAddr(m_hDevice, functionNameString);
+
+16290 VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceProperties, PFN_vkGetPhysicalDeviceProperties, "vkGetPhysicalDeviceProperties");
+16291 VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties, PFN_vkGetPhysicalDeviceMemoryProperties, "vkGetPhysicalDeviceMemoryProperties");
+16292 VMA_FETCH_DEVICE_FUNC(vkAllocateMemory, PFN_vkAllocateMemory, "vkAllocateMemory");
+16293 VMA_FETCH_DEVICE_FUNC(vkFreeMemory, PFN_vkFreeMemory, "vkFreeMemory");
+16294 VMA_FETCH_DEVICE_FUNC(vkMapMemory, PFN_vkMapMemory, "vkMapMemory");
+16295 VMA_FETCH_DEVICE_FUNC(vkUnmapMemory, PFN_vkUnmapMemory, "vkUnmapMemory");
+16296 VMA_FETCH_DEVICE_FUNC(vkFlushMappedMemoryRanges, PFN_vkFlushMappedMemoryRanges, "vkFlushMappedMemoryRanges");
+16297 VMA_FETCH_DEVICE_FUNC(vkInvalidateMappedMemoryRanges, PFN_vkInvalidateMappedMemoryRanges, "vkInvalidateMappedMemoryRanges");
+16298 VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory, PFN_vkBindBufferMemory, "vkBindBufferMemory");
+16299 VMA_FETCH_DEVICE_FUNC(vkBindImageMemory, PFN_vkBindImageMemory, "vkBindImageMemory");
+16300 VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements, PFN_vkGetBufferMemoryRequirements, "vkGetBufferMemoryRequirements");
+16301 VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements, PFN_vkGetImageMemoryRequirements, "vkGetImageMemoryRequirements");
+16302 VMA_FETCH_DEVICE_FUNC(vkCreateBuffer, PFN_vkCreateBuffer, "vkCreateBuffer");
+16303 VMA_FETCH_DEVICE_FUNC(vkDestroyBuffer, PFN_vkDestroyBuffer, "vkDestroyBuffer");
+16304 VMA_FETCH_DEVICE_FUNC(vkCreateImage, PFN_vkCreateImage, "vkCreateImage");
+16305 VMA_FETCH_DEVICE_FUNC(vkDestroyImage, PFN_vkDestroyImage, "vkDestroyImage");
+16306 VMA_FETCH_DEVICE_FUNC(vkCmdCopyBuffer, PFN_vkCmdCopyBuffer, "vkCmdCopyBuffer");
+
+16308 #if VMA_VULKAN_VERSION >= 1001000
+16309 if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
+
+16311 VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2, "vkGetBufferMemoryRequirements2");
+16312 VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2, "vkGetImageMemoryRequirements2");
+16313 VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2, "vkBindBufferMemory2");
+16314 VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2, "vkBindImageMemory2");
+16315 VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2, "vkGetPhysicalDeviceMemoryProperties2");
+
+
+
+16319 #if VMA_DEDICATED_ALLOCATION
+16320 if(m_UseKhrDedicatedAllocation)
+
+16322 VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2KHR, "vkGetBufferMemoryRequirements2KHR");
+16323 VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2KHR, "vkGetImageMemoryRequirements2KHR");
+
+
+
+16327 #if VMA_BIND_MEMORY2
+16328 if(m_UseKhrBindMemory2)
+
+16330 VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2KHR, "vkBindBufferMemory2KHR");
+16331 VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2KHR, "vkBindImageMemory2KHR");
+
+
+
+16335 #if VMA_MEMORY_BUDGET
+16336 if(m_UseExtMemoryBudget)
+
+16338 VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2KHR");
+
+
+
+16342 #undef VMA_FETCH_DEVICE_FUNC
+16343 #undef VMA_FETCH_INSTANCE_FUNC
+
+
+
+
+16348 void VmaAllocator_T::ValidateVulkanFunctions()
+
+16350 VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
+16351 VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
+16352 VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
+16353 VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
+16354 VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
+16355 VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
+16356 VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
+16357 VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
+16358 VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
+16359 VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
+16360 VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
+16361 VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
+16362 VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
+16363 VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
+16364 VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
+16365 VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
+16366 VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
+
+16368 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
+16369 if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation)
+
+16371 VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
+16372 VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
+
+
+
+16376 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
+16377 if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2)
+
+16379 VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL);
+16380 VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL);
+
+
+
+16384 #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
+16385 if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
+
+16387 VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL);
+
+
+
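ValidateVulkanFunctions() above asserts that every required entry point ended up non-null, turning an incomplete import into an immediate, diagnosable failure instead of a crash at first use. A minimal sketch of the same fail-fast shape (Functions and ValidateFunctions are illustrative names):

    #include <cassert>

    struct Functions
    {
        void (*allocate)() = nullptr;
        void (*free)() = nullptr;
    };

    void ValidateFunctions(const Functions& fns)
    {
        assert(fns.allocate != nullptr && "allocate not imported");
        assert(fns.free != nullptr && "free not imported");
    }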
-16392 VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
-
-
-
-
-16397 while(allocIndex--)
-
-
-16400 VkDeviceMemory hMemory = currAlloc->GetMemory();
-
-
-
-
-
-
-
-
-
-
-
-16412 FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
-16413 m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize());
-16414 currAlloc->SetUserData(this, VMA_NULL);
-16415 m_AllocationObjectAllocator.Free(currAlloc);
-
+16392 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
+
+16394 const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
+16395 const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
+16396 const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
+16397 return VmaAlignUp(isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32);
+
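CalcPreferredBlockSize() above picks heapSize / 8 for small heaps (at most VMA_SMALL_HEAP_MAX_SIZE, 1 GiB by default) and the configured preferred size otherwise (256 MiB by default), aligned up to 32 bytes. A standalone sketch of the same arithmetic with those defaults spelled out:

    #include <cstdint>

    using DeviceSize = uint64_t;

    DeviceSize AlignUp(DeviceSize v, DeviceSize a) { return (v + a - 1) / a * a; }

    DeviceSize PreferredBlockSize(DeviceSize heapSize, DeviceSize preferredLargeHeapBlockSize)
    {
        const DeviceSize smallHeapMaxSize = 1024ull * 1024 * 1024; // VMA_SMALL_HEAP_MAX_SIZE default
        const bool isSmallHeap = heapSize <= smallHeapMaxSize;
        return AlignUp(isSmallHeap ? heapSize / 8 : preferredLargeHeapBlockSize, 32);
    }
    // e.g. a 256 MiB heap yields 32 MiB blocks; an 8 GiB heap uses the 256 MiB default.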
+
+16400 VkResult VmaAllocator_T::AllocateMemoryOfType(
+
+16402 VkDeviceSize alignment,
+16403 bool dedicatedAllocation,
+16404 VkBuffer dedicatedBuffer,
+16405 VkBufferUsageFlags dedicatedBufferUsage,
+16406 VkImage dedicatedImage,
+
+16408 uint32_t memTypeIndex,
+16409 VmaSuballocationType suballocType,
+16410 size_t allocationCount,
+
+
+16413 VMA_ASSERT(pAllocations != VMA_NULL);
+16414 VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
+
+
-16418 memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
-
-
-
-
-
-16424 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
-
-16426 VmaSuballocationType suballocType,
-16427 uint32_t memTypeIndex,
-16428 const VkMemoryAllocateInfo& allocInfo,
-
-16430 bool isUserDataString,
-
-
-
-16434 VkDeviceMemory hMemory = VK_NULL_HANDLE;
-16435 VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
-
-
-16438 VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
-
-
-
-16442 void* pMappedData = VMA_NULL;
-
-
-16445 res = (*m_VulkanFunctions.vkMapMemory)(
-
-
-
-
-
-
-
-
-16454 VMA_DEBUG_LOG(" vkMapMemory FAILED");
-16455 FreeVulkanMemory(memTypeIndex, size, hMemory);
-
-
-
-
-16460 *pAllocation = m_AllocationObjectAllocator.Allocate(m_CurrentFrameIndex.load(), isUserDataString);
-16461 (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
-16462 (*pAllocation)->SetUserData(this, pUserData);
-16463 m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size);
-16464 if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
-
-16466 FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
-
-
-
-
-
-16472 void VmaAllocator_T::GetBufferMemoryRequirements(
-
-16474 VkMemoryRequirements& memReq,
-16475 bool& requiresDedicatedAllocation,
-16476 bool& prefersDedicatedAllocation) const
-
-16478 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
-16479 if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-
-16481 VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
-16482 memReqInfo.buffer = hBuffer;
-
-16484 VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
+
+
+16420 (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
+
+
+
+
+
+
+
+
+
+16430 VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
+16431 VMA_ASSERT(blockVector);
+
+16433 const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
+16434 bool preferDedicatedMemory =
+16435 VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
+16436 dedicatedAllocation ||
+
+16438 size > preferredBlockSize / 2;
+
+16440 if(preferDedicatedMemory &&
+
+16442 finalCreateInfo.pool == VK_NULL_HANDLE)
+
+
+
+
+
+
+
+
+16451 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
+
+
+16455 return AllocateDedicatedMemory(
+
+
+
+
+
+
+
+
+
+16465 dedicatedBufferUsage,
+
+
+
+
+
+
+
+16473 VkResult res = blockVector->Allocate(
+16474 m_CurrentFrameIndex.load(),
+
+
+
+
+
+
+16481 if(res == VK_SUCCESS)
+
+
+
-16486 VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
-16487 VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
-
-16489 (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
-
-16491 memReq = memReq2.memoryRequirements;
-16492 requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
-16493 prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
-
-
-
-
-16498 (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
-16499 requiresDedicatedAllocation = false;
-16500 prefersDedicatedAllocation = false;
-
-
-
-16504 void VmaAllocator_T::GetImageMemoryRequirements(
-
-16506 VkMemoryRequirements& memReq,
-16507 bool& requiresDedicatedAllocation,
-16508 bool& prefersDedicatedAllocation) const
-
-16510 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
-16511 if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-
-16513 VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
-16514 memReqInfo.image = hImage;
-
-16516 VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
-
-16518 VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
-16519 VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
-
-16521 (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
-
-16523 memReq = memReq2.memoryRequirements;
-16524 requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
-16525 prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
+
+
+
+16489 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
+
+
+
+
+16495 if(m_DeviceMemoryCount.load() > m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount * 3 / 4)
+
+16497 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
+
+16500 res = AllocateDedicatedMemory(
+
+
+
+
+
+
+
+
+
+16510 dedicatedBufferUsage,
+
+
+
+16514 if(res == VK_SUCCESS)
+
+
+16517 VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
+
+
+
+
+
+16523 VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
+
+
-
-
-
-16530 (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
-16531 requiresDedicatedAllocation = false;
-16532 prefersDedicatedAllocation = false;
-
-
-
-16536 VkResult VmaAllocator_T::AllocateMemory(
-16537 const VkMemoryRequirements& vkMemReq,
-16538 bool requiresDedicatedAllocation,
-16539 bool prefersDedicatedAllocation,
-16540 VkBuffer dedicatedBuffer,
-16541 VkBufferUsageFlags dedicatedBufferUsage,
-16542 VkImage dedicatedImage,
-
-16544 VmaSuballocationType suballocType,
-16545 size_t allocationCount,
-
-
-16548 memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
-
-16550 VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
-
-16552 if(vkMemReq.size == 0)
-
-16554 return VK_ERROR_VALIDATION_FAILED_EXT;
+
+
+16529 VkResult VmaAllocator_T::AllocateDedicatedMemory(
+
+16531 VmaSuballocationType suballocType,
+16532 uint32_t memTypeIndex,
+
+
+16535 bool isUserDataString,
+
+
+16538 VkBuffer dedicatedBuffer,
+16539 VkBufferUsageFlags dedicatedBufferUsage,
+16540 VkImage dedicatedImage,
+16541 size_t allocationCount,
+
+
+16544 VMA_ASSERT(allocationCount > 0 && pAllocations);
+
+
+
+16548 const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
+
+16550 GetBudget(&heapBudget, heapIndex, 1);
+16551 if(heapBudget.usage + size * allocationCount > heapBudget.budget)
+
+16553 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
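The check above gates dedicated allocations on the heap budget: if current usage plus the requested bytes would exceed the budget, the call fails early with VK_ERROR_OUT_OF_DEVICE_MEMORY rather than risking paging or a driver failure. A simplified sketch of the same test:

    #include <cstdint>
    #include <cstddef>

    struct Budget { uint64_t usage; uint64_t budget; };

    // Mirrors: heapBudget.usage + size * allocationCount > heapBudget.budget
    bool FitsInBudget(const Budget& heapBudget, uint64_t size, size_t allocationCount)
    {
        return heapBudget.usage + size * allocationCount <= heapBudget.budget;
    }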
-
-
-
-16559 VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
-16560 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-
-
-
+
+16557 VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
+16558 allocInfo.memoryTypeIndex = memTypeIndex;
+16559 allocInfo.allocationSize = size;
+
+16561 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
+16562 VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
+16563 if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-16565 VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
-16566 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-
-16568 if(requiresDedicatedAllocation)
-
-
-
-16572 VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
-16573 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-
-16575 if(createInfo.pool != VK_NULL_HANDLE)
-
-16577 VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
-16578 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-
-
-16581 if((createInfo.pool != VK_NULL_HANDLE) &&
-
-
-16584 VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
-16585 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-
-
-16588 if(createInfo.pool != VK_NULL_HANDLE)
-
-16590 const VkDeviceSize alignmentForPool = VMA_MAX(
-16591 vkMemReq.alignment,
-16592 GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
-
-
-
-
-16597 (m_MemProps.memoryTypes[createInfo.pool->m_BlockVector.GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
-
-
-
-
-16602 return createInfo.pool->m_BlockVector.Allocate(
-16603 m_CurrentFrameIndex.load(),
-
-
-
-
-
-
-
-
-
-
-16614 uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
-16615 uint32_t memTypeIndex = UINT32_MAX;
-
-16617 if(res == VK_SUCCESS)
-
-16619 VkDeviceSize alignmentForMemType = VMA_MAX(
-16620 vkMemReq.alignment,
-16621 GetMemoryTypeMinAlignment(memTypeIndex));
-
-16623 res = AllocateMemoryOfType(
-
-16625 alignmentForMemType,
-16626 requiresDedicatedAllocation || prefersDedicatedAllocation,
-
-16628 dedicatedBufferUsage,
-
-
-
-
-
-
-
-16636 if(res == VK_SUCCESS)
-
-
-
-
-
-
-
-
-
-16646 memoryTypeBits &= ~(1u << memTypeIndex);
-
-
-16649 if(res == VK_SUCCESS)
-
-16651 alignmentForMemType = VMA_MAX(
-16652 vkMemReq.alignment,
-16653 GetMemoryTypeMinAlignment(memTypeIndex));
-
-16655 res = AllocateMemoryOfType(
-
-16657 alignmentForMemType,
-16658 requiresDedicatedAllocation || prefersDedicatedAllocation,
-
-16660 dedicatedBufferUsage,
-
-
-
-
-
-
-
-16668 if(res == VK_SUCCESS)
-
-
-
-
-
-
-
-
-
-16678 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-
-
-
-
-
-
-
-
-
-
-16689 void VmaAllocator_T::FreeMemory(
-16690 size_t allocationCount,
-
-
-16693 VMA_ASSERT(pAllocations);
-
-16695 for(
size_t allocIndex = allocationCount; allocIndex--; )
-
-
-
-16699 if(allocation != VK_NULL_HANDLE)
-
-16701 if(TouchAllocation(allocation))
-
-16703 if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
-
-16705 FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
-
-
-16708 switch(allocation->GetType())
-
-16710 case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
-
-16712 VmaBlockVector* pBlockVector = VMA_NULL;
-16713 VmaPool hPool = allocation->GetBlock()->GetParentPool();
-16714 if(hPool != VK_NULL_HANDLE)
-
-16716 pBlockVector = &hPool->m_BlockVector;
-
-
-
-16720 const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
-16721 pBlockVector = m_pBlockVectors[memTypeIndex];
-
-16723 pBlockVector->Free(allocation);
-
-
-16726 case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
-16727 FreeDedicatedMemory(allocation);
-
-
-
-
-
-
-
-16735 m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize());
-16736 allocation->SetUserData(
this, VMA_NULL);
-16737 m_AllocationObjectAllocator.Free(allocation);
-
-
-
-
-16742 VkResult VmaAllocator_T::ResizeAllocation(
-
-16744 VkDeviceSize newSize)
-
-
-16747 if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
-
-16749 return VK_ERROR_VALIDATION_FAILED_EXT;
+16565 if(dedicatedBuffer != VK_NULL_HANDLE)
+
+16567 VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
+16568 dedicatedAllocInfo.buffer = dedicatedBuffer;
+16569 VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
+
+16571 else if(dedicatedImage != VK_NULL_HANDLE)
+
+16573 dedicatedAllocInfo.image = dedicatedImage;
+16574 VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
+
+
+
+
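The code above pushes VkMemoryDedicatedAllocateInfoKHR onto the front of the VkMemoryAllocateInfo pNext chain when the allocation is dedicated to one buffer or one image. A standalone sketch of the same chaining, assuming Vulkan 1.1 (or VK_KHR_dedicated_allocation) and a created device and buffer:

    #include <vulkan/vulkan.h>

    VkResult AllocateDedicatedForBuffer(VkDevice device, uint32_t memTypeIndex,
        VkDeviceSize size, VkBuffer buffer, VkDeviceMemory* pMemory)
    {
        VkMemoryDedicatedAllocateInfo dedicatedInfo =
            { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO };
        dedicatedInfo.buffer = buffer; // exactly one of buffer/image may be set

        VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
        allocInfo.pNext = &dedicatedInfo;
        allocInfo.memoryTypeIndex = memTypeIndex;
        allocInfo.allocationSize = size;

        return vkAllocateMemory(device, &allocInfo, nullptr, pMemory);
    }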
+16579 #if VMA_BUFFER_DEVICE_ADDRESS
+16580 VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
+16581 if(m_UseKhrBufferDeviceAddress)
+
+16583 bool canContainBufferWithDeviceAddress = true;
+16584 if(dedicatedBuffer != VK_NULL_HANDLE)
+
+16586 canContainBufferWithDeviceAddress = dedicatedBufferUsage == UINT32_MAX ||
+16587 (dedicatedBufferUsage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT) != 0;
+
+16589 else if(dedicatedImage != VK_NULL_HANDLE)
+
+16591 canContainBufferWithDeviceAddress = false;
+
+16593 if(canContainBufferWithDeviceAddress)
+
+16595 allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
+16596 VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
+
+
+
+
+16601 #if VMA_MEMORY_PRIORITY
+16602 VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT };
+16603 if(m_UseExtMemoryPriority)
+
+16605 priorityInfo.priority = priority;
+16606 VmaPnextChainPushFront(&allocInfo, &priorityInfo);
+
+
+
+
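When VK_EXT_memory_priority is enabled, the block above attaches a priority in [0, 1] to the allocation through the same push-front pNext mechanism. A sketch of that step in isolation:

    #include <vulkan/vulkan.h>

    // priorityInfo must outlive the vkAllocateMemory call that consumes allocInfo.
    void AttachPriority(VkMemoryAllocateInfo* allocInfo,
        VkMemoryPriorityAllocateInfoEXT* priorityInfo, float priority)
    {
        priorityInfo->sType = VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT;
        priorityInfo->pNext = allocInfo->pNext;  // push onto the front of the chain
        priorityInfo->priority = priority;       // 0.5f behaves like the default
        allocInfo->pNext = priorityInfo;
    }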
+16611 VkResult res = VK_SUCCESS;
+16612 for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
+
+16614 res = AllocateDedicatedMemoryPage(
+
+
+
+
+
+
+
+16622 pAllocations + allocIndex);
+16623 if(res != VK_SUCCESS)
+
+
+
+
+
+16629 if(res == VK_SUCCESS)
+
+
+
+16633 VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
+16634 DedicatedAllocationLinkedList& dedicatedAllocations = m_DedicatedAllocations[memTypeIndex];
+16635 for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
+
+16637 dedicatedAllocations.PushBack(pAllocations[allocIndex]);
+
+
+
+16641 VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
+
+
+
+
+16646 while(allocIndex--)
+
+
+16649 VkDeviceMemory hMemory = currAlloc->GetMemory();
+
+
+
+
+
+
+
+
+
+
+
+16661 FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
+16662 m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize());
+16663 currAlloc->SetUserData(this, VMA_NULL);
+16664 m_AllocationObjectAllocator.Free(currAlloc);
+
+
+16667 memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
+
+
+
+
+
+16673 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
+
+16675 VmaSuballocationType suballocType,
+16676 uint32_t memTypeIndex,
+16677 const VkMemoryAllocateInfo& allocInfo,
+
+16679 bool isUserDataString,
+
+
+
+16683 VkDeviceMemory hMemory = VK_NULL_HANDLE;
+16684 VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
+
+
+16687 VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
+
+
+
+16691 void* pMappedData = VMA_NULL;
+
+
+16694 res = (*m_VulkanFunctions.vkMapMemory)(
+
+
+
+
+
+
+
+
+16703 VMA_DEBUG_LOG(" vkMapMemory FAILED");
+16704 FreeVulkanMemory(memTypeIndex, size, hMemory);
+
+
+
+
+16709 *pAllocation = m_AllocationObjectAllocator.Allocate(m_CurrentFrameIndex.load(), isUserDataString);
+16710 (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
+16711 (*pAllocation)->SetUserData(this, pUserData);
+16712 m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size);
+16713 if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
+
+16715 FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
+
+
+
+
+
+16721 void VmaAllocator_T::GetBufferMemoryRequirements(
+
+16723 VkMemoryRequirements& memReq,
+16724 bool& requiresDedicatedAllocation,
+16725 bool& prefersDedicatedAllocation) const
+
+16727 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
+16728 if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
+
+16730 VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
+16731 memReqInfo.buffer = hBuffer;
+
+16733 VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
+
+16735 VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
+16736 VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
+
+16738 (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
+
+16740 memReq = memReq2.memoryRequirements;
+16741 requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
+16742 prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
+
+
+
+
+16747 (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
+16748 requiresDedicatedAllocation = false;
+16749 prefersDedicatedAllocation = false;
-16751 if(newSize == alloc->GetSize())
-
-
-
-16755 return VK_ERROR_OUT_OF_POOL_MEMORY;
-
-
-16758 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
-
-
-16761 InitStatInfo(pStats->total);
-16762 for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
-
-16764 for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
-
+
+
+16753 void VmaAllocator_T::GetImageMemoryRequirements(
+
+16755 VkMemoryRequirements& memReq,
+16756 bool& requiresDedicatedAllocation,
+16757 bool& prefersDedicatedAllocation) const
+
+16759 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
+16760 if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
+
+16762 VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
+16763 memReqInfo.image = hImage;
+
+16765 VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
-
-16768 for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
-
-16770 VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
-16771 VMA_ASSERT(pBlockVector);
-16772 pBlockVector->AddStats(pStats);
-
-
-
-
-16777 VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
-16778 for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
-
-16780 m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
-
+16767 VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
+16768 VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
+
+16770 (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
+
+16772 memReq = memReq2.memoryRequirements;
+16773 requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
+16774 prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
+
+
+
+
+16779 (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
+16780 requiresDedicatedAllocation = false;
+16781 prefersDedicatedAllocation = false;
-
-
-16785 for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
-
-16787 const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
-16788 VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
-16789 AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
-16790 VMA_ASSERT(pDedicatedAllocVector);
-16791 for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
-
-
-16794 (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
-16795 VmaAddStatInfo(pStats->total, allocationStatInfo);
-16796 VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
-16797 VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
-
-
+
+
+16785 VkResult VmaAllocator_T::AllocateMemory(
+16786 const VkMemoryRequirements& vkMemReq,
+16787 bool requiresDedicatedAllocation,
+16788 bool prefersDedicatedAllocation,
+16789 VkBuffer dedicatedBuffer,
+16790 VkBufferUsageFlags dedicatedBufferUsage,
+16791 VkImage dedicatedImage,
+
+16793 VmaSuballocationType suballocType,
+16794 size_t allocationCount,
+
+
+16797 memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
+
+16799 VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
-
-16802 VmaPostprocessCalcStatInfo(pStats->total);
-16803 for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
-16804 VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
-16805 for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
-16806 VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
-
-
-16809 void VmaAllocator_T::GetBudget(VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount)
-
-16811 #if VMA_MEMORY_BUDGET
-16812 if(m_UseExtMemoryBudget)
+16801 if(vkMemReq.size == 0)
+
+16803 return VK_ERROR_VALIDATION_FAILED_EXT;
+
+
+
+
+16808 VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
+16809 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
+
+
-16814 if(m_Budget.m_OperationsSinceBudgetFetch < 30)
-
-16816 VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex);
-16817 for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
-
-16819 const uint32_t heapIndex = firstHeap + i;
-
-16821 outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
-
-
-16824 if(m_Budget.m_VulkanUsage[heapIndex] + outBudget->blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex])
-
-16826 outBudget->usage = m_Budget.m_VulkanUsage[heapIndex] +
-16827 outBudget->blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
-
-
-
-16831 outBudget->usage = 0;
-
-
-
-16835 outBudget->budget = VMA_MIN(
-16836 m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size);
-
-
-
-
-16841 UpdateVulkanBudget();
-16842 GetBudget(outBudget, firstHeap, heapCount);
-
-
-
-
-
-16848 for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
-
-16850 const uint32_t heapIndex = firstHeap + i;
-
-16852 outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
-
-
-
-16856 outBudget->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10;
-
-
-
-
-16861 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
-
-16863 VkResult VmaAllocator_T::DefragmentationBegin(
-
-
-
-
-
-
-
-
-
-16873 *pContext = vma_new(this, VmaDefragmentationContext_T)(
-16874 this, m_CurrentFrameIndex.load(), info.flags, pStats);
-
-
-16877 (*pContext)->AddAllocations(
-
-
-16880 VkResult res = (*pContext)->Defragment(
-
-
-
-
-16885 if(res != VK_NOT_READY)
-
-16887 vma_delete(this, *pContext);
-16888 *pContext = VMA_NULL;
-
-
-
-
-
-16894 VkResult VmaAllocator_T::DefragmentationEnd(
-
-
-16897 vma_delete(this, context);
-
-
-
-16901 VkResult VmaAllocator_T::DefragmentationPassBegin(
-
-
-
-16905 return context->DefragmentPassBegin(pInfo);
-
-16907 VkResult VmaAllocator_T::DefragmentationPassEnd(
-
-
-16910 return context->DefragmentPassEnd();
-
-
-
-
-
-16916 if(hAllocation->CanBecomeLost())
-
-
-
-
-
-16922 const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
-16923 uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
-
-
-16926 if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
-
-
-
-16930 pAllocationInfo->offset = 0;
-16931 pAllocationInfo->size = hAllocation->GetSize();
-
-16933 pAllocationInfo->pUserData = hAllocation->GetUserData();
-
-
-16936 else if(localLastUseFrameIndex == localCurrFrameIndex)
-
-16938 pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
-16939 pAllocationInfo->deviceMemory = hAllocation->GetMemory();
-16940 pAllocationInfo->offset = hAllocation->GetOffset();
-16941 pAllocationInfo->size = hAllocation->GetSize();
-
-16943 pAllocationInfo->pUserData = hAllocation->GetUserData();
-
-
-
-
-16948 if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
-
-16950 localLastUseFrameIndex = localCurrFrameIndex;
-
-
-
-
-
-
-16957 #if VMA_STATS_STRING_ENABLED
-16958 uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
-16959 uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
-
-
-16962 VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
-16963 if(localLastUseFrameIndex == localCurrFrameIndex)
-
-
-
-
-
-16969 if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
-
-16971 localLastUseFrameIndex = localCurrFrameIndex;
-
-
-
-
-
-16977 pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
-16978 pAllocationInfo->deviceMemory = hAllocation->GetMemory();
-16979 pAllocationInfo->offset = hAllocation->GetOffset();
-16980 pAllocationInfo->size = hAllocation->GetSize();
-16981 pAllocationInfo->pMappedData = hAllocation->GetMappedData();
-16982 pAllocationInfo->pUserData = hAllocation->GetUserData();
-
-
-
-16986 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
-
-
-16989 if(hAllocation->CanBecomeLost())
-
-16991 uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
-16992 uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
-
-
-16995 if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
-
-
-
-16999 else if(localLastUseFrameIndex == localCurrFrameIndex)
-
-
-
-
-
-17005 if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
-
-17007 localLastUseFrameIndex = localCurrFrameIndex;
-
-
-
-
-
-
-17014 #if VMA_STATS_STRING_ENABLED
-17015 uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
-17016 uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
-
-
-17019 VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
-17020 if(localLastUseFrameIndex == localCurrFrameIndex)
-
-
-
-
-
-17026 if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
-
-17028 localLastUseFrameIndex = localCurrFrameIndex;
-
-
+16814 VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
+16815 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
+16817 if(requiresDedicatedAllocation)
+
+
+
+16821 VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
+16822 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
+16824 if(createInfo.pool != VK_NULL_HANDLE)
+
+16826 VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
+16827 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
+
+16830 if((createInfo.pool != VK_NULL_HANDLE) &&
+
+
+16833 VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
+16834 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
+
+16837 if(createInfo.pool != VK_NULL_HANDLE)
+
+16839 const VkDeviceSize alignmentForPool = VMA_MAX(
+16840 vkMemReq.alignment,
+16841 GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
+
+
+
+
+16846 (m_MemProps.memoryTypes[createInfo.pool->m_BlockVector.GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
+
+
+
+
+16851 return createInfo.pool->m_BlockVector.Allocate(
+16852 m_CurrentFrameIndex.load(),
+
+
+
+
+
+
+
+
+
+
+16863 uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
+16864 uint32_t memTypeIndex = UINT32_MAX;
+
+16866 if(res == VK_SUCCESS)
+
+16868 VkDeviceSize alignmentForMemType = VMA_MAX(
+16869 vkMemReq.alignment,
+16870 GetMemoryTypeMinAlignment(memTypeIndex));
+
+16872 res = AllocateMemoryOfType(
+
+16874 alignmentForMemType,
+16875 requiresDedicatedAllocation || prefersDedicatedAllocation,
+
+16877 dedicatedBufferUsage,
+
+
+
+
+
+
+
+16885 if(res == VK_SUCCESS)
+
+
+
+
+
+
+
+
+
+16895 memoryTypeBits &= ~(1u << memTypeIndex);
+
+
+16898 if(res == VK_SUCCESS)
+
+16900 alignmentForMemType = VMA_MAX(
+16901 vkMemReq.alignment,
+16902 GetMemoryTypeMinAlignment(memTypeIndex));
+
+16904 res = AllocateMemoryOfType(
+
+16906 alignmentForMemType,
+16907 requiresDedicatedAllocation || prefersDedicatedAllocation,
+
+16909 dedicatedBufferUsage,
+
+
+
+
+
+
+
+16917 if(res == VK_SUCCESS)
+
+
+
+
+
+
+
+
+
+16927 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
+
+
+
+
+
+
+
+
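The control flow above retries across memory types: a vmaFindMemoryTypeIndex-style selection picks the best remaining candidate, a failed allocation clears that type's bit from memoryTypeBits, and the loop continues until a type succeeds or none remain. A self-contained sketch of that loop (FindMemoryTypeIndex and TryAllocateFromType are simplified stand-ins, not the library's functions):

    #include <cstdint>

    // Stand-in: the real selection scores candidates by requested usage flags.
    static bool FindMemoryTypeIndex(uint32_t memoryTypeBits, uint32_t* pMemTypeIndex)
    {
        for(uint32_t i = 0; i < 32; ++i)
            if(memoryTypeBits & (1u << i)) { *pMemTypeIndex = i; return true; }
        return false;
    }

    static bool TryAllocateFromType(uint32_t /*memTypeIndex*/) { return false; } // stand-in

    bool AllocateWithFallback(uint32_t memoryTypeBits)
    {
        uint32_t memTypeIndex = UINT32_MAX;
        while(FindMemoryTypeIndex(memoryTypeBits, &memTypeIndex))
        {
            if(TryAllocateFromType(memTypeIndex))
                return true;                          // first type that works wins
            memoryTypeBits &= ~(1u << memTypeIndex);  // exclude the failed type, retry
        }
        return false; // maps to VK_ERROR_OUT_OF_DEVICE_MEMORY above
    }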
+
+16938 void VmaAllocator_T::FreeMemory(
+16939 size_t allocationCount,
+
+
+16942 VMA_ASSERT(pAllocations);
+
+16944 for(size_t allocIndex = allocationCount; allocIndex--; )
+
+
+
+16948 if(allocation != VK_NULL_HANDLE)
+
+16950 if(TouchAllocation(allocation))
+
+16952 if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
+
+16954 FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
+
+
+16957 switch(allocation->GetType())
+
+16959 case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
+
+16961 VmaBlockVector* pBlockVector = VMA_NULL;
+16962 VmaPool hPool = allocation->GetBlock()->GetParentPool();
+16963 if(hPool != VK_NULL_HANDLE)
+
+16965 pBlockVector = &hPool->m_BlockVector;
+
+
+
+16969 const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
+16970 pBlockVector = m_pBlockVectors[memTypeIndex];
+
+16972 pBlockVector->Free(allocation);
+
+
+16975 case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+16976 FreeDedicatedMemory(allocation);
+
+
+
+
+
+
+
+16984 m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize());
+16985 allocation->SetUserData(this, VMA_NULL);
+16986 m_AllocationObjectAllocator.Free(allocation);
+
+
+
+
+16991 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
+
+
+16994 InitStatInfo(pStats->total);
+16995 for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
+
+16997 for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
+
+
+
+17001 for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
+
+17003 VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
+17004 VMA_ASSERT(pBlockVector);
+17005 pBlockVector->AddStats(pStats);
+
+
+
+
+17010 VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
+17011 for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
+
+17013 pool->m_BlockVector.AddStats(pStats);
+
+
+
+
+17018 for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
+
+17020 const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
+17021 VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
+17022 DedicatedAllocationLinkedList& dedicatedAllocList = m_DedicatedAllocations[memTypeIndex];
+17023 for(VmaAllocation alloc = dedicatedAllocList.Front();
+17024 alloc != VMA_NULL; alloc = dedicatedAllocList.GetNext(alloc))
+
+
+17027 alloc->DedicatedAllocCalcStatsInfo(allocationStatInfo);
+17028 VmaAddStatInfo(pStats->total, allocationStatInfo);
+17029 VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
+17030 VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
-
+
-
-
-
-
-
-
-17040 VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
+
+17035 VmaPostprocessCalcStatInfo(pStats->total);
+17036 for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
+17037 VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
+17038 for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
+17039 VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
+
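CalculateStats() above now walks intrusive linked lists (m_Pools and m_DedicatedAllocations) via Front()/GetNext() instead of indexing the sorted vectors the removed hunks used. A simplified stand-in showing the traversal shape (Node and List are illustrative, not the library's templates):

    struct Node { Node* next = nullptr; int payload = 0; };

    struct List
    {
        Node* front = nullptr;
        Node* Front() const { return front; }
        static Node* GetNext(const Node* n) { return n->next; }
    };

    int SumPayloads(const List& list)
    {
        int total = 0;
        for(Node* n = list.Front(); n != nullptr; n = List::GetNext(n))
            total += n->payload;
        return total;
    }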
-
-
-
-
-
-
-
-
-17050 return VK_ERROR_INITIALIZATION_FAILED;
-
-
-
-17054 ((1u << pCreateInfo->memoryTypeIndex) & m_GlobalMemoryTypeBits) == 0)
-
-17056 return VK_ERROR_FEATURE_NOT_PRESENT;
-
-
-17059 const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
-
-17061 *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
-
-17063 VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
-17064 if(res != VK_SUCCESS)
-
-17066 vma_delete(this, *pPool);
-
-
-
-
-
-
-17073 VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
-17074 (*pPool)->SetId(m_NextPoolId++);
-17075 VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
-
-
-
-
-
-17081 void VmaAllocator_T::DestroyPool(VmaPool pool)
-
-
-
-17085 VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
-17086 bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
-17087 VMA_ASSERT(success && "Pool not found in Allocator.");
-
-
-17090 vma_delete(this, pool);
-
-
-
-
-17095 pool->m_BlockVector.GetPoolStats(pPoolStats);
-
-
-17098 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
-
-17100 m_CurrentFrameIndex.store(frameIndex);
-
-17102 #if VMA_MEMORY_BUDGET
-17103 if(m_UseExtMemoryBudget)
-
-17105 UpdateVulkanBudget();
-
-
-
-
-17110 void VmaAllocator_T::MakePoolAllocationsLost(
-
-17112 size_t* pLostAllocationCount)
-
-17114 hPool->m_BlockVector.MakePoolAllocationsLost(
-17115 m_CurrentFrameIndex.load(),
-17116 pLostAllocationCount);
-
-
-17119 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
-
-17121 return hPool->m_BlockVector.CheckCorruption();
-
+17042 void VmaAllocator_T::GetBudget(VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount)
+
+17044 #if VMA_MEMORY_BUDGET
+17045 if(m_UseExtMemoryBudget)
+
+17047 if(m_Budget.m_OperationsSinceBudgetFetch < 30)
+
+17049 VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex);
+17050 for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
+
+17052 const uint32_t heapIndex = firstHeap + i;
+
+17054 outBudget->
blockBytes = m_Budget.m_BlockBytes[heapIndex];
+
+
+17057 if(m_Budget.m_VulkanUsage[heapIndex] + outBudget->
blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex])
+
+17059 outBudget->
usage = m_Budget.m_VulkanUsage[heapIndex] +
+17060 outBudget->
blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
+
+
+
+17064 outBudget->
usage = 0;
+
+
+
+17068 outBudget->
budget = VMA_MIN(
+17069 m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size);
+
+
+
+
+17074 UpdateVulkanBudget();
+17075 GetBudget(outBudget, firstHeap, heapCount);
+
+
+
+
+
+17081 for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
+
+17083 const uint32_t heapIndex = firstHeap + i;
+
+17085 outBudget->
blockBytes = m_Budget.m_BlockBytes[heapIndex];
+
+
+
+17089 outBudget->
budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10;
+
+
+
+
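// Usage sketch (annotation): polling the budget before committing to a large
// allocation. `heapIndex` and `requestedBytes` are assumed caller-provided.
//
//     VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
//     vmaGetBudget(allocator, budgets);
//     if(budgets[heapIndex].usage + requestedBytes > budgets[heapIndex].budget)
//     {
//         // Would exceed the OS-reported budget: defer, shrink, or evict first.
//     }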
+static const uint32_t VMA_VENDOR_ID_AMD = 4098;
+
+VkResult VmaAllocator_T::DefragmentationBegin(
+    const VmaDefragmentationInfo2& info,
+    VmaDefragmentationStats* pStats,
+    VmaDefragmentationContext* pContext)
+{
+    if(info.pAllocationsChanged != VMA_NULL)
+    {
+        memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
+    }
+
+    *pContext = vma_new(this, VmaDefragmentationContext_T)(
+        this, m_CurrentFrameIndex.load(), info.flags, pStats);
+
+    (*pContext)->AddPools(info.poolCount, info.pPools);
+    (*pContext)->AddAllocations(
+        info.allocationCount, info.pAllocations, info.pAllocationsChanged);
+
+    VkResult res = (*pContext)->Defragment(
+        info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
+        info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
+        info.commandBuffer, pStats, info.flags);
+
+    if(res != VK_NOT_READY)
+    {
+        vma_delete(this, *pContext);
+        *pContext = VMA_NULL;
+    }
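// Usage sketch (annotation): the public begin/end pair backed by this
// function. Error handling is omitted; `allocs` and `allocCount` are assumed
// to be caller-owned.
//
//     VmaDefragmentationInfo2 defragInfo = {};
//     defragInfo.allocationCount = (uint32_t)allocCount;
//     defragInfo.pAllocations = allocs;
//     defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
//     defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
//
//     VmaDefragmentationContext defragCtx;
//     vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
//     vmaDefragmentationEnd(allocator, defragCtx);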
-VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
-{
-    VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
-
-    // Process default pools.
-    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
-    {
-        if(((1u << memTypeIndex) & memoryTypeBits) != 0)
-        {
-            VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
-            VMA_ASSERT(pBlockVector);
-            VkResult localRes = pBlockVector->CheckCorruption();
-            switch(localRes)
-            {
-            case VK_ERROR_FEATURE_NOT_PRESENT:
-                break;
-            case VK_SUCCESS:
-                finalRes = VK_SUCCESS;
-                break;
-            default:
-                return localRes;
-            }
-        }
-    }
-
-    // Process custom pools.
-    {
+
+    return res;
+}
+
+VkResult VmaAllocator_T::DefragmentationEnd(
+    VmaDefragmentationContext context)
+{
+    vma_delete(this, context);
+    return VK_SUCCESS;
+}
+
+VkResult VmaAllocator_T::DefragmentationPassBegin(
+    VmaDefragmentationPassInfo* pInfo,
+    VmaDefragmentationContext context)
+{
+    return context->DefragmentPassBegin(pInfo);
+}
+
+VkResult VmaAllocator_T::DefragmentationPassEnd(
+    VmaDefragmentationContext context)
+{
+    return context->DefragmentPassEnd();
+}
+
+void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
+{
+    if(hAllocation->CanBecomeLost())
+    {
-        VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
-        for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
-        {
-            if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
-            {
-                VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
-                switch(localRes)
-                {
-                case VK_ERROR_FEATURE_NOT_PRESENT:
-                    break;
-                case VK_SUCCESS:
-                    finalRes = VK_SUCCESS;
-                    break;
-                default:
-                    return localRes;
-                }
-            }
-        }
-    }
-
-    return finalRes;
-}
-
-void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
-{
-    *pAllocation = m_AllocationObjectAllocator.Allocate(VMA_FRAME_INDEX_LOST, false);
-    (*pAllocation)->InitLost();
-}
-
-VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
-{
-    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
-
-    // HeapSizeLimit is in effect for this heap.
-    if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0)
-    {
-        const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
-        VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex];
-        for(;;)
-        {
-            const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize;
-            if(blockBytesAfterAllocation > heapSize)
-            {
-                return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-            }
-            if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation))
-            {
-                break;
-            }
-        }
-    }
+        /*
+        Warning: This is a carefully designed algorithm.
+        Do not modify unless you really know what you're doing :)
+        */
+        const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
+        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
+        for(;;)
+        {
+            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
+            {
+                pAllocationInfo->memoryType = VK_MAX_MEMORY_TYPES;
+                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
+                pAllocationInfo->offset = 0;
+                pAllocationInfo->size = hAllocation->GetSize();
+                pAllocationInfo->pMappedData = VMA_NULL;
+                pAllocationInfo->pUserData = hAllocation->GetUserData();
+                break;
+            }
+            else if(localLastUseFrameIndex == localCurrFrameIndex)
+            {
+                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
+                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
+                pAllocationInfo->offset = hAllocation->GetOffset();
+                pAllocationInfo->size = hAllocation->GetSize();
+                pAllocationInfo->pMappedData = VMA_NULL;
+                pAllocationInfo->pUserData = hAllocation->GetUserData();
+                break;
+            }
+            else // Last use time earlier than current time.
+            {
+                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
+                {
+                    localLastUseFrameIndex = localCurrFrameIndex;
+                }
+            }
+        }
+    }
+    else
+    {
+#if VMA_STATS_STRING_ENABLED
+        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
+        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
+        for(;;)
+        {
+            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
+            if(localLastUseFrameIndex == localCurrFrameIndex)
+            {
+                break;
+            }
-    else
-    {
-        m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize;
-    }
-
-    // VULKAN CALL vkAllocateMemory.
-    VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
-
-    if(res == VK_SUCCESS)
-    {
-#if VMA_MEMORY_BUDGET
-        ++m_Budget.m_OperationsSinceBudgetFetch;
-#endif
-
-        // Informative callback.
-        if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
-        {
-            (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize, m_DeviceMemoryCallbacks.pUserData);
-        }
-    }
-    else
-    {
+            else // Last use time earlier than current time.
+            {
+                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
+                {
+                    localLastUseFrameIndex = localCurrFrameIndex;
+                }
+            }
+        }
+#endif // #if VMA_STATS_STRING_ENABLED
+
+        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
+        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
+        pAllocationInfo->offset = hAllocation->GetOffset();
+        pAllocationInfo->size = hAllocation->GetSize();
+        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
+        pAllocationInfo->pUserData = hAllocation->GetUserData();
+    }
+}
+
+bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
+{
+    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
+    if(hAllocation->CanBecomeLost())
+    {
-        m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize;
-    }
-
-    return res;
-}
-
-void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
-{
-    // Informative callback.
-    if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
-    {
-        (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size, m_DeviceMemoryCallbacks.pUserData);
-    }
-
-    // VULKAN CALL vkFreeMemory.
-    (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
-
-    m_Budget.m_BlockBytes[MemoryTypeIndexToHeapIndex(memoryType)] -= size;
-}
-
-VkResult VmaAllocator_T::BindVulkanBuffer(
-    VkDeviceMemory memory,
-    VkDeviceSize memoryOffset,
-    VkBuffer buffer,
-    const void* pNext)
-{
-    if(pNext != VMA_NULL)
-    {
-#if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
-        if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
-            m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL)
-        {
-            VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR };
-            bindBufferMemoryInfo.pNext = pNext;
-            bindBufferMemoryInfo.buffer = buffer;
-            bindBufferMemoryInfo.memory = memory;
-            bindBufferMemoryInfo.memoryOffset = memoryOffset;
-            return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
-        }
-        else
-#endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
-        {
-            return VK_ERROR_EXTENSION_NOT_PRESENT;
-        }
-    }
+        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
+        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
+        for(;;)
+        {
+            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
+            {
+                return false;
+            }
+            else if(localLastUseFrameIndex == localCurrFrameIndex)
+            {
+                return true;
+            }
+            else // Last use time earlier than current time.
+            {
+                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
+                {
+                    localLastUseFrameIndex = localCurrFrameIndex;
+                }
+            }
+        }
+    }
+    else
+    {
+#if VMA_STATS_STRING_ENABLED
+        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
+        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
+        for(;;)
+        {
+            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
+            if(localLastUseFrameIndex == localCurrFrameIndex)
+            {
+                break;
+            }
+            else // Last use time earlier than current time.
+            {
+                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
+                {
+                    localLastUseFrameIndex = localCurrFrameIndex;
+                }
+            }
+        }
+#endif // #if VMA_STATS_STRING_ENABLED
+    }
+
+    return true;
+}
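// Usage sketch (annotation): the per-frame pattern TouchAllocation() exists
// for, with allocations created using VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT.
//
//     vmaSetCurrentFrameIndex(allocator, frameIndex);
//     if(vmaTouchAllocation(allocator, alloc))
//     {
//         // Allocation is still valid this frame; safe to use.
//     }
//     else
//     {
//         // Allocation was lost; destroy and recreate the resource.
//     }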
-    else
-    {
-        return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset);
-    }
-}
-
-VkResult VmaAllocator_T::BindVulkanImage(
-    VkDeviceMemory memory,
-    VkDeviceSize memoryOffset,
-    VkImage image,
-    const void* pNext)
-{
-    if(pNext != VMA_NULL)
-    {
-#if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
-        if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
-            m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL)
-        {
-            VkBindImageMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR };
-            bindBufferMemoryInfo.pNext = pNext;
-            bindBufferMemoryInfo.image = image;
-            bindBufferMemoryInfo.memory = memory;
-            bindBufferMemoryInfo.memoryOffset = memoryOffset;
-            return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
-        }
-        else
-#endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
-        {
-            return VK_ERROR_EXTENSION_NOT_PRESENT;
-        }
-    }
-    else
-    {
-        return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset);
-    }
-}
-
-VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
-{
-    if(hAllocation->CanBecomeLost())
-    {
-        return VK_ERROR_MEMORY_MAP_FAILED;
-    }
-
-    switch(hAllocation->GetType())
-    {
-    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
-        {
-            VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
-            char *pBytes = VMA_NULL;
-            VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
-            if(res == VK_SUCCESS)
-            {
-                *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
-                hAllocation->BlockAllocMap();
-            }
-            return res;
-        }
-    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
-        return hAllocation->DedicatedAllocMap(this, ppData);
-    default:
-        VMA_ASSERT(0);
-        return VK_ERROR_MEMORY_MAP_FAILED;
-    }
-}
-
-void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
-{
-    switch(hAllocation->GetType())
-    {
-    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
-        {
-            VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
-            hAllocation->BlockAllocUnmap();
-            pBlock->Unmap(this, 1);
-        }
-        break;
-    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
-        hAllocation->DedicatedAllocUnmap(this);
-        break;
-    default:
-        VMA_ASSERT(0);
-    }
-}
-
-VkResult VmaAllocator_T::BindBufferMemory(
-    VmaAllocation hAllocation,
-    VkDeviceSize allocationLocalOffset,
-    VkBuffer hBuffer,
-    const void* pNext)
-{
-    VkResult res = VK_SUCCESS;
-    switch(hAllocation->GetType())
-    {
+VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
+{
+    VMA_DEBUG_LOG("  CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
+
+    VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
+
+    if(newCreateInfo.maxBlockCount == 0)
+    {
+        newCreateInfo.maxBlockCount = SIZE_MAX;
+    }
+    if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
+    {
+        return VK_ERROR_INITIALIZATION_FAILED;
+    }
+    // Memory type index out of range or forbidden.
+    if(pCreateInfo->memoryTypeIndex >= GetMemoryTypeCount() ||
+        ((1u << pCreateInfo->memoryTypeIndex) & m_GlobalMemoryTypeBits) == 0)
+    {
+        return VK_ERROR_FEATURE_NOT_PRESENT;
+    }
+
+    const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
+
+    *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
+
+    VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
+    if(res != VK_SUCCESS)
+    {
+        vma_delete(this, *pPool);
+        *pPool = VMA_NULL;
+        return res;
+    }
+
+    // Add to m_Pools.
+    {
+        VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
+        (*pPool)->SetId(m_NextPoolId++);
+        m_Pools.PushBack(*pPool);
+    }
+
+    return VK_SUCCESS;
+}
+
+void VmaAllocator_T::DestroyPool(VmaPool pool)
+{
+    // Remove from m_Pools.
+    {
+        VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
+        m_Pools.Remove(pool);
+    }
+
+    vma_delete(this, pool);
+}
+
+void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
+{
+    pool->m_BlockVector.GetPoolStats(pPoolStats);
+}
+
+void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
+{
+    m_CurrentFrameIndex.store(frameIndex);
+
+#if VMA_MEMORY_BUDGET
+    if(m_UseExtMemoryBudget)
+    {
+        UpdateVulkanBudget();
+    }
+#endif // #if VMA_MEMORY_BUDGET
+}
+
+void VmaAllocator_T::MakePoolAllocationsLost(
+    VmaPool hPool,
+    size_t* pLostAllocationCount)
+{
+    hPool->m_BlockVector.MakePoolAllocationsLost(
+        m_CurrentFrameIndex.load(),
+        pLostAllocationCount);
+}
+
+VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
+{
+    return hPool->m_BlockVector.CheckCorruption();
+}
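// Usage sketch (annotation): creating and destroying a custom pool through the
// public API these functions implement. `memTypeIndex` is assumed to come from
// vmaFindMemoryTypeIndex().
//
//     VmaPoolCreateInfo poolInfo = {};
//     poolInfo.memoryTypeIndex = memTypeIndex;
//     poolInfo.blockSize = 64ull * 1024 * 1024; // optional: fixed 64 MiB blocks
//     VmaPool pool;
//     VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);
//     // ... allocate with VmaAllocationCreateInfo::pool = pool ...
//     vmaDestroyPool(allocator, pool);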
+VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
+{
+    VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
+
+    // Process default pools.
+    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
-17363 case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
-17364 res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext);
-
-17366 case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
-
-17368 VmaDeviceMemoryBlock*
const pBlock = hAllocation->GetBlock();
-17369 VMA_ASSERT(pBlock &&
"Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
-17370 res = pBlock->BindBufferMemory(
this, hAllocation, allocationLocalOffset, hBuffer, pNext);
-
-
-
-
-
-
-
-
-17379 VkResult VmaAllocator_T::BindImageMemory(
-
-17381 VkDeviceSize allocationLocalOffset,
-
-
-
-17385 VkResult res = VK_SUCCESS;
-17386 switch(hAllocation->GetType())
-
-17388 case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
-17389 res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext);
-
-17391 case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
-
-17393 VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
-17394 VMA_ASSERT(pBlock &&
"Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
-17395 res = pBlock->BindImageMemory(
this, hAllocation, allocationLocalOffset, hImage, pNext);
-
-
-
-
-
-
-
-
-17404 VkResult VmaAllocator_T::FlushOrInvalidateAllocation(
-
-17406 VkDeviceSize offset, VkDeviceSize size,
-17407 VMA_CACHE_OPERATION op)
-
-17409 VkResult res = VK_SUCCESS;
-
-17411 VkMappedMemoryRange memRange = {};
-17412 if(GetFlushOrInvalidateRange(hAllocation, offset, size, memRange))
-
-
-
-17416 case VMA_CACHE_FLUSH:
-17417 res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
-
-17419 case VMA_CACHE_INVALIDATE:
-17420 res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
-
-
-
-
-
-
-
-
-
-17430 VkResult VmaAllocator_T::FlushOrInvalidateAllocations(
-17431 uint32_t allocationCount,
-
-17433 const VkDeviceSize* offsets,
const VkDeviceSize* sizes,
-17434 VMA_CACHE_OPERATION op)
-
-17436 typedef VmaStlAllocator<VkMappedMemoryRange> RangeAllocator;
-17437 typedef VmaSmallVector<VkMappedMemoryRange, RangeAllocator, 16> RangeVector;
-17438 RangeVector ranges = RangeVector(RangeAllocator(GetAllocationCallbacks()));
-
-17440 for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
-
-
-17443 const VkDeviceSize offset = offsets != VMA_NULL ? offsets[allocIndex] : 0;
-17444 const VkDeviceSize size = sizes != VMA_NULL ? sizes[allocIndex] : VK_WHOLE_SIZE;
-17445 VkMappedMemoryRange newRange;
-17446 if(GetFlushOrInvalidateRange(alloc, offset, size, newRange))
-
-17448 ranges.push_back(newRange);
-
-
-
-17452 VkResult res = VK_SUCCESS;
-17453 if(!ranges.empty())
-
-
+    {
+        if(((1u << memTypeIndex) & memoryTypeBits) != 0)
+        {
+            VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
+            VMA_ASSERT(pBlockVector);
+            VkResult localRes = pBlockVector->CheckCorruption();
+            switch(localRes)
+            {
+            case VK_ERROR_FEATURE_NOT_PRESENT:
+                break;
+            case VK_SUCCESS:
+                finalRes = VK_SUCCESS;
+                break;
+            default:
+                return localRes;
+            }
+        }
+    }
+
+    // Process custom pools.
+    {
+        VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
+        for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
+        {
+            if(((1u << pool->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
+            {
+                VkResult localRes = pool->m_BlockVector.CheckCorruption();
+                switch(localRes)
+                {
+                case VK_ERROR_FEATURE_NOT_PRESENT:
+                    break;
+                case VK_SUCCESS:
+                    finalRes = VK_SUCCESS;
+                    break;
+                default:
+                    return localRes;
+                }
+            }
+        }
+    }
+
+    return finalRes;
+}
+
+void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
+{
+    *pAllocation = m_AllocationObjectAllocator.Allocate(VMA_FRAME_INDEX_LOST, false);
+    (*pAllocation)->InitLost();
+}
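// Usage sketch (annotation): corruption detection is only active when the
// library is compiled with VMA_DEBUG_MARGIN and VMA_DEBUG_DETECT_CORRUPTION;
// otherwise the functions above report VK_ERROR_FEATURE_NOT_PRESENT.
//
//     VkResult res = vmaCheckCorruption(allocator, UINT32_MAX); // all memory types
//     if(res != VK_SUCCESS && res != VK_ERROR_FEATURE_NOT_PRESENT)
//     {
//         // A guard margin around some allocation was overwritten.
//     }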
+
+template<typename T>
+struct AtomicTransactionalIncrement
+{
+public:
+    typedef std::atomic<T> AtomicT;
+    ~AtomicTransactionalIncrement()
+    {
+        if(m_Atomic)
+            --(*m_Atomic);
+    }
+    T Increment(AtomicT* atomic)
+    {
+        m_Atomic = atomic;
+        return m_Atomic->fetch_add(1);
+    }
+    void Commit()
+    {
+        m_Atomic = nullptr;
+    }
+
+private:
+    AtomicT* m_Atomic = nullptr;
+};
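// Design note (annotation): AtomicTransactionalIncrement is a small RAII
// guard. The destructor decrements the counter again unless Commit() was
// called, so any early return between Increment() and Commit() rolls the
// count back automatically. Minimal standalone sketch of the same pattern;
// `somethingFailed` is a hypothetical condition:
//
//     std::atomic<uint32_t> count{0};
//     {
//         AtomicTransactionalIncrement<uint32_t> guard;
//         guard.Increment(&count); // count == 1
//         if(somethingFailed)
//             return;              // ~guard rolls count back to 0
//         guard.Commit();          // keep the increment
//     }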
+VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
+{
+    AtomicTransactionalIncrement<uint32_t> deviceMemoryCountIncrement;
+    const uint64_t prevDeviceMemoryCount = deviceMemoryCountIncrement.Increment(&m_DeviceMemoryCount);
+#if VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT
+    if(prevDeviceMemoryCount >= m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount)
+    {
+        return VK_ERROR_TOO_MANY_OBJECTS;
+    }
+#endif
+
+    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
+
+    // HeapSizeLimit is in effect for this heap.
+    if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0)
+    {
+        const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
+        VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex];
+        for(;;)
+        {
-17457 case VMA_CACHE_FLUSH:
-17458 res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
-
-17460 case VMA_CACHE_INVALIDATE:
-17461 res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
-
-
-
-
-
-
-
-
-
-17471 void VmaAllocator_T::FreeDedicatedMemory(
const VmaAllocation allocation)
-
-17473 VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
-
-17475 const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
-
-17477 VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
-17478 AllocationVectorType*
const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
-17479 VMA_ASSERT(pDedicatedAllocations);
-17480 bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
-17481 VMA_ASSERT(success);
-
-
-17484 VkDeviceMemory hMemory = allocation->GetMemory();
-
-
-
-
-
-
-
-
-
-
-
-17496 FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
+            const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize;
+            if(blockBytesAfterAllocation > heapSize)
+            {
+                return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+            }
+            if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation))
+            {
+                break;
+            }
+        }
+    }
+    else
+    {
+        m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize;
+    }
+
+    // VULKAN CALL vkAllocateMemory.
+    VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
+
+    if(res == VK_SUCCESS)
+    {
+#if VMA_MEMORY_BUDGET
+        ++m_Budget.m_OperationsSinceBudgetFetch;
+#endif
+
+        // Informative callback.
+        if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
+        {
+            (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize, m_DeviceMemoryCallbacks.pUserData);
+        }
+
+        deviceMemoryCountIncrement.Commit();
+    }
+    else
+    {
+        m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize;
+    }
+
+    return res;
+}
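// Context note (annotation): Vulkan only guarantees
// VkPhysicalDeviceLimits::maxMemoryAllocationCount simultaneous VkDeviceMemory
// objects (the spec minimum is 4096), which is what the check above enforces
// when VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT is defined to 1
// before including the library.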
-17498 VMA_DEBUG_LOG(
" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
-
-
-17501 uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits()
const
-
-17503 VkBufferCreateInfo dummyBufCreateInfo;
-17504 VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
+void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
+{
+    // Informative callback.
+    if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
+    {
+        (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size, m_DeviceMemoryCallbacks.pUserData);
+    }
-17506 uint32_t memoryTypeBits = 0;
-
-
-17509 VkBuffer buf = VK_NULL_HANDLE;
-17510 VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
-17511 m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
-17512 if(res == VK_SUCCESS)
-
-
-17515 VkMemoryRequirements memReq;
-17516 (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
-17517 memoryTypeBits = memReq.memoryTypeBits;
-
-
-17520 (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
-
-
-17523 return memoryTypeBits;
-
-
-17526 uint32_t VmaAllocator_T::CalculateGlobalMemoryTypeBits()
const
-
-
-17529 VMA_ASSERT(GetMemoryTypeCount() > 0);
-
-17531 uint32_t memoryTypeBits = UINT32_MAX;
-
-17533 if(!m_UseAmdDeviceCoherentMemory)
-
-
-17536 for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
-
-17538 if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
-
-17540 memoryTypeBits &= ~(1u << memTypeIndex);
-
-
-
+
+    // VULKAN CALL vkFreeMemory.
+    (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
+
+    m_Budget.m_BlockBytes[MemoryTypeIndexToHeapIndex(memoryType)] -= size;
+
+    --m_DeviceMemoryCount;
+}
+
+17514 VkResult VmaAllocator_T::BindVulkanBuffer(
+17515 VkDeviceMemory memory,
+17516 VkDeviceSize memoryOffset,
+
+
+
+17520 if(pNext != VMA_NULL)
+
+17522 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
+17523 if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
+17524 m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL)
+
+17526 VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR };
+17527 bindBufferMemoryInfo.pNext = pNext;
+17528 bindBufferMemoryInfo.buffer = buffer;
+17529 bindBufferMemoryInfo.memory = memory;
+17530 bindBufferMemoryInfo.memoryOffset = memoryOffset;
+17531 return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
+
+
+
+
+17536 return VK_ERROR_EXTENSION_NOT_PRESENT;
+
+
+
+
+17541 return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset);
+
+
-17545 return memoryTypeBits;
-
-
-17548 bool VmaAllocator_T::GetFlushOrInvalidateRange(
-
-17550 VkDeviceSize offset, VkDeviceSize size,
-17551 VkMappedMemoryRange& outRange)
const
-
-17553 const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
-17554 if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
-
-17556 const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
-17557 const VkDeviceSize allocationSize = allocation->GetSize();
-17558 VMA_ASSERT(offset <= allocationSize);
-
-17560 outRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
-17561 outRange.pNext = VMA_NULL;
-17562 outRange.memory = allocation->GetMemory();
-
-17564 switch(allocation->GetType())
-
-17566 case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
-17567 outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
-17568 if(size == VK_WHOLE_SIZE)
-
-17570 outRange.size = allocationSize - outRange.offset;
-
-
-
-17574 VMA_ASSERT(offset + size <= allocationSize);
-17575 outRange.size = VMA_MIN(
-17576 VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize),
-17577 allocationSize - outRange.offset);
-
-
-17580 case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
-
-
-17583 outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
-17584 if(size == VK_WHOLE_SIZE)
-
-17586 size = allocationSize - offset;
-
-
-
-17590 VMA_ASSERT(offset + size <= allocationSize);
-
-17592 outRange.size = VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize);
-
-
-17595 const VkDeviceSize allocationOffset = allocation->GetOffset();
-17596 VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
-17597 const VkDeviceSize blockSize = allocation->GetBlock()->m_pMetadata->GetSize();
-17598 outRange.offset += allocationOffset;
-17599 outRange.size = VMA_MIN(outRange.size, blockSize - outRange.offset);
-
-
-
-
-
-
-
-
-
-
-
-17611 #if VMA_MEMORY_BUDGET
-
-17613 void VmaAllocator_T::UpdateVulkanBudget()
-
-17615 VMA_ASSERT(m_UseExtMemoryBudget);
-
-17617 VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR };
-
-17619 VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT };
-17620 VmaPnextChainPushFront(&memProps, &budgetProps);
-
-17622 GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps);
+17545 VkResult VmaAllocator_T::BindVulkanImage(
+17546 VkDeviceMemory memory,
+17547 VkDeviceSize memoryOffset,
+
+
+
+17551 if(pNext != VMA_NULL)
+
+17553 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
+17554 if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
+17555 m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL)
+
+17557 VkBindImageMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR };
+17558 bindBufferMemoryInfo.pNext = pNext;
+17559 bindBufferMemoryInfo.image = image;
+17560 bindBufferMemoryInfo.memory = memory;
+17561 bindBufferMemoryInfo.memoryOffset = memoryOffset;
+17562 return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
+
+
+
+
+17567 return VK_ERROR_EXTENSION_NOT_PRESENT;
+
+
+
+
+17572 return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset);
+
+
+
+VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
+{
+    if(hAllocation->CanBecomeLost())
+    {
+        return VK_ERROR_MEMORY_MAP_FAILED;
+    }
+
+    switch(hAllocation->GetType())
+    {
+    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
+        {
+            VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
+            char *pBytes = VMA_NULL;
+            VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
+            if(res == VK_SUCCESS)
+            {
+                *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
+                hAllocation->BlockAllocMap();
+            }
+            return res;
+        }
+    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+        return hAllocation->DedicatedAllocMap(this, ppData);
+    default:
+        VMA_ASSERT(0);
+        return VK_ERROR_MEMORY_MAP_FAILED;
+    }
+}
+
+void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
+{
+    switch(hAllocation->GetType())
+    {
+    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
+        {
+            VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
+            hAllocation->BlockAllocUnmap();
+            pBlock->Unmap(this, 1);
+        }
+        break;
+    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+        hAllocation->DedicatedAllocUnmap(this);
+        break;
+    default:
+        VMA_ASSERT(0);
+    }
+}
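// Usage sketch (annotation): the reference-counted mapping implemented by
// Map()/Unmap() above, via the public API. `srcData` and `srcSize` are
// assumed caller-provided; every vmaMapMemory() must be paired with
// vmaUnmapMemory().
//
//     void* pData = VMA_NULL;
//     if(vmaMapMemory(allocator, alloc, &pData) == VK_SUCCESS)
//     {
//         memcpy(pData, srcData, srcSize);
//         vmaUnmapMemory(allocator, alloc);
//     }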
-
-17625 VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex);
-
-17627 for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
-
-17629 m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex];
-17630 m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex];
-17631 m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load();
-
-
-17634 if(m_Budget.m_VulkanBudget[heapIndex] == 0)
-
-17636 m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10;
-
-17638 else if(m_Budget.m_VulkanBudget[heapIndex] > m_MemProps.memoryHeaps[heapIndex].size)
-
-17640 m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size;
-
-17642 if(m_Budget.m_VulkanUsage[heapIndex] == 0 && m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] > 0)
-
-17644 m_Budget.m_VulkanUsage[heapIndex] = m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
-
-
-17647 m_Budget.m_OperationsSinceBudgetFetch = 0;
-
-
-
-
-
-17653 void VmaAllocator_T::FillAllocation(
const VmaAllocation hAllocation, uint8_t pattern)
+17624 VkResult VmaAllocator_T::BindBufferMemory(
+
+17626 VkDeviceSize allocationLocalOffset,
+
+
+
+17630 VkResult res = VK_SUCCESS;
+17631 switch(hAllocation->GetType())
+
+17633 case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+17634 res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext);
+
+17636 case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
+
+17638 VmaDeviceMemoryBlock*
const pBlock = hAllocation->GetBlock();
+17639 VMA_ASSERT(pBlock &&
"Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
+17640 res = pBlock->BindBufferMemory(
this, hAllocation, allocationLocalOffset, hBuffer, pNext);
+
+
+
+
+
+
+
+
+17649 VkResult VmaAllocator_T::BindImageMemory(
+
+17651 VkDeviceSize allocationLocalOffset,
+
+
-17655 if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
-17656 !hAllocation->CanBecomeLost() &&
-17657 (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
-
-17659 void* pData = VMA_NULL;
-17660 VkResult res = Map(hAllocation, &pData);
-17661 if(res == VK_SUCCESS)
-
-17663 memset(pData, (
int)pattern, (
size_t)hAllocation->GetSize());
-17664 FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
-17665 Unmap(hAllocation);
-
-
-
-17669 VMA_ASSERT(0 &&
"VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
-
-
+17655 VkResult res = VK_SUCCESS;
+17656 switch(hAllocation->GetType())
+
+17658 case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+17659 res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext);
+
+17661 case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
+
+17663 VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
+17664 VMA_ASSERT(pBlock &&
"Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
+17665 res = pBlock->BindImageMemory(
this, hAllocation, allocationLocalOffset, hImage, pNext);
+
+
+
+
+
+
-17674 uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
-
-17676 uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
-17677 if(memoryTypeBits == UINT32_MAX)
-
-17679 memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
-17680 m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
-
-17682 return memoryTypeBits;
-
-
-17685 #if VMA_STATS_STRING_ENABLED
-
-17687 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
-
-17689 bool dedicatedAllocationsStarted =
false;
-17690 for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
-
-17692 VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
-17693 AllocationVectorType*
const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
-17694 VMA_ASSERT(pDedicatedAllocVector);
-17695 if(pDedicatedAllocVector->empty() ==
false)
-
-17697 if(dedicatedAllocationsStarted ==
false)
-
-17699 dedicatedAllocationsStarted =
true;
-17700 json.WriteString(
"DedicatedAllocations");
-17701 json.BeginObject();
-
-
-17704 json.BeginString(
"Type ");
-17705 json.ContinueString(memTypeIndex);
-
-
-
+17674 VkResult VmaAllocator_T::FlushOrInvalidateAllocation(
+
+17676 VkDeviceSize offset, VkDeviceSize size,
+17677 VMA_CACHE_OPERATION op)
+
+17679 VkResult res = VK_SUCCESS;
+
+17681 VkMappedMemoryRange memRange = {};
+17682 if(GetFlushOrInvalidateRange(hAllocation, offset, size, memRange))
+
+
+
+17686 case VMA_CACHE_FLUSH:
+17687 res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
+
+17689 case VMA_CACHE_INVALIDATE:
+17690 res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
+
+
+
+
+
+
+
+
+
+17700 VkResult VmaAllocator_T::FlushOrInvalidateAllocations(
+17701 uint32_t allocationCount,
+
+17703 const VkDeviceSize* offsets,
const VkDeviceSize* sizes,
+17704 VMA_CACHE_OPERATION op)
+
+17706 typedef VmaStlAllocator<VkMappedMemoryRange> RangeAllocator;
+17707 typedef VmaSmallVector<VkMappedMemoryRange, RangeAllocator, 16> RangeVector;
+17708 RangeVector ranges = RangeVector(RangeAllocator(GetAllocationCallbacks()));
-17710 for(
size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
-
-17712 json.BeginObject(
true);
-
-17714 hAlloc->PrintParameters(json);
-
-
-
-
+17710 for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
+
+
+17713 const VkDeviceSize offset = offsets != VMA_NULL ? offsets[allocIndex] : 0;
+17714 const VkDeviceSize size = sizes != VMA_NULL ? sizes[allocIndex] : VK_WHOLE_SIZE;
+17715 VkMappedMemoryRange newRange;
+17716 if(GetFlushOrInvalidateRange(alloc, offset, size, newRange))
+
+17718 ranges.push_back(newRange);
-17721 if(dedicatedAllocationsStarted)
-
-
-
-
-
-17727 bool allocationsStarted =
false;
-17728 for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
-
-17730 if(m_pBlockVectors[memTypeIndex]->IsEmpty() ==
false)
-
-17732 if(allocationsStarted ==
false)
-
-17734 allocationsStarted =
true;
-17735 json.WriteString(
"DefaultPools");
-17736 json.BeginObject();
-
-
-17739 json.BeginString(
"Type ");
-17740 json.ContinueString(memTypeIndex);
-
-
-17743 m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
-
-
-17746 if(allocationsStarted)
-
-
-
+
+17722 VkResult res = VK_SUCCESS;
+17723 if(!ranges.empty())
+
+
+
+17727 case VMA_CACHE_FLUSH:
+17728 res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
+
+17730 case VMA_CACHE_INVALIDATE:
+17731 res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
+
+
+
+
+
+
+
+
+
+void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation)
+{
+    VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
+
+    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
+    {
+        VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
+        DedicatedAllocationLinkedList& dedicatedAllocations = m_DedicatedAllocations[memTypeIndex];
+        dedicatedAllocations.Remove(allocation);
+    }
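// Design note (annotation): dedicated allocations now live on an intrusive
// doubly-linked list (DedicatedAllocationLinkedList), so the Remove() above is
// O(1), replacing the sorted-vector binary search + erase used previously.
// Generic sketch of the idea, not VMA's actual implementation:
//
//     struct Node { Node* prev; Node* next; };
//     void Remove(Node*& head, Node* n)
//     {
//         if(n->prev) n->prev->next = n->next; else head = n->next;
//         if(n->next) n->next->prev = n->prev;
//         n->prev = n->next = nullptr;
//     }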
-
-
-17754 VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
-17755 const size_t poolCount = m_Pools.size();
-
-
-17758 json.WriteString(
"Pools");
-17759 json.BeginObject();
-17760 for(
size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
-
-17762 json.BeginString();
-17763 json.ContinueString(m_Pools[poolIndex]->GetId());
-
+17752 VkDeviceMemory hMemory = allocation->GetMemory();
+
+
+
+
+
+
+
+
+
+
+
+17764 FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
-17766 m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-17782 VMA_ASSERT(pCreateInfo && pAllocator);
-
-
-17785 VMA_DEBUG_LOG(
"vmaCreateAllocator");
-
-17787 return (*pAllocator)->Init(pCreateInfo);
-
-
-
-
-
-17793 if(allocator != VK_NULL_HANDLE)
-
-17795 VMA_DEBUG_LOG(
"vmaDestroyAllocator");
-17796 VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
-17797 vma_delete(&allocationCallbacks, allocator);
-
-
+17766 VMA_DEBUG_LOG(
" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
+
+
+17769 uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits()
const
+
+17771 VkBufferCreateInfo dummyBufCreateInfo;
+17772 VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
+
+17774 uint32_t memoryTypeBits = 0;
+
+
+17777 VkBuffer buf = VK_NULL_HANDLE;
+17778 VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
+17779 m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
+17780 if(res == VK_SUCCESS)
+
+
+17783 VkMemoryRequirements memReq;
+17784 (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
+17785 memoryTypeBits = memReq.memoryTypeBits;
+
+
+17788 (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
+
+
+17791 return memoryTypeBits;
+
+
+17794 uint32_t VmaAllocator_T::CalculateGlobalMemoryTypeBits()
const
+
+
+17797 VMA_ASSERT(GetMemoryTypeCount() > 0);
+
+17799 uint32_t memoryTypeBits = UINT32_MAX;
-
-
-17803 VMA_ASSERT(allocator && pAllocatorInfo);
-17804 pAllocatorInfo->
instance = allocator->m_hInstance;
-17805 pAllocatorInfo->
physicalDevice = allocator->GetPhysicalDevice();
-17806 pAllocatorInfo->
device = allocator->m_hDevice;
-
-
-
-
-17811 const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
-
-17813 VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
-17814 *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
-
-
-
-
-17819 const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
-
-17821 VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
-17822 *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
-
-
-
-
-17827 uint32_t memoryTypeIndex,
-17828 VkMemoryPropertyFlags* pFlags)
-
-17830 VMA_ASSERT(allocator && pFlags);
-17831 VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
-17832 *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
-
-
-
-
-17837 uint32_t frameIndex)
-
-17839 VMA_ASSERT(allocator);
-17840 VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
-
-17842 VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
-17844 allocator->SetCurrentFrameIndex(frameIndex);
-
-
-
-
-
-
-17851 VMA_ASSERT(allocator && pStats);
-17852 VMA_DEBUG_GLOBAL_MUTEX_LOCK
-17853 allocator->CalculateStats(pStats);
-
-
-
-
-
-
-17860 VMA_ASSERT(allocator && pBudget);
-17861 VMA_DEBUG_GLOBAL_MUTEX_LOCK
-17862 allocator->GetBudget(pBudget, 0, allocator->GetMemoryHeapCount());
-
-
-17865 #if VMA_STATS_STRING_ENABLED
-
-
-
-17869 char** ppStatsString,
-17870 VkBool32 detailedMap)
-
-17872 VMA_ASSERT(allocator && ppStatsString);
-17873 VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
-17875 VmaStringBuilder sb(allocator);
-
-17877 VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
-17878 json.BeginObject();
-
-
-17881 allocator->GetBudget(budget, 0, allocator->GetMemoryHeapCount());
-
-
-17884 allocator->CalculateStats(&stats);
-
-17886 json.WriteString(
"Total");
-17887 VmaPrintStatInfo(json, stats.
total);
-
-17889 for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
-
-17891 json.BeginString(
"Heap ");
-17892 json.ContinueString(heapIndex);
-
-17894 json.BeginObject();
-
-17896 json.WriteString(
"Size");
-17897 json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
-
-17899 json.WriteString(
"Flags");
-17900 json.BeginArray(
true);
-17901 if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
-
-17903 json.WriteString(
"DEVICE_LOCAL");
-
-
-
-17907 json.WriteString(
"Budget");
-17908 json.BeginObject();
-
-17910 json.WriteString(
"BlockBytes");
-17911 json.WriteNumber(budget[heapIndex].blockBytes);
-17912 json.WriteString(
"AllocationBytes");
-17913 json.WriteNumber(budget[heapIndex].allocationBytes);
-17914 json.WriteString(
"Usage");
-17915 json.WriteNumber(budget[heapIndex].usage);
-17916 json.WriteString(
"Budget");
-17917 json.WriteNumber(budget[heapIndex].budget);
-
-
+    if(!m_UseAmdDeviceCoherentMemory)
+    {
+        // Exclude memory types that have VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD.
+        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
+        {
+            if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
+            {
+                memoryTypeBits &= ~(1u << memTypeIndex);
+            }
+        }
+    }
+
+    return memoryTypeBits;
+}
+17816 bool VmaAllocator_T::GetFlushOrInvalidateRange(
+
+17818 VkDeviceSize offset, VkDeviceSize size,
+17819 VkMappedMemoryRange& outRange)
const
+
+17821 const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
+17822 if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
+
+17824 const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
+17825 const VkDeviceSize allocationSize = allocation->GetSize();
+17826 VMA_ASSERT(offset <= allocationSize);
+
+17828 outRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
+17829 outRange.pNext = VMA_NULL;
+17830 outRange.memory = allocation->GetMemory();
+
+17832 switch(allocation->GetType())
+
+17834 case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+17835 outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
+17836 if(size == VK_WHOLE_SIZE)
+
+17838 outRange.size = allocationSize - outRange.offset;
+
+
+
+17842 VMA_ASSERT(offset + size <= allocationSize);
+17843 outRange.size = VMA_MIN(
+17844 VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize),
+17845 allocationSize - outRange.offset);
+
+
+17848 case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
+
+
+17851 outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
+17852 if(size == VK_WHOLE_SIZE)
+
+17854 size = allocationSize - offset;
+
+
+
+17858 VMA_ASSERT(offset + size <= allocationSize);
+
+17860 outRange.size = VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize);
+
+
+17863 const VkDeviceSize allocationOffset = allocation->GetOffset();
+17864 VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
+17865 const VkDeviceSize blockSize = allocation->GetBlock()->m_pMetadata->GetSize();
+17866 outRange.offset += allocationOffset;
+17867 outRange.size = VMA_MIN(outRange.size, blockSize - outRange.offset);
+
+
+
+
+
+
+
+
+
+
+
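// Worked example (annotation): with nonCoherentAtomSize = 64, a request to
// flush offset = 70, size = 40 inside a dedicated allocation becomes
// outRange.offset = VmaAlignDown(70, 64) = 64 and
// outRange.size = VmaAlignUp(40 + (70 - 64), 64) = 64, clamped to the
// allocation end - exactly the alignment arithmetic performed above.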
+17879 #if VMA_MEMORY_BUDGET
+
+17881 void VmaAllocator_T::UpdateVulkanBudget()
+
+17883 VMA_ASSERT(m_UseExtMemoryBudget);
+
+17885 VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR };
+
+17887 VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT };
+17888 VmaPnextChainPushFront(&memProps, &budgetProps);
+
+17890 GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps);
+
+
+17893 VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex);
+
+17895 for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
+
+17897 m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex];
+17898 m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex];
+17899 m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load();
+
+
+17902 if(m_Budget.m_VulkanBudget[heapIndex] == 0)
+
+17904 m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10;
+
+17906 else if(m_Budget.m_VulkanBudget[heapIndex] > m_MemProps.memoryHeaps[heapIndex].size)
+
+17908 m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size;
+
+17910 if(m_Budget.m_VulkanUsage[heapIndex] == 0 && m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] > 0)
+
+17912 m_Budget.m_VulkanUsage[heapIndex] = m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
+
+
+17915 m_Budget.m_OperationsSinceBudgetFetch = 0;
+
+
+
+
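// Usage sketch (annotation): the data refreshed by UpdateVulkanBudget() comes
// from VK_EXT_memory_budget, which the application opts into at allocator
// creation (the extension must also be enabled on the VkDevice):
//
//     VmaAllocatorCreateInfo allocatorInfo = {};
//     allocatorInfo.flags |= VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT;
//     // instance, physicalDevice, device assumed filled in by the caller
//     VmaAllocator allocator;
//     vmaCreateAllocator(&allocatorInfo, &allocator);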
-
-
-17923 json.WriteString(
"Stats");
-17924 VmaPrintStatInfo(json, stats.
memoryHeap[heapIndex]);
-
-
-17927 for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
-
-17929 if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
-
-17931 json.BeginString(
"Type ");
-17932 json.ContinueString(typeIndex);
-
-
-17935 json.BeginObject();
-
-17937 json.WriteString(
"Flags");
-17938 json.BeginArray(
true);
-17939 VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
-17940 if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
-
-17942 json.WriteString(
"DEVICE_LOCAL");
-
-17944 if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
-
-17946 json.WriteString(
"HOST_VISIBLE");
-
-17948 if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
-
-17950 json.WriteString(
"HOST_COHERENT");
-
-17952 if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
-
-17954 json.WriteString(
"HOST_CACHED");
-
-17956 if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
-
-17958 json.WriteString(
"LAZILY_ALLOCATED");
-
-17960 if((flags & VK_MEMORY_PROPERTY_PROTECTED_BIT) != 0)
-
-17962 json.WriteString(
" PROTECTED");
-
-17964 if((flags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
-
-17966 json.WriteString(
" DEVICE_COHERENT");
-
-17968 if((flags & VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY) != 0)
-
-17970 json.WriteString(
" DEVICE_UNCACHED");
-
-
-
-
-
-17976 json.WriteString(
"Stats");
-17977 VmaPrintStatInfo(json, stats.
memoryType[typeIndex]);
-
-
-
-
-
-
-
-
-17986 if(detailedMap == VK_TRUE)
-
-17988 allocator->PrintDetailedMap(json);
-
-
-
-
-
-17994 const size_t len = sb.GetLength();
-17995 char*
const pChars = vma_new_array(allocator,
char, len + 1);
-
-
-17998 memcpy(pChars, sb.GetData(), len);
-
-18000 pChars[len] =
'\0';
-18001 *ppStatsString = pChars;
-
-
-
-
-18006 char* pStatsString)
-
-18008 if(pStatsString != VMA_NULL)
-
-18010 VMA_ASSERT(allocator);
-18011 size_t len = strlen(pStatsString);
-18012 vma_delete_array(allocator, pStatsString, len + 1);
-
-
-
-
-
-
-
-
-
-
-18023 uint32_t memoryTypeBits,
-
-18025 uint32_t* pMemoryTypeIndex)
-
-18027 VMA_ASSERT(allocator != VK_NULL_HANDLE);
-18028 VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
-18029 VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
-
-18031 memoryTypeBits &= allocator->GetGlobalMemoryTypeBits();
-
-
-
-
+17921 void VmaAllocator_T::FillAllocation(
const VmaAllocation hAllocation, uint8_t pattern)
+
+17923 if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
+17924 !hAllocation->CanBecomeLost() &&
+17925 (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
+
+17927 void* pData = VMA_NULL;
+17928 VkResult res = Map(hAllocation, &pData);
+17929 if(res == VK_SUCCESS)
+
+17931 memset(pData, (
int)pattern, (
size_t)hAllocation->GetSize());
+17932 FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
+17933 Unmap(hAllocation);
+
+
+
+17937 VMA_ASSERT(0 &&
"VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
+
+
+
+
+17942 uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
+
+17944 uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
+17945 if(memoryTypeBits == UINT32_MAX)
+
+17947 memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
+17948 m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
+
+17950 return memoryTypeBits;
+
+
+17953 #if VMA_STATS_STRING_ENABLED
+
+17955 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
+
+17957 bool dedicatedAllocationsStarted =
false;
+17958 for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
+
+17960 VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
+17961 DedicatedAllocationLinkedList& dedicatedAllocList = m_DedicatedAllocations[memTypeIndex];
+17962 if(!dedicatedAllocList.IsEmpty())
+
+17964 if(dedicatedAllocationsStarted ==
false)
+
+17966 dedicatedAllocationsStarted =
true;
+17967 json.WriteString(
"DedicatedAllocations");
+17968 json.BeginObject();
+
+
+17971 json.BeginString(
"Type ");
+17972 json.ContinueString(memTypeIndex);
+
+
+
+
+
+17978 alloc != VMA_NULL; alloc = dedicatedAllocList.GetNext(alloc))
+
+17980 json.BeginObject(
true);
+17981 alloc->PrintParameters(json);
+
+
+
+
+
+
+17988 if(dedicatedAllocationsStarted)
+
+
+
+
+
+17994 bool allocationsStarted =
false;
+17995 for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
+
+17997 if(m_pBlockVectors[memTypeIndex]->IsEmpty() ==
false)
+
+17999 if(allocationsStarted ==
false)
+
+18001 allocationsStarted =
true;
+18002 json.WriteString(
"DefaultPools");
+18003 json.BeginObject();
+
+
+18006 json.BeginString(
"Type ");
+18007 json.ContinueString(memTypeIndex);
+
+
+18010 m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
+
+
+18013 if(allocationsStarted)
+
+
+
+
+
+
+
+18021 VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
+18022 if(!m_Pools.IsEmpty())
+
+18024 json.WriteString(
"Pools");
+18025 json.BeginObject();
+18026 for(
VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
+
+18028 json.BeginString();
+18029 json.ContinueString(pool->GetId());
+
+
+18032 pool->m_BlockVector.PrintDetailedMap(json);
+
+
+
-
-18038 uint32_t requiredFlags = pAllocationCreateInfo->
requiredFlags;
-18039 uint32_t preferredFlags = pAllocationCreateInfo->
preferredFlags;
-18040 uint32_t notPreferredFlags = 0;
-
-
-18043 switch(pAllocationCreateInfo->
usage)
-
-
-
-
-18048 if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
-
-18050 preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-
-
-
-18054 requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
-
-
-18057 requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
-18058 if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
-
-18060 preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-
-
-
-18064 requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
-18065 preferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
-
-
-18068 notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-
-
-18071 requiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
-
-
-
-
-
-
-
-
-18080 (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0)
-
-18082 notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY;
-
-
-18085 *pMemoryTypeIndex = UINT32_MAX;
-18086 uint32_t minCost = UINT32_MAX;
-18087 for(uint32_t memTypeIndex = 0, memTypeBit = 1;
-18088 memTypeIndex < allocator->GetMemoryTypeCount();
-18089 ++memTypeIndex, memTypeBit <<= 1)
-
-
-18092 if((memTypeBit & memoryTypeBits) != 0)
-
-18094 const VkMemoryPropertyFlags currFlags =
-18095 allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
-
-18097 if((requiredFlags & ~currFlags) == 0)
-
-
-18100 uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags) +
-18101 VmaCountBitsSet(currFlags & notPreferredFlags);
-
-18103 if(currCost < minCost)
-
-18105 *pMemoryTypeIndex = memTypeIndex;
-
-
-
-
-18110 minCost = currCost;
-
-
-
-
-18115 return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
-
-
-
-
-18120 const VkBufferCreateInfo* pBufferCreateInfo,
-
-18122 uint32_t* pMemoryTypeIndex)
-
-18124 VMA_ASSERT(allocator != VK_NULL_HANDLE);
-18125 VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
-18126 VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
-18127 VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
-
-18129 const VkDevice hDev = allocator->m_hDevice;
-18130 VkBuffer hBuffer = VK_NULL_HANDLE;
-18131 VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
-18132 hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
-18133 if(res == VK_SUCCESS)
-
-18135 VkMemoryRequirements memReq = {};
-18136 allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
-18137 hDev, hBuffer, &memReq);
-
-
-
-18141 memReq.memoryTypeBits,
-18142 pAllocationCreateInfo,
-
-
-18145 allocator->GetVulkanFunctions().vkDestroyBuffer(
-18146 hDev, hBuffer, allocator->GetAllocationCallbacks());
-
-
-
-
-
-
-18153 const VkImageCreateInfo* pImageCreateInfo,
-
-18155 uint32_t* pMemoryTypeIndex)
-
-18157 VMA_ASSERT(allocator != VK_NULL_HANDLE);
-18158 VMA_ASSERT(pImageCreateInfo != VMA_NULL);
-18159 VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
-18160 VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
+
+
+
+
+
+
+
+
+
+
+18048 VMA_ASSERT(pCreateInfo && pAllocator);
+
+
+18051 VMA_DEBUG_LOG(
"vmaCreateAllocator");
+
+18053 return (*pAllocator)->Init(pCreateInfo);
+
+
+
+
+
+18059 if(allocator != VK_NULL_HANDLE)
+
+18061 VMA_DEBUG_LOG(
"vmaDestroyAllocator");
+18062 VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
+18063 vma_delete(&allocationCallbacks, allocator);
+
+
+
+
+
+18069 VMA_ASSERT(allocator && pAllocatorInfo);
+18070 pAllocatorInfo->
instance = allocator->m_hInstance;
+18071 pAllocatorInfo->
physicalDevice = allocator->GetPhysicalDevice();
+18072 pAllocatorInfo->
device = allocator->m_hDevice;
+
+
+
+
+18077 const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
+
+18079 VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
+18080 *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
+
+
+
+
+18085 const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
+
+18087 VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
+18088 *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
+
+
+
+
+18093 uint32_t memoryTypeIndex,
+18094 VkMemoryPropertyFlags* pFlags)
+
+18096 VMA_ASSERT(allocator && pFlags);
+18097 VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
+18098 *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
+
+
+
+
+18103 uint32_t frameIndex)
+
+18105 VMA_ASSERT(allocator);
+18106 VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
+
+18108 VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+18110 allocator->SetCurrentFrameIndex(frameIndex);
+
+
+
+
+
+
+18117 VMA_ASSERT(allocator && pStats);
+18118 VMA_DEBUG_GLOBAL_MUTEX_LOCK
+18119 allocator->CalculateStats(pStats);
+
+
+
+
+
+
+18126 VMA_ASSERT(allocator && pBudget);
+18127 VMA_DEBUG_GLOBAL_MUTEX_LOCK
+18128 allocator->GetBudget(pBudget, 0, allocator->GetMemoryHeapCount());
+
+
+18131 #if VMA_STATS_STRING_ENABLED
+
+
+
+18135 char** ppStatsString,
+18136 VkBool32 detailedMap)
+
+18138 VMA_ASSERT(allocator && ppStatsString);
+18139 VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+18141 VmaStringBuilder sb(allocator);
+
+18143 VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
+18144 json.BeginObject();
+
+
+18147 allocator->GetBudget(budget, 0, allocator->GetMemoryHeapCount());
+
+
+18150 allocator->CalculateStats(&stats);
+
+18152 json.WriteString(
"Total");
+18153 VmaPrintStatInfo(json, stats.
total);
+
+18155 for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
+
+18157 json.BeginString(
"Heap ");
+18158 json.ContinueString(heapIndex);
+
+18160 json.BeginObject();
-18162 const VkDevice hDev = allocator->m_hDevice;
-18163 VkImage hImage = VK_NULL_HANDLE;
-18164 VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
-18165 hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
-18166 if(res == VK_SUCCESS)
-
-18168 VkMemoryRequirements memReq = {};
-18169 allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
-18170 hDev, hImage, &memReq);
-
-
-
-18174 memReq.memoryTypeBits,
-18175 pAllocationCreateInfo,
-
-
-18178 allocator->GetVulkanFunctions().vkDestroyImage(
-18179 hDev, hImage, allocator->GetAllocationCallbacks());
-
-
-
-
-
-
-
-
-
-18189 VMA_ASSERT(allocator && pCreateInfo && pPool);
-
-18191 VMA_DEBUG_LOG(
"vmaCreatePool");
+18162 json.WriteString(
"Size");
+18163 json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
+
+18165 json.WriteString(
"Flags");
+18166 json.BeginArray(
true);
+18167 if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
+
+18169 json.WriteString(
"DEVICE_LOCAL");
+
+
+
+18173 json.WriteString(
"Budget");
+18174 json.BeginObject();
+
+18176 json.WriteString(
"BlockBytes");
+18177 json.WriteNumber(budget[heapIndex].blockBytes);
+18178 json.WriteString(
"AllocationBytes");
+18179 json.WriteNumber(budget[heapIndex].allocationBytes);
+18180 json.WriteString(
"Usage");
+18181 json.WriteNumber(budget[heapIndex].usage);
+18182 json.WriteString(
"Budget");
+18183 json.WriteNumber(budget[heapIndex].budget);
+
+
+
+
+
+18189 json.WriteString(
"Stats");
+18190 VmaPrintStatInfo(json, stats.
memoryHeap[heapIndex]);
+
-18193 VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
-18195 VkResult res = allocator->CreatePool(pCreateInfo, pPool);
-
-18197 #if VMA_RECORDING_ENABLED
-18198 if(allocator->GetRecorder() != VMA_NULL)
-
-18200 allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
-
-
-
-
-
-
-
-
-
-
-18211 VMA_ASSERT(allocator);
-
-18213 if(pool == VK_NULL_HANDLE)
-
-
-
-
-18218 VMA_DEBUG_LOG(
"vmaDestroyPool");
-
-18220 VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
-18222 #if VMA_RECORDING_ENABLED
-18223 if(allocator->GetRecorder() != VMA_NULL)
-
-18225 allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
-
-
-
-18229 allocator->DestroyPool(pool);
-
-
-
-
-
-
-
-18237 VMA_ASSERT(allocator && pool && pPoolStats);
-
-18239 VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
-18241 allocator->GetPoolStats(pool, pPoolStats);
-
+18193 for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
+
+18195 if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
+
+18197 json.BeginString(
"Type ");
+18198 json.ContinueString(typeIndex);
+
+
+18201 json.BeginObject();
+
+18203 json.WriteString(
"Flags");
+18204 json.BeginArray(
true);
+18205 VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
+18206 if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
+
+18208 json.WriteString(
"DEVICE_LOCAL");
+
+18210 if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
+
+18212 json.WriteString(
"HOST_VISIBLE");
+
+18214 if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
+
+18216 json.WriteString("HOST_COHERENT");
+
+18218 if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
+
+18220 json.WriteString("HOST_CACHED");
+
+18222 if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
+
+18224 json.WriteString("LAZILY_ALLOCATED");
+
+18226 #if VMA_VULKAN_VERSION >= 1001000
+18227 if((flags & VK_MEMORY_PROPERTY_PROTECTED_BIT) != 0)
+
+18229 json.WriteString("PROTECTED");
+
+
+18232 #if VK_AMD_device_coherent_memory
+18233 if((flags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
+
+18235 json.WriteString("DEVICE_COHERENT");
+
+18237 if((flags & VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY) != 0)
+
+18239 json.WriteString("DEVICE_UNCACHED");
+
+
+
-
-
-
-18247 size_t* pLostAllocationCount)
-
-18249 VMA_ASSERT(allocator && pool);
-
-18251 VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
-18253 #if VMA_RECORDING_ENABLED
-18254 if(allocator->GetRecorder() != VMA_NULL)
-
-18256 allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
-
-
-
-18260 allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
-
-
-
-
-18265 VMA_ASSERT(allocator && pool);
-
-18267 VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
-18269 VMA_DEBUG_LOG("vmaCheckPoolCorruption");
-
-18271 return allocator->CheckPoolCorruption(pool);
+
+
+18246 json.WriteString("Stats");
+18247 VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
+
+
+
+
+
+
+
+
+18256 if(detailedMap == VK_TRUE)
+
+18258 allocator->PrintDetailedMap(json);
+
+
+
+
+
+18264 const size_t len = sb.GetLength();
+18265 char* const pChars = vma_new_array(allocator, char, len + 1);
+
+
+18268 memcpy(pChars, sb.GetData(), len);
+
+18270 pChars[len] = '\0';
+18271 *ppStatsString = pChars;
-
+
-
-18277 const char** ppName)
-
-18279 VMA_ASSERT(allocator && pool && ppName);
-
-18281 VMA_DEBUG_LOG("vmaGetPoolName");
-
-18283 VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
-18285 *ppName = pool->GetName();
-
+18276 char* pStatsString)
+
+18278 if(pStatsString != VMA_NULL)
+
+18280 VMA_ASSERT(allocator);
+18281 size_t len = strlen(pStatsString);
+18282 vma_delete_array(allocator, pStatsString, len + 1);
+
+
+
+
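For reference, a minimal usage sketch of the vmaBuildStatsString/vmaFreeStatsString pair shown above (a valid VmaAllocator named allocator is assumed; illustrative only, not part of the generated listing):

// Sketch: dump allocator statistics as JSON and release the string afterwards.
char* statsString = VMA_NULL;
vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE requests the detailed map
printf("%s\n", statsString);
vmaFreeStatsString(allocator, statsString); // frees len+1 bytes via vma_delete_array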
-
-
-
-
-
-18293 VMA_ASSERT(allocator && pool);
-
-18295 VMA_DEBUG_LOG("vmaSetPoolName");
-
-18297 VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
-18299 pool->SetName(pName);
+
+
+
+
+
+18293 uint32_t memoryTypeBits,
+
+18295 uint32_t* pMemoryTypeIndex)
+
+18297 VMA_ASSERT(allocator != VK_NULL_HANDLE);
+18298 VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
+18299 VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
-18301 #if VMA_RECORDING_ENABLED
-18302 if(allocator->GetRecorder() != VMA_NULL)
-
-18304 allocator->GetRecorder()->RecordSetPoolName(allocator->GetCurrentFrameIndex(), pool, pName);
-
-
-
-
-
-
-18311 const VkMemoryRequirements* pVkMemoryRequirements,
-
-
-
-
-18316 VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
-
-18318 VMA_DEBUG_LOG("vmaAllocateMemory");
-
-18320 VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
-18322 VkResult result = allocator->AllocateMemory(
-18323 *pVkMemoryRequirements,
-
-
-
-
-
-
-18330 VMA_SUBALLOCATION_TYPE_UNKNOWN,
-
-
-
-18334 #if VMA_RECORDING_ENABLED
-18335 if(allocator->GetRecorder() != VMA_NULL)
-
-18337 allocator->GetRecorder()->RecordAllocateMemory(
-18338 allocator->GetCurrentFrameIndex(),
-18339 *pVkMemoryRequirements,
-
-
-
-
-
-18345 if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
-
-18347 allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
-
-
-
-
-
-
-
-18355 const VkMemoryRequirements* pVkMemoryRequirements,
-
-18357 size_t allocationCount,
-
-
-
-18361 if(allocationCount == 0)
-
-
-
-
-18366 VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
-
-18368 VMA_DEBUG_LOG("vmaAllocateMemoryPages");
-
-18370 VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
-18372 VkResult result = allocator->AllocateMemory(
-18373 *pVkMemoryRequirements,
-
-
-
-
-
-
-18380 VMA_SUBALLOCATION_TYPE_UNKNOWN,
-
-
-
-18384 #if VMA_RECORDING_ENABLED
-18385 if(allocator->GetRecorder() != VMA_NULL)
-
-18387 allocator->GetRecorder()->RecordAllocateMemoryPages(
-18388 allocator->GetCurrentFrameIndex(),
-18389 *pVkMemoryRequirements,
-
-18391 (uint64_t)allocationCount,
-
-
-
-
-18396 if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
-
-18398 for(size_t i = 0; i < allocationCount; ++i)
-
-18400 allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
-
-
-
-
-
-
-
-
-
-
-
-
-
-18414 VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
-
-18416 VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
-
-18418 VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
-18420 VkMemoryRequirements vkMemReq = {};
-18421 bool requiresDedicatedAllocation = false;
-18422 bool prefersDedicatedAllocation = false;
-18423 allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
-18424 requiresDedicatedAllocation,
-18425 prefersDedicatedAllocation);
-
-18427 VkResult result = allocator->AllocateMemory(
-
-18429 requiresDedicatedAllocation,
-18430 prefersDedicatedAllocation,
-
-
-
-
-18435 VMA_SUBALLOCATION_TYPE_BUFFER,
-
-
-
-18439 #if VMA_RECORDING_ENABLED
-18440 if(allocator->GetRecorder() != VMA_NULL)
-
-18442 allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
-18443 allocator->GetCurrentFrameIndex(),
-
-18445 requiresDedicatedAllocation,
-18446 prefersDedicatedAllocation,
-
-
-
-
-
-18452 if(pAllocationInfo && result == VK_SUCCESS)
-
-18454 allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
-
-
-
-
-
-
-
-
-
-
-
-
-18467 VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
-
-18469 VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
-
-18471 VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
-18473 VkMemoryRequirements vkMemReq = {};
-18474 bool requiresDedicatedAllocation = false;
-18475 bool prefersDedicatedAllocation = false;
-18476 allocator->GetImageMemoryRequirements(image, vkMemReq,
-18477 requiresDedicatedAllocation, prefersDedicatedAllocation);
-
-18479 VkResult result = allocator->AllocateMemory(
-
-18481 requiresDedicatedAllocation,
-18482 prefersDedicatedAllocation,
-
-
-
-
-18487 VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
-
-
-
-18491 #if VMA_RECORDING_ENABLED
-18492 if(allocator->GetRecorder() != VMA_NULL)
-
-18494 allocator->GetRecorder()->RecordAllocateMemoryForImage(
-18495 allocator->GetCurrentFrameIndex(),
-
-18497 requiresDedicatedAllocation,
-18498 prefersDedicatedAllocation,
-
-
-
-
-
-18504 if(pAllocationInfo && result == VK_SUCCESS)
-
-18506 allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
-
+18301 memoryTypeBits &= allocator->GetGlobalMemoryTypeBits();
+
+
+
+
+
+
+18308 uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
+18309 uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
+18310 uint32_t notPreferredFlags = 0;
+
+
+18313 switch(pAllocationCreateInfo->usage)
+
+
+
+
+18318 if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
+
+18320 preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+
+
+
+18324 requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
+
+
+18327 requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+18328 if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
+
+18330 preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+
+
+
+18334 requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+18335 preferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
+
+
+18338 notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+
+
+18341 requiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
+
+
+
+
+
+
+
+
+18350 (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0)
+
+18352 notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY;
+
+
+18355 *pMemoryTypeIndex = UINT32_MAX;
+18356 uint32_t minCost = UINT32_MAX;
+18357 for(uint32_t memTypeIndex = 0, memTypeBit = 1;
+18358 memTypeIndex < allocator->GetMemoryTypeCount();
+18359 ++memTypeIndex, memTypeBit <<= 1)
+
+
+18362 if((memTypeBit & memoryTypeBits) != 0)
+
+18364 const VkMemoryPropertyFlags currFlags =
+18365 allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
+
+18367 if((requiredFlags & ~currFlags) == 0)
+
+
+18370 uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags) +
+18371 VmaCountBitsSet(currFlags & notPreferredFlags);
+
+18373 if(currCost < minCost)
+
+18375 *pMemoryTypeIndex = memTypeIndex;
+
+
+
+
+18380 minCost = currCost;
+
+
+
+
+18385 return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
+
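A minimal sketch of how the cost-based search above is driven from user code (allocator is assumed; UINT32_MAX means "consider every memory type" and is masked internally against the global memory type bits):

// Sketch: pick a DEVICE_LOCAL memory type using the preferred/not-preferred cost model.
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY; // adds DEVICE_LOCAL to preferredFlags
uint32_t memTypeIndex = UINT32_MAX;
VkResult res = vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex);
// res == VK_ERROR_FEATURE_NOT_PRESENT if no type satisfies requiredFlags.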
+
+
+
+18390 const VkBufferCreateInfo* pBufferCreateInfo,
+
+18392 uint32_t* pMemoryTypeIndex)
+
+18394 VMA_ASSERT(allocator != VK_NULL_HANDLE);
+18395 VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
+18396 VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
+18397 VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
+
+18399 const VkDevice hDev = allocator->m_hDevice;
+18400 VkBuffer hBuffer = VK_NULL_HANDLE;
+18401 VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
+18402 hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
+18403 if(res == VK_SUCCESS)
+
+18405 VkMemoryRequirements memReq = {};
+18406 allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
+18407 hDev, hBuffer, &memReq);
+
+
+
+18411 memReq.memoryTypeBits,
+18412 pAllocationCreateInfo,
+
+
+18415 allocator->GetVulkanFunctions().vkDestroyBuffer(
+18416 hDev, hBuffer, allocator->GetAllocationCallbacks());
+
+
+
+
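The function above works by creating a temporary VkBuffer only to query its memory requirements, then destroying it. A hedged usage sketch (allocator assumed, size and usage flags illustrative):

VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
uint32_t memTypeIndex = UINT32_MAX;
VkResult res = vmaFindMemoryTypeIndexForBufferInfo(
    allocator, &bufCreateInfo, &allocCreateInfo, &memTypeIndex);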
+
+
+18423 const VkImageCreateInfo* pImageCreateInfo,
+
+18425 uint32_t* pMemoryTypeIndex)
+
+18427 VMA_ASSERT(allocator != VK_NULL_HANDLE);
+18428 VMA_ASSERT(pImageCreateInfo != VMA_NULL);
+18429 VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
+18430 VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
+
+18432 const VkDevice hDev = allocator->m_hDevice;
+18433 VkImage hImage = VK_NULL_HANDLE;
+18434 VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
+18435 hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
+18436 if(res == VK_SUCCESS)
+
+18438 VkMemoryRequirements memReq = {};
+18439 allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
+18440 hDev, hImage, &memReq);
+
+
+
+18444 memReq.memoryTypeBits,
+18445 pAllocationCreateInfo,
+
+
+18448 allocator->GetVulkanFunctions().vkDestroyImage(
+18449 hDev, hImage, allocator->GetAllocationCallbacks());
+
+
+
+
+
+
+
+
+
+18459 VMA_ASSERT(allocator && pCreateInfo && pPool);
+
+18461 VMA_DEBUG_LOG("vmaCreatePool");
+
+18463 VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+18465 VkResult res = allocator->CreatePool(pCreateInfo, pPool);
+
+18467 #if VMA_RECORDING_ENABLED
+18468 if(allocator->GetRecorder() != VMA_NULL)
+
+18470 allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
+
+
+
+
+
+
+
+
+
+
+18481 VMA_ASSERT(allocator);
+
+18483 if(pool == VK_NULL_HANDLE)
+
+
+
+
+18488 VMA_DEBUG_LOG("vmaDestroyPool");
+
+18490 VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+18492 #if VMA_RECORDING_ENABLED
+18493 if(allocator->GetRecorder() != VMA_NULL)
+
+18495 allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
+
+
+
+18499 allocator->DestroyPool(pool);
+
+
+
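A sketch of the create/destroy pair above in application code (memTypeIndex would come from one of the vmaFindMemoryTypeIndex* helpers; block sizes are illustrative):

VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.blockSize = 16ull * 1024 * 1024; // 0 would select the default block size
poolCreateInfo.maxBlockCount = 2;
VmaPool pool = VK_NULL_HANDLE;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
// ... allocate with VmaAllocationCreateInfo::pool = pool ...
vmaDestroyPool(allocator, pool); // returns early for VK_NULL_HANDLE, as in the hunk above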
+
+
+
+
+18507 VMA_ASSERT(allocator && pool && pPoolStats);
-
-
-
-
-
-
-
-18516 VMA_ASSERT(allocator);
-
-18518 if(allocation == VK_NULL_HANDLE)
-
-
-
+18509 VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+18511 allocator->GetPoolStats(pool, pPoolStats);
+
+
+
+
+
+18517 size_t* pLostAllocationCount)
+
+18519 VMA_ASSERT(allocator && pool);
+
+18521 VMA_DEBUG_GLOBAL_MUTEX_LOCK
-18523 VMA_DEBUG_LOG("vmaFreeMemory");
-
-18525 VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
-18527 #if VMA_RECORDING_ENABLED
-18528 if(allocator->GetRecorder() != VMA_NULL)
-
-18530 allocator->GetRecorder()->RecordFreeMemory(
-18531 allocator->GetCurrentFrameIndex(),
-
-
-
-
-18536 allocator->FreeMemory(
-
-
-
+18523 #if VMA_RECORDING_ENABLED
+18524 if(allocator->GetRecorder() != VMA_NULL)
+
+18526 allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
+
+
+
+18530 allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
+
+
+
+
+18535 VMA_ASSERT(allocator && pool);
+
+18537 VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+18539 VMA_DEBUG_LOG("vmaCheckPoolCorruption");
-
-
-18543 size_t allocationCount,
-
-
-18546 if(allocationCount == 0)
-
-
-
+18541 return allocator->CheckPoolCorruption(pool);
+
+
+
+
+
+18547 const char** ppName)
+
+18549 VMA_ASSERT(allocator && pool && ppName);
-18551 VMA_ASSERT(allocator);
+18551 VMA_DEBUG_LOG("vmaGetPoolName");
-18553 VMA_DEBUG_LOG("vmaFreeMemoryPages");
+18553 VMA_DEBUG_GLOBAL_MUTEX_LOCK
-18555 VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
-18557 #if VMA_RECORDING_ENABLED
-18558 if(allocator->GetRecorder() != VMA_NULL)
-
-18560 allocator->GetRecorder()->RecordFreeMemoryPages(
-18561 allocator->GetCurrentFrameIndex(),
-18562 (uint64_t)allocationCount,
-
-
-
+18555 *ppName = pool->GetName();
+
+
+
+
+
+
+
+18563 VMA_ASSERT(allocator && pool);
+
+18565 VMA_DEBUG_LOG("vmaSetPoolName");
-18567 allocator->FreeMemory(allocationCount, pAllocations);
-
-
-
-
-
-18573 VkDeviceSize newSize)
-
-18575 VMA_ASSERT(allocator && allocation);
-
-18577 VMA_DEBUG_LOG("vmaResizeAllocation");
+18567 VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+18569 pool->SetName(pName);
+
+18571 #if VMA_RECORDING_ENABLED
+18572 if(allocator->GetRecorder() != VMA_NULL)
+
+18574 allocator->GetRecorder()->RecordSetPoolName(allocator->GetCurrentFrameIndex(), pool, pName);
+
+
+
-18579 VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
-18581 return allocator->ResizeAllocation(allocation, newSize);
-
-
-
-
-
-
-
-18589 VMA_ASSERT(allocator && allocation && pAllocationInfo);
-
-18591 VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
-18593 #if VMA_RECORDING_ENABLED
-18594 if(allocator->GetRecorder() != VMA_NULL)
-
-18596 allocator->GetRecorder()->RecordGetAllocationInfo(
-18597 allocator->GetCurrentFrameIndex(),
-
-
-
-
-18602 allocator->GetAllocationInfo(allocation, pAllocationInfo);
-
-
-
-
-
-
-18609 VMA_ASSERT(allocator && allocation);
-
-18611 VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
-18613 #if VMA_RECORDING_ENABLED
-18614 if(allocator->GetRecorder() != VMA_NULL)
-
-18616 allocator->GetRecorder()->RecordTouchAllocation(
-18617 allocator->GetCurrentFrameIndex(),
-
-
-
-
-18622 return allocator->TouchAllocation(allocation);
-
-
-
-
-
-
-
-18630 VMA_ASSERT(allocator && allocation);
-
-18632 VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
-18634 allocation->SetUserData(allocator, pUserData);
+
+
+18581 const VkMemoryRequirements* pVkMemoryRequirements,
+
+
+
+
+18586 VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
+
+18588 VMA_DEBUG_LOG("vmaAllocateMemory");
+
+18590 VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+18592 VkResult result = allocator->AllocateMemory(
+18593 *pVkMemoryRequirements,
+
+
+
+
+
+
+18600 VMA_SUBALLOCATION_TYPE_UNKNOWN,
+
+
+
+18604 #if VMA_RECORDING_ENABLED
+18605 if(allocator->GetRecorder() != VMA_NULL)
+
+18607 allocator->GetRecorder()->RecordAllocateMemory(
+18608 allocator->GetCurrentFrameIndex(),
+18609 *pVkMemoryRequirements,
+
+
+
+
+
+18615 if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
+
+18617 allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
+
+
+
+
+
+
+
+18625 const VkMemoryRequirements* pVkMemoryRequirements,
+
+18627 size_t allocationCount,
+
+
+
+18631 if(allocationCount == 0)
+
+
+
-18636 #if VMA_RECORDING_ENABLED
-18637 if(allocator->GetRecorder() != VMA_NULL)
-
-18639 allocator->GetRecorder()->RecordSetAllocationUserData(
-18640 allocator->GetCurrentFrameIndex(),
-
-
-
-
-
-
-
-
-
-
-18651 VMA_ASSERT(allocator && pAllocation);
-
-18653 VMA_DEBUG_GLOBAL_MUTEX_LOCK;
-
-18655 allocator->CreateLostAllocation(pAllocation);
-
-18657 #if VMA_RECORDING_ENABLED
-18658 if(allocator->GetRecorder() != VMA_NULL)
-
-18660 allocator->GetRecorder()->RecordCreateLostAllocation(
-18661 allocator->GetCurrentFrameIndex(),
-
+18636 VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
+
+18638 VMA_DEBUG_LOG("vmaAllocateMemoryPages");
+
+18640 VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+18642 VkResult result = allocator->AllocateMemory(
+18643 *pVkMemoryRequirements,
+
+
+
+
+
+
+18650 VMA_SUBALLOCATION_TYPE_UNKNOWN,
+
+
+
+18654 #if VMA_RECORDING_ENABLED
+18655 if(allocator->GetRecorder() != VMA_NULL)
+
+18657 allocator->GetRecorder()->RecordAllocateMemoryPages(
+18658 allocator->GetCurrentFrameIndex(),
+18659 *pVkMemoryRequirements,
+
+18661 (uint64_t)allocationCount,
+
-
-
-
-
-
-
-
-18672 VMA_ASSERT(allocator && allocation && ppData);
+
+18666 if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
+
+18668 for(size_t i = 0; i < allocationCount; ++i)
+
+18670 allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
+
+
-18674 VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
-18676 VkResult res = allocator->Map(allocation, ppData);
-
-18678 #if VMA_RECORDING_ENABLED
-18679 if(allocator->GetRecorder() != VMA_NULL)
-
-18681 allocator->GetRecorder()->RecordMapMemory(
-18682 allocator->GetCurrentFrameIndex(),
-
-
-
-
-
-
+
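A hedged sketch of the vmaAllocateMemoryPages/vmaFreeMemoryPages pair whose body appears above; the single VkMemoryRequirements is reused for every page (the values here are placeholders, normally queried from a real resource):

VkMemoryRequirements memReq = {};
memReq.size = 4096;
memReq.alignment = 256;
memReq.memoryTypeBits = UINT32_MAX; // placeholder; use the bits Vulkan reports
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
VmaAllocation allocs[8] = {};
VkResult res = vmaAllocateMemoryPages(allocator, &memReq, &allocCreateInfo, 8, allocs, VMA_NULL);
if(res == VK_SUCCESS)
    vmaFreeMemoryPages(allocator, 8, allocs);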
+
+
+
+
+
+
+
+
+
+18684 VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
+
+18686 VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
+
+18688 VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
-
-
-
-18694 VMA_ASSERT(allocator && allocation);
-
-18696 VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
-18698 #if VMA_RECORDING_ENABLED
-18699 if(allocator->GetRecorder() != VMA_NULL)
-
-18701 allocator->GetRecorder()->RecordUnmapMemory(
-18702 allocator->GetCurrentFrameIndex(),
-
-
-
-
-18707 allocator->Unmap(allocation);
-
-
-
-
-18712 VMA_ASSERT(allocator && allocation);
-
-18714 VMA_DEBUG_LOG("vmaFlushAllocation");
-
-18716 VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
-18718 const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
-
-18720 #if VMA_RECORDING_ENABLED
-18721 if(allocator->GetRecorder() != VMA_NULL)
-
-18723 allocator->GetRecorder()->RecordFlushAllocation(
-18724 allocator->GetCurrentFrameIndex(),
-18725 allocation, offset, size);
-
-
-
-
-
-
-
-
-18734 VMA_ASSERT(allocator && allocation);
-
-18736 VMA_DEBUG_LOG("vmaInvalidateAllocation");
-
-18738 VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
-18740 const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
-
-18742 #if VMA_RECORDING_ENABLED
-18743 if(allocator->GetRecorder() != VMA_NULL)
-
-18745 allocator->GetRecorder()->RecordInvalidateAllocation(
-18746 allocator->GetCurrentFrameIndex(),
-18747 allocation, offset, size);
-
-
-
-
-
-
-
-
-18756 uint32_t allocationCount,
-
-18758 const VkDeviceSize* offsets,
-18759 const VkDeviceSize* sizes)
-
-18761 VMA_ASSERT(allocator);
-
-18763 if(allocationCount == 0)
-
-
-
-
-18768 VMA_ASSERT(allocations);
-
-18770 VMA_DEBUG_LOG("vmaFlushAllocations");
-
-18772 VMA_DEBUG_GLOBAL_MUTEX_LOCK
+18690 VkMemoryRequirements vkMemReq = {};
+18691 bool requiresDedicatedAllocation = false;
+18692 bool prefersDedicatedAllocation = false;
+18693 allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
+18694 requiresDedicatedAllocation,
+18695 prefersDedicatedAllocation);
+
+18697 VkResult result = allocator->AllocateMemory(
+
+18699 requiresDedicatedAllocation,
+18700 prefersDedicatedAllocation,
+
+
+
+
+18705 VMA_SUBALLOCATION_TYPE_BUFFER,
+
+
+
+18709 #if VMA_RECORDING_ENABLED
+18710 if(allocator->GetRecorder() != VMA_NULL)
+
+18712 allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
+18713 allocator->GetCurrentFrameIndex(),
+
+18715 requiresDedicatedAllocation,
+18716 prefersDedicatedAllocation,
+
+
+
+
+
+18722 if(pAllocationInfo && result == VK_SUCCESS)
+
+18724 allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
+
+
+
+
+
+
+
+
+
+
+
+
+18737 VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
+
+18739 VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
+
+18741 VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+18743 VkMemoryRequirements vkMemReq = {};
+18744 bool requiresDedicatedAllocation = false;
+18745 bool prefersDedicatedAllocation = false;
+18746 allocator->GetImageMemoryRequirements(image, vkMemReq,
+18747 requiresDedicatedAllocation, prefersDedicatedAllocation);
+
+18749 VkResult result = allocator->AllocateMemory(
+
+18751 requiresDedicatedAllocation,
+18752 prefersDedicatedAllocation,
+
+
+
+
+18757 VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
+
+
+
+18761 #if VMA_RECORDING_ENABLED
+18762 if(allocator->GetRecorder() != VMA_NULL)
+
+18764 allocator->GetRecorder()->RecordAllocateMemoryForImage(
+18765 allocator->GetCurrentFrameIndex(),
+
+18767 requiresDedicatedAllocation,
+18768 prefersDedicatedAllocation,
+
+
+
+
-18774 const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_FLUSH);
-
-18776 #if VMA_RECORDING_ENABLED
-18777 if(allocator->GetRecorder() != VMA_NULL)
-
-
-
-
-
-
-
-
-
-
-18788 uint32_t allocationCount,
-
-18790 const VkDeviceSize* offsets,
-18791 const VkDeviceSize* sizes)
-
-18793 VMA_ASSERT(allocator);
+18774 if(pAllocationInfo && result == VK_SUCCESS)
+
+18776 allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
+
+
+
+
+
+
+
+
+
+18786 VMA_ASSERT(allocator);
+
+18788 if(allocation == VK_NULL_HANDLE)
+
+
+
+
+18793 VMA_DEBUG_LOG("vmaFreeMemory");
-18795 if(allocationCount == 0)
-
-
-
-
-18800 VMA_ASSERT(allocations);
-
-18802 VMA_DEBUG_LOG("vmaInvalidateAllocations");
-
-18804 VMA_DEBUG_GLOBAL_MUTEX_LOCK
+18795 VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+18797 #if VMA_RECORDING_ENABLED
+18798 if(allocator->GetRecorder() != VMA_NULL)
+
+18800 allocator->GetRecorder()->RecordFreeMemory(
+18801 allocator->GetCurrentFrameIndex(),
+
+
+
-18806 const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_INVALIDATE);
-
-18808 #if VMA_RECORDING_ENABLED
-18809 if(allocator->GetRecorder() != VMA_NULL)
-
-
-
-
-
-
-
-
-
-
-18820 VMA_ASSERT(allocator);
-
-18822 VMA_DEBUG_LOG("vmaCheckCorruption");
-
-18824 VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
-18826 return allocator->CheckCorruption(memoryTypeBits);
-
-
-
-
-
-18832 size_t allocationCount,
-18833 VkBool32* pAllocationsChanged,
-
-
-
-
-
-
-
-
-
-18843 if(pDefragmentationInfo != VMA_NULL)
-
-
-
-
-
-
-
-
-
-
-
-
-
-18857 if(res == VK_NOT_READY)
-
-
-
-
-
-
-
-
-
-
-
-
-18870 VMA_ASSERT(allocator && pInfo && pContext);
-
-
-
-
-
-
+18806 allocator->FreeMemory(
+
+
+
+
+
+
+18813 size_t allocationCount,
+
+
+18816 if(allocationCount == 0)
+
+
+
+
+18821 VMA_ASSERT(allocator);
+
+18823 VMA_DEBUG_LOG("vmaFreeMemoryPages");
+
+18825 VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+18827 #if VMA_RECORDING_ENABLED
+18828 if(allocator->GetRecorder() != VMA_NULL)
+
+18830 allocator->GetRecorder()->RecordFreeMemoryPages(
+18831 allocator->GetCurrentFrameIndex(),
+18832 (uint64_t)allocationCount,
+
+
+
+
+18837 allocator->FreeMemory(allocationCount, pAllocations);
+
+
+
+
+
+
+
+18845 VMA_ASSERT(allocator && allocation && pAllocationInfo);
+
+18847 VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+18849 #if VMA_RECORDING_ENABLED
+18850 if(allocator->GetRecorder() != VMA_NULL)
+
+18852 allocator->GetRecorder()->RecordGetAllocationInfo(
+18853 allocator->GetCurrentFrameIndex(),
+
+
+
+
+18858 allocator->GetAllocationInfo(allocation, pAllocationInfo);
+
+
+
+
+
+
+18865 VMA_ASSERT(allocator && allocation);
+
+18867 VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+18869 #if VMA_RECORDING_ENABLED
+18870 if(allocator->GetRecorder() != VMA_NULL)
+
+18872 allocator->GetRecorder()->RecordTouchAllocation(
+18873 allocator->GetCurrentFrameIndex(),
+
+
+
-
-
-
-18881 VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
-
-18883 VMA_DEBUG_LOG("vmaDefragmentationBegin");
-
-18885 VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
-18887 VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
-
-18889 #if VMA_RECORDING_ENABLED
-18890 if(allocator->GetRecorder() != VMA_NULL)
-
-18892 allocator->GetRecorder()->RecordDefragmentationBegin(
-18893 allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
-
-
-
-
-
-
-
-
-
-
-18904 VMA_ASSERT(allocator);
-
-18906 VMA_DEBUG_LOG("vmaDefragmentationEnd");
-
-18908 if(context != VK_NULL_HANDLE)
-
-18910 VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
-18912 #if VMA_RECORDING_ENABLED
-18913 if(allocator->GetRecorder() != VMA_NULL)
-
-18915 allocator->GetRecorder()->RecordDefragmentationEnd(
-18916 allocator->GetCurrentFrameIndex(), context);
-
-
-
-18920 return allocator->DefragmentationEnd(context);
-
-
-
-
-
-
-
-
-
-
-
-
-
-18934 VMA_ASSERT(allocator);
-
-
-18937 VMA_DEBUG_LOG("vmaBeginDefragmentationPass");
-
-18939 VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
-18941 if(context == VK_NULL_HANDLE)
-
-
-
-
-
-18947 return allocator->DefragmentationPassBegin(pInfo, context);
-
-
-
-
-
-18953 VMA_ASSERT(allocator);
-
-18955 VMA_DEBUG_LOG("vmaEndDefragmentationPass");
-18956 VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
-18958 if(context == VK_NULL_HANDLE)
-
-
-18961 return allocator->DefragmentationPassEnd(context);
-
-
-
-
-
-
-
-18969 VMA_ASSERT(allocator && allocation && buffer);
-
-18971 VMA_DEBUG_LOG("vmaBindBufferMemory");
-
-18973 VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
-18975 return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL);
-
-
-
-
-
-18981 VkDeviceSize allocationLocalOffset,
-
-
-
-18985 VMA_ASSERT(allocator && allocation && buffer);
-
-18987 VMA_DEBUG_LOG("vmaBindBufferMemory2");
-
-18989 VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
-18991 return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext);
-
+18878 return allocator->TouchAllocation(allocation);
+
+
+
+
+
+
+
+18886 VMA_ASSERT(allocator && allocation);
+
+18888 VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+18890 allocation->SetUserData(allocator, pUserData);
+
+18892 #if VMA_RECORDING_ENABLED
+18893 if(allocator->GetRecorder() != VMA_NULL)
+
+18895 allocator->GetRecorder()->RecordSetAllocationUserData(
+18896 allocator->GetCurrentFrameIndex(),
+
+
+
+
+
+
+
+
+
+
+18907 VMA_ASSERT(allocator && pAllocation);
+
+18909 VMA_DEBUG_GLOBAL_MUTEX_LOCK;
+
+18911 allocator->CreateLostAllocation(pAllocation);
+
+18913 #if VMA_RECORDING_ENABLED
+18914 if(allocator->GetRecorder() != VMA_NULL)
+
+18916 allocator->GetRecorder()->RecordCreateLostAllocation(
+18917 allocator->GetCurrentFrameIndex(),
+
+
+
+
+
+
+
+
+
+
+18928 VMA_ASSERT(allocator && allocation && ppData);
+
+18930 VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+18932 VkResult res = allocator->Map(allocation, ppData);
+
+18934 #if VMA_RECORDING_ENABLED
+18935 if(allocator->GetRecorder() != VMA_NULL)
+
+18937 allocator->GetRecorder()->RecordMapMemory(
+18938 allocator->GetCurrentFrameIndex(),
+
+
+
+
+
+
+
+
+
+
+
+18950 VMA_ASSERT(allocator && allocation);
+
+18952 VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+18954 #if VMA_RECORDING_ENABLED
+18955 if(allocator->GetRecorder() != VMA_NULL)
+
+18957 allocator->GetRecorder()->RecordUnmapMemory(
+18958 allocator->GetCurrentFrameIndex(),
+
+
+
+
+18963 allocator->Unmap(allocation);
+
+
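A minimal sketch combining the map/unmap pair above with the flush that non-HOST_COHERENT memory requires (allocator, a mappable allocation, and srcData/srcSize are assumed placeholders):

void* mappedData = VMA_NULL;
if(vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
{
    memcpy(mappedData, srcData, srcSize);
    vmaUnmapMemory(allocator, allocation);
    // Only required when the memory type lacks HOST_COHERENT:
    vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
}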
+
+
+18968 VMA_ASSERT(allocator && allocation);
+
+18970 VMA_DEBUG_LOG("vmaFlushAllocation");
+
+18972 VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+18974 const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
+
+18976 #if VMA_RECORDING_ENABLED
+18977 if(allocator->GetRecorder() != VMA_NULL)
+
+18979 allocator->GetRecorder()->RecordFlushAllocation(
+18980 allocator->GetCurrentFrameIndex(),
+18981 allocation, offset, size);
+
+
+
+
+
+
+
+
+18990 VMA_ASSERT(allocator && allocation);
+
+18992 VMA_DEBUG_LOG("vmaInvalidateAllocation");
-
-
-
-
-
-18999 VMA_ASSERT(allocator && allocation && image);
-
-19001 VMA_DEBUG_LOG("vmaBindImageMemory");
-
-19003 VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
-19005 return allocator->BindImageMemory(allocation, 0, image, VMA_NULL);
-
-
-
-
-
-19011 VkDeviceSize allocationLocalOffset,
-
-
-
-19015 VMA_ASSERT(allocator && allocation && image);
-
-19017 VMA_DEBUG_LOG("vmaBindImageMemory2");
+18994 VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+18996 const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
+
+18998 #if VMA_RECORDING_ENABLED
+18999 if(allocator->GetRecorder() != VMA_NULL)
+
+19001 allocator->GetRecorder()->RecordInvalidateAllocation(
+19002 allocator->GetCurrentFrameIndex(),
+19003 allocation, offset, size);
+
+
+
+
+
+
+
+
+19012 uint32_t allocationCount,
+
+19014 const VkDeviceSize* offsets,
+19015 const VkDeviceSize* sizes)
+
+19017 VMA_ASSERT(allocator);
-19019 VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
-19021 return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext);
-
+19019 if(allocationCount == 0)
+
+
+
-
-
-19026 const VkBufferCreateInfo* pBufferCreateInfo,
-
-
-
-
-
-19032 VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
-
-19034 if(pBufferCreateInfo->size == 0)
-
-19036 return VK_ERROR_VALIDATION_FAILED_EXT;
-
-19038 if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&
-19039 !allocator->m_UseKhrBufferDeviceAddress)
-
-19041 VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.");
-19042 return VK_ERROR_VALIDATION_FAILED_EXT;
-
-
-19045 VMA_DEBUG_LOG("vmaCreateBuffer");
-
-19047 VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
-19049 *pBuffer = VK_NULL_HANDLE;
-19050 *pAllocation = VK_NULL_HANDLE;
-
-
-19053 VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
-19054 allocator->m_hDevice,
-
-19056 allocator->GetAllocationCallbacks(),
-
-
-
-
-19061 VkMemoryRequirements vkMemReq = {};
-19062 bool requiresDedicatedAllocation = false;
-19063 bool prefersDedicatedAllocation = false;
-19064 allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
-19065 requiresDedicatedAllocation, prefersDedicatedAllocation);
-
-
-19068 res = allocator->AllocateMemory(
-
-19070 requiresDedicatedAllocation,
-19071 prefersDedicatedAllocation,
-
-19073 pBufferCreateInfo->usage,
-
-19075 *pAllocationCreateInfo,
-19076 VMA_SUBALLOCATION_TYPE_BUFFER,
-
-
+19024 VMA_ASSERT(allocations);
+
+19026 VMA_DEBUG_LOG("vmaFlushAllocations");
+
+19028 VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+19030 const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_FLUSH);
+
+19032 #if VMA_RECORDING_ENABLED
+19033 if(allocator->GetRecorder() != VMA_NULL)
+
+
+
+
+
+
+
+
+
+
+19044 uint32_t allocationCount,
+
+19046 const VkDeviceSize* offsets,
+19047 const VkDeviceSize* sizes)
+
+19049 VMA_ASSERT(allocator);
+
+19051 if(allocationCount == 0)
+
+
+
+
+19056 VMA_ASSERT(allocations);
+
+19058 VMA_DEBUG_LOG("vmaInvalidateAllocations");
+
+19060 VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+19062 const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_INVALIDATE);
+
+19064 #if VMA_RECORDING_ENABLED
+19065 if(allocator->GetRecorder() != VMA_NULL)
+
+
+
+
+
+
+
+
+
+
+19076 VMA_ASSERT(allocator);
+
+19078 VMA_DEBUG_LOG("vmaCheckCorruption");
-19080 #if VMA_RECORDING_ENABLED
-19081 if(allocator->GetRecorder() != VMA_NULL)
-
-19083 allocator->GetRecorder()->RecordCreateBuffer(
-19084 allocator->GetCurrentFrameIndex(),
-19085 *pBufferCreateInfo,
-19086 *pAllocationCreateInfo,
-
-
-
-
-
-
-
-
-
-19096 res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
-
-
-
-
-19101 #if VMA_STATS_STRING_ENABLED
-19102 (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
-
-19104 if(pAllocationInfo != VMA_NULL)
-
-19106 allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
-
-
-
-
-19111 allocator->FreeMemory(
-
-
-19114 *pAllocation = VK_NULL_HANDLE;
-19115 (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
-19116 *pBuffer = VK_NULL_HANDLE;
-
-
-19119 (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
-19120 *pBuffer = VK_NULL_HANDLE;
-
-
-
-
-
-
-
-
-
-
-19131 VMA_ASSERT(allocator);
-
-19133 if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
-
-
-
-
-19138 VMA_DEBUG_LOG("vmaDestroyBuffer");
-
-19140 VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
-19142 #if VMA_RECORDING_ENABLED
-19143 if(allocator->GetRecorder() != VMA_NULL)
-
-19145 allocator->GetRecorder()->RecordDestroyBuffer(
-19146 allocator->GetCurrentFrameIndex(),
-
-
-
-
-19151 if(buffer != VK_NULL_HANDLE)
-
-19153 (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
-
+19080 VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+19082 return allocator->CheckCorruption(memoryTypeBits);
+
+
+
+
+
+19088 size_t allocationCount,
+19089 VkBool32* pAllocationsChanged,
+
+
+
+
+
+
+
+
+
+19099 if(pDefragmentationInfo != VMA_NULL)
+
+
+
+
+
+
+
+
+
+
+
+
+
+19113 if(res == VK_NOT_READY)
+
+
+
+
+
+
+
+
+
+
+
+
+19126 VMA_ASSERT(allocator && pInfo && pContext);
+
+
+
+
+
+
+
+
+
+
+19137 VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
+
+19139 VMA_DEBUG_LOG("vmaDefragmentationBegin");
+
+19141 VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+19143 VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
+
+19145 #if VMA_RECORDING_ENABLED
+19146 if(allocator->GetRecorder() != VMA_NULL)
+
+19148 allocator->GetRecorder()->RecordDefragmentationBegin(
+19149 allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
+
+
+
+
+
-19156 if(allocation != VK_NULL_HANDLE)
-
-19158 allocator->FreeMemory(
-
-
-
-
+
+
+
+
+19160 VMA_ASSERT(allocator);
+
+19162 VMA_DEBUG_LOG("vmaDefragmentationEnd");
-
-
-19166 const VkImageCreateInfo* pImageCreateInfo,
-
-
-
-
-
-19172 VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
-
-19174 if(pImageCreateInfo->extent.width == 0 ||
-19175 pImageCreateInfo->extent.height == 0 ||
-19176 pImageCreateInfo->extent.depth == 0 ||
-19177 pImageCreateInfo->mipLevels == 0 ||
-19178 pImageCreateInfo->arrayLayers == 0)
+19164 if(context != VK_NULL_HANDLE)
+
+19166 VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+19168 #if VMA_RECORDING_ENABLED
+19169 if(allocator->GetRecorder() != VMA_NULL)
+
+19171 allocator->GetRecorder()->RecordDefragmentationEnd(
+19172 allocator->GetCurrentFrameIndex(), context);
+
+
+
+19176 return allocator->DefragmentationEnd(context);
+
+
-19180 return VK_ERROR_VALIDATION_FAILED_EXT;
+
-
-19183 VMA_DEBUG_LOG("vmaCreateImage");
-
-19185 VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
-19187 *pImage = VK_NULL_HANDLE;
-19188 *pAllocation = VK_NULL_HANDLE;
-
-
-19191 VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
-19192 allocator->m_hDevice,
-
-19194 allocator->GetAllocationCallbacks(),
-
-
-
-19198 VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
-19199 VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
-19200 VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
-
-
-19203 VkMemoryRequirements vkMemReq = {};
-19204 bool requiresDedicatedAllocation = false;
-19205 bool prefersDedicatedAllocation = false;
-19206 allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
-19207 requiresDedicatedAllocation, prefersDedicatedAllocation);
-
-19209 res = allocator->AllocateMemory(
-
-19211 requiresDedicatedAllocation,
-19212 prefersDedicatedAllocation,
-
-
-
-19216 *pAllocationCreateInfo,
-
-
-
-
-19221 #if VMA_RECORDING_ENABLED
-19222 if(allocator->GetRecorder() != VMA_NULL)
-
-19224 allocator->GetRecorder()->RecordCreateImage(
-19225 allocator->GetCurrentFrameIndex(),
-
-19227 *pAllocationCreateInfo,
-
-
-
-
-
-
-
-
-
-19237 res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL);
-
-
-
-
-19242 #if VMA_STATS_STRING_ENABLED
-19243 (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
-
-19245 if(pAllocationInfo != VMA_NULL)
-
-19247 allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
-
+
+
+
+
+
+
+
+
+19190 VMA_ASSERT(allocator);
+
+
+19193 VMA_DEBUG_LOG("vmaBeginDefragmentationPass");
+
+19195 VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+19197 if(context == VK_NULL_HANDLE)
+
+
+
+
+
+19203 return allocator->DefragmentationPassBegin(pInfo, context);
+
+
+
+
+
+19209 VMA_ASSERT(allocator);
+
+19211 VMA_DEBUG_LOG("vmaEndDefragmentationPass");
+19212 VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+19214 if(context == VK_NULL_HANDLE)
+
+
+19217 return allocator->DefragmentationPassEnd(context);
+
+
+
+
+
+
+
+19225 VMA_ASSERT(allocator && allocation && buffer);
+
+19227 VMA_DEBUG_LOG("vmaBindBufferMemory");
+
+19229 VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+19231 return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL);
+
+
+
+
+
+19237 VkDeviceSize allocationLocalOffset,
+
+
+
+19241 VMA_ASSERT(allocator && allocation && buffer);
+
+19243 VMA_DEBUG_LOG("vmaBindBufferMemory2");
+
+19245 VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+19247 return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext);
+
-
-
-19252 allocator->FreeMemory(
-
-
-19255 *pAllocation = VK_NULL_HANDLE;
-19256 (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
-19257 *pImage = VK_NULL_HANDLE;
-
-
-19260 (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
-19261 *pImage = VK_NULL_HANDLE;
-
-
-
-
-
-
-
-
-
-
-19272 VMA_ASSERT(allocator);
-
-19274 if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
-
-
-
-
-19279 VMA_DEBUG_LOG("vmaDestroyImage");
-
-19281 VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
-19283 #if VMA_RECORDING_ENABLED
-19284 if(allocator->GetRecorder() != VMA_NULL)
-
-19286 allocator->GetRecorder()->RecordDestroyImage(
-19287 allocator->GetCurrentFrameIndex(),
-
-
-
-
-19292 if(image != VK_NULL_HANDLE)
-
-19294 (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
-
-19296 if(allocation != VK_NULL_HANDLE)
-
-19298 allocator->FreeMemory(
-
-
-
-
-
-
+
+
+
+
+
+19255 VMA_ASSERT(allocator && allocation && image);
+
+19257 VMA_DEBUG_LOG("vmaBindImageMemory");
+
+19259 VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+19261 return allocator->BindImageMemory(allocation, 0, image, VMA_NULL);
+
+
+
+
+
+19267 VkDeviceSize allocationLocalOffset,
+
+
+
+19271 VMA_ASSERT(allocator && allocation && image);
+
+19273 VMA_DEBUG_LOG("vmaBindImageMemory2");
+
+19275 VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+19277 return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext);
+
+
+
+
+19282 const VkBufferCreateInfo* pBufferCreateInfo,
+
+
+
+
+
+19288 VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
+
+19290 if(pBufferCreateInfo->size == 0)
+
+19292 return VK_ERROR_VALIDATION_FAILED_EXT;
+
+19294 if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&
+19295 !allocator->m_UseKhrBufferDeviceAddress)
+
+19297 VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.");
+19298 return VK_ERROR_VALIDATION_FAILED_EXT;
+
+
+19301 VMA_DEBUG_LOG("vmaCreateBuffer");
+
+19303 VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+19305 *pBuffer = VK_NULL_HANDLE;
+19306 *pAllocation = VK_NULL_HANDLE;
+
+
+19309 VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
+19310 allocator->m_hDevice,
+
+19312 allocator->GetAllocationCallbacks(),
+
+
+
+
+19317 VkMemoryRequirements vkMemReq = {};
+19318 bool requiresDedicatedAllocation = false;
+19319 bool prefersDedicatedAllocation = false;
+19320 allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
+19321 requiresDedicatedAllocation, prefersDedicatedAllocation);
+
+
+19324 res = allocator->AllocateMemory(
+
+19326 requiresDedicatedAllocation,
+19327 prefersDedicatedAllocation,
+
+19329 pBufferCreateInfo->usage,
+
+19331 *pAllocationCreateInfo,
+19332 VMA_SUBALLOCATION_TYPE_BUFFER,
+
+
+
+19336 #if VMA_RECORDING_ENABLED
+19337 if(allocator->GetRecorder() != VMA_NULL)
+
+19339 allocator->GetRecorder()->RecordCreateBuffer(
+19340 allocator->GetCurrentFrameIndex(),
+19341 *pBufferCreateInfo,
+19342 *pAllocationCreateInfo,
+
+
+
+
+
+
+
+
+
+19352 res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
+
+
+
+
+19357 #if VMA_STATS_STRING_ENABLED
+19358 (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
+
+19360 if(pAllocationInfo != VMA_NULL)
+
+19362 allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
+
+
+
+
+19367 allocator->FreeMemory(
+
+
+19370 *pAllocation = VK_NULL_HANDLE;
+19371 (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
+19372 *pBuffer = VK_NULL_HANDLE;
+
+
+19375 (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
+19376 *pBuffer = VK_NULL_HANDLE;
+
+
+
+
+
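The vmaCreateBuffer flow above (create buffer, allocate, bind, with rollback on failure) is typically driven like this; a sketch with illustrative size and usage flags:

VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
VkBuffer buffer = VK_NULL_HANDLE;
VmaAllocation allocation = VK_NULL_HANDLE;
VkResult res = vmaCreateBuffer(
    allocator, &bufCreateInfo, &allocCreateInfo, &buffer, &allocation, VMA_NULL);
// ...
vmaDestroyBuffer(allocator, buffer, allocation); // frees both the buffer and its memory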
+
+
+
+
+
+19387 VMA_ASSERT(allocator);
+
+19389 if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
+
+
+
+
+19394 VMA_DEBUG_LOG("vmaDestroyBuffer");
+
+19396 VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+19398 #if VMA_RECORDING_ENABLED
+19399 if(allocator->GetRecorder() != VMA_NULL)
+
+19401 allocator->GetRecorder()->RecordDestroyBuffer(
+19402 allocator->GetCurrentFrameIndex(),
+
+
+
+
+19407 if(buffer != VK_NULL_HANDLE)
+
+19409 (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
+
+
+19412 if(allocation != VK_NULL_HANDLE)
+
+19414 allocator->FreeMemory(
+
+
+
+
+
+
+
+19422 const VkImageCreateInfo* pImageCreateInfo,
+
+
+
+
+
+19428 VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
+
+19430 if(pImageCreateInfo->extent.width == 0 ||
+19431 pImageCreateInfo->extent.height == 0 ||
+19432 pImageCreateInfo->extent.depth == 0 ||
+19433 pImageCreateInfo->mipLevels == 0 ||
+19434 pImageCreateInfo->arrayLayers == 0)
+
+19436 return VK_ERROR_VALIDATION_FAILED_EXT;
+
+
+19439 VMA_DEBUG_LOG("vmaCreateImage");
+
+19441 VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+19443 *pImage = VK_NULL_HANDLE;
+19444 *pAllocation = VK_NULL_HANDLE;
+
+
+19447 VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
+19448 allocator->m_hDevice,
+
+19450 allocator->GetAllocationCallbacks(),
+
+
+
+19454 VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
+19455 VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
+19456 VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
+
+
+19459 VkMemoryRequirements vkMemReq = {};
+19460 bool requiresDedicatedAllocation = false;
+19461 bool prefersDedicatedAllocation = false;
+19462 allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
+19463 requiresDedicatedAllocation, prefersDedicatedAllocation);
+
+19465 res = allocator->AllocateMemory(
+
+19467 requiresDedicatedAllocation,
+19468 prefersDedicatedAllocation,
+
+
+
+19472 *pAllocationCreateInfo,
+
+
+
+
+19477 #if VMA_RECORDING_ENABLED
+19478 if(allocator->GetRecorder() != VMA_NULL)
+
+19480 allocator->GetRecorder()->RecordCreateImage(
+19481 allocator->GetCurrentFrameIndex(),
+
+19483 *pAllocationCreateInfo,
+
+
+
+
+
+
+
+
+
+19493 res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL);
+
+
+
+
+19498 #if VMA_STATS_STRING_ENABLED
+19499 (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
+
+19501 if(pAllocationInfo != VMA_NULL)
+
+19503 allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
+
+
+
+
+19508 allocator->FreeMemory(
+
+
+19511 *pAllocation = VK_NULL_HANDLE;
+19512 (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
+19513 *pImage = VK_NULL_HANDLE;
+
+
+19516 (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
+19517 *pImage = VK_NULL_HANDLE;
+
+
+
+
+
+
+
+
+
+
+19528 VMA_ASSERT(allocator);
+
+19530 if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
+
+
+
+
+19535 VMA_DEBUG_LOG("vmaDestroyImage");
+
+19537 VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+19539 #if VMA_RECORDING_ENABLED
+19540 if(allocator->GetRecorder() != VMA_NULL)
+
+19542 allocator->GetRecorder()->RecordDestroyImage(
+19543 allocator->GetCurrentFrameIndex(),
+
+
+
+
+19548 if(image != VK_NULL_HANDLE)
+
+19550 (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
+
+19552 if(allocation != VK_NULL_HANDLE)
+
+19554 allocator->FreeMemory(
+
+
+
+
+
+
Definition: vk_mem_alloc.h:2881
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:2907
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:2913
@@ -16102,33 +16364,33 @@ $(function() {
VkDeviceSize usage
Estimated current memory usage of the program, in bytes.
Definition: vk_mem_alloc.h:2643
VkDeviceSize budget
Estimated amount of memory available to the program, in bytes.
Definition: vk_mem_alloc.h:2654
Opaque object that represents a started defragmentation process.
-Parameters for defragmentation.
Definition: vk_mem_alloc.h:3642
-const VmaPool * pPools
Either null or pointer to array of pools to be defragmented.
Definition: vk_mem_alloc.h:3682
-uint32_t allocationCount
Number of allocations in pAllocations array.
Definition: vk_mem_alloc.h:3648
-uint32_t maxGpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on GPU side,...
Definition: vk_mem_alloc.h:3702
-VkDeviceSize maxGpuBytesToMove
Maximum total number of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:3697
-VmaDefragmentationFlags flags
Reserved for future use. Should be 0.
Definition: vk_mem_alloc.h:3645
-VkBool32 * pAllocationsChanged
Optional, output. Pointer to array that will be filled with information whether the allocation at cer...
Definition: vk_mem_alloc.h:3663
-uint32_t poolCount
Number of pools in pPools array.
Definition: vk_mem_alloc.h:3666
-VkCommandBuffer commandBuffer
Optional. Command buffer where GPU copy commands will be posted.
Definition: vk_mem_alloc.h:3711
-uint32_t maxCpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on CPU side,...
Definition: vk_mem_alloc.h:3692
-const VmaAllocation * pAllocations
Pointer to array of allocations that can be defragmented.
Definition: vk_mem_alloc.h:3657
-VkDeviceSize maxCpuBytesToMove
Maximum total number of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:3687
-Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:3733
-uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to a different place.
Definition: vk_mem_alloc.h:3743
-VkDeviceSize maxBytesToMove
Maximum total number of bytes that can be copied while moving allocations to different places.
Definition: vk_mem_alloc.h:3738
-Parameters for incremental defragmentation steps.
Definition: vk_mem_alloc.h:3724
-uint32_t moveCount
Definition: vk_mem_alloc.h:3725
-VmaDefragmentationPassMoveInfo * pMoves
Definition: vk_mem_alloc.h:3726
-Definition: vk_mem_alloc.h:3714
-VkDeviceMemory memory
Definition: vk_mem_alloc.h:3716
-VkDeviceSize offset
Definition: vk_mem_alloc.h:3717
-VmaAllocation allocation
Definition: vk_mem_alloc.h:3715
-Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:3747
-uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:3755
-VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places.
Definition: vk_mem_alloc.h:3749
-VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects.
Definition: vk_mem_alloc.h:3751
-uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:3753
+Parameters for defragmentation.
Definition: vk_mem_alloc.h:3630
+const VmaPool * pPools
Either null or pointer to array of pools to be defragmented.
Definition: vk_mem_alloc.h:3670
+uint32_t allocationCount
Number of allocations in pAllocations array.
Definition: vk_mem_alloc.h:3636
+uint32_t maxGpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on GPU side,...
Definition: vk_mem_alloc.h:3690
+VkDeviceSize maxGpuBytesToMove
Maximum total number of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:3685
+VmaDefragmentationFlags flags
Reserved for future use. Should be 0.
Definition: vk_mem_alloc.h:3633
+VkBool32 * pAllocationsChanged
Optional, output. Pointer to array that will be filled with information whether the allocation at cer...
Definition: vk_mem_alloc.h:3651
+uint32_t poolCount
Number of pools in pPools array.
Definition: vk_mem_alloc.h:3654
+VkCommandBuffer commandBuffer
Optional. Command buffer where GPU copy commands will be posted.
Definition: vk_mem_alloc.h:3699
+uint32_t maxCpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on CPU side,...
Definition: vk_mem_alloc.h:3680
+const VmaAllocation * pAllocations
Pointer to array of allocations that can be defragmented.
Definition: vk_mem_alloc.h:3645
+VkDeviceSize maxCpuBytesToMove
Maximum total number of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:3675
+Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:3721
+uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to a different place.
Definition: vk_mem_alloc.h:3731
+VkDeviceSize maxBytesToMove
Maximum total number of bytes that can be copied while moving allocations to different places.
Definition: vk_mem_alloc.h:3726
+Parameters for incremental defragmentation steps.
Definition: vk_mem_alloc.h:3712
+uint32_t moveCount
Definition: vk_mem_alloc.h:3713
+VmaDefragmentationPassMoveInfo * pMoves
Definition: vk_mem_alloc.h:3714
+Definition: vk_mem_alloc.h:3702
+VkDeviceMemory memory
Definition: vk_mem_alloc.h:3704
+VkDeviceSize offset
Definition: vk_mem_alloc.h:3705
+VmaAllocation allocation
Definition: vk_mem_alloc.h:3703
+Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:3735
+uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:3743
+VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places.
Definition: vk_mem_alloc.h:3737
+VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects.
Definition: vk_mem_alloc.h:3739
+uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:3741
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:2224
void * pUserData
Optional, can be null.
Definition: vk_mem_alloc.h:2230
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:2226
@@ -16190,7 +16452,6 @@ $(function() {
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
-VkResult vmaResizeAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize newSize)
Deprecated.
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size, void *pUserData)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:2210
struct VmaRecordSettings VmaRecordSettings
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
@@ -16227,9 +16488,9 @@ $(function() {
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
void vmaFreeMemory(VmaAllocator allocator, const VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(),...
-VmaDefragmentationFlagBits
Flags to be used in vmaDefragmentationBegin(). None at the moment. Reserved for future use.
Definition: vk_mem_alloc.h:3632
-@ VMA_DEFRAGMENTATION_FLAG_INCREMENTAL
Definition: vk_mem_alloc.h:3633
-@ VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:3634
+VmaDefragmentationFlagBits
Flags to be used in vmaDefragmentationBegin(). None at the moment. Reserved for future use.
Definition: vk_mem_alloc.h:3620
+@ VMA_DEFRAGMENTATION_FLAG_INCREMENTAL
Definition: vk_mem_alloc.h:3621
+@ VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:3622
VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
Binds buffer to allocation.
struct VmaDefragmentationPassInfo VmaDefragmentationPassInfo
Parameters for incremental defragmentation steps.
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
@@ -16240,7 +16501,7 @@ $(function() {
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation and atomically marks it as used in current fra...
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
VkResult vmaDefragmentationEnd(VmaAllocator allocator, VmaDefragmentationContext context)
Ends defragmentation process.
-VkFlags VmaDefragmentationFlags
Definition: vk_mem_alloc.h:3636
+VkFlags VmaDefragmentationFlags
Definition: vk_mem_alloc.h:3624
VkResult vmaBindBufferMemory2(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize allocationLocalOffset, VkBuffer buffer, const void *pNext)
Binds buffer to allocation with additional parameters.
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:2989
@ VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT
Enables alternative, linear allocation algorithm in this pool.
Definition: vk_mem_alloc.h:3024