#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
#define AMD_VULKAN_MEMORY_ALLOCATOR_H

#ifndef VMA_RECORDING_ENABLED
    #ifdef _WIN32
        #define VMA_RECORDING_ENABLED 1
    #else
        #define VMA_RECORDING_ENABLED 0
    #endif
#endif

#ifndef NOMINMAX
    #define NOMINMAX // For windows.h
#endif

#include <vulkan/vulkan.h>

#if VMA_RECORDING_ENABLED
    #include <windows.h>
#endif

#if !defined(VMA_DEDICATED_ALLOCATION)
    #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
        #define VMA_DEDICATED_ALLOCATION 1
    #else
        #define VMA_DEDICATED_ALLOCATION 0
    #endif
#endif

/// Callback function called after successful vkAllocateMemory.
typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);
/// Callback function called before vkFreeMemory.
typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);

/** \brief Pointers to some Vulkan functions - a subset used by the library. */
typedef struct VmaVulkanFunctions {
    // ... (core function pointers elided in this excerpt)
#if VMA_DEDICATED_ALLOCATION
    PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
#endif
} VmaVulkanFunctions;
void vmaGetPhysicalDeviceProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);

void vmaGetMemoryProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);

/** \brief Given memory type index, returns its flags from VkPhysicalDeviceMemoryProperties. */
void vmaGetMemoryTypeProperties(
    VmaAllocator allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* pFlags);

/** \brief Sets index of the current frame. */
void vmaSetCurrentFrameIndex(
    VmaAllocator allocator,
    uint32_t frameIndex);

#ifndef VMA_STATS_STRING_ENABLED
#define VMA_STATS_STRING_ENABLED 1
#endif

#if VMA_STATS_STRING_ENABLED

/// Builds and returns statistics as string in JSON format.
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap);

void vmaFreeStatsString(
    VmaAllocator allocator,
    char* pStatsString);

#endif // #if VMA_STATS_STRING_ENABLED

VkResult vmaFindMemoryTypeIndex(
    VmaAllocator allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

VkResult vmaFindMemoryTypeIndexForBufferInfo(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

VkResult vmaFindMemoryTypeIndexForImageInfo(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

void vmaMakePoolAllocationsLost(
    VmaAllocator allocator,
    VmaPool pool,
    size_t* pLostAllocationCount);

VkResult vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VkResult vmaAllocateMemoryPages(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    size_t allocationCount,
    VmaAllocation* pAllocations,
    VmaAllocationInfo* pAllocationInfo);

void vmaFreeMemoryPages(
    VmaAllocator allocator,
    size_t allocationCount,
    VmaAllocation* pAllocations);

/** \brief Tries to resize an allocation in place, if there is enough free memory after it. */
VkResult vmaResizeAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkDeviceSize newSize);

VkResult vmaDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo* pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats);

VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);
#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H

// For Visual Studio IntelliSense.
#if defined(__cplusplus) && defined(__INTELLISENSE__)
#define VMA_IMPLEMENTATION
#endif

#ifdef VMA_IMPLEMENTATION
#undef VMA_IMPLEMENTATION

#include <cstdint>
#include <cstdlib>
#include <cstring>

#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
#define VMA_STATIC_VULKAN_FUNCTIONS 1
#endif

#if VMA_USE_STL_CONTAINERS
   #define VMA_USE_STL_VECTOR 1
   #define VMA_USE_STL_UNORDERED_MAP 1
   #define VMA_USE_STL_LIST 1
#endif

#ifndef VMA_USE_STL_SHARED_MUTEX
    // Compiler conforms to C++17.
    #if __cplusplus >= 201703L
        #define VMA_USE_STL_SHARED_MUTEX 1
    // Visual Studio 2015 Update 2 and later, even without /std:c++17, reports C++98
    // via __cplusplus but provides std::shared_mutex when _MSVC_LANG says C++17.
    #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
        #define VMA_USE_STL_SHARED_MUTEX 1
    #else
        #define VMA_USE_STL_SHARED_MUTEX 0
    #endif
#endif

#if VMA_USE_STL_VECTOR
   #include <vector>
#endif

#if VMA_USE_STL_UNORDERED_MAP
   #include <unordered_map>
#endif

#if VMA_USE_STL_LIST
   #include <list>
#endif

#include <algorithm>

#ifndef VMA_NULL
   // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
   #define VMA_NULL   nullptr
#endif

#if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
#include <cstdlib> // for memalign()
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    return memalign(alignment, size);
}
#elif defined(__APPLE__) || defined(__ANDROID__)
#include <cstdlib> // for posix_memalign()
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    void *pointer;
    if(posix_memalign(&pointer, alignment, size) == 0)
        return pointer;
    return VMA_NULL;
}
#endif
// Normal assert to check for programmer's errors, especially in Debug configuration.
#ifndef VMA_ASSERT
   #ifdef _DEBUG
       #define VMA_ASSERT(expr)         assert(expr)
   #else
       #define VMA_ASSERT(expr)
   #endif
#endif

// Assert that will be called very often, like inside data structures e.g. operator[].
// Making it non-empty can make program slow.
#ifndef VMA_HEAVY_ASSERT
   #ifdef _DEBUG
       #define VMA_HEAVY_ASSERT(expr)   //VMA_ASSERT(expr)
   #else
       #define VMA_HEAVY_ASSERT(expr)
   #endif
#endif

#ifndef VMA_ALIGN_OF
   #define VMA_ALIGN_OF(type)       (__alignof(type))
#endif

#ifndef VMA_SYSTEM_ALIGNED_MALLOC
   #if defined(_WIN32)
       #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment)   (_aligned_malloc((size), (alignment)))
   #else
       #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment)   (aligned_alloc((alignment), (size)))
   #endif
#endif

#ifndef VMA_SYSTEM_FREE
   #if defined(_WIN32)
       #define VMA_SYSTEM_FREE(ptr)   _aligned_free(ptr)
   #else
       #define VMA_SYSTEM_FREE(ptr)   free(ptr)
   #endif
#endif

#ifndef VMA_MIN
   #define VMA_MIN(v1, v2)    (std::min((v1), (v2)))
#endif

#ifndef VMA_MAX
   #define VMA_MAX(v1, v2)    (std::max((v1), (v2)))
#endif

#ifndef VMA_SWAP
   #define VMA_SWAP(v1, v2)   std::swap((v1), (v2))
#endif

#ifndef VMA_SORT
   #define VMA_SORT(beg, end, cmp)  std::sort(beg, end, cmp)
#endif

#ifndef VMA_DEBUG_LOG
   #define VMA_DEBUG_LOG(format, ...)
#endif

// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
#if VMA_STATS_STRING_ENABLED
    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    {
        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    }
    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    {
        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    }
    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
#endif // #if VMA_STATS_STRING_ENABLED
#ifndef VMA_MUTEX
    class VmaMutex
    {
    public:
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
    private:
        std::mutex m_Mutex;
    };
    #define VMA_MUTEX VmaMutex
#endif

// Read-write mutex, where "read" is shared access, "write" is exclusive access.
#ifndef VMA_RW_MUTEX
    #if VMA_USE_STL_SHARED_MUTEX
        // Use std::shared_mutex from C++17.
        #include <shared_mutex>
        class VmaRWMutex
        {
        public:
            void LockRead() { m_Mutex.lock_shared(); }
            void UnlockRead() { m_Mutex.unlock_shared(); }
            void LockWrite() { m_Mutex.lock(); }
            void UnlockWrite() { m_Mutex.unlock(); }
        private:
            std::shared_mutex m_Mutex;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #elif defined(_WIN32)
        // Use SRWLOCK from WinAPI.
        class VmaRWMutex
        {
        public:
            VmaRWMutex() { InitializeSRWLock(&m_Lock); }
            void LockRead() { AcquireSRWLockShared(&m_Lock); }
            void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
            void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
            void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
        private:
            SRWLOCK m_Lock;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #else
        // Less efficient fallback: Use normal mutex for both shared and exclusive access.
        class VmaRWMutex
        {
        public:
            void LockRead() { m_Mutex.Lock(); }
            void UnlockRead() { m_Mutex.Unlock(); }
            void LockWrite() { m_Mutex.Lock(); }
            void UnlockWrite() { m_Mutex.Unlock(); }
        private:
            VMA_MUTEX m_Mutex;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #endif // #if VMA_USE_STL_SHARED_MUTEX
#endif // #ifndef VMA_RW_MUTEX

#ifndef VMA_ATOMIC_UINT32
   #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
#endif

#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    /// Every allocation will have its own memory block.
    #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
#endif

#ifndef VMA_DEBUG_ALIGNMENT
    /// Minimum alignment of all allocations, in bytes.
    #define VMA_DEBUG_ALIGNMENT (1)
#endif

#ifndef VMA_DEBUG_MARGIN
    /// Minimum margin before and after every allocation, in bytes.
    #define VMA_DEBUG_MARGIN (0)
#endif

#ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    /// Define this macro to 1 to automatically fill new allocations and destroyed allocations with some bit pattern.
    #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
#endif

#ifndef VMA_DEBUG_DETECT_CORRUPTION
    /// Define this macro to 1 together with non-zero VMA_DEBUG_MARGIN to enable writing
    /// and validating a magic value in the margins, so out-of-bounds writes are detected.
    #define VMA_DEBUG_DETECT_CORRUPTION (0)
#endif

#ifndef VMA_DEBUG_GLOBAL_MUTEX
    /// Set this to 1 for debugging purposes only, to enable single mutex protecting all entry calls to the library.
    #define VMA_DEBUG_GLOBAL_MUTEX (0)
#endif

#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    /// Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity. Set to more than 1 for debugging purposes only. Must be power of two.
    #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
#endif

#ifndef VMA_SMALL_HEAP_MAX_SIZE
   /// Maximum size of a memory heap in Vulkan to consider it "small".
   #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
   /// Default size of a block allocated as single VkDeviceMemory from a "large" heap.
   #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
#endif

#ifndef VMA_CLASS_NO_COPY
    #define VMA_CLASS_NO_COPY(className) \
        private: \
            className(const className&) = delete; \
            className& operator=(const className&) = delete;
#endif

static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
// Decimal 2139416166, float NaN.
static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;

static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED   = 0xDC;
static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;

static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;

static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
// Returns number of bits set to 1 in (v).
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    uint32_t c = v - ((v >> 1) & 0x55555555);
    c = ((c >>  2) & 0x33333333) + (c & 0x33333333);
    c = ((c >>  4) + c) & 0x0F0F0F0F;
    c = ((c >>  8) + c) & 0x00FF00FF;
    c = ((c >> 16) + c) & 0x0000FFFF;
    return c;
}
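// Illustrative sketch (not part of the original vk_mem_alloc.h; the function
// name VmaCountBitsSetExample is hypothetical). It shows what the SWAR popcount
// above computes: each line folds neighboring bit-fields together - 1-bit counts
// into 2-bit sums, then 4-, 8-, 16- and 32-bit sums - so the result is the total
// number of set bits.
static inline void VmaCountBitsSetExample()
{
    VMA_ASSERT(VmaCountBitsSet(0u) == 0);
    VMA_ASSERT(VmaCountBitsSet(13u) == 3);          // 0b1101 has three bits set.
    VMA_ASSERT(VmaCountBitsSet(0x80000000u) == 1);
    VMA_ASSERT(VmaCountBitsSet(0xFFFFFFFFu) == 32);
}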
// Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    return (val + align - 1) / align * align;
}
// Aligns given value down to nearest multiply of align value. For example: VmaAlignDown(11, 8) = 8.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignDown(T val, T align)
{
    return val / align * align;
}

// Division with mathematical rounding to nearest number.
template <typename T>
static inline T VmaRoundDiv(T x, T y)
{
    return (x + (y / (T)2)) / y;
}

/*
Returns true if given number is a power of two.
T must be unsigned integer number or signed integer but always nonnegative.
For 0 returns true.
*/
template <typename T>
inline bool VmaIsPow2(T x)
{
    return (x & (x-1)) == 0;
}

// Returns smallest power of 2 greater or equal to v.
static inline uint32_t VmaNextPow2(uint32_t v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v++;
    return v;
}
static inline uint64_t VmaNextPow2(uint64_t v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v++;
    return v;
}

// Returns largest power of 2 less or equal to v.
static inline uint32_t VmaPrevPow2(uint32_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v = v ^ (v >> 1);
    return v;
}
static inline uint64_t VmaPrevPow2(uint64_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v = v ^ (v >> 1);
    return v;
}
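// Illustrative sketch (not part of the original vk_mem_alloc.h; the function
// name VmaAlignHelpersExample is hypothetical). It demonstrates the alignment
// and power-of-2 helpers above; in typical VMA usage 'align' is a power of two,
// although the arithmetic works for any positive divisor.
static inline void VmaAlignHelpersExample()
{
    VMA_ASSERT(VmaAlignUp<uint32_t>(11, 8) == 16);
    VMA_ASSERT(VmaAlignDown<uint32_t>(11, 8) == 8);
    VMA_ASSERT(VmaRoundDiv<uint32_t>(7, 2) == 4);    // rounds to nearest
    VMA_ASSERT(VmaIsPow2<uint32_t>(64) && !VmaIsPow2<uint32_t>(48));
    VMA_ASSERT(VmaNextPow2(uint32_t(33)) == 64);
    VMA_ASSERT(VmaPrevPow2(uint32_t(33)) == 32);
}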
static inline bool VmaStrIsEmpty(const char* pStr)
{
    return pStr == VMA_NULL || *pStr == '\0';
}

#if VMA_STATS_STRING_ENABLED

static const char* VmaAlgorithmToStr(uint32_t algorithm)
{
    switch(algorithm)
    {
    case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
        return "Linear";
    case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
        return "Buddy";
    case 0:
        return "Default";
    default:
        VMA_ASSERT(0);
        return "";
    }
}

#endif // #if VMA_STATS_STRING_ENABLED
#ifndef VMA_SORT

template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue;
    Iterator insertIndex = beg;
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}

template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}

#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)

#endif // #ifndef VMA_SORT

/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be in less memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    return resourceAEndPage == resourceBStartPage;
}
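// Illustrative sketch (not part of the original vk_mem_alloc.h; the function
// name VmaBlocksOnSamePageExample is hypothetical). With a 4096-byte page, a
// resource ending at byte 4095 and one starting at byte 4096 sit on different
// pages, so no granularity conflict is possible between them; two resources
// that both touch the same 4 KiB page can conflict.
static inline void VmaBlocksOnSamePageExample()
{
    const VkDeviceSize pageSize = 4096;
    // A occupies [0, 4095], B starts a new page at 4096 - different pages.
    VMA_ASSERT(!VmaBlocksOnSamePage(0, 4096, 4096, pageSize));
    // A ends at byte 4000 and B starts at 4095 - both within page 0.
    VMA_ASSERT( VmaBlocksOnSamePage(0, 4001, 4095, pageSize));
}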
enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};
/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and another one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}
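// Illustrative sketch (not part of the original vk_mem_alloc.h; the function
// name VmaGranularityConflictExample is hypothetical). A buffer placed next to
// an optimally-tiled image must be separated by bufferImageGranularity, while
// two buffers, or a buffer and a linear image, never conflict.
static inline void VmaGranularityConflictExample()
{
    VMA_ASSERT( VmaIsBufferImageGranularityConflict(
        VMA_SUBALLOCATION_TYPE_BUFFER, VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL));
    VMA_ASSERT(!VmaIsBufferImageGranularityConflict(
        VMA_SUBALLOCATION_TYPE_BUFFER, VMA_SUBALLOCATION_TYPE_BUFFER));
    VMA_ASSERT(!VmaIsBufferImageGranularityConflict(
        VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR, VMA_SUBALLOCATION_TYPE_BUFFER));
}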
static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
{
    uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pDst)
    {
        *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    }
}

static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
{
    const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    {
        if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
        {
            return false;
        }
    }
    return true;
}
// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->Lock(); } }
    ~VmaMutexLock()
    { if(m_pMutex) { m_pMutex->Unlock(); } }
private:
    VMA_MUTEX* m_pMutex;
};

// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
struct VmaMutexLockRead
{
    VMA_CLASS_NO_COPY(VmaMutexLockRead)
public:
    VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->LockRead(); } }
    ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
private:
    VMA_RW_MUTEX* m_pMutex;
};

// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
struct VmaMutexLockWrite
{
    VMA_CLASS_NO_COPY(VmaMutexLockWrite)
public:
    VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->LockWrite(); } }
    ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
private:
    VMA_RW_MUTEX* m_pMutex;
};
#if VMA_DEBUG_GLOBAL_MUTEX
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif

// Minimum size of a free suballocation to register it in the free suballocation collection.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
/*
Performs binary search and returns iterator to first element that is greater or
equal to (key), according to comparison (cmp).

Cmp should return true if first argument is less than second argument.

Returned value is the found element, if present in the collection or place where
new element with value (key) should be inserted.
*/
template <typename CmpLess, typename IterT, typename KeyT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
{
    size_t down = 0, up = (end - beg);
    while(down < up)
    {
        const size_t mid = (down + up) / 2;
        if(cmp(*(beg+mid), key))
        {
            down = mid + 1;
        }
        else
        {
            up = mid;
        }
    }
    return beg + down;
}
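// Illustrative sketch (not part of the original vk_mem_alloc.h; the names
// VmaExampleLess and VmaBinaryFindExample are hypothetical). It shows the
// lower-bound semantics of the search above: the returned iterator points at
// the first element not less than the key, or at 'end' if all elements are smaller.
struct VmaExampleLess { bool operator()(int a, int b) const { return a < b; } };
static inline void VmaBinaryFindExample()
{
    const int sorted[] = { 2, 4, 4, 8 };
    const int* const beg = sorted;
    const int* const end = sorted + 4;
    VMA_ASSERT(VmaBinaryFindFirstNotLess(beg, end, 4, VmaExampleLess()) == beg + 1);
    VMA_ASSERT(VmaBinaryFindFirstNotLess(beg, end, 5, VmaExampleLess()) == beg + 3);
    VMA_ASSERT(VmaBinaryFindFirstNotLess(beg, end, 9, VmaExampleLess()) == end);
}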
/*
Returns true if all pointers in the array are not-null and unique.
Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
T must be pointer type, e.g. VmaAllocation, VmaPool.
*/
template<typename T>
static bool VmaValidatePointerArray(uint32_t count, const T* arr)
{
    for(uint32_t i = 0; i < count; ++i)
    {
        const T iPtr = arr[i];
        if(iPtr == VMA_NULL)
        {
            return false;
        }
        for(uint32_t j = i + 1; j < count; ++j)
        {
            if(iPtr == arr[j])
            {
                return false;
            }
        }
    }
    return true;
}
static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        return (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
}

static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_FREE(ptr);
    }
}
template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}

template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}

#define vma_new(allocator, type)   new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count)   new(VmaAllocateArray<type>((allocator), (count)))(type)

template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}

template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}
// STL-compatible allocator.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};
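// Illustrative sketch (not part of the original vk_mem_alloc.h; the function
// name VmaStlAllocatorExample is hypothetical). It shows how allocations made
// through VmaStlAllocator are routed to the user-provided VkAllocationCallbacks
// when present, and to the system aligned allocator otherwise.
static inline void VmaStlAllocatorExample(const VkAllocationCallbacks* pCallbacks)
{
    VmaStlAllocator<uint32_t> alloc(pCallbacks);
    uint32_t* p = alloc.allocate(16);   // goes through pfnAllocation if provided
    p[0] = 42u;
    alloc.deallocate(p, 16);            // goes through pfnFree if provided
}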
#if VMA_USE_STL_VECTOR

#define VmaVector std::vector

template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}
#else // #if VMA_USE_STL_VECTOR

/* Class with interface compatible with subset of std::vector.
T must be POD because constructors and destructors are not called and memcpy is
used for these objects. */
template<typename T, typename AllocatorT>
class VmaVector
{
public:
    typedef T value_type;

    VmaVector(const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(VMA_NULL),
        m_Count(0),
        m_Capacity(0)
    {
    }

    VmaVector(size_t count, const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
        m_Count(count),
        m_Capacity(count)
    {
    }

    VmaVector(const VmaVector<T, AllocatorT>& src) :
        m_Allocator(src.m_Allocator),
        m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
        m_Count(src.m_Count),
        m_Capacity(src.m_Count)
    {
        if(m_Count != 0)
        {
            memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
        }
    }

    ~VmaVector()
    {
        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    }

    VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    {
        if(&rhs != this)
        {
            resize(rhs.m_Count);
            if(m_Count != 0)
            {
                memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
            }
        }
        return *this;
    }

    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    T* data() { return m_pArray; }
    const T* data() const { return m_pArray; }

    T& operator[](size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }
    const T& operator[](size_t index) const
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }

    T& front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    const T& front() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    T& back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }
    const T& back() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }

    void reserve(size_t newCapacity, bool freeMemory = false)
    {
        newCapacity = VMA_MAX(newCapacity, m_Count);

        if((newCapacity < m_Capacity) && !freeMemory)
        {
            newCapacity = m_Capacity;
        }

        if(newCapacity != m_Capacity)
        {
            // Note: VmaAllocateArray takes the raw callbacks, not the allocator object.
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            if(m_Count != 0)
            {
                memcpy(newArray, m_pArray, m_Count * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }
    }

    void resize(size_t newCount, bool freeMemory = false)
    {
        size_t newCapacity = m_Capacity;
        if(newCount > m_Capacity)
        {
            // Grow by 1.5x, with a minimum capacity of 8 elements.
            newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
        }
        else if(freeMemory)
        {
            newCapacity = newCount;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
            if(elementsToCopy != 0)
            {
                memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }

        m_Count = newCount;
    }

    void clear(bool freeMemory = false)
    {
        resize(0, freeMemory);
    }

    void insert(size_t index, const T& src)
    {
        VMA_HEAVY_ASSERT(index <= m_Count);
        const size_t oldCount = size();
        resize(oldCount + 1);
        if(index < oldCount)
        {
            memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
        }
        m_pArray[index] = src;
    }

    void remove(size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        const size_t oldCount = size();
        if(index < oldCount - 1)
        {
            memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
        }
        resize(oldCount - 1);
    }

    void push_back(const T& src)
    {
        const size_t newIndex = size();
        resize(newIndex + 1);
        m_pArray[newIndex] = src;
    }

    void pop_back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        resize(size() - 1);
    }

    void push_front(const T& src)
    {
        insert(0, src);
    }

    void pop_front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        remove(0);
    }

    typedef T* iterator;

    iterator begin() { return m_pArray; }
    iterator end() { return m_pArray + m_Count; }

private:
    AllocatorT m_Allocator;
    T* m_pArray;
    size_t m_Count;
    size_t m_Capacity;
};
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
#endif // #if VMA_USE_STL_VECTOR

template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        CmpLess()) - vector.data();
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}

template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    {
        size_t indexToRemove = it - vector.begin();
        VmaVectorRemove(vector, indexToRemove);
        return true;
    }
    return false;
}

template<typename CmpLess, typename IterT, typename KeyT>
IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
{
    CmpLess comparator;
    IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
        beg, end, value, comparator);
    if(it == end ||
        (!comparator(*it, value) && !comparator(value, *it)))
    {
        return it;
    }
    return end;
}
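// Illustrative sketch (not part of the original vk_mem_alloc.h; the names
// VmaExampleIntLess and VmaSortedVectorExample are hypothetical). The helpers
// above keep a vector ordered under insertion and support O(log n) lookup and
// removal; VMA uses this pattern e.g. for free suballocations sorted by size.
struct VmaExampleIntLess { bool operator()(int a, int b) const { return a < b; } };
static inline void VmaSortedVectorExample(const VkAllocationCallbacks* pCallbacks)
{
    VmaVector< int, VmaStlAllocator<int> > v((VmaStlAllocator<int>(pCallbacks)));
    VmaVectorInsertSorted<VmaExampleIntLess>(v, 8);
    VmaVectorInsertSorted<VmaExampleIntLess>(v, 2);
    VmaVectorInsertSorted<VmaExampleIntLess>(v, 5);   // v is now {2, 5, 8}
    VMA_ASSERT(VmaVectorFindSorted<VmaExampleIntLess>(v.begin(), v.end(), 5) == v.begin() + 1);
    VMA_ASSERT(VmaVectorRemoveSorted<VmaExampleIntLess>(v, 2) && v.size() == 2);
}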
////////////////////////////////////////////////////////////////////////////////
// class VmaPoolAllocator

/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    void Clear();
    T* Alloc();
    void Free(T* ptr);

private:
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};

template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}

template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}

template<typename T>
void VmaPoolAllocator<T>::Clear()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    m_ItemBlocks.clear();
}

template<typename T>
T* VmaPoolAllocator<T>::Alloc()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            block.FirstFreeIndex = pItem->NextFreeIndex;
            return &pItem->Value;
        }
    }

    // No block has free item: Create new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    return &pItem->Value;
}

template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}

template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}
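// Illustrative sketch (not part of the original vk_mem_alloc.h; the names
// VmaExamplePayload and VmaPoolAllocatorExample are hypothetical). A freed
// item's slot is threaded onto the block's free list via FirstFreeIndex and
// reused by the next Alloc() without touching the system heap. Note Alloc()
// does not run constructors - T must be POD-like.
struct VmaExamplePayload { uint32_t a; float b; };
static inline void VmaPoolAllocatorExample(const VkAllocationCallbacks* pCallbacks)
{
    VmaPoolAllocator<VmaExamplePayload> pool(pCallbacks, 128);
    VmaExamplePayload* item = pool.Alloc();   // first call creates a 128-item block
    item->a = 1; item->b = 2.0f;
    pool.Free(item);                          // slot returns to the block's free list
    VmaExamplePayload* again = pool.Alloc();  // typically reuses the same slot
    pool.Free(again);
}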
////////////////////////////////////////////////////////////////////////////////
// class VmaRawList, VmaList

#if VMA_USE_STL_LIST

#define VmaList std::list

#else // #if VMA_USE_STL_LIST

template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};

// Doubly linked list.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;
};
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}

template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
}

template<typename T>
void VmaRawList<T>::Clear()
{
    if(IsEmpty() == false)
    {
        ItemType* pItem = m_pBack;
        while(pItem != VMA_NULL)
        {
            ItemType* const pPrevItem = pItem->pPrev;
            m_ItemAllocator.Free(pItem);
            pItem = pPrevItem;
        }
        m_pFront = VMA_NULL;
        m_pBack = VMA_NULL;
        m_Count = 0;
    }
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pNext = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pPrev = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pPrev = m_pBack;
        m_pBack->pNext = pNewItem;
        m_pBack = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pPrev = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pNext = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pNext = m_pFront;
        m_pFront->pPrev = pNewItem;
        m_pFront = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
void VmaRawList<T>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pBackItem = m_pBack;
    ItemType* const pPrevItem = pBackItem->pPrev;
    if(pPrevItem != VMA_NULL)
    {
        pPrevItem->pNext = VMA_NULL;
    }
    m_pBack = pPrevItem;
    m_ItemAllocator.Free(pBackItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pFrontItem = m_pFront;
    ItemType* const pNextItem = pFrontItem->pNext;
    if(pNextItem != VMA_NULL)
    {
        pNextItem->pPrev = VMA_NULL;
    }
    m_pFront = pNextItem;
    m_ItemAllocator.Free(pFrontItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}
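// Illustrative sketch (not part of the original vk_mem_alloc.h; the function
// name VmaRawListExample is hypothetical). VmaRawList is a doubly linked list
// whose nodes come from VmaPoolAllocator, so push/pop never hit the system heap
// once a node block exists.
static inline void VmaRawListExample(const VkAllocationCallbacks* pCallbacks)
{
    VmaRawList<uint32_t> list(pCallbacks);
    list.PushBack(1u);
    VmaListItem<uint32_t>* const pTwo = list.PushBack(2u);
    list.InsertBefore(pTwo, 99u);             // list is now 1, 99, 2
    VMA_ASSERT(list.GetCount() == 3);
    list.Remove(pTwo);                        // list is now 1, 99
    VMA_ASSERT(list.Back()->Value == 99u);
    list.Clear();
}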
template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};
#endif // #if VMA_USE_STL_LIST

////////////////////////////////////////////////////////////////////////////////
// class VmaPair, VmaMap

#if VMA_USE_STL_UNORDERED_MAP

#define VmaPair std::pair

#define VMA_MAP_TYPE(KeyT, ValueT) \
    std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >

#else // #if VMA_USE_STL_UNORDERED_MAP

template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};

/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};

#define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>

template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}

template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}
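// Illustrative sketch (not part of the original vk_mem_alloc.h; the function
// name VmaMapExample is hypothetical). VmaMap keeps its pairs in a VmaVector
// sorted by key, so find() is a binary search and insert() is O(n) due to
// element shifting - a reasonable trade-off for the small maps VMA needs when
// STL containers are disabled.
static inline void VmaMapExample(const VkAllocationCallbacks* pCallbacks)
{
    VmaMap<uint32_t, float> map((VmaStlAllocator<VmaPair<uint32_t, float> >(pCallbacks)));
    map.insert(VmaPair<uint32_t, float>(2u, 0.5f));
    map.insert(VmaPair<uint32_t, float>(1u, 0.25f));
    VmaMap<uint32_t, float>::iterator it = map.find(2u);
    VMA_ASSERT(it != map.end() && it->second == 0.5f);
    map.erase(it);
    VMA_ASSERT(map.find(2u) == map.end());
}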
#endif // #if VMA_USE_STL_UNORDERED_MAP

////////////////////////////////////////////////////////////////////////////////

class VmaDeviceMemoryBlock;

enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };

struct VmaAllocation_T
{
    VMA_CLASS_NO_COPY(VmaAllocation_T)
private:
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    enum FLAGS
    {
        FLAG_USER_DATA_STRING = 0x01,
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,
        ALLOCATION_TYPE_DEDICATED,
    };

    VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_LastUseFrameIndex(currentFrameIndex),
        m_Type((uint8_t)ALLOCATION_TYPE_NONE),
        m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_MapCount(0),
        m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    {
#if VMA_STATS_STRING_ENABLED
        m_CreationFrameIndex = currentFrameIndex;
        m_BufferImageUsage = 0;
#endif
    }

    ~VmaAllocation_T()
    {
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }

    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    void ChangeBlockAllocation(
        VmaAllocator hAllocator,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset);

    void ChangeSize(VkDeviceSize newSize);
    void ChangeOffset(VkDeviceSize newOffset);

    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    If LastUseFrameIndex + frameInUseCount < currentFrameIndex, makes the
    allocation lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and
    returns true. Otherwise returns false.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }

    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

#if VMA_STATS_STRING_ENABLED
    uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }

    void InitBufferImageUsage(uint32_t bufferImageUsage)
    {
        VMA_ASSERT(m_BufferImageUsage == 0);
        m_BufferImageUsage = bufferImageUsage;
    }

    void PrintParameters(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

#if VMA_STATS_STRING_ENABLED
    uint32_t m_CreationFrameIndex;
    uint32_t m_BufferImageUsage; // 0 if unknown.
#endif

    void FreeUserDataString(VmaAllocator hAllocator);
};
/*
Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
allocated memory block or free.
*/
struct VmaSuballocation
{
    VkDeviceSize offset;
    VkDeviceSize size;
    VmaAllocation hAllocation;
    VmaSuballocationType type;
};

// Comparators for VmaSuballocation, used in sorting and binary search.
struct VmaSuballocationOffsetLess
{
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset < rhs.offset;
    }
};
struct VmaSuballocationOffsetGreater
{
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset > rhs.offset;
    }
};

typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;

// Cost of one additional allocation lost, as equivalent in bytes.
static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;

/*
Parameters of planned allocation inside a VmaDeviceMemoryBlock.
*/
struct VmaAllocationRequest
{
    VkDeviceSize offset;
    VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    VmaSuballocationList::iterator item;
    size_t itemsToMakeLostCount;
    void* customData;

    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
/*
Data structure used for bookkeeping of allocations and unused ranges of memory
in a single VkDeviceMemory block.
*/
class VmaBlockMetadata
{
public:
    VmaBlockMetadata(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata() { }
    virtual void Init(VkDeviceSize size) { m_Size = size; }

    // Validates all data structures inside this object. If not valid, returns false.
    virtual bool Validate() const = 0;
    VkDeviceSize GetSize() const { return m_Size; }
    virtual size_t GetAllocationCount() const = 0;
    virtual VkDeviceSize GetSumFreeSize() const = 0;
    virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    // Returns true if this block is empty - contains only single free suballocation.
    virtual bool IsEmpty() const = 0;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
#endif

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest) = 0;

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest) = 0;

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;

    virtual VkResult CheckCorruption(const void* pBlockData) = 0;

    // Makes actual allocation based on request. Request must already be checked and valid.
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation) = 0;

    // Frees suballocation assigned to given memory region.
    virtual void Free(const VmaAllocation allocation) = 0;
    virtual void FreeAtOffset(VkDeviceSize offset) = 0;

    // Tries to resize (grow or shrink) space for given allocation, in place.
    virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }

protected:
    const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
        VkDeviceSize unusedBytes,
        size_t allocationCount,
        size_t unusedRangeCount) const;
    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VmaAllocation hAllocation) const;
    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VkDeviceSize size) const;
    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Size;
    const VkAllocationCallbacks* m_pAllocationCallbacks;
};
#define VMA_VALIDATE(cond) do { if(!(cond)) { \
        VMA_ASSERT(0 && "Validation failed: " #cond); \
        return false; \
    } } while(false)

class VmaBlockMetadata_Generic : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
public:
    VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Generic();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

    virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);

    ////////////////////////////////////////////////////////////////////////////////
    // For defragmentation

    bool IsBufferImageGranularityConflictPossible(
        VkDeviceSize bufferImageGranularity,
        VmaSuballocationType& inOutPrevSuballocType) const;

private:
    friend class VmaDefragmentationAlgorithm_Generic;
    friend class VmaDefragmentationAlgorithm_Fast;

    uint32_t m_FreeCount;
    VkDeviceSize m_SumFreeSize;
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given suballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
/*
Linear allocator. Keeps two suballocation vectors used in ping-pong fashion:
the "1st" vector holds the main sequence of allocations and the "2nd" vector,
when non-empty, acts either as a ring buffer or as the upper side of a double
stack, depending on m_2ndVectorMode.
*/
class VmaBlockMetadata_Linear : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
public:
    VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Linear();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const;
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return GetAllocationCount() == 0; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    /*
    There are two suballocation vectors, used in ping-pong way.
    The one with index m_1stVectorIndex is called 1st.
    The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    2nd can be non-empty only when 1st is not empty.
    When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    */
    typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;

    enum SECOND_VECTOR_MODE
    {
        SECOND_VECTOR_EMPTY,
        /*
        Suballocations in 2nd vector are created later than the ones in 1st, but they
        all have smaller offset.
        */
        SECOND_VECTOR_RING_BUFFER,
        /*
        Suballocations in 2nd vector are upper side of double stack.
        They all have offsets higher than those in 1st vector.
        Top of this stack means smaller offsets, but higher indices in this vector.
        */
        SECOND_VECTOR_DOUBLE_STACK,
    };

    VkDeviceSize m_SumFreeSize;
    SuballocationVectorType m_Suballocations0, m_Suballocations1;
    uint32_t m_1stVectorIndex;
    SECOND_VECTOR_MODE m_2ndVectorMode;

    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }

    // Number of items in 1st vector with hAllocation = null at the beginning.
    size_t m_1stNullItemsBeginCount;
    // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    size_t m_1stNullItemsMiddleCount;
    // Number of items in 2nd vector with hAllocation = null.
    size_t m_2ndNullItemsCount;

    bool ShouldCompact1st() const;
    void CleanupAfterFree();
};
/*
Buddy allocator:
- GetSize() is the original size of the allocated memory block.
- m_UsableSize is this size aligned down to a power of two; all allocations and
  calculations happen relative to m_UsableSize.
- GetUnusableSize() is the difference between them, reported as a separate
  unused range, not available for allocations.
Node at level 0 has size m_UsableSize; each next level contains nodes with size
2 times smaller than the current level.
*/
class VmaBlockMetadata_Buddy : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
public:
    VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Buddy();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const { return m_AllocationCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }

private:
    static const VkDeviceSize MIN_NODE_SIZE = 32;
    static const size_t MAX_LEVELS = 30;

    struct ValidationContext
    {
        size_t calculatedAllocationCount;
        size_t calculatedFreeCount;
        VkDeviceSize calculatedSumFreeSize;

        ValidationContext() :
            calculatedAllocationCount(0),
            calculatedFreeCount(0),
            calculatedSumFreeSize(0) { }
    };

    struct Node
    {
        VkDeviceSize offset;
        enum TYPE
        {
            TYPE_FREE,
            TYPE_ALLOCATION,
            TYPE_SPLIT,
            TYPE_COUNT
        } type;
        Node* parent;
        Node* buddy;
        // ... (union with type-specific data - free-list links, allocation
        // handle, split children - elided in this excerpt)
    };

    // Root node of the tree of nodes. Represents the whole usable size.
    Node* m_Root;
    VkDeviceSize m_UsableSize;
    uint32_t m_LevelCount;
    struct {
        Node* front;
        Node* back;
    } m_FreeList[MAX_LEVELS];
    // Number of nodes in the tree with type == TYPE_ALLOCATION.
    size_t m_AllocationCount;
    // Number of nodes in the tree with type == TYPE_FREE.
    size_t m_FreeCount;
    // This includes space wasted due to internal fragmentation - allocation sizes
    // rounded up to node sizes. Doesn't include unusable size.
    VkDeviceSize m_SumFreeSize;

    VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    void DeleteNode(Node* node);
    bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    // Alloc passed just for validation. Can be null.
    void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    // Adds node to the front of FreeList at given level.
    // node->type must be FREE.
    void AddToFreeListFront(uint32_t level, Node* node);
    // Removes node from FreeList at given level.
    // node->type must be FREE.
    void RemoveFromFreeList(uint32_t level, Node* node);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
#endif
};
/*
Represents a single block of device memory (VkDeviceMemory) with all the
data about its regions (aka suballocations, VmaAllocation), assigned and free.

Thread-safety: This class must be externally synchronized.
*/
class VmaDeviceMemoryBlock
{
    VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
public:
    VmaBlockMetadata* m_pMetadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    ~VmaDeviceMemoryBlock()
    {
        VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        VmaAllocator hAllocator,
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        uint32_t id,
        uint32_t algorithm);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    uint32_t GetId() const { return m_Id; }
    void* GetMappedData() const { return m_pMappedData; }

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    VkResult CheckCorruption(VmaAllocator hAllocator);

    // ppData can be null.
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    void Unmap(VmaAllocator hAllocator, uint32_t count);

    VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);

    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkBuffer hBuffer);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkImage hImage);

private:
    uint32_t m_MemoryTypeIndex;
    uint32_t m_Id;
    VkDeviceMemory m_hMemory;

    /*
    Protects access to m_hMemory so it's not used by multiple threads
    simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    Also protects m_MapCount and m_pMappedData.
    Allocations, deallocations, any change in m_pMetadata is protected by
    parent's VmaBlockVector::m_Mutex.
    */
    VMA_MUTEX m_Mutex;
    uint32_t m_MapCount;
    void* m_pMappedData;
};
struct VmaPointerLess
{
    bool operator()(const void* lhs, const void* rhs) const
    {
        return lhs < rhs;
    }
};

struct VmaDefragmentationMove
{
    size_t srcBlockIndex;
    size_t dstBlockIndex;
    VkDeviceSize srcOffset;
    VkDeviceSize dstOffset;
    VkDeviceSize size;
};

class VmaDefragmentationAlgorithm;
/*
Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a
specific Vulkan memory type.

Synchronized internally with a mutex.
*/
struct VmaBlockVector
{
    VMA_CLASS_NO_COPY(VmaBlockVector)
public:
    VmaBlockVector(
        VmaAllocator hAllocator,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool isCustomPool,
        bool explicitBlockSize,
        uint32_t algorithm);
    ~VmaBlockVector();

    VkResult CreateMinBlocks();

    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    uint32_t GetAlgorithm() const { return m_Algorithm; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty() const { return m_Blocks.empty(); }
    bool IsCorruptionDetectionEnabled() const;

    VkResult Allocate(
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        size_t allocationCount,
        VmaAllocation* pAllocations);

    void Free(VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);
    VkResult CheckCorruption();

    // Saves results in pCtx->res.
    void Defragment(
        class VmaBlockVectorDefragmentationContext* pCtx,
        VmaDefragmentationStats* pStats,
        VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
        VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
        VkCommandBuffer commandBuffer);
    void DefragmentationEnd(
        class VmaBlockVectorDefragmentationContext* pCtx,
        VmaDefragmentationStats* pStats);

    ////////////////////////////////////////////////////////////////////////////////
    // To be used only while the m_Mutex is locked. Used during defragmentation.

    size_t GetBlockCount() const { return m_Blocks.size(); }
    VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
    size_t CalcAllocationCount() const;
    bool IsBufferImageGranularityConflictPossible() const;

private:
    friend class VmaDefragmentationAlgorithm_Generic;

    const VmaAllocator m_hAllocator;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_IsCustomPool;
    const bool m_ExplicitBlockSize;
    const uint32_t m_Algorithm;

    /* There can be at most one allocation that is completely empty - a
    hysteresis to avoid a pessimistic case of alternating creation and
    destruction of a VkDeviceMemory. */
    bool m_HasEmptyBlock;
    VMA_RW_MUTEX m_Mutex;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    uint32_t m_NextBlockId;

    VkDeviceSize CalcMaxBlockSize() const;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    VkResult AllocatePage(
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // To be used only without CAN_MAKE_OTHER_LOST flag.
    VkResult AllocateFromBlock(
        VmaDeviceMemoryBlock* pBlock,
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        VmaAllocationCreateFlags allocFlags,
        void* pUserData,
        VmaSuballocationType suballocType,
        uint32_t strategy,
        VmaAllocation* pAllocation);

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);

    // Saves result to pCtx->res.
    void ApplyDefragmentationMovesCpu(
        class VmaBlockVectorDefragmentationContext* pDefragCtx,
        const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
    // Saves result to pCtx->res.
    void ApplyDefragmentationMovesGpu(
        class VmaBlockVectorDefragmentationContext* pDefragCtx,
        const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkCommandBuffer commandBuffer);

    // ... (remaining private helpers elided in this excerpt)
};
struct VmaPool_T
{
    VMA_CLASS_NO_COPY(VmaPool_T)
public:
    VmaBlockVector m_BlockVector;

    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo,
        VkDeviceSize preferredBlockSize);
    ~VmaPool_T();

    uint32_t GetId() const { return m_Id; }
    void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
#if VMA_STATS_STRING_ENABLED
    // ... (statistics helpers elided in this excerpt)
#endif

private:
    uint32_t m_Id;
};

/*
Performs defragmentation:

- Updates `pBlockVector->m_pMetadata`.
- Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
- Does not move actual data, only returns requested moves as `moves`.
*/
class VmaDefragmentationAlgorithm
{
    VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
public:
    VmaDefragmentationAlgorithm(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex) :
        m_hAllocator(hAllocator),
        m_pBlockVector(pBlockVector),
        m_CurrentFrameIndex(currentFrameIndex)
    {
    }
    virtual ~VmaDefragmentationAlgorithm()
    {
    }

    virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
    virtual void AddAll() = 0;

    virtual VkResult Defragment(
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove) = 0;

    virtual VkDeviceSize GetBytesMoved() const = 0;
    virtual uint32_t GetAllocationsMoved() const = 0;

protected:
    VmaAllocator const m_hAllocator;
    VmaBlockVector* const m_pBlockVector;
    const uint32_t m_CurrentFrameIndex;

    struct AllocationInfo
    {
        VmaAllocation m_hAllocation;
        VkBool32* m_pChanged;

        AllocationInfo() :
            m_hAllocation(VK_NULL_HANDLE),
            m_pChanged(VMA_NULL)
        {
        }
        AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
            m_hAllocation(hAlloc),
            m_pChanged(pChanged)
        {
        }
    };
};
class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
{
    VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
public:
    VmaDefragmentationAlgorithm_Generic(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex,
        bool overlappingMoveSupported);
    virtual ~VmaDefragmentationAlgorithm_Generic();

    virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    virtual void AddAll() { m_AllAllocations = true; }

    virtual VkResult Defragment(
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

private:
    uint32_t m_AllocationCount;
    bool m_AllAllocations;

    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;
    struct AllocationInfoSizeGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
        }
    };

    struct AllocationInfoOffsetGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
        }
    };
    struct BlockInfo
    {
        size_t m_OriginalBlockIndex;
        VmaDeviceMemoryBlock* m_pBlock;
        bool m_HasNonMovableAllocations;
        VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

        BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
            m_OriginalBlockIndex(SIZE_MAX),
            m_pBlock(VMA_NULL),
            m_HasNonMovableAllocations(true),
            m_Allocations(pAllocationCallbacks)
        {
        }

        void CalcHasNonMovableAllocations()
        {
            const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
            const size_t defragmentAllocCount = m_Allocations.size();
            m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
        }

        void SortAllocationsBySizeDescending()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
        }

        void SortAllocationsByOffsetDescending()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
        }
    };
    struct BlockPointerLess
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlock;
        }
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
        }
    };
    // 1. Blocks with some non-movable allocations go first.
    // 2. Blocks with smaller sumFreeSize go first.
    struct BlockInfoCompareMoveDestination
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return true;
            }
            if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return false;
            }
            if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
            {
                return true;
            }
            return false;
        }
    };

    typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    BlockInfoVector m_Blocks;
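    /*
    Illustrative sketch (not from the original source): ordering move
    destinations this way packs movable data into blocks that already contain
    immovable allocations, and among equals prefers the tighter block, which
    tends to leave whole blocks empty so they can be released. A minimal
    standalone analogue with std::sort (hypothetical names):

        #include <algorithm>
        #include <cstdint>
        #include <vector>

        struct DstSketch { bool hasNonMovable; uint64_t sumFreeSize; };

        inline void SortMoveDestinationsSketch(std::vector<DstSketch>& dsts)
        {
            std::sort(dsts.begin(), dsts.end(), [](const DstSketch& l, const DstSketch& r)
            {
                if(l.hasNonMovable != r.hasNonMovable)
                    return l.hasNonMovable; // Blocks with non-movable data first.
                return l.sumFreeSize < r.sumFreeSize; // Then tighter blocks first.
            });
        }
    */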
    VkResult DefragmentRound(
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    size_t CalcBlocksWithNonMovableCount() const;

    static bool MoveMakesSense(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset);
};
class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
{
    VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
public:
    VmaDefragmentationAlgorithm_Fast(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex,
        bool overlappingMoveSupported);
    virtual ~VmaDefragmentationAlgorithm_Fast();

    virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
    virtual void AddAll() { m_AllAllocations = true; }

    virtual VkResult Defragment(
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

private:
    struct BlockInfo
    {
        size_t origBlockIndex;
    };

    class FreeSpaceDatabase
    {
    public:
        FreeSpaceDatabase()
        {
            FreeSpace s = {};
            s.blockInfoIndex = SIZE_MAX;
            for(size_t i = 0; i < MAX_COUNT; ++i)
            {
                m_FreeSpaces[i] = s;
            }
        }

        void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
        {
            if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                return;
            }

            // Find first invalid or the smallest structure.
            size_t bestIndex = SIZE_MAX;
            for(size_t i = 0; i < MAX_COUNT; ++i)
            {
                // Empty structure.
                if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
                {
                    bestIndex = i;
                    break;
                }
                if(m_FreeSpaces[i].size < size &&
                    (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
                {
                    bestIndex = i;
                }
            }

            if(bestIndex != SIZE_MAX)
            {
                m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
                m_FreeSpaces[bestIndex].offset = offset;
                m_FreeSpaces[bestIndex].size = size;
            }
        }

        bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
            size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
        {
            size_t bestIndex = SIZE_MAX;
            VkDeviceSize bestFreeSpaceAfter = 0;
            for(size_t i = 0; i < MAX_COUNT; ++i)
            {
                // Structure is valid.
                if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
                {
                    const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
                    // Allocation fits into this structure.
                    if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
                    {
                        const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
                            (dstOffset + size);
                        if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
                        {
                            bestIndex = i;
                            bestFreeSpaceAfter = freeSpaceAfter;
                        }
                    }
                }
            }

            if(bestIndex != SIZE_MAX)
            {
                outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
                outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);

                if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
                {
                    // Leave this structure for remaining empty space.
                    const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
                    m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
                    m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
                }
                else
                {
                    // This structure becomes invalid.
                    m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
                }

                return true;
            }

            return false;
        }

    private:
        static const size_t MAX_COUNT = 4;

        struct FreeSpace
        {
            size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
            VkDeviceSize offset;
            VkDeviceSize size;
        } m_FreeSpaces[MAX_COUNT];
    };
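    /*
    Illustrative sketch (not from the original source): FreeSpaceDatabase caches
    a handful of the free ranges discovered while sweeping blocks, so the fast
    defragmentation path can relocate an allocation without rescanning all
    metadata. A simplified standalone approximation of Fetch() that takes the
    first fitting range instead of the one leaving the most space afterwards
    (hypothetical names):

        #include <cstddef>
        #include <cstdint>

        inline uint64_t AlignUpSketch(uint64_t v, uint64_t a) { return (v + a - 1) / a * a; }

        struct FreeRangeSketch { size_t block = SIZE_MAX; uint64_t offset = 0, size = 0; };

        // Try to place `size` bytes with `alignment` into one of the cached
        // ranges; on success shrink or invalidate the chosen range.
        inline bool FetchSketch(FreeRangeSketch (&ranges)[4], uint64_t alignment,
            uint64_t size, size_t& outBlock, uint64_t& outOffset)
        {
            for(FreeRangeSketch& r : ranges)
            {
                if(r.block == SIZE_MAX) continue;                 // Invalid slot.
                const uint64_t dst = AlignUpSketch(r.offset, alignment);
                if(dst + size > r.offset + r.size) continue;      // Does not fit.
                outBlock = r.block;
                outOffset = dst;
                const uint64_t used = (dst - r.offset) + size;
                r.offset += used;
                r.size -= used;
                if(r.size == 0) r.block = SIZE_MAX;               // Range exhausted.
                return true;
            }
            return false;
        }
    */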
    const bool m_OverlappingMoveSupported;

    uint32_t m_AllocationCount;
    bool m_AllAllocations;

    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;

    void PreprocessMetadata();
    void PostprocessMetadata();
    void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
};
struct VmaBlockDefragmentationContext
{
    enum BLOCK_FLAG
    {
        BLOCK_FLAG_USED = 0x00000001,
    };
    uint32_t flags;
    VkBuffer hBuffer;

    VmaBlockDefragmentationContext() :
        flags(0),
        hBuffer(VK_NULL_HANDLE)
    {
    }
};
class VmaBlockVectorDefragmentationContext
{
    VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
public:
    VkResult res;
    bool mutexLocked;
    VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;

    VmaBlockVectorDefragmentationContext(
        VmaAllocator hAllocator,
        VmaPool hCustomPool, // Optional.
        VmaBlockVector* pBlockVector,
        uint32_t currFrameIndex,
        uint32_t algorithmFlags);
    ~VmaBlockVectorDefragmentationContext();

    VmaPool GetCustomPool() const { return m_hCustomPool; }
    VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
    VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }

    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    void AddAll() { m_AllAllocations = true; }

    void Begin(bool overlappingMoveSupported);

private:
    const VmaAllocator m_hAllocator;
    // Null if not from custom pool.
    const VmaPool m_hCustomPool;
    VmaBlockVector* const m_pBlockVector;
    const uint32_t m_CurrFrameIndex;
    const uint32_t m_AlgorithmFlags;
    // Owner of this object.
    VmaDefragmentationAlgorithm* m_pAlgorithm;

    struct AllocInfo
    {
        VmaAllocation hAlloc;
        VkBool32* pChanged;
    };
    // Used between constructor and Begin.
    VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
    bool m_AllAllocations;
};
struct VmaDefragmentationContext_T
{
private:
    VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
public:
    VmaDefragmentationContext_T(
        VmaAllocator hAllocator,
        uint32_t currFrameIndex,
        uint32_t flags,
        VmaDefragmentationStats* pStats);
    ~VmaDefragmentationContext_T();

    void AddPools(uint32_t poolCount, VmaPool* pPools);
    void AddAllocations(
        uint32_t allocationCount,
        VmaAllocation* pAllocations,
        VkBool32* pAllocationsChanged);

    /*
    Returns:
    - `VK_SUCCESS` if succeeded and object can be destroyed immediately.
    - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
    - Negative value if error occurred and object can be destroyed immediately.
    */
    VkResult Defragment(
        VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
        VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
        VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats);

private:
    const VmaAllocator m_hAllocator;
    const uint32_t m_CurrFrameIndex;
    const uint32_t m_Flags;
    VmaDefragmentationStats* const m_pStats;
    // Owner of these objects.
    VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
    // Owner of these objects.
    VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
};
#if VMA_RECORDING_ENABLED

class VmaRecorder
{
public:
    VmaRecorder();
    VkResult Init(const VmaRecordSettings& settings, bool useMutex);
    void WriteConfiguration(
        const VkPhysicalDeviceProperties& devProps,
        const VkPhysicalDeviceMemoryProperties& memProps,
        bool dedicatedAllocationExtensionEnabled);
    ~VmaRecorder();

    void RecordCreateAllocator(uint32_t frameIndex);
    void RecordDestroyAllocator(uint32_t frameIndex);
    void RecordCreatePool(uint32_t frameIndex,
        const VmaPoolCreateInfo& createInfo,
        VmaPool pool);
    void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
    void RecordAllocateMemory(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryPages(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        const VmaAllocationCreateInfo& createInfo,
        uint64_t allocationCount,
        const VmaAllocation* pAllocations);
    void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForImage(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordFreeMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordFreeMemoryPages(uint32_t frameIndex,
        uint64_t allocationCount,
        const VmaAllocation* pAllocations);
    void RecordResizeAllocation(
        uint32_t frameIndex,
        VmaAllocation allocation,
        VkDeviceSize newSize);
    void RecordSetAllocationUserData(uint32_t frameIndex,
        VmaAllocation allocation,
        const void* pUserData);
    void RecordCreateLostAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordUnmapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordFlushAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordInvalidateAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordCreateBuffer(uint32_t frameIndex,
        const VkBufferCreateInfo& bufCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordCreateImage(uint32_t frameIndex,
        const VkImageCreateInfo& imageCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordDestroyBuffer(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordDestroyImage(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordTouchAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordGetAllocationInfo(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMakePoolAllocationsLost(uint32_t frameIndex,
        VmaPool pool);
    void RecordDefragmentationBegin(uint32_t frameIndex,
        const VmaDefragmentationInfo2& info,
        VmaDefragmentationContext ctx);
    void RecordDefragmentationEnd(uint32_t frameIndex,
        VmaDefragmentationContext ctx);
private:
    struct CallParams
    {
        uint32_t threadId;
        double time;
    };

    // Writes, and stores a copy of, the string containing allocation user data.
    class UserDataString
    {
    public:
        UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
        const char* GetString() const { return m_Str; }

    private:
        char m_PtrStr[17];
        const char* m_Str;
    };

    bool m_UseMutex;
    VmaRecordFlags m_Flags;
    FILE* m_File;
    VMA_MUTEX m_FileMutex;
    int64_t m_Freq;
    int64_t m_StartCounter;

    void GetBasicParams(CallParams& outParams);

    // T must be a pointer type, e.g. VmaAllocation, VmaPool.
    template<typename T>
    void PrintPointerList(uint64_t count, const T* pItems)
    {
        if(count)
        {
            fprintf(m_File, "%p", pItems[0]);
            for(uint64_t i = 1; i < count; ++i)
            {
                fprintf(m_File, " %p", pItems[i]);
            }
        }
    }
    void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
    void Flush();
};
#endif // #if VMA_RECORDING_ENABLED

// Main allocator object.
struct VmaAllocator_T
{
    VMA_CLASS_NO_COPY(VmaAllocator_T)
public:
    bool m_UseMutex;
    bool m_UseKhrDedicatedAllocation;
    VkDevice m_hDevice;
    bool m_AllocationCallbacksSpecified;
    VkAllocationCallbacks m_AllocationCallbacks;
    VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;

    // Number of bytes free out of limit, or VK_WHOLE_SIZE if no limit for that heap.
    VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    VMA_MUTEX m_HeapSizeLimitMutex;

    VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    VkPhysicalDeviceMemoryProperties m_MemProps;

    // Default pools.
    VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];

    // Each vector is sorted by memory (handle value).
    typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];

    VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    ~VmaAllocator_T();

    const VkAllocationCallbacks* GetAllocationCallbacks() const
    {
        return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    }
    const VmaVulkanFunctions& GetVulkanFunctions() const
    {
        return m_VulkanFunctions;
    }

    VkDeviceSize GetBufferImageGranularity() const
    {
        return VMA_MAX(
            static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    }

    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }

    uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    {
        VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
        return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    }
    // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    {
        return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
            VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    }
    // Minimum alignment for all allocations in specific memory type.
    VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    {
        return IsMemoryTypeNonCoherent(memTypeIndex) ?
            VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
            (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    }
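    /*
    Illustrative sketch (not from the original source): the min-alignment rule
    above exists because ranges passed to vkFlushMappedMemoryRanges and
    vkInvalidateMappedMemoryRanges on HOST_VISIBLE but non-HOST_COHERENT memory
    must be aligned to VkPhysicalDeviceLimits::nonCoherentAtomSize. A standalone
    helper that rounds a mapped sub-range outward to that atom size
    (hypothetical names):

        #include <cstdint>

        struct AtomRangeSketch { uint64_t offset, size; };

        inline AtomRangeSketch ExpandToAtomSketch(uint64_t offset, uint64_t size,
            uint64_t nonCoherentAtomSize, uint64_t allocationSize)
        {
            const uint64_t a = nonCoherentAtomSize;
            AtomRangeSketch r;
            r.offset = offset / a * a;                       // Round start down.
            uint64_t end = (offset + size + a - 1) / a * a;  // Round end up.
            if(end > allocationSize) end = allocationSize;   // Clamp to allocation.
            r.size = end - r.offset;
            return r;
        }
    */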
    bool IsIntegratedGpu() const
    {
        return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    }

#if VMA_RECORDING_ENABLED
    VmaRecorder* GetRecorder() const { return m_pRecorder; }
#endif

    void GetBufferMemoryRequirements(
        VkBuffer hBuffer,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;
    void GetImageMemoryRequirements(
        VkImage hImage,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;

    // Main allocation function.
    VkResult AllocateMemory(
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        size_t allocationCount,
        VmaAllocation* pAllocations);

    // Main deallocation function.
    void FreeMemory(
        size_t allocationCount,
        const VmaAllocation* pAllocations);

    VkResult ResizeAllocation(
        const VmaAllocation alloc,
        VkDeviceSize newSize);

    void CalculateStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    VkResult DefragmentationBegin(
        const VmaDefragmentationInfo2& info,
        VmaDefragmentationStats* pStats,
        VmaDefragmentationContext* pContext);
    VkResult DefragmentationEnd(
        VmaDefragmentationContext context);

    void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    bool TouchAllocation(VmaAllocation hAllocation);

    VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    void DestroyPool(VmaPool pool);
    void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);

    void SetCurrentFrameIndex(uint32_t frameIndex);
    uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }

    void MakePoolAllocationsLost(
        VmaPool hPool,
        size_t* pLostAllocationCount);
    VkResult CheckPoolCorruption(VmaPool hPool);
    VkResult CheckCorruption(uint32_t memoryTypeBits);

    void CreateLostAllocation(VmaAllocation* pAllocation);

    VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);

    VkResult Map(VmaAllocation hAllocation, void** ppData);
    void Unmap(VmaAllocation hAllocation);

    VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
    VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);

    void FlushOrInvalidateAllocation(
        VmaAllocation hAllocation,
        VkDeviceSize offset, VkDeviceSize size,
        VMA_CACHE_OPERATION op);

    void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);

private:
    VkDeviceSize m_PreferredLargeHeapBlockSize;

    VkPhysicalDevice m_PhysicalDevice;
    VMA_ATOMIC_UINT32 m_CurrentFrameIndex;

    VMA_RW_MUTEX m_PoolsMutex;
    // Protected by m_PoolsMutex. Sorted by pointer value.
    VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    uint32_t m_NextPoolId;

    VmaVulkanFunctions m_VulkanFunctions;

#if VMA_RECORDING_ENABLED
    VmaRecorder* m_pRecorder;
#endif

    VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);

    VkResult AllocateMemoryOfType(
        VkDeviceSize size,
        VkDeviceSize alignment,
        bool dedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        uint32_t memTypeIndex,
        VmaSuballocationType suballocType,
        size_t allocationCount,
        VmaAllocation* pAllocations);

    // Helper function only to be used inside AllocateDedicatedMemory.
    VkResult AllocateDedicatedMemoryPage(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        const VkMemoryAllocateInfo& allocInfo,
        bool map,
        bool isUserDataString,
        void* pUserData,
        VmaAllocation* pAllocation);

    // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
    VkResult AllocateDedicatedMemory(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        bool map,
        bool isUserDataString,
        void* pUserData,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        size_t allocationCount,
        VmaAllocation* pAllocations);

    void FreeDedicatedMemory(VmaAllocation allocation);
};
static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
{
    return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
}

static void VmaFree(VmaAllocator hAllocator, void* ptr)
{
    VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
}

template<typename T>
static T* VmaAllocate(VmaAllocator hAllocator)
{
    return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
}

template<typename T>
static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
{
    return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
}

template<typename T>
static void vma_delete(VmaAllocator hAllocator, T* ptr)
{
    if(ptr != VMA_NULL)
    {
        ptr->~T();
        VmaFree(hAllocator, ptr);
    }
}

template<typename T>
static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(hAllocator, ptr);
    }
}
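/*
Illustrative sketch (not from the original source): vma_delete_array() mirrors
a counted operator new[]: elements are destroyed in reverse order of
construction, then the raw storage is returned through the allocation
callbacks. A standalone analogue over malloc/free (hypothetical names):

    #include <cstdlib>
    #include <new>

    template<typename T>
    T* NewArraySketch(size_t count)
    {
        T* p = static_cast<T*>(std::malloc(sizeof(T) * count));
        for(size_t i = 0; i < count; ++i)
            new(p + i) T(); // Placement-construct each element.
        return p;
    }

    template<typename T>
    void DeleteArraySketch(T* p, size_t count)
    {
        if(p == nullptr) return;
        for(size_t i = count; i--; )
            p[i].~T();      // Destroy in reverse order.
        std::free(p);
    }
*/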
#if VMA_STATS_STRING_ENABLED

class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    void AddPointer(const void* ptr);

private:
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};

void VmaStringBuilder::Add(const char* pStr)
{
    const size_t strLen = strlen(pStr);
    if(strLen > 0)
    {
        const size_t oldCount = m_Data.size();
        m_Data.resize(oldCount + strLen);
        memcpy(m_Data.data() + oldCount, pStr, strLen);
    }
}

void VmaStringBuilder::AddNumber(uint32_t num)
{
    char buf[11];
    VmaUint32ToStr(buf, sizeof(buf), num);
    Add(buf);
}

void VmaStringBuilder::AddNumber(uint64_t num)
{
    char buf[21];
    VmaUint64ToStr(buf, sizeof(buf), num);
    Add(buf);
}

void VmaStringBuilder::AddPointer(const void* ptr)
{
    char buf[21];
    VmaPtrToStr(buf, sizeof(buf), ptr);
    Add(buf);
}
#endif // #if VMA_STATS_STRING_ENABLED

////////////////////////////////////////////////////////////////////////////////
// VmaJsonWriter

#if VMA_STATS_STRING_ENABLED

class VmaJsonWriter
{
    VMA_CLASS_NO_COPY(VmaJsonWriter)
public:
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    void BeginObject(bool singleLine = false);
    void EndObject();

    void BeginArray(bool singleLine = false);
    void EndArray();

    void WriteString(const char* pStr);
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Pointer(const void* ptr);
    void EndString(const char* pStr = VMA_NULL);

    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    static const char* const INDENT;

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    struct StackItem
    {
        COLLECTION_TYPE type;
        uint32_t valueCount;
        bool singleLineMode;
    };

    VmaStringBuilder& m_SB;
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    bool m_InsideString;

    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};

const char* const VmaJsonWriter::INDENT = "  ";
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}

VmaJsonWriter::~VmaJsonWriter()
{
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}

void VmaJsonWriter::BeginObject(bool singleLine)
{
    VMA_ASSERT(!m_InsideString);

    BeginValue(false);
    m_SB.Add('{');

    StackItem item;
    item.type = COLLECTION_TYPE_OBJECT;
    item.valueCount = 0;
    item.singleLineMode = singleLine;
    m_Stack.push_back(item);
}

void VmaJsonWriter::EndObject()
{
    VMA_ASSERT(!m_InsideString);

    WriteIndent(true);
    m_SB.Add('}');

    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    m_Stack.pop_back();
}

void VmaJsonWriter::BeginArray(bool singleLine)
{
    VMA_ASSERT(!m_InsideString);

    BeginValue(false);
    m_SB.Add('[');

    StackItem item;
    item.type = COLLECTION_TYPE_ARRAY;
    item.valueCount = 0;
    item.singleLineMode = singleLine;
    m_Stack.push_back(item);
}

void VmaJsonWriter::EndArray()
{
    VMA_ASSERT(!m_InsideString);

    WriteIndent(true);
    m_SB.Add(']');

    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    m_Stack.pop_back();
}

void VmaJsonWriter::WriteString(const char* pStr)
{
    BeginString(pStr);
    EndString();
}

void VmaJsonWriter::BeginString(const char* pStr)
{
    VMA_ASSERT(!m_InsideString);

    BeginValue(true);
    m_SB.Add('"');
    m_InsideString = true;
    if(pStr != VMA_NULL && pStr[0] != '\0')
    {
        ContinueString(pStr);
    }
}

void VmaJsonWriter::ContinueString(const char* pStr)
{
    VMA_ASSERT(m_InsideString);

    const size_t strLen = strlen(pStr);
    for(size_t i = 0; i < strLen; ++i)
    {
        char ch = pStr[i];
        if(ch == '\\')
        {
            m_SB.Add("\\\\");
        }
        else if(ch == '"')
        {
            m_SB.Add("\\\"");
        }
        else if(ch >= 32)
        {
            m_SB.Add(ch);
        }
        else switch(ch)
        {
        case '\b': m_SB.Add("\\b"); break;
        case '\f': m_SB.Add("\\f"); break;
        case '\n': m_SB.Add("\\n"); break;
        case '\r': m_SB.Add("\\r"); break;
        case '\t': m_SB.Add("\\t"); break;
        default:
            VMA_ASSERT(0 && "Character not currently supported.");
            break;
        }
    }
}

void VmaJsonWriter::ContinueString(uint32_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}

void VmaJsonWriter::ContinueString(uint64_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}

void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddPointer(ptr);
}

void VmaJsonWriter::EndString(const char* pStr)
{
    VMA_ASSERT(m_InsideString);
    if(pStr != VMA_NULL && pStr[0] != '\0')
    {
        ContinueString(pStr);
    }
    m_SB.Add('"');
    m_InsideString = false;
}

void VmaJsonWriter::WriteNumber(uint32_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}

void VmaJsonWriter::WriteNumber(uint64_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}

void VmaJsonWriter::WriteBool(bool b)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add(b ? "true" : "false");
}

void VmaJsonWriter::WriteNull()
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add("null");
}

void VmaJsonWriter::BeginValue(bool isString)
{
    if(!m_Stack.empty())
    {
        StackItem& currItem = m_Stack.back();
        if(currItem.type == COLLECTION_TYPE_OBJECT &&
            currItem.valueCount % 2 == 0)
        {
            // Every even-positioned value inside an object must be a string key.
            VMA_ASSERT(isString);
        }

        if(currItem.type == COLLECTION_TYPE_OBJECT &&
            currItem.valueCount % 2 != 0)
        {
            m_SB.Add(": ");
        }
        else if(currItem.valueCount > 0)
        {
            m_SB.Add(", ");
            WriteIndent();
        }
        else
        {
            WriteIndent();
        }
        ++currItem.valueCount;
    }
}

void VmaJsonWriter::WriteIndent(bool oneLess)
{
    if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    {
        m_SB.AddNewLine();

        size_t count = m_Stack.size();
        if(count > 0 && oneLess)
        {
            --count;
        }
        for(size_t i = 0; i < count; ++i)
        {
            m_SB.Add(INDENT);
        }
    }
}
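/*
Illustrative sketch (not from the original source): VmaJsonWriter tracks open
objects and arrays on m_Stack; BeginValue() inserts ": " between an object key
and its value (odd valueCount), inserts ", " plus a newline between siblings,
and asserts that every key is a string. Intended usage, assuming a valid
`allocator` handle:

    VmaStringBuilder sb(allocator);
    {
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();
        json.WriteString("Name");   // Key.
        json.WriteString("Block");  // Value.
        json.WriteString("Size");   // Key.
        json.WriteNumber(65536u);   // Value.
        json.EndObject();
    }
    // sb now holds a pretty-printed {"Name": "Block", "Size": 65536}.
*/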
#endif // #if VMA_STATS_STRING_ENABLED

////////////////////////////////////////////////////////////////////////////////
// class VmaAllocation_T

void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
{
    if(IsUserDataString())
    {
        VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);

        FreeUserDataString(hAllocator);

        if(pUserData != VMA_NULL)
        {
            const char* const newStrSrc = (char*)pUserData;
            const size_t newStrLen = strlen(newStrSrc);
            char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
            memcpy(newStrDst, newStrSrc, newStrLen + 1);
            m_pUserData = newStrDst;
        }
    }
    else
    {
        m_pUserData = pUserData;
    }
}
void VmaAllocation_T::ChangeBlockAllocation(
    VmaAllocator hAllocator,
    VmaDeviceMemoryBlock* block,
    VkDeviceSize offset)
{
    VMA_ASSERT(block != VMA_NULL);
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);

    // Move mapping reference counter from old block to new block.
    if(block != m_BlockAllocation.m_Block)
    {
        uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
        if(IsPersistentMap())
        {
            ++mapRefCount;
        }
        m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
        block->Map(hAllocator, mapRefCount, VMA_NULL);
    }

    m_BlockAllocation.m_Block = block;
    m_BlockAllocation.m_Offset = offset;
}

void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
{
    VMA_ASSERT(newSize > 0);
    m_Size = newSize;
}

void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    m_BlockAllocation.m_Offset = newOffset;
}
VkDeviceSize VmaAllocation_T::GetOffset() const
{
    switch(m_Type)
    {
    case ALLOCATION_TYPE_BLOCK:
        return m_BlockAllocation.m_Offset;
    case ALLOCATION_TYPE_DEDICATED:
        return 0;
    default:
        VMA_ASSERT(0);
        return 0;
    }
}

VkDeviceMemory VmaAllocation_T::GetMemory() const
{
    switch(m_Type)
    {
    case ALLOCATION_TYPE_BLOCK:
        return m_BlockAllocation.m_Block->GetDeviceMemory();
    case ALLOCATION_TYPE_DEDICATED:
        return m_DedicatedAllocation.m_hMemory;
    default:
        VMA_ASSERT(0);
        return VK_NULL_HANDLE;
    }
}

uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
{
    switch(m_Type)
    {
    case ALLOCATION_TYPE_BLOCK:
        return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
    case ALLOCATION_TYPE_DEDICATED:
        return m_DedicatedAllocation.m_MemoryTypeIndex;
    default:
        VMA_ASSERT(0);
        return UINT32_MAX;
    }
}

void* VmaAllocation_T::GetMappedData() const
{
    switch(m_Type)
    {
    case ALLOCATION_TYPE_BLOCK:
        if(m_MapCount != 0)
        {
            void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
            VMA_ASSERT(pBlockData != VMA_NULL);
            return (char*)pBlockData + m_BlockAllocation.m_Offset;
        }
        else
        {
            return VMA_NULL;
        }
        break;
    case ALLOCATION_TYPE_DEDICATED:
        VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
        return m_DedicatedAllocation.m_pMappedData;
    default:
        VMA_ASSERT(0);
        return VMA_NULL;
    }
}

bool VmaAllocation_T::CanBecomeLost() const
{
    switch(m_Type)
    {
    case ALLOCATION_TYPE_BLOCK:
        return m_BlockAllocation.m_CanBecomeLost;
    case ALLOCATION_TYPE_DEDICATED:
        return false;
    default:
        VMA_ASSERT(0);
        return false;
    }
}

VmaPool VmaAllocation_T::GetPool() const
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    return m_BlockAllocation.m_hPool;
}
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            return false;
        }
        else // Last use time earlier than current time.
        {
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting the atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
        }
    }
}
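/*
Illustrative sketch (not from the original source): MakeLost() is a classic
compare-and-swap retry loop over an atomic frame index. If another thread
touches the allocation between the read and the CAS, the CAS fails, the local
copy is refreshed, and the decision is re-evaluated. A standalone analogue
with std::atomic (hypothetical names):

    #include <atomic>
    #include <cstdint>

    constexpr uint32_t FRAME_INDEX_LOST_SKETCH = UINT32_MAX;

    inline bool TryMakeLostSketch(std::atomic<uint32_t>& lastUseFrame,
        uint32_t currentFrame, uint32_t framesInUse)
    {
        uint32_t observed = lastUseFrame.load();
        for(;;)
        {
            if(observed == FRAME_INDEX_LOST_SKETCH)
                return false; // Already lost.
            if(observed + framesInUse >= currentFrame)
                return false; // Still potentially in use by the GPU.
            // On failure, `observed` is reloaded and the loop retries.
            if(lastUseFrame.compare_exchange_weak(observed, FRAME_INDEX_LOST_SKETCH))
                return true;
        }
    }
*/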
#if VMA_STATS_STRING_ENABLED

// Correspond to values of enum VmaSuballocationType.
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};

void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
{
    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);

    json.WriteString("Size");
    json.WriteNumber(m_Size);

    if(m_pUserData != VMA_NULL)
    {
        json.WriteString("UserData");
        if(IsUserDataString())
        {
            json.WriteString((const char*)m_pUserData);
        }
        else
        {
            json.BeginString();
            json.ContinueString_Pointer(m_pUserData);
            json.EndString();
        }
    }

    json.WriteString("CreationFrameIndex");
    json.WriteNumber(m_CreationFrameIndex);

    json.WriteString("LastUseFrameIndex");
    json.WriteNumber(GetLastUseFrameIndex());

    if(m_BufferImageUsage != 0)
    {
        json.WriteString("Usage");
        json.WriteNumber(m_BufferImageUsage);
    }
}
void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
{
    VMA_ASSERT(IsUserDataString());
    if(m_pUserData != VMA_NULL)
    {
        char* const oldStr = (char*)m_pUserData;
        const size_t oldStrLen = strlen(oldStr);
        vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
        m_pUserData = VMA_NULL;
    }
}
void VmaAllocation_T::BlockAllocMap()
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);

    if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    {
        ++m_MapCount;
    }
    else
    {
        VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    }
}

void VmaAllocation_T::BlockAllocUnmap()
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);

    if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    {
        --m_MapCount;
    }
    else
    {
        VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    }
}

VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if(m_MapCount != 0)
    {
        if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
        {
            VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
            *ppData = m_DedicatedAllocation.m_pMappedData;
            ++m_MapCount;
            return VK_SUCCESS;
        }
        else
        {
            VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
            return VK_ERROR_MEMORY_MAP_FAILED;
        }
    }
    else
    {
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_DedicatedAllocation.m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            ppData);
        if(result == VK_SUCCESS)
        {
            m_DedicatedAllocation.m_pMappedData = *ppData;
            m_MapCount = 1;
        }
        return result;
    }
}

void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    {
        --m_MapCount;
        if(m_MapCount == 0)
        {
            m_DedicatedAllocation.m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
                hAllocator->m_hDevice,
                m_DedicatedAllocation.m_hMemory);
        }
    }
    else
    {
        VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    }
}
#if VMA_STATS_STRING_ENABLED

static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
{
    json.BeginObject();

    json.WriteString("Blocks");
    json.WriteNumber(stat.blockCount);

    json.WriteString("Allocations");
    json.WriteNumber(stat.allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber(stat.unusedRangeCount);

    json.WriteString("UsedBytes");
    json.WriteNumber(stat.usedBytes);

    json.WriteString("UnusedBytes");
    json.WriteNumber(stat.unusedBytes);

    if(stat.allocationCount > 1)
    {
        json.WriteString("AllocationSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.allocationSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.allocationSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.allocationSizeMax);
        json.EndObject();
    }

    if(stat.unusedRangeCount > 1)
    {
        json.WriteString("UnusedRangeSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.unusedRangeSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.unusedRangeSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.unusedRangeSizeMax);
        json.EndObject();
    }

    json.EndObject();
}
#endif // #if VMA_STATS_STRING_ENABLED

struct VmaSuballocationItemSizeLess
{
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        const VmaSuballocationList::iterator rhs) const
    {
        return lhs->size < rhs->size;
    }
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        VkDeviceSize rhsSize) const
    {
        return lhs->size < rhsSize;
    }
};
////////////////////////////////////////////////////////////////////////////////
// class VmaBlockMetadata

VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    m_Size(0),
    m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
{
}
#if VMA_STATS_STRING_ENABLED

void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    VkDeviceSize unusedBytes,
    size_t allocationCount,
    size_t unusedRangeCount) const
{
    json.BeginObject();

    json.WriteString("TotalBytes");
    json.WriteNumber(GetSize());

    json.WriteString("UnusedBytes");
    json.WriteNumber(unusedBytes);

    json.WriteString("Allocations");
    json.WriteNumber((uint64_t)allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber((uint64_t)unusedRangeCount);

    json.WriteString("Suballocations");
    json.BeginArray();
}
void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VmaAllocation hAllocation) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    hAllocation->PrintParameters(json);

    json.EndObject();
}

void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VkDeviceSize size) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);

    json.WriteString("Size");
    json.WriteNumber(size);

    json.EndObject();
}

void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
{
    json.EndArray();
    json.EndObject();
}

#endif // #if VMA_STATS_STRING_ENABLED

////////////////////////////////////////////////////////////////////////////////
// class VmaBlockMetadata_Generic

VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}

VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
{
}

void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);

    m_FreeCount = 1;
    m_SumFreeSize = size;

    VmaSuballocation suballoc = {};
    suballoc.offset = 0;
    suballoc.size = size;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    m_Suballocations.push_back(suballoc);
    VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    --suballocItem;
    m_FreeSuballocationsBySize.push_back(suballocItem);
}
bool VmaBlockMetadata_Generic::Validate() const
{
    VMA_VALIDATE(!m_Suballocations.empty());

    // Expected offset of new suballocation as calculated from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize, calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visited suballocation was free.
    bool prevFree = false;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        VMA_VALIDATE(subAlloc.offset == calculatedOffset);

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        VMA_VALIDATE(!prevFree || !currFree);

        VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                ++freeSuballocationsToRegister;
            }

            // Margin required between allocations: every free space must be at least that large.
            VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
        }
        else
        {
            VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
            VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);

            // Margin required between allocations: previous allocation must be free.
            VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
        }

        calculatedOffset += subAlloc.size;
        prevFree = currFree;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize
    // must match the expected one.
    VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);

    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
        // They must be sorted by size ascending.
        VMA_VALIDATE(suballocItem->size >= lastSize);

        lastSize = suballocItem->size;
    }

    // Check if totals match calculated values.
    VMA_VALIDATE(ValidateFreeSuballocationList());
    VMA_VALIDATE(calculatedOffset == GetSize());
    VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    VMA_VALIDATE(calculatedFreeCount == m_FreeCount);

    return true;
}
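/*
Illustrative sketch (not from the original source): Validate() walks the
suballocation list once and re-derives every cached total, so any broken
invariant (wrong offsets, unmerged neighboring free ranges, stale counters) is
caught close to where it was introduced. The same idea applied to a plain list
of (offset, size, free) records (hypothetical names):

    #include <cstdint>
    #include <vector>

    struct SubSketch { uint64_t offset, size; bool free; };

    inline bool ValidateSketch(const std::vector<SubSketch>& subs,
        uint64_t blockSize, uint64_t cachedSumFree)
    {
        uint64_t expectedOffset = 0, sumFree = 0;
        bool prevFree = false;
        for(const SubSketch& s : subs)
        {
            if(s.offset != expectedOffset) return false; // Gap or overlap.
            if(prevFree && s.free) return false;         // Should have merged.
            if(s.free) sumFree += s.size;
            expectedOffset += s.size;
            prevFree = s.free;
        }
        return expectedOffset == blockSize && sumFree == cachedSumFree;
    }
*/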
VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
{
    if(!m_FreeSuballocationsBySize.empty())
    {
        return m_FreeSuballocationsBySize.back()->size;
    }
    else
    {
        return 0;
    }
}

bool VmaBlockMetadata_Generic::IsEmpty() const
{
    return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
}

void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
{
    outInfo.blockCount = 1;

    const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    outInfo.allocationCount = rangeCount - m_FreeCount;
    outInfo.unusedRangeCount = m_FreeCount;

    outInfo.unusedBytes = m_SumFreeSize;
    outInfo.usedBytes = GetSize() - outInfo.unusedBytes;

    outInfo.allocationSizeMin = UINT64_MAX;
    outInfo.allocationSizeMax = 0;
    outInfo.unusedRangeSizeMin = UINT64_MAX;
    outInfo.unusedRangeSizeMax = 0;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& suballoc = *suballocItem;
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
            outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
        }
        else
        {
            outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
            outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
        }
    }
}

void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
{
    const uint32_t rangeCount = (uint32_t)m_Suballocations.size();

    inoutStats.size += GetSize();
    inoutStats.unusedSize += m_SumFreeSize;
    inoutStats.allocationCount += rangeCount - m_FreeCount;
    inoutStats.unusedRangeCount += m_FreeCount;
    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
}

#if VMA_STATS_STRING_ENABLED

void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
{
    PrintDetailedMap_Begin(json,
        m_SumFreeSize, // unusedBytes
        m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
        m_FreeCount); // unusedRangeCount

    size_t i = 0;
    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem, ++i)
    {
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
        }
        else
        {
            PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
        }
    }

    PrintDetailedMap_End(json);
}

#endif // #if VMA_STATS_STRING_ENABLED
bool VmaBlockMetadata_Generic::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(!upperAddress);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(pAllocationRequest != VMA_NULL);
    VMA_HEAVY_ASSERT(Validate());

    // There is not enough total free space in this block to fulfill the request:
    // early return false.
    if(canMakeOtherLost == false &&
        m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
    {
        return false;
    }

    const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    if(freeSuballocCount > 0)
    {
        if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
        {
            // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
            VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
                m_FreeSuballocationsBySize.data(),
                m_FreeSuballocationsBySize.data() + freeSuballocCount,
                allocSize + 2 * VMA_DEBUG_MARGIN,
                VmaSuballocationItemSizeLess());
            size_t index = it - m_FreeSuballocationsBySize.data();
            for(; index < freeSuballocCount; ++index)
            {
                if(CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    m_FreeSuballocationsBySize[index],
                    false, // canMakeOtherLost
                    &pAllocationRequest->offset,
                    &pAllocationRequest->itemsToMakeLostCount,
                    &pAllocationRequest->sumFreeSize,
                    &pAllocationRequest->sumItemSize))
                {
                    pAllocationRequest->item = m_FreeSuballocationsBySize[index];
                    return true;
                }
            }
        }
        else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
        {
            for(VmaSuballocationList::iterator it = m_Suballocations.begin();
                it != m_Suballocations.end();
                ++it)
            {
                if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    it,
                    false, // canMakeOtherLost
                    &pAllocationRequest->offset,
                    &pAllocationRequest->itemsToMakeLostCount,
                    &pAllocationRequest->sumFreeSize,
                    &pAllocationRequest->sumItemSize))
                {
                    pAllocationRequest->item = it;
                    return true;
                }
            }
        }
        else // WORST_FIT, FIRST_FIT
        {
            // Search starting from biggest suballocations.
            for(size_t index = freeSuballocCount; index--; )
            {
                if(CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    m_FreeSuballocationsBySize[index],
                    false, // canMakeOtherLost
                    &pAllocationRequest->offset,
                    &pAllocationRequest->itemsToMakeLostCount,
                    &pAllocationRequest->sumFreeSize,
                    &pAllocationRequest->sumItemSize))
                {
                    pAllocationRequest->item = m_FreeSuballocationsBySize[index];
                    return true;
                }
            }
        }
    }

    if(canMakeOtherLost)
    {
        // Brute-force algorithm.

        pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
        pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;

        VmaAllocationRequest tmpAllocRequest = {};
        for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
            suballocIt != m_Suballocations.end();
            ++suballocIt)
        {
            if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
                suballocIt->hAllocation->CanBecomeLost())
            {
                if(CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    suballocIt,
                    canMakeOtherLost,
                    &tmpAllocRequest.offset,
                    &tmpAllocRequest.itemsToMakeLostCount,
                    &tmpAllocRequest.sumFreeSize,
                    &tmpAllocRequest.sumItemSize))
                {
                    tmpAllocRequest.item = suballocIt;

                    if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
                        strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
                    {
                        *pAllocationRequest = tmpAllocRequest;
                    }
                }
            }
        }

        if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
        {
            return true;
        }
    }

    return false;
}
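/*
Illustrative sketch (not from the original source): the search modes above
differ mainly in iteration order over the size-sorted free list. Best-fit
binary-searches for the smallest range that fits; worst-fit starts from the
largest range; min-offset scans the suballocation list from the beginning. A
compact standalone comparison of the first two (hypothetical names):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // freeSizes is sorted ascending.
    inline int BestFitSketch(const std::vector<uint64_t>& freeSizes, uint64_t need)
    {
        auto it = std::lower_bound(freeSizes.begin(), freeSizes.end(), need);
        return it == freeSizes.end() ? -1 : int(it - freeSizes.begin());
    }

    inline int WorstFitSketch(const std::vector<uint64_t>& freeSizes, uint64_t need)
    {
        // The largest range is at the back; it either fits or nothing does.
        return !freeSizes.empty() && freeSizes.back() >= need
            ? int(freeSizes.size()) - 1 : -1;
    }
*/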
bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    while(pAllocationRequest->itemsToMakeLostCount > 0)
    {
        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++pAllocationRequest->item;
        }
        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
            --pAllocationRequest->itemsToMakeLostCount;
        }
        else
        {
            return false;
        }
    }

    VMA_HEAVY_ASSERT(Validate());
    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);

    return true;
}
uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    uint32_t lostAllocationCount = 0;
    for(VmaSuballocationList::iterator it = m_Suballocations.begin();
        it != m_Suballocations.end();
        ++it)
    {
        if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
            it->hAllocation->CanBecomeLost() &&
            it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            it = FreeSuballocation(it);
            ++lostAllocationCount;
        }
    }
    return lostAllocationCount;
}
VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
{
    for(VmaSuballocationList::iterator it = m_Suballocations.begin();
        it != m_Suballocations.end();
        ++it)
    {
        if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
            {
                VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
                return VK_ERROR_VALIDATION_FAILED_EXT;
            }
            if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
            {
                VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
                return VK_ERROR_VALIDATION_FAILED_EXT;
            }
        }
    }

    return VK_SUCCESS;
}
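/*
Illustrative sketch (not from the original source): corruption detection works
by reserving VMA_DEBUG_MARGIN bytes around each allocation, filling them with
a known 32-bit pattern at allocation time, and re-checking that pattern here.
A standalone version of the write/check pair (hypothetical names and pattern):

    #include <cstdint>

    constexpr uint32_t MAGIC_SKETCH = 0xDEADF00Du; // Hypothetical guard pattern.

    inline void WriteMagicSketch(void* blockData, uint64_t offset, uint64_t marginBytes)
    {
        uint32_t* p = reinterpret_cast<uint32_t*>(static_cast<char*>(blockData) + offset);
        for(uint64_t i = 0; i < marginBytes / sizeof(uint32_t); ++i)
            p[i] = MAGIC_SKETCH;
    }

    inline bool ValidateMagicSketch(const void* blockData, uint64_t offset, uint64_t marginBytes)
    {
        const uint32_t* p = reinterpret_cast<const uint32_t*>(
            static_cast<const char*>(blockData) + offset);
        for(uint64_t i = 0; i < marginBytes / sizeof(uint32_t); ++i)
            if(p[i] != MAGIC_SKETCH) return false; // Guard bytes were overwritten.
        return true;
    }
*/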
void VmaBlockMetadata_Generic::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    VMA_ASSERT(!upperAddress);
    VMA_ASSERT(request.item != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.item;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used.
    UnregisterFreeSuballocation(request.item);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;
    suballoc.hAllocation = hAllocation;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.item;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.item, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
    {
        ++m_FreeCount;
    }
    if(paddingEnd > 0)
    {
        ++m_FreeCount;
    }
    m_SumFreeSize -= allocSize;
}
void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
{
    for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
        suballocItem != m_Suballocations.end();
        ++suballocItem)
    {
        VmaSuballocation& suballoc = *suballocItem;
        if(suballoc.hAllocation == allocation)
        {
            FreeSuballocation(suballocItem);
            VMA_HEAVY_ASSERT(Validate());
            return;
        }
    }
    VMA_ASSERT(0 && "Not found!");
}

void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
{
    for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
        suballocItem != m_Suballocations.end();
        ++suballocItem)
    {
        VmaSuballocation& suballoc = *suballocItem;
        if(suballoc.offset == offset)
        {
            FreeSuballocation(suballocItem);
            return;
        }
    }
    VMA_ASSERT(0 && "Not found!");
}
bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
{
    typedef VmaSuballocationList::iterator iter_type;
    for(iter_type suballocItem = m_Suballocations.begin();
        suballocItem != m_Suballocations.end();
        ++suballocItem)
    {
        VmaSuballocation& suballoc = *suballocItem;
        if(suballoc.hAllocation == alloc)
        {
            iter_type nextItem = suballocItem;
            ++nextItem;

            // Should have been ensured on higher level.
            VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);

            // Shrinking.
            if(newSize < alloc->GetSize())
            {
                const VkDeviceSize sizeDiff = suballoc.size - newSize;

                // If the next item is free, grow it backward to absorb the difference.
                if(nextItem != m_Suballocations.end() &&
                    nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    UnregisterFreeSuballocation(nextItem);
                    nextItem->offset -= sizeDiff;
                    nextItem->size += sizeDiff;
                    RegisterFreeSuballocation(nextItem);
                }
                // Otherwise insert a new free item after this one.
                else
                {
                    VmaSuballocation newFreeSuballoc;
                    newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
                    newFreeSuballoc.offset = suballoc.offset + newSize;
                    newFreeSuballoc.size = sizeDiff;
                    newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                    if(nextItem != m_Suballocations.end())
                    {
                        iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
                        RegisterFreeSuballocation(newFreeSuballocIt);
                    }
                    else
                    {
                        m_Suballocations.push_back(newFreeSuballoc);
                        iter_type newFreeSuballocIt = m_Suballocations.end();
                        RegisterFreeSuballocation(--newFreeSuballocIt);
                    }
                    ++m_FreeCount;
                }

                suballoc.size = newSize;
                m_SumFreeSize += sizeDiff;
            }
            // Growing.
            else
            {
                const VkDeviceSize sizeDiff = newSize - suballoc.size;

                // There must be a next item and it must be free.
                if(nextItem != m_Suballocations.end() &&
                    nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    // There is not enough free space, including margin.
                    if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
                    {
                        return false;
                    }
                    // There is more free space than required: move and shrink the next item.
                    if(nextItem->size > sizeDiff)
                    {
                        UnregisterFreeSuballocation(nextItem);
                        nextItem->offset += sizeDiff;
                        nextItem->size -= sizeDiff;
                        RegisterFreeSuballocation(nextItem);
                    }
                    // There is exactly the amount of free space required: remove the next free item.
                    else
                    {
                        UnregisterFreeSuballocation(nextItem);
                        m_Suballocations.erase(nextItem);
                        --m_FreeCount;
                    }
                }
                // No free space after this allocation: cannot grow in place.
                else
                {
                    return false;
                }

                suballoc.size = newSize;
                m_SumFreeSize -= sizeDiff;
            }

            // Validate() is not called here because alloc is updated to the new size outside of this call.
            return true;
        }
    }
    VMA_ASSERT(0 && "Not found!");
    return false;
}
bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
{
    VkDeviceSize lastSize = 0;
    for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    {
        const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];

        VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
        VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
        VMA_VALIDATE(it->size >= lastSize);
        lastSize = it->size;
    }
    return true;
}
bool VmaBlockMetadata_Generic::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: early return.
        if(GetSize() - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation, apply debug margin and alignment.
        *pOffset = suballocItem->offset;
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for bufferImageGranularity conflicts.
        // Increase alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                {
                    // Already on previous page.
                    break;
                }
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // If final offset is past this suballocation, this function must be
        // called again with another suballocation as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        if(suballocItem->offset + totalSize > GetSize())
        {
            return false;
        }

        // Advance lastSuballocItem until the desired size is covered, counting items to make lost.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for bufferImageGranularity conflicts.
        // If a conflict exists, more allocations must be made lost or the search fails.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation, apply debug margin and alignment.
        *pOffset = suballoc.offset;
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for bufferImageGranularity conflicts.
        // Increase alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                {
                    // Already on previous page.
                    break;
                }
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        // Fail if requested size plus margins does not fit in this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for bufferImageGranularity conflicts.
        // If a conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: success. pOffset is already filled.
    return true;
}
void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
{
    VMA_ASSERT(item != m_Suballocations.end());
    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);

    VmaSuballocationList::iterator nextItem = item;
    ++nextItem;
    VMA_ASSERT(nextItem != m_Suballocations.end());
    VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);

    item->size += nextItem->size;
    --m_FreeCount;
    m_Suballocations.erase(nextItem);
}
VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    if(mergeWithNext)
    {
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        RegisterFreeSuballocation(prevItem);
        return prevItem;
    }
    else
    {
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
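/*
Illustrative sketch (not from the original source): FreeSuballocation() keeps
the invariant that no two free ranges are ever adjacent by coalescing with the
previous and/or next neighbor on every free. The same pattern on a std::list
of ranges (hypothetical names):

    #include <cstdint>
    #include <iterator>
    #include <list>

    struct RangeSketch { uint64_t offset, size; bool free; };

    inline std::list<RangeSketch>::iterator FreeAndMergeSketch(
        std::list<RangeSketch>& ranges, std::list<RangeSketch>::iterator it)
    {
        it->free = true;
        // Merge with next neighbor if free.
        auto next = std::next(it);
        if(next != ranges.end() && next->free)
        {
            it->size += next->size;
            ranges.erase(next);
        }
        // Merge with previous neighbor if free.
        if(it != ranges.begin())
        {
            auto prev = std::prev(it);
            if(prev->free)
            {
                prev->size += it->size;
                ranges.erase(it);
                return prev;
            }
        }
        return it;
    }
*/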
void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
{
    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(item->size > 0);

    // You may want to enable this validation at the beginning or at the end of
    // this function, depending on what you want to check.
    VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());

    if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    {
        if(m_FreeSuballocationsBySize.empty())
        {
            m_FreeSuballocationsBySize.push_back(item);
        }
        else
        {
            VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
        }
    }

    //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
}
void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
{
    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(item->size > 0);

    // You may want to enable this validation at the beginning or at the end of
    // this function, depending on what you want to check.
    VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());

    if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    {
        VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
            m_FreeSuballocationsBySize.data(),
            m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
            item,
            VmaSuballocationItemSizeLess());
        for(size_t index = it - m_FreeSuballocationsBySize.data();
            index < m_FreeSuballocationsBySize.size();
            ++index)
        {
            if(m_FreeSuballocationsBySize[index] == item)
            {
                VmaVectorRemove(m_FreeSuballocationsBySize, index);
                return;
            }
            VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
        }
        VMA_ASSERT(0 && "Not found.");
    }

    //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
}
bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
    VkDeviceSize bufferImageGranularity,
    VmaSuballocationType& inOutPrevSuballocType) const
{
    if(bufferImageGranularity == 1 || IsEmpty())
    {
        return false;
    }

    VkDeviceSize minAlignment = VK_WHOLE_SIZE;
    bool typeConflictFound = false;
    for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
        it != m_Suballocations.cend();
        ++it)
    {
        const VmaSuballocationType suballocType = it->type;
        if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
        {
            minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
            if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
            {
                typeConflictFound = true;
            }
            inOutPrevSuballocType = suballocType;
        }
    }

    return typeConflictFound || minAlignment >= bufferImageGranularity;
}
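/*
Illustrative sketch (not from the original source): bufferImageGranularity
conflicts only matter when a linear resource (buffer) and a non-linear one
(optimal-tiling image) can land on the same "page" of that granularity. The
page test used throughout this file boils down to comparing the page index of
the last byte of one resource with the page index of the first byte of the
next, assuming the granularity is a power of two (hypothetical names):

    #include <cstdint>

    // True if [aOffset, aOffset + aSize) and the resource starting at bOffset
    // touch the same page of size `granularity` (a power of 2).
    inline bool OnSamePageSketch(uint64_t aOffset, uint64_t aSize,
        uint64_t bOffset, uint64_t granularity)
    {
        const uint64_t aEnd = aOffset + aSize - 1;
        const uint64_t aEndPage = aEnd & ~(granularity - 1);
        const uint64_t bStartPage = bOffset & ~(granularity - 1);
        return aEndPage == bStartPage;
    }
*/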
////////////////////////////////////////////////////////////////////////////////
// class VmaBlockMetadata_Linear

VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_SumFreeSize(0),
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_1stVectorIndex(0),
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0)
{
}

VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
{
}

void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);
    m_SumFreeSize = size;
}
bool VmaBlockMetadata_Linear::Validate() const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    VMA_VALIDATE(!suballocations1st.empty() ||
        suballocations2nd.empty() ||
        m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);

    if(!suballocations1st.empty())
    {
        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
        VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    }
    if(!suballocations2nd.empty())
    {
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    }

    VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());

    VkDeviceSize sumUsedSize = 0;
    const size_t suballoc1stCount = suballocations1st.size();
    VkDeviceSize offset = VMA_DEBUG_MARGIN;

    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = 0; i < suballoc2ndCount; ++i)
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
            suballoc.hAllocation == VK_NULL_HANDLE);
    }

    size_t nullItem1stCount = m_1stNullItemsBeginCount;

    for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
        VMA_VALIDATE(suballoc.offset >= offset);
        VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);

        if(!currFree)
        {
            VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
            VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
            sumUsedSize += suballoc.size;
        }
        else
        {
            ++nullItem1stCount;
        }

        offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    }
    VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = suballoc2ndCount; i--; )
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    VMA_VALIDATE(offset <= GetSize());
    VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);

    return true;
}
size_t VmaBlockMetadata_Linear::GetAllocationCount() const
{
    return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
        AccessSuballocations2nd().size() - m_2ndNullItemsCount;
}

VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
{
    const VkDeviceSize size = GetSize();

    // Gaps inside the vectors left by freed allocations are not considered,
    // because they are not suitable for reuse in a linear allocator.
    if(IsEmpty())
    {
        return size;
    }

    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();

    switch(m_2ndVectorMode)
    {
    case SECOND_VECTOR_EMPTY:
        // Available space is after end of 1st, as well as before its beginning
        // (which would turn it into a ring buffer).
        {
            const size_t suballocations1stCount = suballocations1st.size();
            VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
            const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
            const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
            return VMA_MAX(
                firstSuballoc.offset,
                size - (lastSuballoc.offset + lastSuballoc.size));
        }

    case SECOND_VECTOR_RING_BUFFER:
        // Available space is only between end of 2nd and beginning of 1st.
        {
            const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
            const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
            return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
        }

    case SECOND_VECTOR_DOUBLE_STACK:
        // Available space is only between end of 1st and top of 2nd.
        {
            const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
            const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
            return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
        }

    default:
        VMA_ASSERT(0);
        return 0;
    }
}
void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
{
    const VkDeviceSize size = GetSize();
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    const size_t suballoc1stCount = suballocations1st.size();
    const size_t suballoc2ndCount = suballocations2nd.size();

    outInfo.blockCount = 1;
    outInfo.allocationCount = (uint32_t)GetAllocationCount();
    outInfo.unusedRangeCount = 0;
    outInfo.usedBytes = 0;
    outInfo.allocationSizeMin = UINT64_MAX;
    outInfo.allocationSizeMax = 0;
    outInfo.unusedRangeSizeMin = UINT64_MAX;
    outInfo.unusedRangeSizeMax = 0;

    VkDeviceSize lastOffset = 0;

    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
        size_t nextAlloc2ndIndex = 0;
        while(lastOffset < freeSpace2ndTo1stEnd)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex < suballoc2ndCount &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                ++nextAlloc2ndIndex;
            }
            if(nextAlloc2ndIndex < suballoc2ndCount)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
                // Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    ++outInfo.unusedRangeCount;
                    outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
                    outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
                }
                // Process this allocation.
                outInfo.usedBytes += suballoc.size;
                outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
                outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
                lastOffset = suballoc.offset + suballoc.size;
                ++nextAlloc2ndIndex;
            }
            else // We are at the end.
            {
                if(lastOffset < freeSpace2ndTo1stEnd)
                {
                    const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
                    ++outInfo.unusedRangeCount;
                    outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
                    outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
                }
                lastOffset = freeSpace2ndTo1stEnd;
            }
        }
    }

    size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    const VkDeviceSize freeSpace1stTo2ndEnd =
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    while(lastOffset < freeSpace1stTo2ndEnd)
    {
        while(nextAlloc1stIndex < suballoc1stCount &&
            suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
        {
            ++nextAlloc1stIndex;
        }
        if(nextAlloc1stIndex < suballoc1stCount)
        {
            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
            if(lastOffset < suballoc.offset)
            {
                const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                ++outInfo.unusedRangeCount;
                outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
                outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
            }
            outInfo.usedBytes += suballoc.size;
            outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
            outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
            lastOffset = suballoc.offset + suballoc.size;
            ++nextAlloc1stIndex;
        }
        else // We are at the end.
        {
            if(lastOffset < freeSpace1stTo2ndEnd)
            {
                const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
                ++outInfo.unusedRangeCount;
                outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
                outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
            }
            lastOffset = freeSpace1stTo2ndEnd;
        }
    }

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
        while(lastOffset < size)
        {
            while(nextAlloc2ndIndex != SIZE_MAX &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                --nextAlloc2ndIndex;
            }
            if(nextAlloc2ndIndex != SIZE_MAX)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
                if(lastOffset < suballoc.offset)
                {
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    ++outInfo.unusedRangeCount;
                    outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
                    outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
                }
                outInfo.usedBytes += suballoc.size;
                outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
                outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
                lastOffset = suballoc.offset + suballoc.size;
                --nextAlloc2ndIndex;
            }
            else // We are at the end.
            {
                if(lastOffset < size)
                {
                    const VkDeviceSize unusedRangeSize = size - lastOffset;
                    ++outInfo.unusedRangeCount;
                    outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
                    outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
                }
                lastOffset = size;
            }
        }
    }

    outInfo.unusedBytes = size - outInfo.usedBytes;
}
void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    const VkDeviceSize size = GetSize();
    const size_t suballoc1stCount = suballocations1st.size();
    const size_t suballoc2ndCount = suballocations2nd.size();

    inoutStats.size += size;

    VkDeviceSize lastOffset = 0;

    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
        // Index into suballocations2nd, so it must start at 0.
        size_t nextAlloc2ndIndex = 0;
        while(lastOffset < freeSpace2ndTo1stEnd)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex < suballoc2ndCount &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                ++nextAlloc2ndIndex;
            }
            if(nextAlloc2ndIndex < suballoc2ndCount)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
                if(lastOffset < suballoc.offset)
                {
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    inoutStats.unusedSize += unusedRangeSize;
                    ++inoutStats.unusedRangeCount;
                    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
                }
                ++inoutStats.allocationCount;
                lastOffset = suballoc.offset + suballoc.size;
                ++nextAlloc2ndIndex;
            }
            else // We are at the end.
            {
                if(lastOffset < freeSpace2ndTo1stEnd)
                {
                    const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
                    inoutStats.unusedSize += unusedRangeSize;
                    ++inoutStats.unusedRangeCount;
                    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
                }
                lastOffset = freeSpace2ndTo1stEnd;
            }
        }
    }

    size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    const VkDeviceSize freeSpace1stTo2ndEnd =
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    while(lastOffset < freeSpace1stTo2ndEnd)
    {
        while(nextAlloc1stIndex < suballoc1stCount &&
            suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
        {
            ++nextAlloc1stIndex;
        }
        if(nextAlloc1stIndex < suballoc1stCount)
        {
            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
            if(lastOffset < suballoc.offset)
            {
                const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                inoutStats.unusedSize += unusedRangeSize;
                ++inoutStats.unusedRangeCount;
                inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
            }
            ++inoutStats.allocationCount;
            lastOffset = suballoc.offset + suballoc.size;
            ++nextAlloc1stIndex;
        }
        else // We are at the end.
        {
            if(lastOffset < freeSpace1stTo2ndEnd)
            {
                const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
                inoutStats.unusedSize += unusedRangeSize;
                ++inoutStats.unusedRangeCount;
                inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
            }
            lastOffset = freeSpace1stTo2ndEnd;
        }
    }

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
        while(lastOffset < size)
        {
            while(nextAlloc2ndIndex != SIZE_MAX &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                --nextAlloc2ndIndex;
            }
            if(nextAlloc2ndIndex != SIZE_MAX)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
                if(lastOffset < suballoc.offset)
                {
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    inoutStats.unusedSize += unusedRangeSize;
                    ++inoutStats.unusedRangeCount;
                    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
                }
                ++inoutStats.allocationCount;
                lastOffset = suballoc.offset + suballoc.size;
                --nextAlloc2ndIndex;
            }
            else // We are at the end.
            {
                if(lastOffset < size)
                {
                    const VkDeviceSize unusedRangeSize = size - lastOffset;
                    inoutStats.unusedSize += unusedRangeSize;
                    ++inoutStats.unusedRangeCount;
                    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
                }
                lastOffset = size;
            }
        }
    }
}
#if VMA_STATS_STRING_ENABLED

void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
{
    const VkDeviceSize size = GetSize();
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    const size_t suballoc1stCount = suballocations1st.size();
    const size_t suballoc2ndCount = suballocations2nd.size();

    // FIRST PASS: Calculate overall statistics.

    size_t unusedRangeCount = 0;
    VkDeviceSize usedBytes = 0;

    VkDeviceSize lastOffset = 0;

    size_t alloc2ndCount = 0;
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
        size_t nextAlloc2ndIndex = 0;
        while(lastOffset < freeSpace2ndTo1stEnd)
        {
            while(nextAlloc2ndIndex < suballoc2ndCount &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                ++nextAlloc2ndIndex;
            }
            if(nextAlloc2ndIndex < suballoc2ndCount)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
                if(lastOffset < suballoc.offset)
                {
                    ++unusedRangeCount;
                }
                ++alloc2ndCount;
                usedBytes += suballoc.size;
                lastOffset = suballoc.offset + suballoc.size;
                ++nextAlloc2ndIndex;
            }
            else
            {
                if(lastOffset < freeSpace2ndTo1stEnd)
                {
                    ++unusedRangeCount;
                }
                lastOffset = freeSpace2ndTo1stEnd;
            }
        }
    }

    size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    size_t alloc1stCount = 0;
    const VkDeviceSize freeSpace1stTo2ndEnd =
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    while(lastOffset < freeSpace1stTo2ndEnd)
    {
        while(nextAlloc1stIndex < suballoc1stCount &&
            suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
        {
            ++nextAlloc1stIndex;
        }
        if(nextAlloc1stIndex < suballoc1stCount)
        {
            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
            if(lastOffset < suballoc.offset)
            {
                ++unusedRangeCount;
            }
            ++alloc1stCount;
            usedBytes += suballoc.size;
            lastOffset = suballoc.offset + suballoc.size;
            ++nextAlloc1stIndex;
        }
        else
        {
            if(lastOffset < freeSpace1stTo2ndEnd)
            {
                ++unusedRangeCount;
            }
            lastOffset = freeSpace1stTo2ndEnd;
        }
    }

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
        while(lastOffset < size)
        {
            while(nextAlloc2ndIndex != SIZE_MAX &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                --nextAlloc2ndIndex;
            }
            if(nextAlloc2ndIndex != SIZE_MAX)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
                if(lastOffset < suballoc.offset)
                {
                    ++unusedRangeCount;
                }
                ++alloc2ndCount;
                usedBytes += suballoc.size;
                lastOffset = suballoc.offset + suballoc.size;
                --nextAlloc2ndIndex;
            }
            else
            {
                if(lastOffset < size)
                {
                    ++unusedRangeCount;
                }
                lastOffset = size;
            }
        }
    }

    const VkDeviceSize unusedBytes = size - usedBytes;
    PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);

    // SECOND PASS: Print the ranges.

    lastOffset = 0;

    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
        size_t nextAlloc2ndIndex = 0;
        while(lastOffset < freeSpace2ndTo1stEnd)
        {
            while(nextAlloc2ndIndex < suballoc2ndCount &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                ++nextAlloc2ndIndex;
            }
            if(nextAlloc2ndIndex < suballoc2ndCount)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
                if(lastOffset < suballoc.offset)
                {
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }
                PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
                lastOffset = suballoc.offset + suballoc.size;
                ++nextAlloc2ndIndex;
            }
            else
            {
                if(lastOffset < freeSpace2ndTo1stEnd)
                {
                    const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }
                lastOffset = freeSpace2ndTo1stEnd;
            }
        }
    }

    nextAlloc1stIndex = m_1stNullItemsBeginCount;
    while(lastOffset < freeSpace1stTo2ndEnd)
    {
        while(nextAlloc1stIndex < suballoc1stCount &&
            suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
        {
            ++nextAlloc1stIndex;
        }
        if(nextAlloc1stIndex < suballoc1stCount)
        {
            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
            if(lastOffset < suballoc.offset)
            {
                const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
            }
            PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
            lastOffset = suballoc.offset + suballoc.size;
            ++nextAlloc1stIndex;
        }
        else
        {
            if(lastOffset < freeSpace1stTo2ndEnd)
            {
                const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
                PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
            }
            lastOffset = freeSpace1stTo2ndEnd;
        }
    }

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
        while(lastOffset < size)
        {
            while(nextAlloc2ndIndex != SIZE_MAX &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                --nextAlloc2ndIndex;
            }
            if(nextAlloc2ndIndex != SIZE_MAX)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
                if(lastOffset < suballoc.offset)
                {
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }
                PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
                lastOffset = suballoc.offset + suballoc.size;
                --nextAlloc2ndIndex;
            }
            else
            {
                if(lastOffset < size)
                {
                    const VkDeviceSize unusedRangeSize = size - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }
                lastOffset = size;
            }
        }
    }

    PrintDetailedMap_End(json);
}

#endif // #if VMA_STATS_STRING_ENABLED
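/*
CreateAllocationRequest below distinguishes three cases (an informal sketch
based on the branches in the function body):
1. upperAddress: place the allocation just below 2nd.back() (or at the end of
   the block), aligning the offset downward - the double-stack path.
2. 2nd vector EMPTY or DOUBLE_STACK: append after 1st.back(), aligning upward,
   limited by 2nd.back().offset (or the block end).
3. 2nd vector EMPTY or RING_BUFFER: wrap around and allocate before the live
   beginning of the 1st vector, optionally making overlapping allocations lost
   when canMakeOtherLost is true.
VmaBlocksOnSamePage/VmaIsBufferImageGranularityConflict checks enlarge the
effective alignment where a buffer and an OPTIMAL image would share a page.
*/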
bool VmaBlockMetadata_Linear::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(pAllocationRequest != VMA_NULL);
    VMA_HEAVY_ASSERT(Validate());

    const VkDeviceSize size = GetSize();
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(upperAddress)
    {
        // Case 1: allocate from the top of the block, growing the 2nd vector downward.
        if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
        {
            VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
            return false;
        }
        if(allocSize > size)
        {
            return false;
        }
        VkDeviceSize resultBaseOffset = size - allocSize;
        if(!suballocations2nd.empty())
        {
            const VmaSuballocation& lastSuballoc = suballocations2nd.back();
            resultBaseOffset = lastSuballoc.offset - allocSize;
            if(allocSize > lastSuballoc.offset)
            {
                return false;
            }
        }

        // Start from offset equal to end of free space.
        VkDeviceSize resultOffset = resultBaseOffset;

        // Apply VMA_DEBUG_MARGIN at the end.
        if(VMA_DEBUG_MARGIN > 0)
        {
            if(resultOffset < VMA_DEBUG_MARGIN)
            {
                return false;
            }
            resultOffset -= VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        resultOffset = VmaAlignDown(resultOffset, allocAlignment);

        // Check next suballocations from 2nd for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1 && !suballocations2nd.empty())
        {
            bool bufferImageGranularityConflict = false;
            for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
            {
                const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                {
                    // Already on previous page.
                    break;
                }
            }
            if(bufferImageGranularityConflict)
            {
                resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
            }
        }

        // There is enough free space.
        const VkDeviceSize endOf1st = !suballocations1st.empty() ?
            suballocations1st.back().offset + suballocations1st.back().size :
            0;
        if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
        {
            // Check previous suballocations for BufferImageGranularity conflicts.
            // If conflict exists, allocation cannot be made here.
            if(bufferImageGranularity > 1)
            {
                for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
                        {
                            return false;
                        }
                    }
                    else
                    {
                        // Already on next page.
                        break;
                    }
                }
            }

            // All tests passed: Success.
            pAllocationRequest->offset = resultOffset;
            pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
            pAllocationRequest->sumItemSize = 0;
            pAllocationRequest->itemsToMakeLostCount = 0;
            return true;
        }
    }
    else // !upperAddress
    {
        if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
        {
            // Case 2: try to allocate at the end of the 1st vector.
            VkDeviceSize resultBaseOffset = 0;
            if(!suballocations1st.empty())
            {
                const VmaSuballocation& lastSuballoc = suballocations1st.back();
                resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
            }

            // Start from offset equal to beginning of free space.
            VkDeviceSize resultOffset = resultBaseOffset;

            // Apply VMA_DEBUG_MARGIN at the beginning.
            if(VMA_DEBUG_MARGIN > 0)
            {
                resultOffset += VMA_DEBUG_MARGIN;
            }

            // Apply alignment.
            resultOffset = VmaAlignUp(resultOffset, allocAlignment);

            // Check previous suballocations for BufferImageGranularity conflicts.
            // Make bigger alignment if necessary.
            if(bufferImageGranularity > 1 && !suballocations1st.empty())
            {
                bool bufferImageGranularityConflict = false;
                for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                        {
                            bufferImageGranularityConflict = true;
                            break;
                        }
                    }
                    else
                    {
                        // Already on previous page.
                        break;
                    }
                }
                if(bufferImageGranularityConflict)
                {
                    resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
                }
            }

            const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
                suballocations2nd.back().offset : size;

            // There is enough free space at the end after alignment.
            if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
            {
                // Check next suballocations from 2nd for BufferImageGranularity conflicts.
                // If conflict exists, allocation cannot be made here.
                if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
                {
                    for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
                    {
                        const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                        {
                            if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                            {
                                return false;
                            }
                        }
                        else
                        {
                            // Already on previous page.
                            break;
                        }
                    }
                }

                // All tests passed: Success.
                pAllocationRequest->offset = resultOffset;
                pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
                pAllocationRequest->sumItemSize = 0;
                pAllocationRequest->itemsToMakeLostCount = 0;
                return true;
            }
        }

        // Case 3: wrap around to the beginning of the block - allocate at the end of
        // the 2nd vector (ring buffer), possibly making 1st-vector allocations lost.
        if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
        {
            VMA_ASSERT(!suballocations1st.empty());

            VkDeviceSize resultBaseOffset = 0;
            if(!suballocations2nd.empty())
            {
                const VmaSuballocation& lastSuballoc = suballocations2nd.back();
                resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
            }

            // Start from offset equal to beginning of free space.
            VkDeviceSize resultOffset = resultBaseOffset;

            // Apply VMA_DEBUG_MARGIN at the beginning.
            if(VMA_DEBUG_MARGIN > 0)
            {
                resultOffset += VMA_DEBUG_MARGIN;
            }

            // Apply alignment.
            resultOffset = VmaAlignUp(resultOffset, allocAlignment);

            // Check previous suballocations for BufferImageGranularity conflicts.
            // Make bigger alignment if necessary.
            if(bufferImageGranularity > 1 && !suballocations2nd.empty())
            {
                bool bufferImageGranularityConflict = false;
                for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                        {
                            bufferImageGranularityConflict = true;
                            break;
                        }
                    }
                    else
                    {
                        // Already on previous page.
                        break;
                    }
                }
                if(bufferImageGranularityConflict)
                {
                    resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
                }
            }

            pAllocationRequest->itemsToMakeLostCount = 0;
            pAllocationRequest->sumItemSize = 0;
            size_t index1st = m_1stNullItemsBeginCount;

            if(canMakeOtherLost)
            {
                while(index1st < suballocations1st.size() &&
                    resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
                {
                    // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
                    const VmaSuballocation& suballoc = suballocations1st[index1st];
                    if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
                        if(suballoc.hAllocation->CanBecomeLost() &&
                            suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++pAllocationRequest->itemsToMakeLostCount;
                            pAllocationRequest->sumItemSize += suballoc.size;
                        }
                        else
                        {
                            return false;
                        }
                    }
                    ++index1st;
                }

                // Check following suballocations for BufferImageGranularity conflicts.
                // They must be made lost as well, or the allocation fails.
                if(bufferImageGranularity > 1)
                {
                    while(index1st < suballocations1st.size())
                    {
                        const VmaSuballocation& suballoc = suballocations1st[index1st];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
                        {
                            if(suballoc.hAllocation != VK_NULL_HANDLE)
                            {
                                if(suballoc.hAllocation->CanBecomeLost() &&
                                    suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                                {
                                    ++pAllocationRequest->itemsToMakeLostCount;
                                    pAllocationRequest->sumItemSize += suballoc.size;
                                }
                                else
                                {
                                    return false;
                                }
                            }
                        }
                        else
                        {
                            // Already on next page.
                            break;
                        }
                        ++index1st;
                    }
                }
            }

            // There is enough free space at the end after alignment.
            if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
                (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
            {
                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, allocation cannot be made here.
                if(bufferImageGranularity > 1)
                {
                    for(size_t nextSuballocIndex = index1st;
                        nextSuballocIndex < suballocations1st.size();
                        nextSuballocIndex++)
                    {
                        const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                        {
                            if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                            {
                                return false;
                            }
                        }
                        else
                        {
                            // Already on next page.
                            break;
                        }
                    }
                }

                // All tests passed: Success.
                pAllocationRequest->offset = resultOffset;
                pAllocationRequest->sumFreeSize =
                    (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
                    - resultBaseOffset
                    - pAllocationRequest->sumItemSize;
                return true;
            }
        }
    }

    return false;
}
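/*
Lost-allocation support (ring-buffer mode only): CreateAllocationRequest may
have counted itemsToMakeLostCount allocations standing in the way of the
request. The function below walks the 1st vector from its live beginning and
calls MakeLost() on each candidate; a single failure aborts the whole request.
*/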
bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    if(pAllocationRequest->itemsToMakeLostCount == 0)
    {
        return true;
    }

    VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);

    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    size_t index1st = m_1stNullItemsBeginCount;
    size_t madeLostCount = 0;
    while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    {
        VMA_ASSERT(index1st < suballocations1st.size());
        VmaSuballocation& suballoc = suballocations1st[index1st];
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
            VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
            if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
            {
                suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                suballoc.hAllocation = VK_NULL_HANDLE;
                m_SumFreeSize += suballoc.size;
                ++m_1stNullItemsMiddleCount;
                ++madeLostCount;
            }
            else
            {
                return false;
            }
        }
        ++index1st;
    }

    CleanupAfterFree();
    return true;
}
uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    uint32_t lostAllocationCount = 0;

    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    {
        VmaSuballocation& suballoc = suballocations1st[i];
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
            suballoc.hAllocation->CanBecomeLost() &&
            suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            suballoc.hAllocation = VK_NULL_HANDLE;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += suballoc.size;
            ++lostAllocationCount;
        }
    }

    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    {
        VmaSuballocation& suballoc = suballocations2nd[i];
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
            suballoc.hAllocation->CanBecomeLost() &&
            suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            suballoc.hAllocation = VK_NULL_HANDLE;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += suballoc.size;
            ++lostAllocationCount;
        }
    }

    if(lostAllocationCount)
    {
        CleanupAfterFree();
    }

    return lostAllocationCount;
}
VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
            {
                VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
                return VK_ERROR_VALIDATION_FAILED_EXT;
            }
            if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
            {
                VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
                return VK_ERROR_VALIDATION_FAILED_EXT;
            }
        }
    }

    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    {
        const VmaSuballocation& suballoc = suballocations2nd[i];
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
            {
                VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
                return VK_ERROR_VALIDATION_FAILED_EXT;
            }
            if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
            {
                VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
                return VK_ERROR_VALIDATION_FAILED_EXT;
            }
        }
    }

    return VK_SUCCESS;
}
void VmaBlockMetadata_Linear::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };

    if(upperAddress)
    {
        VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
            "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
        SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
        suballocations2nd.push_back(newSuballoc);
        m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    }
    else
    {
        SuballocationVectorType& suballocations1st = AccessSuballocations1st();

        // First allocation.
        if(suballocations1st.empty())
        {
            suballocations1st.push_back(newSuballoc);
        }
        else
        {
            // New allocation at the end of 1st vector.
            if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
            {
                // Check if it fits before the end of the block.
                VMA_ASSERT(request.offset + allocSize <= GetSize());
                suballocations1st.push_back(newSuballoc);
            }
            // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
            else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
            {
                SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

                switch(m_2ndVectorMode)
                {
                case SECOND_VECTOR_EMPTY:
                    // First allocation from second part ring buffer.
                    VMA_ASSERT(suballocations2nd.empty());
                    m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
                    break;
                case SECOND_VECTOR_RING_BUFFER:
                    // 2-part ring buffer is already started.
                    VMA_ASSERT(!suballocations2nd.empty());
                    break;
                case SECOND_VECTOR_DOUBLE_STACK:
                    VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
                    break;
                default:
                    VMA_ASSERT(0);
                }

                suballocations2nd.push_back(newSuballoc);
            }
            else
            {
                VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
            }
        }
    }

    m_SumFreeSize -= newSuballoc.size;
}
void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
{
    FreeAtOffset(allocation->GetOffset());
}

void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(!suballocations1st.empty())
    {
        // First allocation: Mark it as next empty at the beginning.
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if(firstSuballoc.offset == offset)
        {
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.hAllocation = VK_NULL_HANDLE;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }

    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }

    // Item from the middle of 1st vector.
    {
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc);
        if(it != suballocations1st.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
            VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
        if(it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}
bool VmaBlockMetadata_Linear::ShouldCompact1st() const
{
    const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    const size_t suballocCount = AccessSuballocations1st().size();
    return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
}

void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(IsEmpty())
    {
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        while(m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
        {
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector.
        while(m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        if(ShouldCompact1st())
        {
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
                {
                    ++srcIndex;
                }
                if(dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if(suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }

        // 1st vector became empty.
        if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                m_1stVectorIndex ^= 1;
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}
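/*
Informal overview of the buddy metadata below, derived from the code that
follows: the usable part of the block is the largest power of two not
exceeding its size (m_UsableSize = VmaPrevPow2(size)). Nodes form a binary
tree; each node is FREE, an ALLOCATION, or SPLIT into two buddies of half the
size. Level 0 spans the whole usable block and LevelToNodeSize(n) halves per
level, so e.g. a 256 MiB block has node sizes 256, 128, 64, ... MiB at levels
0, 1, 2, ... Each level keeps a doubly linked free list (m_FreeList), so a
free node of a given size is found in O(1) and buddies are merged on free.
*/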
////////////////////////////////////////////////////////////////////////////////
// class VmaBlockMetadata_Buddy

VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_Root(VMA_NULL),
    m_AllocationCount(0),
    m_FreeCount(1),
    m_SumFreeSize(0)
{
    memset(m_FreeList, 0, sizeof(m_FreeList));
}

VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
{
    DeleteNode(m_Root);
}

void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);

    m_UsableSize = VmaPrevPow2(size);
    m_SumFreeSize = m_UsableSize;

    // Calculate m_LevelCount.
    m_LevelCount = 1;
    while(m_LevelCount < MAX_LEVELS &&
        LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
    {
        ++m_LevelCount;
    }

    Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
    rootNode->offset = 0;
    rootNode->type = Node::TYPE_FREE;
    rootNode->parent = VMA_NULL;
    rootNode->buddy = VMA_NULL;

    m_Root = rootNode;
    AddToFreeListFront(0, rootNode);
}
bool VmaBlockMetadata_Buddy::Validate() const
{
    // Validate tree.
    ValidationContext ctx;
    if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    {
        VMA_VALIDATE(false && "ValidateNode failed.");
    }
    VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);

    // Validate free node lists.
    for(uint32_t level = 0; level < m_LevelCount; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
            m_FreeList[level].front->free.prev == VMA_NULL);

        for(Node* node = m_FreeList[level].front;
            node != VMA_NULL;
            node = node->free.next)
        {
            VMA_VALIDATE(node->type == Node::TYPE_FREE);

            if(node->free.next == VMA_NULL)
            {
                VMA_VALIDATE(m_FreeList[level].back == node);
            }
            else
            {
                VMA_VALIDATE(node->free.next->free.prev == node);
            }
        }
    }

    // Validate that free lists at higher levels are empty.
    for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    }

    return true;
}
VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
{
    for(uint32_t level = 0; level < m_LevelCount; ++level)
    {
        if(m_FreeList[level].front != VMA_NULL)
        {
            return LevelToNodeSize(level);
        }
    }
    return 0;
}

void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
{
    const VkDeviceSize unusableSize = GetUnusableSize();

    outInfo.blockCount = 1;
    outInfo.allocationCount = outInfo.unusedRangeCount = 0;
    outInfo.usedBytes = outInfo.unusedBytes = 0;
    outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
    outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;

    CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));

    if(unusableSize > 0)
    {
        ++outInfo.unusedRangeCount;
        outInfo.unusedBytes += unusableSize;
        outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
        outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
    }
}

void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
{
    const VkDeviceSize unusableSize = GetUnusableSize();

    inoutStats.size += GetSize();
    inoutStats.unusedSize += m_SumFreeSize + unusableSize;
    inoutStats.allocationCount += m_AllocationCount;
    inoutStats.unusedRangeCount += m_FreeCount;
    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());

    if(unusableSize > 0)
    {
        ++inoutStats.unusedRangeCount;
        // Not updating unusedRangeSizeMax with unusableSize because this space is not available for allocations.
    }
}

#if VMA_STATS_STRING_ENABLED

void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
{
    VmaStatInfo stat;
    CalcAllocationStatInfo(stat);

    PrintDetailedMap_Begin(
        json,
        stat.unusedBytes,
        stat.allocationCount,
        stat.unusedRangeCount);

    PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));

    const VkDeviceSize unusableSize = GetUnusableSize();
    if(unusableSize > 0)
    {
        PrintDetailedMap_UnusedRange(json,
            m_UsableSize, // offset
            unusableSize); // size
    }

    PrintDetailedMap_End(json);
}

#endif // #if VMA_STATS_STRING_ENABLED
bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");

    // Simple way to respect bufferImageGranularity: whenever the allocation
    // might be an OPTIMAL image, round both alignment and size up to it.
    if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
    {
        allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
        allocSize = VMA_MAX(allocSize, bufferImageGranularity);
    }

    if(allocSize > m_UsableSize)
    {
        return false;
    }

    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    for(uint32_t level = targetLevel + 1; level--; )
    {
        for(Node* freeNode = m_FreeList[level].front;
            freeNode != VMA_NULL;
            freeNode = freeNode->free.next)
        {
            if(freeNode->offset % allocAlignment == 0)
            {
                pAllocationRequest->offset = freeNode->offset;
                pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
                pAllocationRequest->sumItemSize = 0;
                pAllocationRequest->itemsToMakeLostCount = 0;
                pAllocationRequest->customData = (void*)(uintptr_t)level;
                return true;
            }
        }
    }

    return false;
}

bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    /*
    Lost allocations are not supported in buddy allocator. Returning true here
    is correct only because itemsToMakeLostCount is always 0 for this metadata.
    */
    return pAllocationRequest->itemsToMakeLostCount == 0;
}
uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    // Lost allocations are not supported in buddy allocator.
    return 0;
}

void VmaBlockMetadata_Buddy::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;

    Node* currNode = m_FreeList[currLevel].front;
    VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    while(currNode->offset != request.offset)
    {
        currNode = currNode->free.next;
        VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    }

    // Go down the tree, splitting free nodes until targetLevel is reached.
    while(currLevel < targetLevel)
    {
        RemoveFromFreeList(currLevel, currNode);

        const uint32_t childrenLevel = currLevel + 1;

        // Create two free sub-nodes.
        Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
        Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();

        leftChild->offset = currNode->offset;
        leftChild->type = Node::TYPE_FREE;
        leftChild->parent = currNode;
        leftChild->buddy = rightChild;

        rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
        rightChild->type = Node::TYPE_FREE;
        rightChild->parent = currNode;
        rightChild->buddy = leftChild;

        // Convert current node to split type.
        currNode->type = Node::TYPE_SPLIT;
        currNode->split.leftChild = leftChild;

        // Add child nodes to free list. Order is important!
        AddToFreeListFront(childrenLevel, rightChild);
        AddToFreeListFront(childrenLevel, leftChild);

        ++m_FreeCount;
        ++currLevel;
        currNode = m_FreeList[currLevel].front;
        // currNode, as left child of the node just split, also fulfills the
        // alignment requirement.
    }

    // Remove from free list.
    VMA_ASSERT(currLevel == targetLevel &&
        currNode != VMA_NULL &&
        currNode->type == Node::TYPE_FREE);
    RemoveFromFreeList(currLevel, currNode);

    // Convert to allocation node.
    currNode->type = Node::TYPE_ALLOCATION;
    currNode->allocation.alloc = hAllocation;

    ++m_AllocationCount;
    --m_FreeCount;
    m_SumFreeSize -= allocSize;
}
void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
{
    if(node->type == Node::TYPE_SPLIT)
    {
        DeleteNode(node->split.leftChild->buddy);
        DeleteNode(node->split.leftChild);
    }

    vma_delete(GetAllocationCallbacks(), node);
}

bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
{
    VMA_VALIDATE(level < m_LevelCount);
    VMA_VALIDATE(curr->parent == parent);
    VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
    VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
    switch(curr->type)
    {
    case Node::TYPE_FREE:
        // curr->free.prev and next are validated separately.
        ctx.calculatedSumFreeSize += levelNodeSize;
        ++ctx.calculatedFreeCount;
        break;
    case Node::TYPE_ALLOCATION:
        ++ctx.calculatedAllocationCount;
        ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
        VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
        break;
    case Node::TYPE_SPLIT:
        {
            const uint32_t childrenLevel = level + 1;
            const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
            const Node* const leftChild = curr->split.leftChild;
            VMA_VALIDATE(leftChild != VMA_NULL);
            VMA_VALIDATE(leftChild->offset == curr->offset);
            if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
            {
                VMA_VALIDATE(false && "ValidateNode for left child failed.");
            }
            const Node* const rightChild = leftChild->buddy;
            VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
            if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
            {
                VMA_VALIDATE(false && "ValidateNode for right child failed.");
            }
        }
        break;
    default:
        return false;
    }

    return true;
}
uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
{
    uint32_t level = 0;
    VkDeviceSize currLevelNodeSize = m_UsableSize;
    VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
    while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
    {
        ++level;
        currLevelNodeSize = nextLevelNodeSize;
        nextLevelNodeSize = currLevelNodeSize >> 1;
    }
    return level;
}

void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
{
    // Find node and level.
    Node* node = m_Root;
    VkDeviceSize nodeOffset = 0;
    uint32_t level = 0;
    VkDeviceSize levelNodeSize = LevelToNodeSize(0);
    while(node->type == Node::TYPE_SPLIT)
    {
        const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
        if(offset < nodeOffset + nextLevelSize)
        {
            node = node->split.leftChild;
        }
        else
        {
            node = node->split.leftChild->buddy;
            nodeOffset += nextLevelSize;
        }
        ++level;
        levelNodeSize = nextLevelSize;
    }

    VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
    VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);

    ++m_FreeCount;
    --m_AllocationCount;
    m_SumFreeSize += alloc->GetSize();

    node->type = Node::TYPE_FREE;

    // Join free nodes to successively higher levels.
    while(level > 0 && node->buddy->type == Node::TYPE_FREE)
    {
        RemoveFromFreeList(level, node->buddy);
        Node* const parent = node->parent;

        vma_delete(GetAllocationCallbacks(), node->buddy);
        vma_delete(GetAllocationCallbacks(), node);
        parent->type = Node::TYPE_FREE;

        node = parent;
        --level;
        --m_FreeCount;
    }

    AddToFreeListFront(level, node);
}
void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
{
    switch(node->type)
    {
    case Node::TYPE_FREE:
        ++outInfo.unusedRangeCount;
        outInfo.unusedBytes += levelNodeSize;
        outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
        outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
        break;
    case Node::TYPE_ALLOCATION:
        {
            const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
            ++outInfo.allocationCount;
            outInfo.usedBytes += allocSize;
            outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
            outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);

            const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
            if(unusedRangeSize > 0)
            {
                ++outInfo.unusedRangeCount;
                outInfo.unusedBytes += unusedRangeSize;
                outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
                outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
            }
        }
        break;
    case Node::TYPE_SPLIT:
        {
            const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
            const Node* const leftChild = node->split.leftChild;
            CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
            const Node* const rightChild = leftChild->buddy;
            CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
        }
        break;
    default:
        VMA_ASSERT(0);
    }
}
void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
{
    VMA_ASSERT(node->type == Node::TYPE_FREE);

    // List is empty.
    Node* const frontNode = m_FreeList[level].front;
    if(frontNode == VMA_NULL)
    {
        VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
        node->free.prev = node->free.next = VMA_NULL;
        m_FreeList[level].front = m_FreeList[level].back = node;
    }
    else
    {
        VMA_ASSERT(frontNode->free.prev == VMA_NULL);
        node->free.prev = VMA_NULL;
        node->free.next = frontNode;
        frontNode->free.prev = node;
        m_FreeList[level].front = node;
    }
}

void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
{
    VMA_ASSERT(m_FreeList[level].front != VMA_NULL);

    // It is at the front.
    if(node->free.prev == VMA_NULL)
    {
        VMA_ASSERT(m_FreeList[level].front == node);
        m_FreeList[level].front = node->free.next;
    }
    else
    {
        Node* const prevFreeNode = node->free.prev;
        VMA_ASSERT(prevFreeNode->free.next == node);
        prevFreeNode->free.next = node->free.next;
    }

    // It is at the back.
    if(node->free.next == VMA_NULL)
    {
        VMA_ASSERT(m_FreeList[level].back == node);
        m_FreeList[level].back = node->free.prev;
    }
    else
    {
        Node* const nextFreeNode = node->free.next;
        VMA_ASSERT(nextFreeNode->free.prev == node);
        nextFreeNode->free.prev = node->free.prev;
    }
}
#if VMA_STATS_STRING_ENABLED

void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
{
    switch(node->type)
    {
    case Node::TYPE_FREE:
        PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
        break;
    case Node::TYPE_ALLOCATION:
        {
            PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
            const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
            if(allocSize < levelNodeSize)
            {
                PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
            }
        }
        break;
    case Node::TYPE_SPLIT:
        {
            const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
            const Node* const leftChild = node->split.leftChild;
            PrintDetailedMapNode(json, leftChild, childrenNodeSize);
            const Node* const rightChild = leftChild->buddy;
            PrintDetailedMapNode(json, rightChild, childrenNodeSize);
        }
        break;
    default:
        VMA_ASSERT(0);
    }
}

#endif // #if VMA_STATS_STRING_ENABLED
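/*
VmaDeviceMemoryBlock (below) wraps a single VkDeviceMemory handle together with
a metadata object (generic, linear, or buddy, chosen in Init() from the pool's
algorithm flags), a persistent-map reference count (m_MapCount), and a mutex
that serializes map/unmap and bind calls on the same VkDeviceMemory when they
may come from multiple threads.
*/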
////////////////////////////////////////////////////////////////////////////////
// class VmaDeviceMemoryBlock

VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX),
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0),
    m_pMappedData(VMA_NULL)
{
}

void VmaDeviceMemoryBlock::Init(
    VmaAllocator hAllocator,
    uint32_t newMemoryTypeIndex,
    VkDeviceMemory newMemory,
    VkDeviceSize newSize,
    uint32_t id,
    uint32_t algorithm)
{
    VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);

    m_MemoryTypeIndex = newMemoryTypeIndex;
    m_Id = id;
    m_hMemory = newMemory;

    switch(algorithm)
    {
    case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
        m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
        break;
    case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
        m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
        break;
    default:
        VMA_ASSERT(0);
        // Fall-through.
    case 0:
        m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
    }
    m_pMetadata->Init(newSize);
}

void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
{
    // Hitting this assert means some allocations were not freed before
    // destruction of this memory block - a memory leak in the application.
    VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");

    VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    m_hMemory = VK_NULL_HANDLE;

    vma_delete(allocator, m_pMetadata);
    m_pMetadata = VMA_NULL;
}
bool VmaDeviceMemoryBlock::Validate() const
{
    VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
        (m_pMetadata->GetSize() != 0));

    return m_pMetadata->Validate();
}

VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
{
    void* pData = nullptr;
    VkResult res = Map(hAllocator, 1, &pData);
    if(res != VK_SUCCESS)
    {
        return res;
    }

    res = m_pMetadata->CheckCorruption(pData);

    Unmap(hAllocator, 1);

    return res;
}

VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
{
    if(count == 0)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    if(m_MapCount != 0)
    {
        // Already mapped: just increase the reference count.
        m_MapCount += count;
        VMA_ASSERT(m_pMappedData != VMA_NULL);
        if(ppData != VMA_NULL)
        {
            *ppData = m_pMappedData;
        }
        return VK_SUCCESS;
    }
    else
    {
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            &m_pMappedData);
        if(result == VK_SUCCESS)
        {
            if(ppData != VMA_NULL)
            {
                *ppData = m_pMappedData;
            }
            m_MapCount = count;
        }
        return result;
    }
}

void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
{
    if(count == 0)
    {
        return;
    }

    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    if(m_MapCount >= count)
    {
        m_MapCount -= count;
        if(m_MapCount == 0)
        {
            m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
        }
    }
    else
    {
        VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    }
}
VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
{
    VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);

    void* pData;
    VkResult res = Map(hAllocator, 1, &pData);
    if(res != VK_SUCCESS)
    {
        return res;
    }

    VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
    VmaWriteMagicValue(pData, allocOffset + allocSize);

    Unmap(hAllocator, 1);

    return VK_SUCCESS;
}

VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
{
    VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);

    void* pData;
    VkResult res = Map(hAllocator, 1, &pData);
    if(res != VK_SUCCESS)
    {
        return res;
    }

    if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
    {
        VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
    }
    else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
    {
        VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
    }

    Unmap(hAllocator, 1);

    return VK_SUCCESS;
}
VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    const VmaAllocator hAllocator,
    const VmaAllocation hAllocation,
    VkBuffer hBuffer)
{
    VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
        hAllocation->GetBlock() == this);
    // This lock is important so that we don't call vkBind... and/or vkMap...
    // simultaneously on the same VkDeviceMemory from multiple threads.
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
        hAllocator->m_hDevice,
        hBuffer,
        m_hMemory,
        hAllocation->GetOffset());
}

VkResult VmaDeviceMemoryBlock::BindImageMemory(
    const VmaAllocator hAllocator,
    const VmaAllocation hAllocation,
    VkImage hImage)
{
    VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
        hAllocation->GetBlock() == this);
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    return hAllocator->GetVulkanFunctions().vkBindImageMemory(
        hAllocator->m_hDevice,
        hImage,
        m_hMemory,
        hAllocation->GetOffset());
}
static void VmaInitStatInfo(VmaStatInfo& outInfo)
{
    memset(&outInfo, 0, sizeof(outInfo));
    outInfo.allocationSizeMin = UINT64_MAX;
    outInfo.unusedRangeSizeMin = UINT64_MAX;
}

static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
{
    inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
        VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
        VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
}

VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo,
    VkDeviceSize preferredBlockSize) :
    m_BlockVector(
        hAllocator,
        createInfo.memoryTypeIndex,
        createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        true, // isCustomPool
        createInfo.blockSize != 0, // explicitBlockSize
        createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    m_Id(0)
{
}

VmaPool_T::~VmaPool_T()
{
}

#if VMA_STATS_STRING_ENABLED

#endif // #if VMA_STATS_STRING_ENABLED

////////////////////////////////////////////////////////////////////////////////
// class VmaBlockVector

VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool isCustomPool,
    bool explicitBlockSize,
    uint32_t algorithm) :
    m_hAllocator(hAllocator),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_IsCustomPool(isCustomPool),
    m_ExplicitBlockSize(explicitBlockSize),
    m_Algorithm(algorithm),
    m_HasEmptyBlock(false),
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_NextBlockId(0)
{
}

VmaBlockVector::~VmaBlockVector()
{
    for(size_t i = m_Blocks.size(); i--; )
    {
        m_Blocks[i]->Destroy(m_hAllocator);
        vma_delete(m_hAllocator, m_Blocks[i]);
    }
}

VkResult VmaBlockVector::CreateMinBlocks()
{
    for(size_t i = 0; i < m_MinBlockCount; ++i)
    {
        VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
        if(res != VK_SUCCESS)
        {
            return res;
        }
    }
    return VK_SUCCESS;
}
void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
{
    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);

    const size_t blockCount = m_Blocks.size();

    pStats->size = 0;
    pStats->unusedSize = 0;
    pStats->allocationCount = 0;
    pStats->unusedRangeCount = 0;
    pStats->unusedRangeSizeMax = 0;
    pStats->blockCount = blockCount;

    for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
        VMA_ASSERT(pBlock);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        pBlock->m_pMetadata->AddPoolStats(*pStats);
    }
}

bool VmaBlockVector::IsCorruptionDetectionEnabled() const
{
    const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
        (VMA_DEBUG_MARGIN > 0) &&
        (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
}

static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
VkResult VmaBlockVector::Allocate(
    VmaPool hCurrentPool,
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    const VmaAllocationCreateInfo& createInfo,
    VmaSuballocationType suballocType,
    size_t allocationCount,
    VmaAllocation* pAllocations)
{
    size_t allocIndex;
    VkResult res = VK_SUCCESS;

    {
        VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
        for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
        {
            res = AllocatePage(
                hCurrentPool,
                currentFrameIndex,
                size,
                alignment,
                createInfo,
                suballocType,
                pAllocations + allocIndex);
            if(res != VK_SUCCESS)
            {
                break;
            }
        }
    }

    if(res != VK_SUCCESS)
    {
        // Free all already created allocations.
        while(allocIndex--)
        {
            Free(pAllocations[allocIndex]);
        }
        memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    }

    return res;
}
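/*
AllocatePage (below) implements the per-allocation strategy of a block vector,
roughly in this order (see the function body for the exact conditions):
1. try existing blocks - only the last block for the linear algorithm,
   otherwise forward (best fit) or backward (worst/first fit) over m_Blocks;
2. create a new block, preferring 1/8, 1/4, 1/2 of the preferred size for the
   first blocks and retrying with halved sizes if device allocation fails;
3. as a last resort, make other allocations lost (canMakeOtherLost), retrying
   up to VMA_ALLOCATION_TRY_COUNT times before giving up.
*/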
VkResult VmaBlockVector::AllocatePage(
    VmaPool hCurrentPool,
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    const VmaAllocationCreateInfo& createInfo,
    VmaSuballocationType suballocType,
    VmaAllocation* pAllocation)
{
    const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    const bool canCreateNewBlock =
        ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
        (m_Blocks.size() < m_MaxBlockCount);
    uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;

    // With the linear algorithm, canMakeOtherLost is available only when used
    // as a ring buffer, which in turn requires maxBlockCount = 1.
    if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
    {
        canMakeOtherLost = false;
    }

    // Upper address can only be used with linear allocator and within single memory block.
    if(isUpperAddress &&
        (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
    {
        return VK_ERROR_FEATURE_NOT_PRESENT;
    }

    // Validate strategy.
    switch(strategy)
    {
    case 0:
        strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
        break;
    case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
    case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
    case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
        break;
    default:
        return VK_ERROR_FEATURE_NOT_PRESENT;
    }

    // Early reject: requested allocation size is larger than maximum block size for this block vector.
    if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
    {
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }

    if(!canMakeOtherLost || canCreateNewBlock)
    {
        // 1. Search existing allocations. Try to allocate without making other allocations lost.
        VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
        allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;

        if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
        {
            // Use only last block.
            if(!m_Blocks.empty())
            {
                VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
                VMA_ASSERT(pCurrBlock);
                VkResult res = AllocateFromBlock(
                    pCurrBlock, hCurrentPool, currentFrameIndex, size, alignment,
                    allocFlagsCopy, createInfo.pUserData, suballocType, strategy,
                    pAllocation);
                if(res == VK_SUCCESS)
                {
                    VMA_DEBUG_LOG("    Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
                    return VK_SUCCESS;
                }
            }
        }
        else
        {
            if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
            {
                // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
                for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
                {
                    VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
                    VMA_ASSERT(pCurrBlock);
                    VkResult res = AllocateFromBlock(
                        pCurrBlock, hCurrentPool, currentFrameIndex, size, alignment,
                        allocFlagsCopy, createInfo.pUserData, suballocType, strategy,
                        pAllocation);
                    if(res == VK_SUCCESS)
                    {
                        VMA_DEBUG_LOG("    Returned from existing block #%u", (uint32_t)blockIndex);
                        return VK_SUCCESS;
                    }
                }
            }
            else // WORST_FIT, FIRST_FIT
            {
                // Backward order in m_Blocks - prefer blocks with largest amount of free space.
                for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
                {
                    VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
                    VMA_ASSERT(pCurrBlock);
                    VkResult res = AllocateFromBlock(
                        pCurrBlock, hCurrentPool, currentFrameIndex, size, alignment,
                        allocFlagsCopy, createInfo.pUserData, suballocType, strategy,
                        pAllocation);
                    if(res == VK_SUCCESS)
                    {
                        VMA_DEBUG_LOG("    Returned from existing block #%u", (uint32_t)blockIndex);
                        return VK_SUCCESS;
                    }
                }
            }
        }

        // 2. Try to create new block.
        if(canCreateNewBlock)
        {
            // Calculate optimal size for new block.
            VkDeviceSize newBlockSize = m_PreferredBlockSize;
            uint32_t newBlockSizeShift = 0;
            const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;

            if(!m_ExplicitBlockSize)
            {
                // Allocate 1/8, 1/4, 1/2 as first blocks.
                const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
                for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
                {
                    const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
                    if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
                    {
                        newBlockSize = smallerNewBlockSize;
                        ++newBlockSizeShift;
                    }
                    else
                    {
                        break;
                    }
                }
            }

            size_t newBlockIndex = 0;
            VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
            // Allocating blocks of other sizes failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
            if(!m_ExplicitBlockSize)
            {
                while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
                {
                    const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
                    if(smallerNewBlockSize >= size)
                    {
                        newBlockSize = smallerNewBlockSize;
                        ++newBlockSizeShift;
                        res = CreateBlock(newBlockSize, &newBlockIndex);
                    }
                    else
                    {
                        break;
                    }
                }
            }

            if(res == VK_SUCCESS)
            {
                VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
                VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);

                res = AllocateFromBlock(
                    pBlock, hCurrentPool, currentFrameIndex, size, alignment,
                    allocFlagsCopy, createInfo.pUserData, suballocType, strategy,
                    pAllocation);
                if(res == VK_SUCCESS)
                {
                    VMA_DEBUG_LOG("    Created new block Size=%llu", newBlockSize);
                    return VK_SUCCESS;
                }
                else
                {
                    // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
                    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
                }
            }
        }
    }
11589 if(canMakeOtherLost)
11591 uint32_t tryIndex = 0;
11592 for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
11594 VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
11595 VmaAllocationRequest bestRequest = {};
11596 VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
11602 for(
size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11604 VmaDeviceMemoryBlock*
const pCurrBlock = m_Blocks[blockIndex];
11605 VMA_ASSERT(pCurrBlock);
11606 VmaAllocationRequest currRequest = {};
11607 if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11610 m_BufferImageGranularity,
11619 const VkDeviceSize currRequestCost = currRequest.CalcCost();
11620 if(pBestRequestBlock == VMA_NULL ||
11621 currRequestCost < bestRequestCost)
11623 pBestRequestBlock = pCurrBlock;
11624 bestRequest = currRequest;
11625 bestRequestCost = currRequestCost;
11627 if(bestRequestCost == 0)
11638 for(
size_t blockIndex = m_Blocks.size(); blockIndex--; )
11640 VmaDeviceMemoryBlock*
const pCurrBlock = m_Blocks[blockIndex];
11641 VMA_ASSERT(pCurrBlock);
11642 VmaAllocationRequest currRequest = {};
11643 if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11646 m_BufferImageGranularity,
11655 const VkDeviceSize currRequestCost = currRequest.CalcCost();
11656 if(pBestRequestBlock == VMA_NULL ||
11657 currRequestCost < bestRequestCost ||
11660 pBestRequestBlock = pCurrBlock;
11661 bestRequest = currRequest;
11662 bestRequestCost = currRequestCost;
11664 if(bestRequestCost == 0 ||
11674 if(pBestRequestBlock != VMA_NULL)
11678 VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
11679 if(res != VK_SUCCESS)
11685 if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
11691 if(pBestRequestBlock->m_pMetadata->IsEmpty())
11693 m_HasEmptyBlock =
false;
11696 *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
11697 pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
11698 (*pAllocation)->InitBlockAllocation(
11701 bestRequest.offset,
11707 VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
11708 VMA_DEBUG_LOG(
" Returned from existing allocation #%u", (uint32_t)blockIndex);
11709 (*pAllocation)->SetUserData(m_hAllocator, createInfo.
pUserData);
11710 if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
11712 m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
11714 if(IsCorruptionDetectionEnabled())
11716 VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
11717 VMA_ASSERT(res == VK_SUCCESS &&
"Couldn't map block memory to write magic value.");
11732 if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
11734 return VK_ERROR_TOO_MANY_OBJECTS;
11738 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
void VmaBlockVector::Free(VmaAllocation hAllocation)
{
    VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;

    // Scope for lock.
    {
        VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);

        VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();

        if(IsCorruptionDetectionEnabled())
        {
            VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
        }

        if(hAllocation->IsPersistentMap())
        {
            pBlock->Unmap(m_hAllocator, 1);
        }

        pBlock->m_pMetadata->Free(hAllocation);
        VMA_HEAVY_ASSERT(pBlock->Validate());

        VMA_DEBUG_LOG("  Freed from MemoryTypeIndex=%u", memTypeIndex);

        // pBlock became empty after this deallocation.
        if(pBlock->m_pMetadata->IsEmpty())
        {
            // There is already another empty block - we do not want two, so delete this one.
            if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
            {
                pBlockToDelete = pBlock;
                Remove(pBlock);
            }
            // This is now the first empty block.
            else
            {
                m_HasEmptyBlock = true;
            }
        }
        // pBlock didn't become empty, but there is another empty block - free the last one if empty.
        else if(m_HasEmptyBlock)
        {
            VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
            if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
            {
                pBlockToDelete = pLastBlock;
                m_Blocks.pop_back();
                m_HasEmptyBlock = false;
            }
        }

        IncrementallySortBlocks();
    }

    // Destruction of the empty block is deferred until here, outside of the mutex lock.
    if(pBlockToDelete != VMA_NULL)
    {
        VMA_DEBUG_LOG("    Deleted empty allocation");
        pBlockToDelete->Destroy(m_hAllocator);
        vma_delete(m_hAllocator, pBlockToDelete);
    }
}

VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
{
    VkDeviceSize result = 0;
    for(size_t i = m_Blocks.size(); i--; )
    {
        result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
        if(result >= m_PreferredBlockSize)
        {
            break;
        }
    }
    return result;
}

void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
{
    for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    {
        if(m_Blocks[blockIndex] == pBlock)
        {
            VmaVectorRemove(m_Blocks, blockIndex);
            return;
        }
    }
    VMA_ASSERT(0);
}

void VmaBlockVector::IncrementallySortBlocks()
{
    // Bubble sort only until the first swap: keeps m_Blocks sorted by ascending free size.
    for(size_t i = 1; i < m_Blocks.size(); ++i)
    {
        if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
        {
            VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
            return;
        }
    }
}

VkResult VmaBlockVector::AllocateFromBlock(
    VmaDeviceMemoryBlock* pBlock,
    VmaPool hCurrentPool,
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    VmaAllocationCreateFlags allocFlags,
    void* pUserData,
    VmaSuballocationType suballocType,
    uint32_t strategy,
    VmaAllocation* pAllocation)
{
    // ... (isUpperAddress, mapped and isUserDataString are derived from allocFlags here)

    VmaAllocationRequest currRequest = {};
    if(pBlock->m_pMetadata->CreateAllocationRequest(
        currentFrameIndex,
        m_FrameInUseCount,
        m_BufferImageGranularity,
        /* ... */
        &currRequest))
    {
        // Allocate from this block.
        VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);

        if(mapped)
        {
            VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
            if(res != VK_SUCCESS)
            {
                return res;
            }
        }

        // We no longer have an empty block.
        if(pBlock->m_pMetadata->IsEmpty())
        {
            m_HasEmptyBlock = false;
        }

        *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
        pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
        (*pAllocation)->InitBlockAllocation(
            /* ... */
            currRequest.offset,
            /* ... */);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        (*pAllocation)->SetUserData(m_hAllocator, pUserData);
        if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
        {
            m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
        }
        if(IsCorruptionDetectionEnabled())
        {
            VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
        }
        return VK_SUCCESS;
    }
    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}

VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
{
    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    allocInfo.allocationSize = blockSize;
    VkDeviceMemory mem = VK_NULL_HANDLE;
    VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    if(res < 0)
    {
        return res;
    }

    // New VkDeviceMemory successfully created: wrap it in a new block.
    VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    pBlock->Init(
        /* ... */
        allocInfo.allocationSize,
        /* ... */);

    m_Blocks.push_back(pBlock);
    if(pNewBlockIndex != VMA_NULL)
    {
        *pNewBlockIndex = m_Blocks.size() - 1;
    }

    return VK_SUCCESS;
}
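
// Note: the CPU defragmentation path below works entirely through mapped
// pointers: every block touched by a move is mapped (and remembered, so it can
// be unmapped afterwards), the source range is invalidated and the destination
// range flushed when the memory type is non-coherent, and the byte transfer
// itself is a plain host-side copy between the two mapped regions.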

void VmaBlockVector::ApplyDefragmentationMovesCpu(
    class VmaBlockVectorDefragmentationContext* pDefragCtx,
    const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
{
    const size_t blockCount = m_Blocks.size();
    const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);

    enum BLOCK_FLAG
    {
        BLOCK_FLAG_USED = 0x00000001,
        BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
    };
    struct BlockInfo
    {
        uint32_t flags;
        void* pMappedData;
    };
    VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
        blockInfo(blockCount, VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
    memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));

    // Go over all moves. Mark blocks that participate with BLOCK_FLAG_USED.
    const size_t moveCount = moves.size();
    for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    {
        const VmaDefragmentationMove& move = moves[moveIndex];
        blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
        blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
    }

    VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);

    // Go over all used blocks. Fetch the mapped pointer or map if necessary.
    for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo& currBlockInfo = blockInfo[blockIndex];
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
        {
            currBlockInfo.pMappedData = pBlock->GetMappedData();
            // Not originally mapped - map it just for defragmentation.
            if(currBlockInfo.pMappedData == VMA_NULL)
            {
                pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
                if(pDefragCtx->res == VK_SUCCESS)
                {
                    currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
                }
            }
        }
    }

    // Go over all moves. Do the actual data transfer.
    if(pDefragCtx->res == VK_SUCCESS)
    {
        const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };

        for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
        {
            const VmaDefragmentationMove& move = moves[moveIndex];

            const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
            const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];

            VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);

            // Invalidate the source range if the memory is non-coherent.
            if(isNonCoherent)
            {
                VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
                memRange.memory = pSrcBlock->GetDeviceMemory();
                memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
                    pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
                (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
            }

            // The place where the actual data copy happens.
            memmove(
                reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
                reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
                static_cast<size_t>(move.size));

            if(IsCorruptionDetectionEnabled())
            {
                VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
                VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
            }

            // Flush the destination range if the memory is non-coherent.
            if(isNonCoherent)
            {
                VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
                memRange.memory = pDstBlock->GetDeviceMemory();
                memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
                    pDstBlock->m_pMetadata->GetSize() - memRange.offset);
                (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
            }
        }
    }

    // Go over all blocks in reverse order. Unmap those mapped just for defragmentation.
    for(size_t blockIndex = blockCount; blockIndex--; )
    {
        const BlockInfo& currBlockInfo = blockInfo[blockIndex];
        if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
        {
            VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
            pBlock->Unmap(m_hAllocator, 1);
        }
    }
}
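
// Note: the GPU path cannot touch the memory from the host. Instead it creates
// a temporary VkBuffer spanning each whole block involved in a move, binds it
// at offset 0, and records one vkCmdCopyBuffer region per move into the
// caller-provided command buffer. The context result is left at VK_NOT_READY
// until the user submits and completes that command buffer; the temporary
// buffers are destroyed later in DefragmentationEnd().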

void VmaBlockVector::ApplyDefragmentationMovesGpu(
    class VmaBlockVectorDefragmentationContext* pDefragCtx,
    const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkCommandBuffer commandBuffer)
{
    const size_t blockCount = m_Blocks.size();

    pDefragCtx->blockContexts.resize(blockCount);
    memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));

    // Go over all moves. Mark blocks that participate with BLOCK_FLAG_USED.
    const size_t moveCount = moves.size();
    for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    {
        const VmaDefragmentationMove& move = moves[moveIndex];
        pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
        pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
    }

    VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);

    // Go over all used blocks. Create and bind a buffer spanning the whole block.
    {
        VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
        bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
            VK_BUFFER_USAGE_TRANSFER_DST_BIT;

        for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
        {
            VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
            VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
            if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
            {
                bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
                pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
                    m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
                if(pDefragCtx->res == VK_SUCCESS)
                {
                    pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
                        m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
                }
            }
        }
    }

    // Go over all moves. Record the data transfer commands into the command buffer.
    if(pDefragCtx->res == VK_SUCCESS)
    {
        const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };

        for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
        {
            const VmaDefragmentationMove& move = moves[moveIndex];

            const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
            const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];

            VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);

            VkBufferCopy region = {
                move.srcOffset,
                move.dstOffset,
                move.size };
            (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
                commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
        }
    }

    // The copies take effect only after the command buffer is executed.
    if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
    {
        pDefragCtx->res = VK_NOT_READY;
    }
}

void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
{
    m_HasEmptyBlock = false;
    for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    {
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if(pBlock->m_pMetadata->IsEmpty())
        {
            if(m_Blocks.size() > m_MinBlockCount)
            {
                if(pDefragmentationStats != VMA_NULL)
                {
                    ++pDefragmentationStats->deviceMemoryBlocksFreed;
                    pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
                }

                VmaVectorRemove(m_Blocks, blockIndex);
                pBlock->Destroy(m_hAllocator);
                vma_delete(m_hAllocator, pBlock);
            }
            else
            {
                m_HasEmptyBlock = true;
            }
        }
    }
}

#if VMA_STATS_STRING_ENABLED

void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();

    if(m_IsCustomPool)
    {
        json.WriteString("MemoryTypeIndex");
        json.WriteNumber(m_MemoryTypeIndex);

        json.WriteString("BlockSize");
        json.WriteNumber(m_PreferredBlockSize);

        json.WriteString("BlockCount");
        json.BeginObject(true);
        if(m_MinBlockCount > 0)
        {
            json.WriteString("Min");
            json.WriteNumber((uint64_t)m_MinBlockCount);
        }
        if(m_MaxBlockCount < SIZE_MAX)
        {
            json.WriteString("Max");
            json.WriteNumber((uint64_t)m_MaxBlockCount);
        }
        json.WriteString("Cur");
        json.WriteNumber((uint64_t)m_Blocks.size());
        json.EndObject();

        if(m_FrameInUseCount > 0)
        {
            json.WriteString("FrameInUseCount");
            json.WriteNumber(m_FrameInUseCount);
        }

        if(m_Algorithm != 0)
        {
            json.WriteString("Algorithm");
            json.WriteString(VmaAlgorithmToStr(m_Algorithm));
        }
    }
    else
    {
        json.WriteString("PreferredBlockSize");
        json.WriteNumber(m_PreferredBlockSize);
    }

    json.WriteString("Blocks");
    json.BeginObject();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        json.BeginString();
        json.ContinueString(m_Blocks[i]->GetId());
        json.EndString();

        m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    }
    json.EndObject();

    json.EndObject();
}

#endif // #if VMA_STATS_STRING_ENABLED

void VmaBlockVector::Defragment(
    class VmaBlockVectorDefragmentationContext* pCtx,
    VmaDefragmentationStats* pStats,
    VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
    VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
    VkCommandBuffer commandBuffer)
{
    pCtx->res = VK_SUCCESS;

    const VkMemoryPropertyFlags memPropFlags =
        m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
    const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
    const bool isHostCoherent = (memPropFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0;

    const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
        isHostVisible;
    const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
        (VMA_DEBUG_DETECT_CORRUPTION == 0 || !(isHostVisible && isHostCoherent));

    // There is at least one option to defragment this memory type.
    if(canDefragmentOnCpu || canDefragmentOnGpu)
    {
        bool defragmentOnGpu;
        // Only one option is available.
        if(canDefragmentOnGpu != canDefragmentOnCpu)
        {
            defragmentOnGpu = canDefragmentOnGpu;
        }
        // Both options are available: heuristics to choose the better one.
        else
        {
            defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
                m_hAllocator->IsIntegratedGpu();
        }

        bool overlappingMoveSupported = !defragmentOnGpu;

        if(m_hAllocator->m_UseMutex)
        {
            m_Mutex.LockWrite();
            pCtx->mutexLocked = true;
        }

        pCtx->Begin(overlappingMoveSupported);

        // Defragment.

        const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
        const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
            VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
        pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);

        // Accumulate statistics.
        if(pStats != VMA_NULL)
        {
            const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
            const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
            pStats->bytesMoved += bytesMoved;
            pStats->allocationsMoved += allocationsMoved;
            VMA_ASSERT(bytesMoved <= maxBytesToMove);
            VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
            if(defragmentOnGpu)
            {
                maxGpuBytesToMove -= bytesMoved;
                maxGpuAllocationsToMove -= allocationsMoved;
            }
            else
            {
                maxCpuBytesToMove -= bytesMoved;
                maxCpuAllocationsToMove -= allocationsMoved;
            }
        }

        if(pCtx->res >= VK_SUCCESS)
        {
            if(defragmentOnGpu)
            {
                ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
            }
            else
            {
                ApplyDefragmentationMovesCpu(pCtx, moves);
            }
        }
    }
}
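
// Note: when both CPU and GPU defragmentation are possible for a memory type,
// Defragment() above prefers the GPU path for DEVICE_LOCAL memory or on
// integrated GPUs, where reading through a mapped pointer would be slow;
// otherwise it falls back to the CPU path, which also supports overlapping
// moves within a block.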

void VmaBlockVector::DefragmentationEnd(
    class VmaBlockVectorDefragmentationContext* pCtx,
    VmaDefragmentationStats* pStats)
{
    // Destroy the temporary buffers created for the GPU path.
    for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
    {
        VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
        if(blockCtx.hBuffer)
        {
            (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
                m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
        }
    }

    if(pCtx->res >= VK_SUCCESS)
    {
        FreeEmptyBlocks(pStats);
    }

    if(pCtx->mutexLocked)
    {
        VMA_ASSERT(m_hAllocator->m_UseMutex);
        m_Mutex.UnlockWrite();
    }
}

size_t VmaBlockVector::CalcAllocationCount() const
{
    size_t result = 0;
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
    }
    return result;
}

bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
{
    if(m_BufferImageGranularity == 1)
    {
        return false;
    }
    VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
    for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
    {
        VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
        VMA_ASSERT(m_Algorithm == 0);
        VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
        if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
        {
            return true;
        }
    }
    return false;
}

void VmaBlockVector::MakePoolAllocationsLost(
    uint32_t currentFrameIndex,
    size_t* pLostAllocationCount)
{
    VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
    size_t lostAllocationCount = 0;
    for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    {
        VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
        VMA_ASSERT(pBlock);
        lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    }
    if(pLostAllocationCount != VMA_NULL)
    {
        *pLostAllocationCount = lostAllocationCount;
    }
}

VkResult VmaBlockVector::CheckCorruption()
{
    if(!IsCorruptionDetectionEnabled())
    {
        return VK_ERROR_FEATURE_NOT_PRESENT;
    }

    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    {
        VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
        VMA_ASSERT(pBlock);
        VkResult res = pBlock->CheckCorruption(m_hAllocator);
        if(res != VK_SUCCESS)
        {
            return res;
        }
    }
    return VK_SUCCESS;
}

void VmaBlockVector::AddStats(VmaStats* pStats)
{
    const uint32_t memTypeIndex = m_MemoryTypeIndex;
    const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);

    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);

    for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    {
        const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
        VMA_ASSERT(pBlock);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        VmaStatInfo allocationStatInfo;
        pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
        VmaAddStatInfo(pStats->total, allocationStatInfo);
        VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
        VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    }
}

VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex,
    bool overlappingMoveSupported) :
    VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
    m_AllAllocations(false),
    m_AllocationCount(0),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
{
    // Create block info for each block.
    const size_t blockCount = m_pBlockVector->m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
        pBlockInfo->m_OriginalBlockIndex = blockIndex;
        pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
        m_Blocks.push_back(pBlockInfo);
    }

    // Sort them by m_pBlock pointer value.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
}

VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
{
    for(size_t i = m_Blocks.size(); i--; )
    {
        vma_delete(m_hAllocator, m_Blocks[i]);
    }
}

void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
{
    // Since we are inside VmaBlockVector::m_Mutex, we can do a final check that this allocation was not lost.
    if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
    {
        VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
        BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
        if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
        {
            AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
            (*it)->m_Allocations.push_back(allocInfo);
        }
        // ...

        ++m_AllocationCount;
    }
}
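
// Note: DefragmentRound() below walks candidate allocations from the most
// "source-like" block (back of m_Blocks) toward the front, and for each one
// searches the more "destination-like" blocks (front) for a place at a lower
// address, updating the metadata immediately and appending a
// VmaDefragmentationMove for the later data copy. MoveMakesSense() rejects
// moves that would not decrease the (blockIndex, offset) position.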

VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Blocks.empty())
    {
        return VK_SUCCESS;
    }
    // ...
    size_t srcBlockMinIndex = 0;
    // ...
    size_t srcBlockIndex = m_Blocks.size() - 1;
    size_t srcAllocIndex = SIZE_MAX;
    for(;;)
    {
        // 1. Find the next allocation to move, scanning blocks back to front
        //    and allocations within each block back to front.
        while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
        {
            if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
            {
                // Finished: no more allocations to process.
                if(srcBlockIndex == srcBlockMinIndex)
                {
                    return VK_SUCCESS;
                }
                --srcBlockIndex;
                srcAllocIndex = SIZE_MAX;
            }
            else
            {
                srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
            }
        }

        BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
        AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];

        const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
        const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
        const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
        const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();

        // 2. Try to find a new place for this allocation in a preceding or the current block.
        for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
        {
            BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
            VmaAllocationRequest dstAllocRequest;
            if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
                m_CurrentFrameIndex,
                m_pBlockVector->GetFrameInUseCount(),
                m_pBlockVector->GetBufferImageGranularity(),
                size,
                alignment,
                /* ... */
                &dstAllocRequest) &&
                MoveMakesSense(
                    dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
            {
                VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);

                // Reached the limit on number of allocations or bytes to move.
                if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
                    (m_BytesMoved + size > maxBytesToMove))
                {
                    return VK_SUCCESS;
                }

                VmaDefragmentationMove move;
                move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
                move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
                move.srcOffset = srcOffset;
                move.dstOffset = dstAllocRequest.offset;
                move.size = size;
                moves.push_back(move);

                pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
                    dstAllocRequest,
                    suballocType,
                    size,
                    /* ... */
                    allocInfo.m_hAllocation);
                pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);

                allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);

                if(allocInfo.m_pChanged != VMA_NULL)
                {
                    *allocInfo.m_pChanged = VK_TRUE;
                }

                ++m_AllocationsMoved;
                m_BytesMoved += size;

                VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);

                break;
            }
        }

        // If not moved, this allocation stays in m_Allocations for the next round.
        if(srcAllocIndex > 0)
        {
            --srcAllocIndex;
        }
        else
        {
            if(srcBlockIndex > 0)
            {
                --srcBlockIndex;
                srcAllocIndex = SIZE_MAX;
            }
            else
            {
                return VK_SUCCESS;
            }
        }
    }
}

size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
{
    size_t result = 0;
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        if(m_Blocks[i]->m_HasNonMovableAllocations)
        {
            ++result;
        }
    }
    return result;
}

VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(!m_AllAllocations && m_AllocationCount == 0)
    {
        return VK_SUCCESS;
    }

    const size_t blockCount = m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = m_Blocks[blockIndex];

        if(m_AllAllocations)
        {
            VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
            for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
                it != pMetadata->m_Suballocations.end();
                ++it)
            {
                if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
                {
                    AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
                    pBlockInfo->m_Allocations.push_back(allocInfo);
                }
            }
        }

        pBlockInfo->CalcHasNonMovableAllocations();
        // Sort m_Allocations by offset, descending - required by the algorithm.
        pBlockInfo->SortAllocationsByOffsetDescending();
    }

    // Sort m_Blocks so the most "destination-like" blocks come first.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());

    // Execute a fixed number of defragmentation rounds.
    const uint32_t roundCount = 2;

    VkResult result = VK_SUCCESS;
    for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
    {
        result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
    }

    return result;
}

bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
    size_t dstBlockIndex, VkDeviceSize dstOffset,
    size_t srcBlockIndex, VkDeviceSize srcOffset)
{
    if(dstBlockIndex < srcBlockIndex)
    {
        return true;
    }
    if(dstBlockIndex > srcBlockIndex)
    {
        return false;
    }
    if(dstOffset < srcOffset)
    {
        return true;
    }
    return false;
}
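
// Note: the "Fast" algorithm below is a single linear pass that slides every
// suballocation as far toward the front of the block sequence as it will go,
// updating the generic metadata in place. It is only usable when all
// allocations are movable, VMA_DEBUG_MARGIN == 0 (asserted in the constructor),
// and no buffer/image granularity conflict is possible; otherwise the generic
// algorithm is chosen (see VmaBlockVectorDefragmentationContext::Begin).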

VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex,
    bool overlappingMoveSupported) :
    VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
    m_OverlappingMoveSupported(overlappingMoveSupported),
    m_AllocationCount(0),
    m_AllAllocations(false),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
{
    VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
}

VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
{
}

VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);

    const size_t blockCount = m_pBlockVector->GetBlockCount();
    if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
    {
        return VK_SUCCESS;
    }

    PreprocessMetadata();

    // Sort blocks in order from the most "destination-like" (least free space first).
    m_BlockInfos.resize(blockCount);
    for(size_t i = 0; i < blockCount; ++i)
    {
        m_BlockInfos[i].origBlockIndex = i;
    }

    VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(),
        [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
            return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
                m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
        });

    // THE MAIN ALGORITHM

    FreeSpaceDatabase freeSpaceDb;

    size_t dstBlockInfoIndex = 0;
    size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
    VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
    VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
    VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
    VkDeviceSize dstOffset = 0;

    bool end = false;
    for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
    {
        const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
        VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
        VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
        for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
            !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
        {
            VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
            const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
            const VkDeviceSize srcAllocSize = srcSuballocIt->size;
            if(m_AllocationsMoved == maxAllocationsToMove ||
                m_BytesMoved + srcAllocSize > maxBytesToMove)
            {
                end = true;
                break;
            }
            const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;

            // Try to place it in one of the free spaces from the database.
            size_t freeSpaceInfoIndex;
            VkDeviceSize dstAllocOffset;
            if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
                freeSpaceInfoIndex, dstAllocOffset))
            {
                size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
                VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
                VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
                VkDeviceSize freeSpaceBlockSize = pFreeSpaceMetadata->GetSize();

                // Same block.
                if(freeSpaceInfoIndex == srcBlockInfoIndex)
                {
                    VMA_ASSERT(dstAllocOffset <= srcAllocOffset);

                    // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
                    VmaSuballocation suballoc = *srcSuballocIt;
                    suballoc.offset = dstAllocOffset;
                    suballoc.hAllocation->ChangeOffset(dstAllocOffset);
                    m_BytesMoved += srcAllocSize;
                    ++m_AllocationsMoved;

                    VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
                    ++nextSuballocIt;
                    pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
                    srcSuballocIt = nextSuballocIt;

                    InsertSuballoc(pFreeSpaceMetadata, suballoc);

                    VmaDefragmentationMove move = {
                        srcOrigBlockIndex, freeSpaceOrigBlockIndex,
                        srcAllocOffset, dstAllocOffset,
                        srcAllocSize };
                    moves.push_back(move);
                }
                // Different block.
                else
                {
                    // MOVE OPTION 2: Move the allocation to a different block.
                    VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);

                    VmaSuballocation suballoc = *srcSuballocIt;
                    suballoc.offset = dstAllocOffset;
                    suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
                    m_BytesMoved += srcAllocSize;
                    ++m_AllocationsMoved;

                    VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
                    ++nextSuballocIt;
                    pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
                    srcSuballocIt = nextSuballocIt;

                    InsertSuballoc(pFreeSpaceMetadata, suballoc);

                    VmaDefragmentationMove move = {
                        srcOrigBlockIndex, freeSpaceOrigBlockIndex,
                        srcAllocOffset, dstAllocOffset,
                        srcAllocSize };
                    moves.push_back(move);
                }
            }
            else
            {
                dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);

                // While the allocation doesn't fit before the end of the dst block, advance to the next block.
                while(dstBlockInfoIndex < srcBlockInfoIndex &&
                    dstAllocOffset + srcAllocSize > dstBlockSize)
                {
                    // But first, register the remaining free space at the end of the dst block.
                    freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);

                    ++dstBlockInfoIndex;
                    dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
                    pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
                    pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
                    dstBlockSize = pDstMetadata->GetSize();
                    dstOffset = 0;
                    dstAllocOffset = 0;
                }

                // Same block.
                if(dstBlockInfoIndex == srcBlockInfoIndex)
                {
                    VMA_ASSERT(dstAllocOffset <= srcAllocOffset);

                    const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;

                    bool skipOver = overlap;
                    if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
                    {
                        // If destination and source overlap, skip the move if it would shift
                        // the allocation by less than 1/64 of its size.
                        skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
                    }

                    if(skipOver)
                    {
                        freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);

                        dstOffset = srcAllocOffset + srcAllocSize;
                        ++srcSuballocIt;
                    }
                    // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
                    else
                    {
                        srcSuballocIt->offset = dstAllocOffset;
                        srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
                        dstOffset = dstAllocOffset + srcAllocSize;
                        m_BytesMoved += srcAllocSize;
                        ++m_AllocationsMoved;
                        ++srcSuballocIt;
                        VmaDefragmentationMove move = {
                            srcOrigBlockIndex, dstOrigBlockIndex,
                            srcAllocOffset, dstAllocOffset,
                            srcAllocSize };
                        moves.push_back(move);
                    }
                }
                // Different block.
                else
                {
                    // MOVE OPTION 2: Move the allocation to a different block.
                    VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
                    VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);

                    VmaSuballocation suballoc = *srcSuballocIt;
                    suballoc.offset = dstAllocOffset;
                    suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
                    dstOffset = dstAllocOffset + srcAllocSize;
                    m_BytesMoved += srcAllocSize;
                    ++m_AllocationsMoved;

                    VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
                    ++nextSuballocIt;
                    pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
                    srcSuballocIt = nextSuballocIt;

                    pDstMetadata->m_Suballocations.push_back(suballoc);

                    VmaDefragmentationMove move = {
                        srcOrigBlockIndex, dstOrigBlockIndex,
                        srcAllocOffset, dstAllocOffset,
                        srcAllocSize };
                    moves.push_back(move);
                }
            }
        }
    }

    m_BlockInfos.clear();

    PostprocessMetadata();

    return VK_SUCCESS;
}
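
// Note: Preprocess/PostprocessMetadata bracket the fast pass above. The
// preprocessing strips all FREE suballocations from each block's list (the
// pass reasons only about used ranges); the postprocessing reinserts the free
// gaps between the compacted allocations, rebuilds m_FreeSuballocationsBySize,
// and restores m_FreeCount / m_SumFreeSize so the metadata is valid again.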

void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
{
    const size_t blockCount = m_pBlockVector->GetBlockCount();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        VmaBlockMetadata_Generic* const pMetadata =
            (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
        pMetadata->m_FreeCount = 0;
        pMetadata->m_SumFreeSize = pMetadata->GetSize();
        pMetadata->m_FreeSuballocationsBySize.clear();
        for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
            it != pMetadata->m_Suballocations.end(); )
        {
            if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
            {
                VmaSuballocationList::iterator nextIt = it;
                ++nextIt;
                pMetadata->m_Suballocations.erase(it);
                it = nextIt;
            }
            else
            {
                ++it;
            }
        }
    }
}

void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
{
    const size_t blockCount = m_pBlockVector->GetBlockCount();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        VmaBlockMetadata_Generic* const pMetadata =
            (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
        const VkDeviceSize blockSize = pMetadata->GetSize();

        // No allocations in this block - the entire area is free.
        if(pMetadata->m_Suballocations.empty())
        {
            pMetadata->m_FreeCount = 1;
            VmaSuballocation suballoc = {
                0, // offset
                blockSize, // size
                VMA_NULL, // hAllocation
                VMA_SUBALLOCATION_TYPE_FREE };
            pMetadata->m_Suballocations.push_back(suballoc);
            pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
        }
        // There are some allocations in this block.
        else
        {
            VkDeviceSize offset = 0;
            VmaSuballocationList::iterator it;
            for(it = pMetadata->m_Suballocations.begin();
                it != pMetadata->m_Suballocations.end();
                ++it)
            {
                VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
                VMA_ASSERT(it->offset >= offset);

                // Need to insert the preceding free space.
                if(it->offset > offset)
                {
                    ++pMetadata->m_FreeCount;
                    const VkDeviceSize freeSize = it->offset - offset;
                    VmaSuballocation suballoc = {
                        offset, // offset
                        freeSize, // size
                        VMA_NULL, // hAllocation
                        VMA_SUBALLOCATION_TYPE_FREE };
                    VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
                    if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
                    {
                        pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
                    }
                }

                pMetadata->m_SumFreeSize -= it->size;
                offset = it->offset + it->size;
            }

            // Need to insert the trailing free space.
            if(offset < blockSize)
            {
                ++pMetadata->m_FreeCount;
                const VkDeviceSize freeSize = blockSize - offset;
                VmaSuballocation suballoc = {
                    offset, // offset
                    freeSize, // size
                    VMA_NULL, // hAllocation
                    VMA_SUBALLOCATION_TYPE_FREE };
                VMA_ASSERT(it == pMetadata->m_Suballocations.end());
                VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
                if(freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
                {
                    pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
                }
            }

            VMA_SORT(
                pMetadata->m_FreeSuballocationsBySize.begin(),
                pMetadata->m_FreeSuballocationsBySize.end(),
                VmaSuballocationItemSizeLess());
        }

        VMA_HEAVY_ASSERT(pMetadata->Validate());
    }
}

void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
{
    // Keep the list sorted by ascending offset.
    VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
    while(it != pMetadata->m_Suballocations.end())
    {
        if(it->offset < suballoc.offset)
        {
            ++it;
        }
        else
        {
            break;
        }
    }
    pMetadata->m_Suballocations.insert(it, suballoc);
}

VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
    VmaAllocator hAllocator,
    VmaPool hCustomPool,
    VmaBlockVector* pBlockVector,
    uint32_t currFrameIndex,
    uint32_t algorithmFlags) :
    res(VK_SUCCESS),
    mutexLocked(false),
    blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
    m_hAllocator(hAllocator),
    m_hCustomPool(hCustomPool),
    m_pBlockVector(pBlockVector),
    m_CurrFrameIndex(currFrameIndex),
    m_AlgorithmFlags(algorithmFlags),
    m_pAlgorithm(VMA_NULL),
    m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
    m_AllAllocations(false)
{
}

VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
{
    vma_delete(m_hAllocator, m_pAlgorithm);
}

void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
{
    AllocInfo info = { hAlloc, pChanged };
    m_Allocations.push_back(info);
}

void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
{
    const bool allAllocations = m_AllAllocations ||
        m_Allocations.size() == m_pBlockVector->CalcAllocationCount();

    // The choice of defragmentation algorithm: the fast path requires no debug
    // margin, all allocations movable, and no buffer/image granularity conflict.
    if(VMA_DEBUG_MARGIN == 0 &&
        allAllocations &&
        !m_pBlockVector->IsBufferImageGranularityConflictPossible())
    {
        m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
            m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
    }
    else
    {
        m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
            m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
    }

    if(allAllocations)
    {
        m_pAlgorithm->AddAll();
    }
    else
    {
        for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
        {
            m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
        }
    }
}
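
// Editorial sketch: the machinery above is normally driven through the public
// defragmentation API declared earlier in this header. A minimal, hypothetical
// CPU-only invocation (assuming `allocator`, `allocations` and `allocCount`
// exist, and VMA 2.x-style vmaDefragmentationBegin/vmaDefragmentationEnd):
//
//     VmaDefragmentationInfo2 defragInfo = {};
//     defragInfo.allocationCount = allocCount;
//     defragInfo.pAllocations = allocations;
//     defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
//     defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
//     // No commandBuffer given: the GPU limits are zeroed and only the CPU path runs.
//
//     VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
//     vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
//     vmaDefragmentationEnd(allocator, defragCtx); // frees empty blocks, releases locks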

VmaDefragmentationContext_T::VmaDefragmentationContext_T(
    VmaAllocator hAllocator,
    uint32_t currFrameIndex,
    uint32_t flags,
    VmaDefragmentationStats* pStats) :
    m_hAllocator(hAllocator),
    m_CurrFrameIndex(currFrameIndex),
    m_Flags(flags),
    m_pStats(pStats),
    m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
{
    memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
}

VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
{
    for(size_t i = m_CustomPoolContexts.size(); i--; )
    {
        VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
        pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
        vma_delete(m_hAllocator, pBlockVectorCtx);
    }
    for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
    {
        VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
        if(pBlockVectorCtx)
        {
            pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
            vma_delete(m_hAllocator, pBlockVectorCtx);
        }
    }
}

void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
{
    for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
    {
        VmaPool pool = pPools[poolIndex];
        VMA_ASSERT(pool);
        // Pools with an algorithm other than the default are not defragmented.
        if(pool->m_BlockVector.GetAlgorithm() == 0)
        {
            VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;

            for(size_t i = m_CustomPoolContexts.size(); i--; )
            {
                if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
                {
                    pBlockVectorDefragCtx = m_CustomPoolContexts[i];
                    break;
                }
            }

            if(!pBlockVectorDefragCtx)
            {
                pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
                    m_hAllocator,
                    pool,
                    &pool->m_BlockVector,
                    m_CurrFrameIndex,
                    m_Flags);
                m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
            }

            pBlockVectorDefragCtx->AddAll();
        }
    }
}

void VmaDefragmentationContext_T::AddAllocations(
    uint32_t allocationCount,
    VmaAllocation* pAllocations,
    VkBool32* pAllocationsChanged)
{
    // Dispatch pAllocations among the per-block-vector contexts, creating them when necessary.
    for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    {
        const VmaAllocation hAlloc = pAllocations[allocIndex];
        VMA_ASSERT(hAlloc);
        // Dedicated allocations and lost allocations cannot be defragmented.
        if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
            (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
        {
            VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;

            const VmaPool hAllocPool = hAlloc->GetPool();
            // This allocation belongs to a custom pool.
            if(hAllocPool != VK_NULL_HANDLE)
            {
                // Pools with an algorithm other than the default are not defragmented.
                if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
                {
                    for(size_t i = m_CustomPoolContexts.size(); i--; )
                    {
                        if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
                        {
                            pBlockVectorDefragCtx = m_CustomPoolContexts[i];
                            break;
                        }
                    }
                    if(!pBlockVectorDefragCtx)
                    {
                        pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
                            m_hAllocator,
                            hAllocPool,
                            &hAllocPool->m_BlockVector,
                            m_CurrFrameIndex,
                            m_Flags);
                        m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
                    }
                }
            }
            // This allocation belongs to a default pool.
            else
            {
                const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
                pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
                if(!pBlockVectorDefragCtx)
                {
                    pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
                        m_hAllocator,
                        VK_NULL_HANDLE, // hCustomPool
                        m_hAllocator->m_pBlockVectors[memTypeIndex],
                        m_CurrFrameIndex,
                        m_Flags);
                    m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
                }
            }

            if(pBlockVectorDefragCtx)
            {
                VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
                    &pAllocationsChanged[allocIndex] : VMA_NULL;
                pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
            }
        }
    }
}

VkResult VmaDefragmentationContext_T::Defragment(
    VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
    VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
    VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
{
    if(pStats)
    {
        memset(pStats, 0, sizeof(VmaDefragmentationStats));
    }

    if(commandBuffer == VK_NULL_HANDLE)
    {
        maxGpuBytesToMove = 0;
        maxGpuAllocationsToMove = 0;
    }

    VkResult res = VK_SUCCESS;

    // Process default pools.
    for(uint32_t memTypeIndex = 0;
        memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
        ++memTypeIndex)
    {
        VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
        if(pBlockVectorCtx)
        {
            VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
            pBlockVectorCtx->GetBlockVector()->Defragment(
                pBlockVectorCtx,
                pStats,
                maxCpuBytesToMove, maxCpuAllocationsToMove,
                maxGpuBytesToMove, maxGpuAllocationsToMove,
                commandBuffer);
            if(pBlockVectorCtx->res != VK_SUCCESS)
            {
                res = pBlockVectorCtx->res;
            }
        }
    }

    // Process custom pools.
    for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
        customCtxIndex < customCtxCount && res >= VK_SUCCESS;
        ++customCtxIndex)
    {
        VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
        VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
        pBlockVectorCtx->GetBlockVector()->Defragment(
            pBlockVectorCtx,
            pStats,
            maxCpuBytesToMove, maxCpuAllocationsToMove,
            maxGpuBytesToMove, maxGpuAllocationsToMove,
            commandBuffer);
        if(pBlockVectorCtx->res != VK_SUCCESS)
        {
            res = pBlockVectorCtx->res;
        }
    }

    return res;
}
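
// Note: when VMA_RECORDING_ENABLED is 1, the VmaRecorder below serializes
// public API calls into a CSV file: two header lines ("Vulkan Memory
// Allocator,Calls recording" and the format version "1,5"), then one row per
// call consisting of thread id, a timestamp in seconds, the frame index, the
// function name, and its arguments. The recording can be replayed with the
// VmaReplay tool from the library's repository.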

#if VMA_RECORDING_ENABLED

VmaRecorder::VmaRecorder() :
    /* ... */
    m_StartCounter(INT64_MAX)
{
}

// (The signature of Init is elided in this listing; reconstructed from its body.)
VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
{
    m_UseMutex = useMutex;
    m_Flags = settings.flags;

    QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);

    // Open the file for writing.
    errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    if(err != 0)
    {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Write header: file magic and format version.
    fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    fprintf(m_File, "%s\n", "1,5");

    return VK_SUCCESS;
}

VmaRecorder::~VmaRecorder()
{
    if(m_File != VMA_NULL)
    {
        fclose(m_File);
    }
}

void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
    Flush();
}

void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
    Flush();
}

// (The signature of RecordCreatePool is elided in this listing; reconstructed
// from the row it writes: createInfo is a VmaPoolCreateInfo.)
void VmaRecorder::RecordCreatePool(uint32_t frameIndex,
    const VmaPoolCreateInfo& createInfo,
    VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
        /* ... createInfo fields and the pool handle ... */);
    Flush();
}

void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
        pool);
    Flush();
}

void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        /* ... createInfo fields ... */
        allocation,
        userDataStr.GetString());
    Flush();
}

void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    const VmaAllocationCreateInfo& createInfo,
    uint64_t allocationCount,
    const VmaAllocation* pAllocations)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        /* ... createInfo fields ... */);
    PrintPointerList(allocationCount, pAllocations);
    fprintf(m_File, ",%s\n", userDataStr.GetString());
    Flush();
}

void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        /* ... createInfo fields ... */
        allocation,
        userDataStr.GetString());
    Flush();
}

void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        /* ... createInfo fields ... */
        allocation,
        userDataStr.GetString());
    Flush();
}

void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}

void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
    uint64_t allocationCount,
    const VmaAllocation* pAllocations)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
    PrintPointerList(allocationCount, pAllocations);
    fprintf(m_File, "\n");
    Flush();
}

void VmaRecorder::RecordResizeAllocation(
    uint32_t frameIndex,
    VmaAllocation allocation,
    VkDeviceSize newSize)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
        allocation, newSize);
    Flush();
}
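
// Note: every Record* method follows the same pattern: GetBasicParams()
// captures the thread id and timestamp, m_FileMutex (honored when m_UseMutex)
// serializes writers, and exactly one CSV row is appended per call. Flush()
// respects VMA_RECORD_FLUSH_AFTER_CALL_BIT so a crash loses at most the
// current call.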

void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    VmaAllocation allocation,
    const void* pUserData)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(
        /* ... */
        pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        allocation,
        userDataStr.GetString());
    Flush();
}

void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}

void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}

void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}

void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
    VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
        allocation, offset, size);
    Flush();
}

void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
    VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
        allocation, offset, size);
    Flush();
}

void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
    const VkBufferCreateInfo& bufCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        bufCreateInfo.flags,
        bufCreateInfo.size,
        bufCreateInfo.usage,
        bufCreateInfo.sharingMode,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}

void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    const VkImageCreateInfo& imageCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        imageCreateInfo.flags,
        imageCreateInfo.imageType,
        imageCreateInfo.format,
        imageCreateInfo.extent.width,
        imageCreateInfo.extent.height,
        imageCreateInfo.extent.depth,
        imageCreateInfo.mipLevels,
        imageCreateInfo.arrayLayers,
        imageCreateInfo.samples,
        imageCreateInfo.tiling,
        imageCreateInfo.usage,
        imageCreateInfo.sharingMode,
        imageCreateInfo.initialLayout,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}

void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}

void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}

void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}

void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}

void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
    VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
        pool);
    Flush();
}

// (Parameters of the two methods below are elided in this listing; they are
// reconstructed from the rows they write, assuming a VmaDefragmentationInfo2.)
void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
    const VmaDefragmentationInfo2& info,
    VmaDefragmentationContext ctx)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
        info.flags);
    PrintPointerList(info.allocationCount, info.pAllocations);
    fprintf(m_File, ",");
    // ... (the list of pools is printed here)
    fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
        info.maxCpuBytesToMove,
        info.maxCpuAllocationsToMove,
        info.maxGpuBytesToMove,
        info.maxGpuAllocationsToMove,
        info.commandBuffer,
        ctx);
    Flush();
}

void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
    VmaDefragmentationContext ctx)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
        ctx);
    Flush();
}

// (The signature of this constructor is elided in this listing; reconstructed
// from its body and call sites.)
VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
{
    if(pUserData != VMA_NULL)
    {
        if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
        {
            m_Str = (const char*)pUserData;
        }
        else
        {
            sprintf_s(m_PtrStr, "%p", pUserData);
            m_Str = m_PtrStr;
        }
    }
    else
    {
        m_Str = "";
    }
}

void VmaRecorder::WriteConfiguration(
    const VkPhysicalDeviceProperties& devProps,
    const VkPhysicalDeviceMemoryProperties& memProps,
    bool dedicatedAllocationExtensionEnabled)
{
    fprintf(m_File, "Config,Begin\n");

    fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);

    fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);

    fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    }
    fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    }

    fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);

    fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    fprintf(m_File, "Config,End\n");
}
void VmaRecorder::GetBasicParams(CallParams& outParams)
{
    outParams.threadId = GetCurrentThreadId();

    LARGE_INTEGER counter;
    QueryPerformanceCounter(&counter);
    outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
}
void VmaRecorder::PrintPointerList(uint64_t count,
    const VmaAllocation* pItems)
{
    if(count)
    {
        fprintf(m_File, "%p", pItems[0]);
        for(uint64_t i = 1; i < count; ++i)
        {
            fprintf(m_File, " %p", pItems[i]);
        }
    }
}
void VmaRecorder::Flush()
{
    if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
    {
        fflush(m_File);
    }
}

#endif // #if VMA_RECORDING_ENABLED

VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    m_hDevice(pCreateInfo->device),
    m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
        *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    m_PreferredLargeHeapBlockSize(0),
    m_PhysicalDevice(pCreateInfo->physicalDevice),
    m_CurrentFrameIndex(0),
    m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
    m_NextPoolId(0)
#if VMA_RECORDING_ENABLED
    ,m_pRecorder(VMA_NULL)
#endif
{
    if(VMA_DEBUG_DETECT_CORRUPTION)
    {
        // Needs to be a multiple of sizeof(uint32_t) because the corruption
        // detection magic value is written and checked as uint32_t.
        VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
    }

    VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);

#if !(VMA_DEDICATED_ALLOCATION)
    if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
    {
        VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
    }
#endif

    memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
    memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    memset(&m_MemProps, 0, sizeof(m_MemProps));

    memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));

    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    {
        m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    }

    if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    {
        m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
        m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    }

    ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);

    (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);

    VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
    VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
    VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
    VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
    m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
        pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    {
        for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
        {
            const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
            if(limit != VK_WHOLE_SIZE)
            {
                m_HeapSizeLimit[heapIndex] = limit;
                if(limit < m_MemProps.memoryHeaps[heapIndex].size)
                {
                    m_MemProps.memoryHeaps[heapIndex].size = limit;
                }
            }
        }
    }
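    // Usage sketch for the heap limit above (not part of the library;
    // hypothetical values): cap heap 0 at 256 MiB, leave the rest unlimited.
    //
    //     VkDeviceSize heapLimits[VK_MAX_MEMORY_HEAPS];
    //     for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    //         heapLimits[i] = VK_WHOLE_SIZE; // VK_WHOLE_SIZE means "no limit".
    //     heapLimits[0] = 256ull * 1024 * 1024;
    //     VmaAllocatorCreateInfo allocatorInfo = {};
    //     allocatorInfo.physicalDevice = physicalDevice; // assumed to exist
    //     allocatorInfo.device = device;                 // assumed to exist
    //     allocatorInfo.pHeapSizeLimit = heapLimits;
    //     VmaAllocator vmaAllocator;
    //     vmaCreateAllocator(&allocatorInfo, &vmaAllocator);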
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);

        m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
            this,
            memTypeIndex,
            preferredBlockSize,
            0, // minBlockCount
            SIZE_MAX, // maxBlockCount
            GetBufferImageGranularity(),
            pCreateInfo->frameInUseCount,
            false, // isCustomPool
            false, // explicitBlockSize
            0); // algorithm
        // No need to call CreateMinBlocks here, because minBlockCount is 0.
        m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    }
}
VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
{
    VkResult res = VK_SUCCESS;

    if(pCreateInfo->pRecordSettings != VMA_NULL &&
        !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    {
#if VMA_RECORDING_ENABLED
        m_pRecorder = vma_new(this, VmaRecorder)();
        res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
        if(res != VK_SUCCESS)
        {
            return res;
        }
        m_pRecorder->WriteConfiguration(
            m_PhysicalDeviceProperties,
            m_MemProps,
            m_UseKhrDedicatedAllocation);
        m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
#else
        VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
        return VK_ERROR_FEATURE_NOT_PRESENT;
#endif
    }

    return res;
}
VmaAllocator_T::~VmaAllocator_T()
{
#if VMA_RECORDING_ENABLED
    if(m_pRecorder != VMA_NULL)
    {
        m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
        vma_delete(this, m_pRecorder);
    }
#endif

    VMA_ASSERT(m_Pools.empty());

    for(size_t i = GetMemoryTypeCount(); i--; )
    {
        vma_delete(this, m_pDedicatedAllocations[i]);
        vma_delete(this, m_pBlockVectors[i]);
    }
}
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
    m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
    m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
    m_VulkanFunctions.vkCmdCopyBuffer = &vkCmdCopyBuffer;
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
            (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
            (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    }
#endif // #if VMA_DEDICATED_ALLOCATION
#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1

#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    if(pVulkanFunctions != VMA_NULL)
    {
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
        VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
        VMA_COPY_IF_NOT_NULL(vkFreeMemory);
        VMA_COPY_IF_NOT_NULL(vkMapMemory);
        VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
        VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
        VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
        VMA_COPY_IF_NOT_NULL(vkCreateImage);
        VMA_COPY_IF_NOT_NULL(vkDestroyImage);
        VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
#if VMA_DEDICATED_ALLOCATION
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
#endif
    }

#undef VMA_COPY_IF_NOT_NULL

    // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
#endif
}
VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
{
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
    return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
}
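// Worked example of the heuristic above, assuming the default macro values
// (VMA_SMALL_HEAP_MAX_SIZE = 1 GiB, VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE = 256 MiB):
// a 256 MiB heap counts as "small", so its blocks are 256 MiB / 8 = 32 MiB;
// an 8 GiB heap is "large" and uses the preferred 256 MiB block size.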
VkResult VmaAllocator_T::AllocateMemoryOfType(
    VkDeviceSize size,
    VkDeviceSize alignment,
    bool dedicatedAllocation,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    const VmaAllocationCreateInfo& createInfo,
    uint32_t memTypeIndex,
    VmaSuballocationType suballocType,
    size_t allocationCount,
    VmaAllocation* pAllocations)
{
    VMA_ASSERT(pAllocations != VMA_NULL);
    VMA_DEBUG_LOG("  AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);

    VmaAllocationCreateInfo finalCreateInfo = createInfo;

    // If memory type is not HOST_VISIBLE, disable MAPPED.
    if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
        (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    {
        finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    }

    VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    VMA_ASSERT(blockVector);

    const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    bool preferDedicatedMemory =
        VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
        dedicatedAllocation ||
        // Heuristic: allocate dedicated memory if requested size is greater than half of preferred block size.
        size > preferredBlockSize / 2;

    if(preferDedicatedMemory &&
        (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
        finalCreateInfo.pool == VK_NULL_HANDLE)
    {
        finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
    }

    if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    {
        if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
        {
            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
        else
        {
            return AllocateDedicatedMemory(
                size, suballocType, memTypeIndex,
                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
                finalCreateInfo.pUserData,
                dedicatedBuffer, dedicatedImage,
                allocationCount, pAllocations);
        }
    }
    else
    {
        VkResult res = blockVector->Allocate(
            VK_NULL_HANDLE, // hCurrentPool
            m_CurrentFrameIndex.load(),
            size, alignment,
            finalCreateInfo, suballocType,
            allocationCount, pAllocations);
        if(res == VK_SUCCESS)
        {
            return res;
        }

        // Block allocation failed. Try dedicated memory.
        if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
        {
            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
        else
        {
            res = AllocateDedicatedMemory(
                size, suballocType, memTypeIndex,
                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
                finalCreateInfo.pUserData,
                dedicatedBuffer, dedicatedImage,
                allocationCount, pAllocations);
            if(res == VK_SUCCESS)
            {
                VMA_DEBUG_LOG("    Allocated as DedicatedMemory");
                return VK_SUCCESS;
            }
            else
            {
                // Everything failed: return error code.
                VMA_DEBUG_LOG("    vkAllocateMemory FAILED");
                return res;
            }
        }
    }
}
VkResult VmaAllocator_T::AllocateDedicatedMemory(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    bool map,
    bool isUserDataString,
    void* pUserData,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    size_t allocationCount,
    VmaAllocation* pAllocations)
{
    VMA_ASSERT(allocationCount > 0 && pAllocations);

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;

#if VMA_DEDICATED_ALLOCATION
    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    if(m_UseKhrDedicatedAllocation)
    {
        if(dedicatedBuffer != VK_NULL_HANDLE)
        {
            VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
            dedicatedAllocInfo.buffer = dedicatedBuffer;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
        else if(dedicatedImage != VK_NULL_HANDLE)
        {
            dedicatedAllocInfo.image = dedicatedImage;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
    }
#endif // #if VMA_DEDICATED_ALLOCATION

    size_t allocIndex;
    VkResult res = VK_SUCCESS;
    for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    {
        res = AllocateDedicatedMemoryPage(
            size, suballocType, memTypeIndex, allocInfo, map,
            isUserDataString, pUserData,
            pAllocations + allocIndex);
        if(res != VK_SUCCESS)
        {
            break;
        }
    }

    if(res == VK_SUCCESS)
    {
        // Register the new allocations in m_pDedicatedAllocations.
        VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
        {
            VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
        }

        VMA_DEBUG_LOG("    Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
    }
    else
    {
        // Free all already created allocations.
        while(allocIndex--)
        {
            VmaAllocation currAlloc = pAllocations[allocIndex];
            VkDeviceMemory hMemory = currAlloc->GetMemory();
            // No explicit unmap needed: any mapping is destroyed together with
            // the VkDeviceMemory object itself.
            FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);

            currAlloc->SetUserData(this, VMA_NULL);
            vma_delete(this, currAlloc);
        }

        memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    }

    return res;
}
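// The VkMemoryDedicatedAllocateInfoKHR structure chained into allocInfo.pNext
// above is what actually makes the allocation "dedicated": it tells the driver
// that this VkDeviceMemory will only ever back the given buffer or image,
// which lets some implementations pick a faster or less wasteful code path.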
VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    const VkMemoryAllocateInfo& allocInfo,
    bool map,
    bool isUserDataString,
    void* pUserData,
    VmaAllocation* pAllocation)
{
    VkDeviceMemory hMemory = VK_NULL_HANDLE;
    VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    if(res < 0)
    {
        VMA_DEBUG_LOG("    vkAllocateMemory FAILED");
        return res;
    }

    void* pMappedData = VMA_NULL;
    if(map)
    {
        res = (*m_VulkanFunctions.vkMapMemory)(m_hDevice, hMemory, 0, VK_WHOLE_SIZE, 0, &pMappedData);
        if(res < 0)
        {
            VMA_DEBUG_LOG("    vkMapMemory FAILED");
            FreeVulkanMemory(memTypeIndex, size, hMemory);
            return res;
        }
    }

    *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
    (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    (*pAllocation)->SetUserData(this, pUserData);
    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    {
        FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    }

    return VK_SUCCESS;
}
void VmaAllocator_T::GetBufferMemoryRequirements(
    VkBuffer hBuffer,
    VkMemoryRequirements& memReq,
    bool& requiresDedicatedAllocation,
    bool& prefersDedicatedAllocation) const
{
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
        memReqInfo.buffer = hBuffer;

        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };

        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
        memReq2.pNext = &memDedicatedReq;

        (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);

        memReq = memReq2.memoryRequirements;
        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
        prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    }
    else
#endif // #if VMA_DEDICATED_ALLOCATION
    {
        (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
        requiresDedicatedAllocation = false;
        prefersDedicatedAllocation = false;
    }
}
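// Semantics of the two output flags, per VK_KHR_dedicated_allocation:
// requiresDedicatedAllocation means the implementation mandates a dedicated
// VkDeviceMemory for this resource, while prefersDedicatedAllocation is only
// a performance hint. The caller combines both into the dedicatedAllocation
// parameter of AllocateMemoryOfType.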
void VmaAllocator_T::GetImageMemoryRequirements(
    VkImage hImage,
    VkMemoryRequirements& memReq,
    bool& requiresDedicatedAllocation,
    bool& prefersDedicatedAllocation) const
{
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
        memReqInfo.image = hImage;

        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };

        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
        memReq2.pNext = &memDedicatedReq;

        (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);

        memReq = memReq2.memoryRequirements;
        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
        prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    }
    else
#endif // #if VMA_DEDICATED_ALLOCATION
    {
        (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
        requiresDedicatedAllocation = false;
        prefersDedicatedAllocation = false;
    }
}
VkResult VmaAllocator_T::AllocateMemory(
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    const VmaAllocationCreateInfo& createInfo,
    VmaSuballocationType suballocType,
    size_t allocationCount,
    VmaAllocation* pAllocations)
{
    memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);

    VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));

    if(vkMemReq.size == 0)
    {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
        (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    {
        VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }
    if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
        (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
    {
        VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }
    if(requiresDedicatedAllocation)
    {
        if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
        {
            VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
        if(createInfo.pool != VK_NULL_HANDLE)
        {
            VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
    }
    if((createInfo.pool != VK_NULL_HANDLE) &&
        ((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0))
    {
        VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }

    if(createInfo.pool != VK_NULL_HANDLE)
    {
        const VkDeviceSize alignmentForPool = VMA_MAX(
            vkMemReq.alignment,
            GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
        return createInfo.pool->m_BlockVector.Allocate(
            createInfo.pool,
            m_CurrentFrameIndex.load(),
            vkMemReq.size,
            alignmentForPool,
            createInfo,
            suballocType,
            allocationCount,
            pAllocations);
    }
    else
    {
        // Bit mask of memory Vulkan types acceptable for this allocation.
        uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
        uint32_t memTypeIndex = UINT32_MAX;
        VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
        if(res == VK_SUCCESS)
        {
            VkDeviceSize alignmentForMemType = VMA_MAX(
                vkMemReq.alignment,
                GetMemoryTypeMinAlignment(memTypeIndex));

            res = AllocateMemoryOfType(
                vkMemReq.size,
                alignmentForMemType,
                requiresDedicatedAllocation || prefersDedicatedAllocation,
                dedicatedBuffer,
                dedicatedImage,
                createInfo,
                memTypeIndex,
                suballocType,
                allocationCount,
                pAllocations);
            // Succeeded on first try.
            if(res == VK_SUCCESS)
            {
                return res;
            }
            // Allocation from this memory type failed. Try other compatible memory types.
            else
            {
                for(;;)
                {
                    // Remove old memTypeIndex from list of possibilities.
                    memoryTypeBits &= ~(1u << memTypeIndex);
                    // Find alternative memTypeIndex.
                    res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
                    if(res == VK_SUCCESS)
                    {
                        alignmentForMemType = VMA_MAX(
                            vkMemReq.alignment,
                            GetMemoryTypeMinAlignment(memTypeIndex));

                        res = AllocateMemoryOfType(
                            vkMemReq.size,
                            alignmentForMemType,
                            requiresDedicatedAllocation || prefersDedicatedAllocation,
                            dedicatedBuffer,
                            dedicatedImage,
                            createInfo,
                            memTypeIndex,
                            suballocType,
                            allocationCount,
                            pAllocations);
                        if(res == VK_SUCCESS)
                        {
                            return res;
                        }
                        // else: this memory type also failed; try the next one on the next iteration.
                    }
                    else
                    {
                        // No other matching memory type index could be found.
                        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
                    }
                }
            }
        }
        else
        {
            return res;
        }
    }
}
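// A minimal sketch of the fallback mask arithmetic used above (hypothetical
// values): if vkMemReq.memoryTypeBits is 0b1011 and type 1 fails, the mask
// becomes 0b1011 & ~(1u << 1) == 0b1001, so vmaFindMemoryTypeIndex can only
// pick types 0 or 3 on the next iteration; the loop ends when the mask runs
// out of acceptable types.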
void VmaAllocator_T::FreeMemory(
    size_t allocationCount,
    const VmaAllocation* pAllocations)
{
    VMA_ASSERT(pAllocations);

    for(size_t allocIndex = allocationCount; allocIndex--; )
    {
        VmaAllocation allocation = pAllocations[allocIndex];

        if(allocation != VK_NULL_HANDLE)
        {
            if(TouchAllocation(allocation))
            {
                if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
                {
                    FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
                }

                switch(allocation->GetType())
                {
                case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
                    {
                        VmaBlockVector* pBlockVector = VMA_NULL;
                        VmaPool hPool = allocation->GetPool();
                        if(hPool != VK_NULL_HANDLE)
                        {
                            pBlockVector = &hPool->m_BlockVector;
                        }
                        else
                        {
                            const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
                            pBlockVector = m_pBlockVectors[memTypeIndex];
                        }
                        pBlockVector->Free(allocation);
                    }
                    break;
                case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
                    FreeDedicatedMemory(allocation);
                    break;
                default:
                    VMA_ASSERT(0);
                }
            }

            allocation->SetUserData(this, VMA_NULL);
            vma_delete(this, allocation);
        }
    }
}
VkResult VmaAllocator_T::ResizeAllocation(
    const VmaAllocation alloc,
    VkDeviceSize newSize)
{
    if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
    {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    if(newSize == alloc->GetSize())
    {
        return VK_SUCCESS;
    }

    switch(alloc->GetType())
    {
    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
        return VK_ERROR_FEATURE_NOT_PRESENT;
    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
        {
            alloc->ChangeSize(newSize);
            VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
            return VK_SUCCESS;
        }
        else
        {
            return VK_ERROR_OUT_OF_POOL_MEMORY;
        }
    default:
        VMA_ASSERT(0);
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
}
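// Resizing is only attempted inside the allocation's current block, which is
// why dedicated allocations return VK_ERROR_FEATURE_NOT_PRESENT here: their
// VkDeviceMemory was created with exactly the original size and cannot grow.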
void VmaAllocator_T::CalculateStats(VmaStats* pStats)
{
    // Initialize.
    InitStatInfo(pStats->total);
    for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
        InitStatInfo(pStats->memoryType[i]);
    for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
        InitStatInfo(pStats->memoryHeap[i]);

    // Process default pools.
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
        VMA_ASSERT(pBlockVector);
        pBlockVector->AddStats(pStats);
    }

    // Process custom pools.
    {
        VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
        for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
        {
            m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
        }
    }

    // Process dedicated allocations.
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
        VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
        {
            VmaStatInfo allocationStatInfo;
            (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
            VmaAddStatInfo(pStats->total, allocationStatInfo);
            VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
            VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
        }
    }

    // Postprocess.
    VmaPostprocessCalcStatInfo(pStats->total);
    for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
        VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
        VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
}
static const uint32_t VMA_VENDOR_ID_AMD = 4098;
VkResult VmaAllocator_T::DefragmentationBegin(
    const VmaDefragmentationInfo2& info,
    VmaDefragmentationStats* pStats,
    VmaDefragmentationContext* pContext)
{
    if(info.pAllocationsChanged != VMA_NULL)
    {
        memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
    }

    *pContext = vma_new(this, VmaDefragmentationContext_T)(
        this, m_CurrentFrameIndex.load(), info.flags, pStats);

    (*pContext)->AddPools(info.poolCount, info.pPools);
    (*pContext)->AddAllocations(
        info.allocationCount, info.pAllocations, info.pAllocationsChanged);

    VkResult res = (*pContext)->Defragment(
        info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
        info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
        info.commandBuffer, pStats);

    if(res != VK_NOT_READY)
    {
        vma_delete(this, *pContext);
        *pContext = VMA_NULL;
    }

    return res;
}

VkResult VmaAllocator_T::DefragmentationEnd(
    VmaDefragmentationContext context)
{
    vma_delete(this, context);
    return VK_SUCCESS;
}
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    if(hAllocation->CanBecomeLost())
    {
        // Mutating version of GetLastUseFrameIndex is used here: the current
        // frame index may be stored as the new last-use index.
        const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                break;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}
bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
{
    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    if(hAllocation->CanBecomeLost())
    {
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                return false;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                return true;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        return true;
    }
}
VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
{
    VMA_DEBUG_LOG("  CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);

    VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    if(newCreateInfo.maxBlockCount == 0)
    {
        newCreateInfo.maxBlockCount = SIZE_MAX;
    }
    if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
    {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);

    *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);

    VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    if(res != VK_SUCCESS)
    {
        vma_delete(this, *pPool);
        *pPool = VMA_NULL;
        return res;
    }

    // Add to m_Pools.
    {
        VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
        (*pPool)->SetId(m_NextPoolId++);
        VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    }

    return VK_SUCCESS;
}
void VmaAllocator_T::DestroyPool(VmaPool pool)
{
    // Remove from m_Pools.
    {
        VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
        bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
        VMA_ASSERT(success && "Pool not found in Allocator.");
    }

    vma_delete(this, pool);
}

void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
{
    pool->m_BlockVector.GetPoolStats(pPoolStats);
}
void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
{
    m_CurrentFrameIndex.store(frameIndex);
}

void VmaAllocator_T::MakePoolAllocationsLost(
    VmaPool hPool,
    size_t* pLostAllocationCount)
{
    hPool->m_BlockVector.MakePoolAllocationsLost(
        m_CurrentFrameIndex.load(),
        pLostAllocationCount);
}

VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
{
    return hPool->m_BlockVector.CheckCorruption();
}
VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
{
    VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;

    // Process default pools.
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        if(((1u << memTypeIndex) & memoryTypeBits) != 0)
        {
            VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
            VMA_ASSERT(pBlockVector);
            VkResult localRes = pBlockVector->CheckCorruption();
            switch(localRes)
            {
            case VK_ERROR_FEATURE_NOT_PRESENT:
                break;
            case VK_SUCCESS:
                finalRes = VK_SUCCESS;
                break;
            default:
                return localRes;
            }
        }
    }

    // Process custom pools.
    {
        VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
        for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
        {
            if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
            {
                VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
                switch(localRes)
                {
                case VK_ERROR_FEATURE_NOT_PRESENT:
                    break;
                case VK_SUCCESS:
                    finalRes = VK_SUCCESS;
                    break;
                default:
                    return localRes;
                }
            }
        }
    }

    return finalRes;
}
void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
{
    *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
    (*pAllocation)->InitLost();
}
VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
{
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);

    VkResult res;
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
        {
            res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
            if(res == VK_SUCCESS)
            {
                m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
            }
        }
        else
        {
            res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
    }
    else
    {
        res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    }

    if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    }

    return res;
}
void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
{
    if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    }

    (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());

    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        m_HeapSizeLimit[heapIndex] += size;
    }
}
VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
{
    if(hAllocation->CanBecomeLost())
    {
        return VK_ERROR_MEMORY_MAP_FAILED;
    }

    switch(hAllocation->GetType())
    {
    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
            char *pBytes = VMA_NULL;
            VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
            if(res == VK_SUCCESS)
            {
                *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
                hAllocation->BlockAllocMap();
            }
            return res;
        }
    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
        return hAllocation->DedicatedAllocMap(this, ppData);
    default:
        VMA_ASSERT(0);
        return VK_ERROR_MEMORY_MAP_FAILED;
    }
}
void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
{
    switch(hAllocation->GetType())
    {
    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
            hAllocation->BlockAllocUnmap();
            pBlock->Unmap(this, 1);
        }
        break;
    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
        hAllocation->DedicatedAllocUnmap(this);
        break;
    default:
        VMA_ASSERT(0);
    }
}
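// Usage sketch via the public wrappers (not part of the library; error
// handling shortened; srcData/srcSize assumed to exist):
//
//     void* pData = VMA_NULL;
//     if(vmaMapMemory(vmaAllocator, alloc, &pData) == VK_SUCCESS)
//     {
//         memcpy(pData, srcData, (size_t)srcSize);
//         vmaUnmapMemory(vmaAllocator, alloc);
//     }
//
// Map/Unmap calls on a block are reference-counted (note the count argument
// of pBlock->Map/Unmap above), so mapping the same block through several
// allocations at once is safe.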
VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
{
    VkResult res = VK_SUCCESS;
    switch(hAllocation->GetType())
    {
    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
        res = GetVulkanFunctions().vkBindBufferMemory(
            m_hDevice,
            hBuffer,
            hAllocation->GetMemory(),
            0); // memoryOffset
        break;
    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
            VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
            res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
            break;
        }
    default:
        VMA_ASSERT(0);
    }
    return res;
}
VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
{
    VkResult res = VK_SUCCESS;
    switch(hAllocation->GetType())
    {
    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
        res = GetVulkanFunctions().vkBindImageMemory(
            m_hDevice,
            hImage,
            hAllocation->GetMemory(),
            0); // memoryOffset
        break;
    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
            VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
            res = pBlock->BindImageMemory(this, hAllocation, hImage);
            break;
        }
    default:
        VMA_ASSERT(0);
    }
    return res;
}
void VmaAllocator_T::FlushOrInvalidateAllocation(
    VmaAllocation hAllocation,
    VkDeviceSize offset, VkDeviceSize size,
    VMA_CACHE_OPERATION op)
{
    const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    {
        const VkDeviceSize allocationSize = hAllocation->GetSize();
        VMA_ASSERT(offset <= allocationSize);

        const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;

        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
        memRange.memory = hAllocation->GetMemory();

        switch(hAllocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                memRange.size = allocationSize - memRange.offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
                    allocationSize - memRange.offset);
            }
            break;

        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            // 1. Still within this allocation.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                size = allocationSize - offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
            }
            memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);

            // 2. Adjust to whole block.
            const VkDeviceSize allocationOffset = hAllocation->GetOffset();
            VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
            const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
            memRange.offset += allocationOffset;
            memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);

            break;
        }

        default:
            VMA_ASSERT(0);
        }

        switch(op)
        {
        case VMA_CACHE_FLUSH:
            (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        case VMA_CACHE_INVALIDATE:
            (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        default:
            VMA_ASSERT(0);
        }
    }
    // else: just ignore this call.
}
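// Worked example of the alignment math above (hypothetical values): with
// nonCoherentAtomSize = 64, offset = 100, size = 200, the range is widened to
// memRange.offset = VmaAlignDown(100, 64) = 64 and
// memRange.size = VmaAlignUp(200 + (100 - 64), 64) = VmaAlignUp(236, 64) = 256,
// then clamped so it never reaches past the end of the block, as the Vulkan
// spec requires for vkFlushMappedMemoryRanges and vkInvalidateMappedMemoryRanges.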
void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
{
    VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);

    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    {
        VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
        VMA_ASSERT(success);
    }

    VkDeviceMemory hMemory = allocation->GetMemory();

    // No explicit unmap needed: any mapping is destroyed together with the
    // VkDeviceMemory object itself.

    FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);

    VMA_DEBUG_LOG("    Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
}
void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
{
    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
        !hAllocation->CanBecomeLost() &&
        (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    {
        void* pData = VMA_NULL;
        VkResult res = Map(hAllocation, &pData);
        if(res == VK_SUCCESS)
        {
            memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
            FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
            Unmap(hAllocation);
        }
        else
        {
            VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
        }
    }
}
#if VMA_STATS_STRING_ENABLED

void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    bool dedicatedAllocationsStarted = false;
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        if(pDedicatedAllocVector->empty() == false)
        {
            if(dedicatedAllocationsStarted == false)
            {
                dedicatedAllocationsStarted = true;
                json.WriteString("DedicatedAllocations");
                json.BeginObject();
            }

            json.BeginString("Type ");
            json.ContinueString(memTypeIndex);
            json.EndString();

            json.BeginArray();

            for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
            {
                json.BeginObject(true);
                const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
                hAlloc->PrintParameters(json);
                json.EndObject();
            }

            json.EndArray();
        }
    }
    if(dedicatedAllocationsStarted)
    {
        json.EndObject();
    }

    {
        bool allocationsStarted = false;
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
            {
                if(allocationsStarted == false)
                {
                    allocationsStarted = true;
                    json.WriteString("DefaultPools");
                    json.BeginObject();
                }

                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                json.EndString();

                m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
            }
        }
        if(allocationsStarted)
        {
            json.EndObject();
        }
    }

    // Custom pools.
    {
        VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
        const size_t poolCount = m_Pools.size();
        if(poolCount > 0)
        {
            json.WriteString("Pools");
            json.BeginObject();
            for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
            {
                json.BeginString();
                json.ContinueString(m_Pools[poolIndex]->GetId());
                json.EndString();

                m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
            }
            json.EndObject();
        }
    }
}

#endif // #if VMA_STATS_STRING_ENABLED

VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator)
{
    VMA_ASSERT(pCreateInfo && pAllocator);
    VMA_DEBUG_LOG("vmaCreateAllocator");
    *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    return (*pAllocator)->Init(pCreateInfo);
}

void vmaDestroyAllocator(VmaAllocator allocator)
{
    if(allocator != VK_NULL_HANDLE)
    {
        VMA_DEBUG_LOG("vmaDestroyAllocator");
        VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
        vma_delete(&allocationCallbacks, allocator);
    }
}
void vmaGetPhysicalDeviceProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
{
    VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
}

void vmaGetMemoryProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
{
    VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
}

void vmaGetMemoryTypeProperties(
    VmaAllocator allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* pFlags)
{
    VMA_ASSERT(allocator && pFlags);
    VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
}
void vmaSetCurrentFrameIndex(
    VmaAllocator allocator,
    uint32_t frameIndex)
{
    VMA_ASSERT(allocator);
    VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->SetCurrentFrameIndex(frameIndex);
}

void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats)
{
    VMA_ASSERT(allocator && pStats);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK
    allocator->CalculateStats(pStats);
}
#if VMA_STATS_STRING_ENABLED

void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap)
{
    VMA_ASSERT(allocator && ppStatsString);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VmaStringBuilder sb(allocator);
    {
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();

        VmaStats stats;
        allocator->CalculateStats(&stats);

        json.WriteString("Total");
        VmaPrintStatInfo(json, stats.total);

        for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
        {
            json.BeginString("Heap ");
            json.ContinueString(heapIndex);
            json.EndString();
            json.BeginObject();

            json.WriteString("Size");
            json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);

            json.WriteString("Flags");
            json.BeginArray(true);
            if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
            {
                json.WriteString("DEVICE_LOCAL");
            }
            json.EndArray();

            if(stats.memoryHeap[heapIndex].blockCount > 0)
            {
                json.WriteString("Stats");
                VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
            }

            for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
            {
                if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
                {
                    json.BeginString("Type ");
                    json.ContinueString(typeIndex);
                    json.EndString();

                    json.BeginObject();

                    json.WriteString("Flags");
                    json.BeginArray(true);
                    VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
                    if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
                    {
                        json.WriteString("DEVICE_LOCAL");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
                    {
                        json.WriteString("HOST_VISIBLE");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
                    {
                        json.WriteString("HOST_COHERENT");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
                    {
                        json.WriteString("HOST_CACHED");
                    }
                    if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
                    {
                        json.WriteString("LAZILY_ALLOCATED");
                    }
                    json.EndArray();

                    if(stats.memoryType[typeIndex].blockCount > 0)
                    {
                        json.WriteString("Stats");
                        VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
                    }

                    json.EndObject();
                }
            }

            json.EndObject();
        }
        if(detailedMap == VK_TRUE)
        {
            allocator->PrintDetailedMap(json);
        }

        json.EndObject();
    }

    const size_t len = sb.GetLength();
    char* const pChars = vma_new_array(allocator, char, len + 1);
    if(len > 0)
    {
        memcpy(pChars, sb.GetData(), len);
    }
    pChars[len] = '\0';
    *ppStatsString = pChars;
}

void vmaFreeStatsString(
    VmaAllocator allocator,
    char* pStatsString)
{
    if(pStatsString != VMA_NULL)
    {
        VMA_ASSERT(allocator);
        size_t len = strlen(pStatsString);
        vma_delete_array(allocator, pStatsString, len + 1);
    }
}
#endif // #if VMA_STATS_STRING_ENABLED

// This function is not protected by any mutex because it just reads immutable data.
VkResult vmaFindMemoryTypeIndex(
    VmaAllocator allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex)
{
    VMA_ASSERT(allocator != VK_NULL_HANDLE);
    VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);

    if(pAllocationCreateInfo->memoryTypeBits != 0)
    {
        memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
    }

    uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;

    if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0)
    {
        preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    }

    // Convert usage to requiredFlags and preferredFlags.
    switch(pAllocationCreateInfo->usage)
    {
    case VMA_MEMORY_USAGE_UNKNOWN:
        break;
    case VMA_MEMORY_USAGE_GPU_ONLY:
        if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
        {
            preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
        }
        break;
    case VMA_MEMORY_USAGE_CPU_ONLY:
        requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
        break;
    case VMA_MEMORY_USAGE_CPU_TO_GPU:
        requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
        if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
        {
            preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
        }
        break;
    case VMA_MEMORY_USAGE_GPU_TO_CPU:
        requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
        preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
        break;
    default:
        break;
    }

    *pMemoryTypeIndex = UINT32_MAX;
    uint32_t minCost = UINT32_MAX;
    for(uint32_t memTypeIndex = 0, memTypeBit = 1;
        memTypeIndex < allocator->GetMemoryTypeCount();
        ++memTypeIndex, memTypeBit <<= 1)
    {
        // This memory type is acceptable according to memoryTypeBits bitmask.
        if((memTypeBit & memoryTypeBits) != 0)
        {
            const VkMemoryPropertyFlags currFlags =
                allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
            // This memory type contains requiredFlags.
            if((requiredFlags & ~currFlags) == 0)
            {
                // Calculate cost as number of bits from preferredFlags not present in this memory type.
                uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
                // Remember memory type with lowest cost.
                if(currCost < minCost)
                {
                    *pMemoryTypeIndex = memTypeIndex;
                    if(currCost == 0)
                    {
                        return VK_SUCCESS;
                    }
                    minCost = currCost;
                }
            }
        }
    }
    return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
}
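// Usage sketch (not part of the library; hypothetical values): picking a
// host-visible memory type for a staging buffer without creating any
// resource first.
//
//     VmaAllocationCreateInfo stagingInfo = {};
//     stagingInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
//     uint32_t memTypeIndex = UINT32_MAX;
//     VkResult r = vmaFindMemoryTypeIndex(
//         vmaAllocator, UINT32_MAX /* all types acceptable */,
//         &stagingInfo, &memTypeIndex);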
VkResult vmaFindMemoryTypeIndexForBufferInfo(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex)
{
    VMA_ASSERT(allocator != VK_NULL_HANDLE);
    VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);

    // A temporary buffer is created and immediately destroyed just to learn
    // its memory requirements for the given create info.
    const VkDevice hDev = allocator->m_hDevice;
    VkBuffer hBuffer = VK_NULL_HANDLE;
    VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
        hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
    if(res == VK_SUCCESS)
    {
        VkMemoryRequirements memReq = {};
        allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
            hDev, hBuffer, &memReq);

        res = vmaFindMemoryTypeIndex(
            allocator,
            memReq.memoryTypeBits,
            pAllocationCreateInfo,
            pMemoryTypeIndex);

        allocator->GetVulkanFunctions().vkDestroyBuffer(
            hDev, hBuffer, allocator->GetAllocationCallbacks());
    }
    return res;
}
VkResult vmaFindMemoryTypeIndexForImageInfo(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex)
{
    VMA_ASSERT(allocator != VK_NULL_HANDLE);
    VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);

    // Same temporary-resource trick as for buffers above.
    const VkDevice hDev = allocator->m_hDevice;
    VkImage hImage = VK_NULL_HANDLE;
    VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
        hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
    if(res == VK_SUCCESS)
    {
        VkMemoryRequirements memReq = {};
        allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
            hDev, hImage, &memReq);

        res = vmaFindMemoryTypeIndex(
            allocator,
            memReq.memoryTypeBits,
            pAllocationCreateInfo,
            pMemoryTypeIndex);

        allocator->GetVulkanFunctions().vkDestroyImage(
            hDev, hImage, allocator->GetAllocationCallbacks());
    }
    return res;
}
VkResult vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool)
{
    VMA_ASSERT(allocator && pCreateInfo && pPool);

    VMA_DEBUG_LOG("vmaCreatePool");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult res = allocator->CreatePool(pCreateInfo, pPool);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
    }
#endif

    return res;
}
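// Usage sketch (not part of the library; hypothetical values): a custom pool
// for one memory type, typically found with vmaFindMemoryTypeIndex* first.
//
//     VmaPoolCreateInfo poolInfo = {};
//     poolInfo.memoryTypeIndex = memTypeIndex; // assumed to be found earlier
//     poolInfo.blockSize = 64ull * 1024 * 1024;
//     poolInfo.minBlockCount = 1;
//     VmaPool pool;
//     VkResult r = vmaCreatePool(vmaAllocator, &poolInfo, &pool);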
void vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool)
{
    VMA_ASSERT(allocator);

    if(pool == VK_NULL_HANDLE)
    {
        return;
    }

    VMA_DEBUG_LOG("vmaDestroyPool");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
    }
#endif

    allocator->DestroyPool(pool);
}
void vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats)
{
    VMA_ASSERT(allocator && pool && pPoolStats);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->GetPoolStats(pool, pPoolStats);
}

void vmaMakePoolAllocationsLost(
    VmaAllocator allocator,
    VmaPool pool,
    size_t* pLostAllocationCount)
{
    VMA_ASSERT(allocator && pool);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
    }
#endif

    allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
}

VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
{
    VMA_ASSERT(allocator && pool);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VMA_DEBUG_LOG("vmaCheckPoolCorruption");

    return allocator->CheckPoolCorruption(pool);
}
VkResult vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);

    VMA_DEBUG_LOG("vmaAllocateMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult result = allocator->AllocateMemory(
        *pVkMemoryRequirements,
        false, // requiresDedicatedAllocation
        false, // prefersDedicatedAllocation
        VK_NULL_HANDLE, // dedicatedBuffer
        VK_NULL_HANDLE, // dedicatedImage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_UNKNOWN,
        1, // allocationCount
        pAllocation);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordAllocateMemory(
            allocator->GetCurrentFrameIndex(),
            *pVkMemoryRequirements,
            *pCreateInfo,
            *pAllocation);
    }
#endif

    if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    {
        allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    }

    return result;
}
VkResult vmaAllocateMemoryPages(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    size_t allocationCount,
    VmaAllocation* pAllocations,
    VmaAllocationInfo* pAllocationInfo)
{
    if(allocationCount == 0)
    {
        return VK_SUCCESS;
    }

    VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);

    VMA_DEBUG_LOG("vmaAllocateMemoryPages");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult result = allocator->AllocateMemory(
        *pVkMemoryRequirements,
        false, // requiresDedicatedAllocation
        false, // prefersDedicatedAllocation
        VK_NULL_HANDLE, // dedicatedBuffer
        VK_NULL_HANDLE, // dedicatedImage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_UNKNOWN,
        allocationCount,
        pAllocations);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordAllocateMemoryPages(
            allocator->GetCurrentFrameIndex(),
            *pVkMemoryRequirements,
            *pCreateInfo,
            (uint64_t)allocationCount,
            pAllocations);
    }
#endif

    if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    {
        for(size_t i = 0; i < allocationCount; ++i)
        {
            allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
        }
    }

    return result;
}
VkResult vmaAllocateMemoryForBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);

    VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkMemoryRequirements vkMemReq = {};
    bool requiresDedicatedAllocation = false;
    bool prefersDedicatedAllocation = false;
    allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
        requiresDedicatedAllocation,
        prefersDedicatedAllocation);

    VkResult result = allocator->AllocateMemory(
        vkMemReq,
        requiresDedicatedAllocation,
        prefersDedicatedAllocation,
        buffer, // dedicatedBuffer
        VK_NULL_HANDLE, // dedicatedImage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_BUFFER,
        1, // allocationCount
        pAllocation);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
            allocator->GetCurrentFrameIndex(),
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            *pCreateInfo,
            *pAllocation);
    }
#endif

    if(pAllocationInfo && result == VK_SUCCESS)
    {
        allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    }

    return result;
}
VkResult vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);

    VMA_DEBUG_LOG("vmaAllocateMemoryForImage");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkMemoryRequirements vkMemReq = {};
    bool requiresDedicatedAllocation = false;
    bool prefersDedicatedAllocation = false;
    allocator->GetImageMemoryRequirements(image, vkMemReq,
        requiresDedicatedAllocation, prefersDedicatedAllocation);

    VkResult result = allocator->AllocateMemory(
        vkMemReq,
        requiresDedicatedAllocation,
        prefersDedicatedAllocation,
        VK_NULL_HANDLE, // dedicatedBuffer
        image, // dedicatedImage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
        1, // allocationCount
        pAllocation);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordAllocateMemoryForImage(
            allocator->GetCurrentFrameIndex(),
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            *pCreateInfo,
            *pAllocation);
    }
#endif

    if(pAllocationInfo && result == VK_SUCCESS)
    {
        allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    }

    return result;
}
void vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator);

    if(allocation == VK_NULL_HANDLE)
    {
        return;
    }

    VMA_DEBUG_LOG("vmaFreeMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordFreeMemory(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    allocator->FreeMemory(
        1, // allocationCount
        &allocation);
}
void vmaFreeMemoryPages(
    VmaAllocator allocator,
    size_t allocationCount,
    VmaAllocation* pAllocations)
{
    if(allocationCount == 0)
    {
        return;
    }

    VMA_ASSERT(allocator);

    VMA_DEBUG_LOG("vmaFreeMemoryPages");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordFreeMemoryPages(
            allocator->GetCurrentFrameIndex(),
            (uint64_t)allocationCount,
            pAllocations);
    }
#endif

    allocator->FreeMemory(allocationCount, pAllocations);
}
VkResult vmaResizeAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkDeviceSize newSize)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_LOG("vmaResizeAllocation");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordResizeAllocation(
            allocator->GetCurrentFrameIndex(),
            allocation,
            newSize);
    }
#endif

    return allocator->ResizeAllocation(allocation, newSize);
}
void vmaGetAllocationInfo(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && allocation && pAllocationInfo);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordGetAllocationInfo(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    allocator->GetAllocationInfo(allocation, pAllocationInfo);
}
VkBool32 vmaTouchAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordTouchAllocation(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    return allocator->TouchAllocation(allocation);
}
void vmaSetAllocationUserData(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void* pUserData)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocation->SetUserData(allocator, pUserData);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordSetAllocationUserData(
            allocator->GetCurrentFrameIndex(),
            allocation,
            pUserData);
    }
#endif
}
void vmaCreateLostAllocation(
    VmaAllocator allocator,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT(allocator && pAllocation);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK;

    allocator->CreateLostAllocation(pAllocation);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordCreateLostAllocation(
            allocator->GetCurrentFrameIndex(),
            *pAllocation);
    }
#endif
}
VkResult vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData)
{
    VMA_ASSERT(allocator && allocation && ppData);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult res = allocator->Map(allocation, ppData);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordMapMemory(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    return res;
}
void vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordUnmapMemory(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    allocator->Unmap(allocation);
}
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_LOG("vmaFlushAllocation");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordFlushAllocation(
            allocator->GetCurrentFrameIndex(),
            allocation, offset, size);
    }
#endif
}
void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_LOG("vmaInvalidateAllocation");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordInvalidateAllocation(
            allocator->GetCurrentFrameIndex(),
            allocation, offset, size);
    }
#endif
}
VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
{
    VMA_ASSERT(allocator);

    VMA_DEBUG_LOG("vmaCheckCorruption");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->CheckCorruption(memoryTypeBits);
}
VkResult vmaDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo *pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats)
{
    // Deprecated interface, reimplemented using the new one.

    VmaDefragmentationInfo2 info2 = {};
    info2.allocationCount = (uint32_t)allocationCount;
    info2.pAllocations = pAllocations;
    info2.pAllocationsChanged = pAllocationsChanged;
    if(pDefragmentationInfo != VMA_NULL)
    {
        info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
        info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
    }
    else
    {
        info2.maxCpuAllocationsToMove = UINT32_MAX;
        info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
    }
    // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer
    // are deliberately left zero: the legacy entry point is CPU-only.

    VmaDefragmentationContext ctx;
    VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
    if(res == VK_NOT_READY)
    {
        res = vmaDefragmentationEnd(allocator, ctx);
    }
    return res;
}
VkResult vmaDefragmentationBegin(
    VmaAllocator allocator,
    const VmaDefragmentationInfo2* pInfo,
    VmaDefragmentationStats* pStats,
    VmaDefragmentationContext* pContext)
{
    VMA_ASSERT(allocator && pInfo && pContext);
    VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
    VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));

    VMA_DEBUG_LOG("vmaDefragmentationBegin");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordDefragmentationBegin(
            allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
    }
#endif

    return res;
}
VkResult vmaDefragmentationEnd(
    VmaAllocator allocator,
    VmaDefragmentationContext context)
{
    VMA_ASSERT(allocator);

    VMA_DEBUG_LOG("vmaDefragmentationEnd");

    if(context != VK_NULL_HANDLE)
    {
        VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
        if(allocator->GetRecorder() != VMA_NULL)
        {
            allocator->GetRecorder()->RecordDefragmentationEnd(
                allocator->GetCurrentFrameIndex(), context);
        }
#endif

        return allocator->DefragmentationEnd(context);
    }
    else
    {
        return VK_SUCCESS;
    }
}
VkResult vmaBindBufferMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkBuffer buffer)
{
    VMA_ASSERT(allocator && allocation && buffer);

    VMA_DEBUG_LOG("vmaBindBufferMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->BindBufferMemory(allocation, buffer);
}
VkResult vmaBindImageMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkImage image)
{
    VMA_ASSERT(allocator && allocation && image);

    VMA_DEBUG_LOG("vmaBindImageMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->BindImageMemory(allocation, image);
}
VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);

    if(pBufferCreateInfo->size == 0)
    {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VMA_DEBUG_LOG("vmaCreateBuffer");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    *pBuffer = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;

    // 1. Create VkBuffer.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
        allocator->m_hDevice,
        pBufferCreateInfo,
        allocator->GetAllocationCallbacks(),
        pBuffer);
    if(res >= 0)
    {
        // 2. vkGetBufferMemoryRequirements.
        VkMemoryRequirements vkMemReq = {};
        bool requiresDedicatedAllocation = false;
        bool prefersDedicatedAllocation = false;
        allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
            requiresDedicatedAllocation, prefersDedicatedAllocation);

        // Alignment required by specific buffer usages must already be covered
        // by the alignment reported in the memory requirements.
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
        }
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
        }
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
        }

        // 3. Allocate memory using allocator.
        res = allocator->AllocateMemory(
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            *pBuffer, // dedicatedBuffer
            VK_NULL_HANDLE, // dedicatedImage
            *pAllocationCreateInfo,
            VMA_SUBALLOCATION_TYPE_BUFFER,
            pAllocation);

#if VMA_RECORDING_ENABLED
        if(allocator->GetRecorder() != VMA_NULL)
        {
            allocator->GetRecorder()->RecordCreateBuffer(
                allocator->GetCurrentFrameIndex(),
                *pBufferCreateInfo,
                *pAllocationCreateInfo,
                *pAllocation);
        }
#endif

        if(res >= 0)
        {
            // 4. Bind buffer with memory.
            res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
            if(res >= 0)
            {
#if VMA_STATS_STRING_ENABLED
                (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
#endif
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }
                return VK_SUCCESS;
            }
            allocator->FreeMemory(
                1, // allocationCount
                pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
            *pBuffer = VK_NULL_HANDLE;
            return res;
        }
        (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
        *pBuffer = VK_NULL_HANDLE;
        return res;
    }
    return res;
}
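// [Editor's sketch, not part of the library] A typical vmaCreateBuffer call covering
// the whole create/allocate/bind sequence implemented above; 'bufSize' is hypothetical.
static VkResult ExampleCreateVertexBuffer(VmaAllocator allocator, VkDeviceSize bufSize,
    VkBuffer* outBuf, VmaAllocation* outAlloc)
{
    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = bufSize;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    // On failure, *outBuf and *outAlloc are left as VK_NULL_HANDLE.
    // Later, vmaDestroyBuffer(allocator, *outBuf, *outAlloc) destroys both in one call.
    return vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
        outBuf, outAlloc, VMA_NULL);
}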
void vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator);

    if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    {
        return;
    }

    VMA_DEBUG_LOG("vmaDestroyBuffer");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordDestroyBuffer(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    if(buffer != VK_NULL_HANDLE)
    {
        (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    }

    if(allocation != VK_NULL_HANDLE)
    {
        allocator->FreeMemory(
            1, // allocationCount
            &allocation);
    }
}
VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);

    if(pImageCreateInfo->extent.width == 0 ||
        pImageCreateInfo->extent.height == 0 ||
        pImageCreateInfo->extent.depth == 0 ||
        pImageCreateInfo->mipLevels == 0 ||
        pImageCreateInfo->arrayLayers == 0)
    {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VMA_DEBUG_LOG("vmaCreateImage");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    *pImage = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;

    // 1. Create VkImage.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
        allocator->m_hDevice,
        pImageCreateInfo,
        allocator->GetAllocationCallbacks(),
        pImage);
    if(res >= 0)
    {
        VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
            VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
            VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;

        // 2. Allocate memory using allocator.
        VkMemoryRequirements vkMemReq = {};
        bool requiresDedicatedAllocation = false;
        bool prefersDedicatedAllocation = false;
        allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
            requiresDedicatedAllocation, prefersDedicatedAllocation);

        res = allocator->AllocateMemory(
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            VK_NULL_HANDLE, // dedicatedBuffer
            *pImage, // dedicatedImage
            *pAllocationCreateInfo,
            suballocType,
            pAllocation);

#if VMA_RECORDING_ENABLED
        if(allocator->GetRecorder() != VMA_NULL)
        {
            allocator->GetRecorder()->RecordCreateImage(
                allocator->GetCurrentFrameIndex(),
                *pImageCreateInfo,
                *pAllocationCreateInfo,
                *pAllocation);
        }
#endif

        if(res >= 0)
        {
            // 3. Bind image with memory.
            res = allocator->BindImageMemory(*pAllocation, *pImage);
            if(res >= 0)
            {
#if VMA_STATS_STRING_ENABLED
                (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
#endif
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }
                return VK_SUCCESS;
            }
            allocator->FreeMemory(
                1, // allocationCount
                pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
            *pImage = VK_NULL_HANDLE;
            return res;
        }
        (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
        *pImage = VK_NULL_HANDLE;
        return res;
    }
    return res;
}
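// [Editor's sketch, not part of the library] A typical vmaCreateImage call; OPTIMAL
// tiling selects the VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL path above. The format and
// usage flags here are illustrative choices.
static VkResult ExampleCreateSampledImage(VmaAllocator allocator, uint32_t width, uint32_t height,
    VkImage* outImage, VmaAllocation* outAlloc)
{
    VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
    imgCreateInfo.extent = { width, height, 1 };
    imgCreateInfo.mipLevels = 1;
    imgCreateInfo.arrayLayers = 1;
    imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    return vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo,
        outImage, outAlloc, VMA_NULL);
}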
void vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator);

    if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    {
        return;
    }

    VMA_DEBUG_LOG("vmaDestroyImage");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordDestroyImage(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    if(image != VK_NULL_HANDLE)
    {
        (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    }

    if(allocation != VK_NULL_HANDLE)
    {
        allocator->FreeMemory(
            1, // allocationCount
            &allocation);
    }
}
#endif // #ifdef VMA_IMPLEMENTATION
diff --git a/src/Common.cpp b/src/Common.cpp
index ea7b9a1..790cdda 100644
--- a/src/Common.cpp
+++ b/src/Common.cpp
@@ -1,3 +1,25 @@
+//
+// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+
#include "Common.h"
#ifdef _WIN32
diff --git a/src/Common.h b/src/Common.h
index 714fa6a..f5954c5 100644
--- a/src/Common.h
+++ b/src/Common.h
@@ -1,3 +1,25 @@
+//
+// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+
#ifndef COMMON_H_
#define COMMON_H_
diff --git a/src/Shaders/Shader.frag b/src/Shaders/Shader.frag
index 6f1f9d3..4060483 100644
--- a/src/Shaders/Shader.frag
+++ b/src/Shaders/Shader.frag
@@ -1,5 +1,5 @@
//
-// Copyright (c) 2017 Advanced Micro Devices, Inc. All rights reserved.
+// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
diff --git a/src/Shaders/Shader.vert b/src/Shaders/Shader.vert
index c6e6cab..06ff262 100644
--- a/src/Shaders/Shader.vert
+++ b/src/Shaders/Shader.vert
@@ -1,5 +1,5 @@
//
-// Copyright (c) 2017 Advanced Micro Devices, Inc. All rights reserved.
+// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
diff --git a/src/Shaders/SparseBindingTest.comp b/src/Shaders/SparseBindingTest.comp
index 21c41ab..7c8889e 100644
--- a/src/Shaders/SparseBindingTest.comp
+++ b/src/Shaders/SparseBindingTest.comp
@@ -1,5 +1,5 @@
//
-// Copyright (c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+// Copyright (c) 2018-2019 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
diff --git a/src/SparseBindingTest.cpp b/src/SparseBindingTest.cpp
index 3722523..8fbd5e9 100644
--- a/src/SparseBindingTest.cpp
+++ b/src/SparseBindingTest.cpp
@@ -1,3 +1,25 @@
+//
+// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+
#include "Common.h"
#include "SparseBindingTest.h"
diff --git a/src/SparseBindingTest.h b/src/SparseBindingTest.h
index 8637c9c..69b95d6 100644
--- a/src/SparseBindingTest.h
+++ b/src/SparseBindingTest.h
@@ -1,3 +1,25 @@
+//
+// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+
#pragma once
#ifdef _WIN32
diff --git a/src/Tests.cpp b/src/Tests.cpp
index 058fedb..6ad99fb 100644
--- a/src/Tests.cpp
+++ b/src/Tests.cpp
@@ -1,3 +1,25 @@
+//
+// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+
#include "Tests.h"
#include "VmaUsage.h"
#include "Common.h"
diff --git a/src/Tests.h b/src/Tests.h
index 9da4f6f..df79318 100644
--- a/src/Tests.h
+++ b/src/Tests.h
@@ -1,3 +1,25 @@
+//
+// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+
#ifndef TESTS_H_
#define TESTS_H_
diff --git a/src/VmaReplay/Common.cpp b/src/VmaReplay/Common.cpp
index a7c723f..104d390 100644
--- a/src/VmaReplay/Common.cpp
+++ b/src/VmaReplay/Common.cpp
@@ -1,3 +1,25 @@
+//
+// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+
#include "Common.h"
bool StrRangeToPtrList(const StrRange& s, std::vector<uint64_t>& out)
diff --git a/src/VmaReplay/Common.h b/src/VmaReplay/Common.h
index d0548bf..fa2dfe9 100644
--- a/src/VmaReplay/Common.h
+++ b/src/VmaReplay/Common.h
@@ -1,3 +1,25 @@
+//
+// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+
#pragma once
#include "VmaUsage.h"
diff --git a/src/VmaReplay/Constants.cpp b/src/VmaReplay/Constants.cpp
index 9f19d13..e353f49 100644
--- a/src/VmaReplay/Constants.cpp
+++ b/src/VmaReplay/Constants.cpp
@@ -1,3 +1,25 @@
+//
+// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+
#include "Common.h"
#include "Constants.h"
diff --git a/src/VmaReplay/Constants.h b/src/VmaReplay/Constants.h
index 2c58f68..e479168 100644
--- a/src/VmaReplay/Constants.h
+++ b/src/VmaReplay/Constants.h
@@ -1,3 +1,25 @@
+//
+// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+
#pragma once
extern const int RESULT_EXCEPTION;
diff --git a/src/VmaReplay/VmaReplay.cpp b/src/VmaReplay/VmaReplay.cpp
index 4228009..31a53ed 100644
--- a/src/VmaReplay/VmaReplay.cpp
+++ b/src/VmaReplay/VmaReplay.cpp
@@ -1,5 +1,5 @@
//
-// Copyright (c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+// Copyright (c) 2018-2019 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
diff --git a/src/VmaReplay/VmaUsage.cpp b/src/VmaReplay/VmaUsage.cpp
index d2d035b..ee781cc 100644
--- a/src/VmaReplay/VmaUsage.cpp
+++ b/src/VmaReplay/VmaUsage.cpp
@@ -1,2 +1,24 @@
+//
+// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+
#define VMA_IMPLEMENTATION
#include "VmaUsage.h"
diff --git a/src/VmaReplay/VmaUsage.h b/src/VmaReplay/VmaUsage.h
index 31c2b59..ec00a29 100644
--- a/src/VmaReplay/VmaUsage.h
+++ b/src/VmaReplay/VmaUsage.h
@@ -1,3 +1,25 @@
+//
+// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+
#pragma once
#define NOMINMAX
diff --git a/src/VmaUsage.cpp b/src/VmaUsage.cpp
index 2ebf69c..6dc7d7b 100644
--- a/src/VmaUsage.cpp
+++ b/src/VmaUsage.cpp
@@ -1,3 +1,25 @@
+//
+// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+
/*
In exactly one CPP file define macro VMA_IMPLEMENTATION and then include
vk_mem_alloc.h to include definitions of its internal implementation
diff --git a/src/VmaUsage.h b/src/VmaUsage.h
index f785ba2..b8761ad 100644
--- a/src/VmaUsage.h
+++ b/src/VmaUsage.h
@@ -1,3 +1,25 @@
+//
+// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+
#ifndef VMA_USAGE_H_
#define VMA_USAGE_H_
diff --git a/src/VulkanSample.cpp b/src/VulkanSample.cpp
index f4b0f27..7884048 100644
--- a/src/VulkanSample.cpp
+++ b/src/VulkanSample.cpp
@@ -1,5 +1,5 @@
//
-// Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
+// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
diff --git a/src/vk_mem_alloc.h b/src/vk_mem_alloc.h
index 4a621f6..c49b8d4 100644
--- a/src/vk_mem_alloc.h
+++ b/src/vk_mem_alloc.h
@@ -1,5 +1,5 @@
//
-// Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
+// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal