diff --git a/docs/html/index.html b/docs/html/index.html index 447953c..143454b 100644 --- a/docs/html/index.html +++ b/docs/html/index.html @@ -133,6 +133,8 @@ memReq.usage = VMA_MEMORY_USAGE_GPU_ONLY; VkBuffer buffer; vmaCreateBuffer(allocator, &bufferInfo, &memReq, &buffer, nullptr, nullptr); +

When no longer needed, destroy your buffer or image using vmaDestroyBuffer() / vmaDestroyImage(). These functions also free the memory bound to the resource.

+
vmaDestroyBuffer(allocator, buffer);
 

Configuration

Set VMA_STATS_STRING_ENABLED macro in vk_mem_alloc.h to 0 or 1 to disable/enable compilation of code for dumping internal allocator state to string in JSON format.

diff --git a/docs/html/vk__mem__alloc_8h_source.html b/docs/html/vk__mem__alloc_8h_source.html index 0f685cf..de40d22 100644 --- a/docs/html/vk__mem__alloc_8h_source.html +++ b/docs/html/vk__mem__alloc_8h_source.html @@ -62,63 +62,63 @@ $(function() {
vk_mem_alloc.h
-Go to the documentation of this file.
1 //
2 // Copyright (c) 2017 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
149 #include <vulkan/vulkan.h>
150 
152 
156 VK_DEFINE_HANDLE(VmaAllocator)
157 
158 typedef struct VmaAllocatorCreateInfo
160 {
162 
163  VkPhysicalDevice physicalDevice;
165 
166  VkDevice device;
168 
171 
174 
175  const VkAllocationCallbacks* pAllocationCallbacks;
177 
179 VkResult vmaCreateAllocator(
180  const VmaAllocatorCreateInfo* pCreateInfo,
181  VmaAllocator* pAllocator);
182 
185  VmaAllocator allocator);
186 
192  VmaAllocator allocator,
193  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
194 
200  VmaAllocator allocator,
201  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
202 
210  VmaAllocator allocator,
211  uint32_t memoryTypeIndex,
212  VkMemoryPropertyFlags* pFlags);
213 
214 typedef struct VmaStatInfo
215 {
216  uint32_t AllocationCount;
219  VkDeviceSize UsedBytes;
220  VkDeviceSize UnusedBytes;
221  VkDeviceSize SuballocationSizeMin, SuballocationSizeAvg, SuballocationSizeMax;
222  VkDeviceSize UnusedRangeSizeMin, UnusedRangeSizeAvg, UnusedRangeSizeMax;
223 } VmaStatInfo;
224 
226 struct VmaStats
227 {
228  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
229  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
231 };
232 
234 void vmaCalculateStats(
235  VmaAllocator allocator,
236  VmaStats* pStats);
237 
238 #define VMA_STATS_STRING_ENABLED 1
239 
240 #if VMA_STATS_STRING_ENABLED
241 
243 
246  VmaAllocator allocator,
247  char** ppStatsString,
248  VkBool32 detailedMap);
249 
250 void vmaFreeStatsString(
251  VmaAllocator allocator,
252  char* pStatsString);
253 
254 #endif // #if VMA_STATS_STRING_ENABLED
255 
258 
263 typedef enum VmaMemoryUsage
264 {
277 
278 typedef struct VmaMemoryRequirements
279 {
288  VkBool32 ownMemory;
297  VkMemoryPropertyFlags requiredFlags;
302  VkMemoryPropertyFlags preferredFlags;
309  VkBool32 neverAllocate;
311 
326 VkResult vmaFindMemoryTypeIndex(
327  VmaAllocator allocator,
328  uint32_t memoryTypeBits,
329  const VmaMemoryRequirements* pMemoryRequirements,
330  uint32_t* pMemoryTypeIndex);
331 
334 
351 VkResult vmaAllocateMemory(
352  VmaAllocator allocator,
353  const VkMemoryRequirements* pVkMemoryRequirements,
354  const VmaMemoryRequirements* pVmaMemoryRequirements,
355  VkMappedMemoryRange* pMemory,
356  uint32_t* pMemoryTypeIndex);
357 
366  VmaAllocator allocator,
367  VkBuffer buffer,
368  const VmaMemoryRequirements* pMemoryRequirements,
369  VkMappedMemoryRange* pMemory,
370  uint32_t* pMemoryTypeIndex);
371 
374  VmaAllocator allocator,
375  VkImage image,
376  const VmaMemoryRequirements* pMemoryRequirements,
377  VkMappedMemoryRange* pMemory,
378  uint32_t* pMemoryTypeIndex);
379 
381 void vmaFreeMemory(
382  VmaAllocator allocator,
383  const VkMappedMemoryRange* pMemory);
384 
390 VkResult vmaMapMemory(
391  VmaAllocator allocator,
392  const VkMappedMemoryRange* pMemory,
393  void** ppData);
394 
395 void vmaUnmapMemory(
396  VmaAllocator allocator,
397  const VkMappedMemoryRange* pMemory);
398 
401 
423 VkResult vmaCreateBuffer(
424  VmaAllocator allocator,
425  const VkBufferCreateInfo* pCreateInfo,
426  const VmaMemoryRequirements* pMemoryRequirements,
427  VkBuffer* pBuffer,
428  VkMappedMemoryRange* pMemory,
429  uint32_t* pMemoryTypeIndex);
430 
431 void vmaDestroyBuffer(
432  VmaAllocator allocator,
433  VkBuffer buffer);
434 
436 VkResult vmaCreateImage(
437  VmaAllocator allocator,
438  const VkImageCreateInfo* pCreateInfo,
439  const VmaMemoryRequirements* pMemoryRequirements,
440  VkImage* pImage,
441  VkMappedMemoryRange* pMemory,
442  uint32_t* pMemoryTypeIndex);
443 
444 void vmaDestroyImage(
445  VmaAllocator allocator,
446  VkImage image);
447 
450 #ifdef VMA_IMPLEMENTATION
451 
#include <cstdlib>
#include <cstdio>     // snprintf - portable number-to-string formatting
#include <cinttypes>  // PRIu32 / PRIu64 format macros
453 
454 /*******************************************************************************
455 CONFIGURATION
456 
457 Change these definitions depending on your environment.
458 */
459 
460 #define VMA_USE_STL_CONTAINERS 0
461 
462 /* Set this macro to 1 to make the library including and using STL containers:
463 std::pair, std::vector, std::list, std::unordered_map.
464 
465 Set it to 0 or undefined to make the library using its own implementation of
466 the containers.
467 */
468 #if VMA_USE_STL_CONTAINERS
469 #define VMA_USE_STL_VECTOR 1
470 #define VMA_USE_STL_UNORDERED_MAP 1
471 #define VMA_USE_STL_LIST 1
472 #endif
473 
474 #if VMA_USE_STL_VECTOR
475 #include <vector>
476 #endif
477 
478 #if VMA_USE_STL_UNORDERED_MAP
479 #include <unordered_map>
480 #endif
481 
482 #if VMA_USE_STL_LIST
483 #include <list>
484 #endif
485 
486 /*
487 Following headers are used in this CONFIGURATION section only, so feel free to
488 remove them if not needed.
489 */
490 #include <cassert> // for assert
491 #include <algorithm> // for min, max
492 #include <mutex> // for std::mutex
493 
494 #ifdef _DEBUG
495  // Normal assert to check for programmer's errors, especially in Debug configuration.
496  #define VMA_ASSERT(expr) assert(expr)
497  // Assert that will be called very often, like inside data structures e.g. operator[].
498  // Making it non-empty can make program slow.
499  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
500 #else
501  #define VMA_ASSERT(expr)
502  #define VMA_HEAVY_ASSERT(expr)
503 #endif
504 
505 // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
506 #define VMA_NULL nullptr
507 
508 #define VMA_ALIGN_OF(type) (__alignof(type))
509 #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
510 #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
511 
512 #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
513 #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
514 #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
515 
516 #define VMA_DEBUG_LOG(format, ...)
517 /*
518 #define VMA_DEBUG_LOG(format, ...) do { \
519  printf(format, __VA_ARGS__); \
520  printf("\n"); \
521 } while(false)
522 */
523 
524 #if VMA_STATS_STRING_ENABLED
525 
// Converts num to a decimal string in outStr (capacity strLen).
// Portability fix: _ultoa_s is MSVC-only; snprintf is standard C/C++ and
// always NUL-terminates for strLen > 0.
static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
{
    snprintf(outStr, strLen, "%" PRIu32, num);
}
// Converts num to a decimal string in outStr (capacity strLen).
// Portability fix: _ui64toa_s is MSVC-only; snprintf with PRIu64 is standard
// and always NUL-terminates for strLen > 0.
static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
{
    snprintf(outStr, strLen, "%" PRIu64, num);
}
534 
535 #endif // #if VMA_STATS_STRING_ENABLED
536 
// Thin wrapper over std::mutex exposing explicit Lock/Unlock, so the rest of
// the library does not depend on <mutex> names directly.
class VmaMutex
{
public:
    VmaMutex() = default;
    ~VmaMutex() = default;

    void Lock()   { m_Mutex.lock(); }
    void Unlock() { m_Mutex.unlock(); }

private:
    std::mutex m_Mutex;
};
547 
548 /*
549 Main parameter for function assessing how good is a free suballocation for a new
550 allocation request.
551 
552 - Set to true to use Best-Fit algorithm - prefer smaller blocks, as close to the
553  size of requested allocations as possible.
554 - Set to false to use Worst-Fit algorithm - prefer larger blocks, as large as
555  possible.
556 
557 Experiments in special testing environment showed that Best-Fit algorithm is
558 better.
559 */
560 static const bool VMA_BEST_FIT = true;
561 
562 /*
563 Every object will have its own allocation.
564 Enable for debugging purposes only.
565 */
566 static const bool VMA_DEBUG_ALWAYS_OWN_MEMORY = false;
567 
568 /*
569 Minimum alignment of all suballocations, in bytes.
570 Set to more than 1 for debugging purposes only. Must be power of two.
571 */
572 static const VkDeviceSize VMA_DEBUG_ALIGNMENT = 1;
573 
574 /*
575 Minimum margin between suballocations, in bytes.
576 Set nonzero for debugging purposes only.
577 */
578 static const VkDeviceSize VMA_DEBUG_MARGIN = 0;
579 
580 /*
581 Set this to 1 for debugging purposes only, to enable single mutex protecting all
582 entry calls to the library. Can be useful for debugging multithreading issues.
583 */
584 #define VMA_DEBUG_GLOBAL_MUTEX 0
585 
586 /*
587 Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity.
588 Set to more than 1 for debugging purposes only. Must be power of two.
589 */
590 static const VkDeviceSize VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY = 1;
591 
592 // Maximum size of a memory heap in Vulkan to consider it "small".
593 static const VkDeviceSize VMA_SMALL_HEAP_MAX_SIZE = 512 * 1024 * 1024;
594 // Default size of a block allocated as single VkDeviceMemory from a "large" heap.
595 static const VkDeviceSize VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE = 256 * 1024 * 1024;
596 // Default size of a block allocated as single VkDeviceMemory from a "small" heap.
597 static const VkDeviceSize VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE = 64 * 1024 * 1024;
598 
599 /*******************************************************************************
600 END OF CONFIGURATION
601 */
602 
603 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
604  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
605 
// Returns number of bits set to 1 in (v).
static inline uint32_t CountBitsSet(uint32_t v)
{
    // Brian Kernighan's method: each iteration clears the lowest set bit,
    // so the loop runs once per set bit.
    uint32_t count = 0;
    while(v != 0)
    {
        v &= v - 1;
        ++count;
    }
    return count;
}
616 
// Aligns given value up to nearest multiply of align value.
// For example: VmaAlignUp(11, 8) = 16. Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    // Bump past the previous multiple, then truncate down via integer division.
    const T bumped = val + align - (T)1;
    return (bumped / align) * align;
}
624 
// Division with mathematical rounding to nearest number.
template <typename T>
inline T VmaRoundDiv(T x, T y)
{
    // Adding half the divisor before truncating division rounds to nearest.
    const T half = y / (T)2;
    return (x + half) / y;
}
631 /*
632 Returns true if two memory blocks occupy overlapping pages.
633 ResourceA must be in less memory offset than ResourceB.
634 
635 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
636 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
637 */
638 static inline bool VmaBlocksOnSamePage(
639  VkDeviceSize resourceAOffset,
640  VkDeviceSize resourceASize,
641  VkDeviceSize resourceBOffset,
642  VkDeviceSize pageSize)
643 {
644  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
645  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
646  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
647  VkDeviceSize resourceBStart = resourceBOffset;
648  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
649  return resourceAEndPage == resourceBStartPage;
650 }
651 
// Kind of resource bound to a suballocation. Used by
// VmaIsBufferImageGranularityConflict to decide whether two neighboring
// suballocations must respect bufferImageGranularity.
enum VmaSuballocationType
{
    // Range is free - not bound to any resource.
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    // Resource kind unknown - treated conservatively (always conflicts).
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    // Image with unknown tiling - treated conservatively.
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    // Forces a 32-bit underlying type for the enum.
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};
662 
663 /*
664 Returns true if given suballocation types could conflict and must respect
665 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
666 or linear image and another one is optimal image. If type is unknown, behave
667 conservatively.
668 */
669 static inline bool VmaIsBufferImageGranularityConflict(
670  VmaSuballocationType suballocType1,
671  VmaSuballocationType suballocType2)
672 {
673  if(suballocType1 > suballocType2)
674  VMA_SWAP(suballocType1, suballocType2);
675 
676  switch(suballocType1)
677  {
678  case VMA_SUBALLOCATION_TYPE_FREE:
679  return false;
680  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
681  return true;
682  case VMA_SUBALLOCATION_TYPE_BUFFER:
683  return
684  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
685  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
686  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
687  return
688  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
689  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
690  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
691  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
692  return
693  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
694  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
695  return false;
696  default:
697  VMA_ASSERT(0);
698  return true;
699  }
700 }
701 
702 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
703 struct VmaMutexLock
704 {
705 public:
706  VmaMutexLock(VmaMutex& mutex) : m_Mutex(mutex) { mutex.Lock(); }
707  ~VmaMutexLock() { m_Mutex.Unlock(); }
708 
709 private:
710  VmaMutex& m_Mutex;
711 };
712 
713 #if VMA_DEBUG_GLOBAL_MUTEX
714  static VmaMutex gDebugGlobalMutex;
715  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex);
716 #else
717  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
718 #endif
719 
720 // Minimum size of a free suballocation to register it in the free suballocation collection.
721 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
722 
/*
Performs binary search and returns iterator to first element that is greater or
equal to (key), according to comparison (cmp).

Cmp should return true if first argument is less than second argument.

Returned value is the found element, if present in the collection or place where
new element with value (key) should be inserted.
*/
template <typename IterT, typename KeyT, typename CmpT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
{
    // Classic lower-bound over the half-open index range [lo, hi).
    size_t lo = 0;
    size_t hi = (size_t)(end - beg);
    while(lo < hi)
    {
        const size_t mid = (lo + hi) / 2;
        if(cmp(*(beg + mid), key))
            lo = mid + 1;   // element < key: answer lies to the right
        else
            hi = mid;       // element >= key: answer is mid or to the left
    }
    return beg + lo;
}
746 
748 // Memory allocation
749 
750 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
751 {
752  if((pAllocationCallbacks != VMA_NULL) &&
753  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
754  {
755  return (*pAllocationCallbacks->pfnAllocation)(
756  pAllocationCallbacks->pUserData,
757  size,
758  alignment,
759  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
760  }
761  else
762  {
763  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
764  }
765 }
766 
767 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
768 {
769  if((pAllocationCallbacks != VMA_NULL) &&
770  (pAllocationCallbacks->pfnFree != VMA_NULL))
771  {
772  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
773  }
774  else
775  {
776  VMA_SYSTEM_FREE(ptr);
777  }
778 }
779 
// Allocates raw, suitably-aligned storage for a single T via VmaMalloc.
// Does NOT run T's constructor - pair with vma_new for construction.
template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}
785 
// Allocates raw, suitably-aligned storage for `count` objects of type T.
// Constructors are NOT run; elements are uninitialized.
// NOTE(review): sizeof(T) * count is not checked for overflow - confirm
// callers keep counts small.
template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}
791 
// Placement-new helpers: allocate raw storage through the allocation callbacks
// and construct in place. Pair with vma_delete / vma_delete_array.
#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

// NOTE(review): this constructs only the first element via placement new; the
// remaining (count-1) slots are left uninitialized. Callers appear to rely on
// the element type being POD - confirm.
#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
795 
796 template<typename T>
797 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
798 {
799  ptr->~T();
800  VmaFree(pAllocationCallbacks, ptr);
801 }
802 
803 template<typename T>
804 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
805 {
806  if(ptr != VMA_NULL)
807  {
808  for(size_t i = count; i--; )
809  ptr[i].~T();
810  VmaFree(pAllocationCallbacks, ptr);
811  }
812 }
813 
814 // STL-compatible allocator.
815 template<typename T>
816 class VmaStlAllocator
817 {
818 public:
819  const VkAllocationCallbacks* const m_pCallbacks;
820  typedef T value_type;
821 
822  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
823  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
824 
825  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
826  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
827 
828  template<typename U>
829  bool operator==(const VmaStlAllocator<U>& rhs) const
830  {
831  return m_pCallbacks == rhs.m_pCallbacks;
832  }
833  template<typename U>
834  bool operator!=(const VmaStlAllocator<U>& rhs) const
835  {
836  return m_pCallbacks != rhs.m_pCallbacks;
837  }
838 };
839 
840 #if VMA_USE_STL_VECTOR
841 
842 #define VmaVector std::vector
843 
// Inserts `item` at position `index` of a std::vector (STL-backed build).
template<typename T, typename allocatorT>
static void VectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}
849 
// Removes the element at position `index` from a std::vector (STL-backed build).
template<typename T, typename allocatorT>
static void VectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}
855 
856 #else // #if VMA_USE_STL_VECTOR
857 
858 /* Class with interface compatible with subset of std::vector.
859 T must be POD because constructors and destructors are not called and memcpy is
860 used for these objects. */
861 template<typename T, typename AllocatorT>
862 class VmaVector
863 {
864 public:
865  VmaVector(AllocatorT& allocator) :
866  m_Allocator(allocator),
867  m_pArray(VMA_NULL),
868  m_Count(0),
869  m_Capacity(0)
870  {
871  }
872 
873  VmaVector(size_t count, AllocatorT& allocator) :
874  m_Allocator(allocator),
875  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator->m_pCallbacks, count) : VMA_NULL),
876  m_Count(count),
877  m_Capacity(count)
878  {
879  }
880 
881  VmaVector(const VmaVector<T, AllocatorT>& src) :
882  m_Allocator(src.m_Allocator),
883  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(allocator->m_pCallbacks, src.m_Count) : VMA_NULL),
884  m_Count(src.m_Count),
885  m_Capacity(src.m_Count)
886  {
887  if(m_Count != 0)
888  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
889  }
890 
891  ~VmaVector()
892  {
893  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
894  }
895 
896  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
897  {
898  if(&rhs != this)
899  {
900  Resize(rhs.m_Count);
901  if(m_Count != 0)
902  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
903  }
904  return *this;
905  }
906 
907  bool empty() const { return m_Count == 0; }
908  size_t size() const { return m_Count; }
909  T* data() { return m_pArray; }
910  const T* data() const { return m_pArray; }
911 
912  T& operator[](size_t index)
913  {
914  VMA_HEAVY_ASSERT(index < m_Count);
915  return m_pArray[index];
916  }
917  const T& operator[](size_t index) const
918  {
919  VMA_HEAVY_ASSERT(index < m_Count);
920  return m_pArray[index];
921  }
922 
923  T& front()
924  {
925  VMA_HEAVY_ASSERT(m_Count > 0);
926  return m_pArray[0];
927  }
928  const T& front() const
929  {
930  VMA_HEAVY_ASSERT(m_Count > 0);
931  return m_pArray[0];
932  }
933  T& back()
934  {
935  VMA_HEAVY_ASSERT(m_Count > 0);
936  return m_pArray[m_Count - 1];
937  }
938  const T& back() const
939  {
940  VMA_HEAVY_ASSERT(m_Count > 0);
941  return m_pArray[m_Count - 1];
942  }
943 
944  void reserve(size_t newCapacity, bool freeMemory = false)
945  {
946  newCapacity = VMA_MAX(newCapacity, m_Count);
947 
948  if((newCapacity < m_Capacity) && !freeMemory)
949  newCapacity = m_Capacity;
950 
951  if(newCapacity != m_Capacity)
952  {
953  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_hAllocator, newCapacity) : VMA_NULL;
954  if(m_Count != 0)
955  memcpy(newArray, m_pArray, m_Count * sizeof(T));
956  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
957  m_Capacity = newCapacity;
958  m_pArray = newArray;
959  }
960  }
961 
962  void resize(size_t newCount, bool freeMemory = false)
963  {
964  size_t newCapacity = m_Capacity;
965  if(newCount > m_Capacity)
966  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
967  else if(freeMemory)
968  newCapacity = newCount;
969 
970  if(newCapacity != m_Capacity)
971  {
972  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
973  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
974  if(elementsToCopy != 0)
975  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
976  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
977  m_Capacity = newCapacity;
978  m_pArray = newArray;
979  }
980 
981  m_Count = newCount;
982  }
983 
984  void clear(bool freeMemory = false)
985  {
986  resize(0, freeMemory);
987  }
988 
989  void insert(size_t index, const T& src)
990  {
991  VMA_HEAVY_ASSERT(index <= m_Count);
992  const size_t oldCount = size();
993  resize(oldCount + 1);
994  if(index < oldCount)
995  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
996  m_pArray[index] = src;
997  }
998 
999  void remove(size_t index)
1000  {
1001  VMA_HEAVY_ASSERT(index < m_Count);
1002  const size_t oldCount = size();
1003  if(index < oldCount - 1)
1004  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
1005  resize(oldCount - 1);
1006  }
1007 
1008  void push_back(const T& src)
1009  {
1010  const size_t newIndex = size();
1011  resize(newIndex + 1);
1012  m_pArray[newIndex] = src;
1013  }
1014 
1015  void pop_back()
1016  {
1017  VMA_HEAVY_ASSERT(m_Count > 0);
1018  resize(size() - 1);
1019  }
1020 
1021  void push_front(const T& src)
1022  {
1023  insert(0, src);
1024  }
1025 
1026  void pop_front()
1027  {
1028  VMA_HEAVY_ASSERT(m_Count > 0);
1029  remove(0);
1030  }
1031 
1032  typedef T* iterator;
1033 
1034  iterator begin() { return m_pArray; }
1035  iterator end() { return m_pArray + m_Count; }
1036 
1037 private:
1038  AllocatorT m_Allocator;
1039  T* m_pArray;
1040  size_t m_Count;
1041  size_t m_Capacity;
1042 };
1043 
// Inserts `item` at position `index` of a VmaVector (non-STL build).
// Mirrors the std::vector overload above so call sites are backend-agnostic.
template<typename T, typename allocatorT>
static void VectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}
1049 
// Removes the element at position `index` from a VmaVector (non-STL build).
template<typename T, typename allocatorT>
static void VectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
1055 
1056 #endif // #if VMA_USE_STL_VECTOR
1057 
1059 // class VmaPoolAllocator
1060 
/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    // Frees all blocks; every pointer previously returned by Alloc() becomes invalid.
    void Clear();
    // Returns a free slot - uninitialized memory; no constructor is run.
    T* Alloc();
    // Returns the slot to its owning block's free list; no destructor is run.
    void Free(T* ptr);

private:
    // A slot is either a live T or, while free, a link in the block's
    // singly-linked free list (index of the next free slot).
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    // One contiguous array of slots plus the head index of its free list.
    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    // Allocates a fresh block with all slots free and appends it to m_ItemBlocks.
    ItemBlock& CreateNewBlock();
};
1095 
// Stores the callbacks and per-block capacity; blocks are created lazily
// on the first Alloc() that finds no free slot.
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}
1104 
// Releases all blocks still owned by the allocator.
template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}
1110 
1111 template<typename T>
1112 void VmaPoolAllocator<T>::Clear()
1113 {
1114  for(size_t i = m_ItemBlocks.size(); i--; )
1115  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
1116  m_ItemBlocks.clear();
1117 }
1118 
// Returns an uninitialized slot, reusing a free one when possible and
// creating a new block otherwise.
template<typename T>
T* VmaPoolAllocator<T>::Alloc()
{
    // Scan existing blocks, newest first.
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        // UINT_MAX marks an exhausted free list (see CreateNewBlock).
        // NOTE(review): UINT_MAX requires <climits>, which is not visibly
        // included here - presumably pulled in transitively; confirm.
        if(block.FirstFreeIndex != UINT_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            // Pop the slot off the block's free list.
            block.FirstFreeIndex = pItem->NextFreeIndex;
            return &pItem->Value;
        }
    }

    // No block has free item: Create new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    return &pItem->Value;
}
1140 
// Returns `ptr` to the free list of the block that contains it.
// Asserts if the pointer was not allocated from this pool.
template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union. memcpy avoids a pointer cast between the T* and
        // Item* representations.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            // Push the slot onto this block's free list.
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}
1164 
// Allocates a new block whose slots are all free and appends it to m_ItemBlocks.
template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    // FirstFreeIndex = 0: the whole block starts free.
    ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    // Writing through the local copy is fine: pItems points at the same array
    // as the copy stored in m_ItemBlocks.
    for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT_MAX;
    return m_ItemBlocks.back();
}
1179 
1181 // class VmaRawList, VmaList
1182 
1183 #if VMA_USE_STL_LIST
1184 
1185 #define VmaList std::list
1186 
1187 #else // #if VMA_USE_STL_LIST
1188 
// Node of VmaRawList: doubly-linked item holding one value.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};
1196 
// Doubly linked list.
// Nodes are allocated from an internal pool allocator; neither Clear() nor the
// destructor runs item destructors (items are simply returned to the pool).
template<typename T>
class VmaRawList
{
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    // Front()/Back() are only meaningful on a non-empty list.
    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // Parameterless Push* return a node with uninitialized Value.
    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;  // pool of list nodes
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;

    // Declared not defined, to block copy constructor and assignment operator.
    VmaRawList(const VmaRawList<T>& src);
    VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
};
1244 
// Initializes an empty list; nodes come from a pool allocator with
// 128 items per block.
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
1254 
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
    // The pool allocator's own destructor releases the node storage.
}
1261 
1262 template<typename T>
1263 void VmaRawList<T>::Clear()
1264 {
1265  if(IsEmpty() == false)
1266  {
1267  ItemType* pItem = m_pBack;
1268  while(pItem != VMA_NULL)
1269  {
1270  ItemType* const pPrevItem = pItem->pPrev;
1271  m_ItemAllocator.Free(pItem);
1272  pItem = pPrevItem;
1273  }
1274  m_pFront = VMA_NULL;
1275  m_pBack = VMA_NULL;
1276  m_Count = 0;
1277  }
1278 }
1279 
// Appends a new node (Value uninitialized) as the tail and returns it.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pNext = VMA_NULL;
    if(IsEmpty())
    {
        // First item becomes both head and tail.
        pNewItem->pPrev = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        // Link after the current tail.
        pNewItem->pPrev = m_pBack;
        m_pBack->pNext = pNewItem;
        m_pBack = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}
1301 
// Prepends a new node (Value uninitialized) as the head and returns it.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pPrev = VMA_NULL;
    if(IsEmpty())
    {
        // First item becomes both head and tail.
        pNewItem->pNext = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        // Link before the current head.
        pNewItem->pNext = m_pFront;
        m_pFront->pPrev = pNewItem;
        m_pFront = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}
1323 
1324 template<typename T>
1325 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
1326 {
1327  ItemType* const pNewItem = PushBack();
1328  pNewItem->Value = value;
1329  return pNewItem;
1330 }
1331 
1332 template<typename T>
1333 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
1334 {
1335  ItemType* const pNewItem = PushFront();
1336  pNewItem->Value = value;
1337  return newItem;
1338 }
1339 
1340 template<typename T>
1341 void VmaRawList<T>::PopBack()
1342 {
1343  VMA_HEAVY_ASSERT(m_Count > 0);
1344  ItemType* const pBackItem = m_pBack;
1345  ItemType* const pPrevItem = pBackItem->pPrev;
1346  if(pPrevItem != VMA_NULL)
1347  pPrevItem->pNext = VMA_NULL;
1348  m_pBack = pPrevItem;
1349  m_ItemAllocator.Free(pBackItem);
1350  --m_Count;
1351 }
1352 
1353 template<typename T>
1354 void VmaRawList<T>::PopFront()
1355 {
1356  VMA_HEAVY_ASSERT(m_Count > 0);
1357  ItemType* const pFrontItem = m_pFront;
1358  ItemType* const pNextItem = pFrontItem->pNext;
1359  if(pNextItem != VMA_NULL)
1360  pNextItem->pPrev = VMA_NULL;
1361  m_pFront = pNextItem;
1362  m_ItemAllocator.Free(pFrontItem);
1363  --m_Count;
1364 }
1365 
1366 template<typename T>
1367 void VmaRawList<T>::Remove(ItemType* pItem)
1368 {
1369  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
1370  VMA_HEAVY_ASSERT(m_Count > 0);
1371 
1372  if(pItem->pPrev != VMA_NULL)
1373  pItem->pPrev->pNext = pItem->pNext;
1374  else
1375  {
1376  VMA_HEAVY_ASSERT(m_pFront == pItem);
1377  m_pFront = pItem->pNext;
1378  }
1379 
1380  if(pItem->pNext != VMA_NULL)
1381  pItem->pNext->pPrev = pItem->pPrev;
1382  else
1383  {
1384  VMA_HEAVY_ASSERT(m_pBack == pItem);
1385  m_pBack = pItem->pPrev;
1386  }
1387 
1388  m_ItemAllocator.Free(pItem);
1389  --m_Count;
1390 }
1391 
1392 template<typename T>
1393 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
1394 {
1395  if(pItem != VMA_NULL)
1396  {
1397  ItemType* const prevItem = pItem->pPrev;
1398  ItemType* const newItem = m_ItemAllocator.Alloc();
1399  newItem->pPrev = prevItem;
1400  newItem->pNext = pItem;
1401  pItem->pPrev = newItem;
1402  if(prevItem != VMA_NULL)
1403  prevItem->pNext = newItem;
1404  else
1405  {
1406  VMA_HEAVY_ASSERT(m_pFront = pItem);
1407  m_pFront = newItem;
1408  }
1409  ++m_Count;
1410  return newItem;
1411  }
1412  else
1413  return PushBack();
1414 }
1415 
1416 template<typename T>
1417 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
1418 {
1419  if(pItem != VMA_NULL)
1420  {
1421  ItemType* const nextItem = pItem->pNext;
1422  ItemType* const newItem = m_ItemAllocator.Alloc();
1423  newItem->pNext = nextItem;
1424  newItem->pPrev = pItem;
1425  pItem->pNext = newItem;
1426  if(nextItem != VMA_NULL)
1427  nextItem->pPrev = newItem;
1428  else
1429  {
1430  VMA_HEAVY_ASSERT(m_pBack = pItem);
1431  m_pBack = newItem;
1432  }
1433  ++m_Count;
1434  return newItem;
1435  }
1436  else
1437  return PushFront();
1438 }
1439 
1440 template<typename T>
1441 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
1442 {
1443  ItemType* const newItem = InsertBefore(pItem);
1444  newItem->Value = value;
1445  return newItem;
1446 }
1447 
1448 template<typename T>
1449 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
1450 {
1451  ItemType* const newItem = InsertAfter(pItem);
1452  newItem->Value = value;
1453  return newItem;
1454 }
1455 
1456 template<typename T, typename AllocatorT>
1457 class VmaList
1458 {
1459 public:
1460  class iterator
1461  {
1462  public:
1463  iterator() :
1464  m_pList(VMA_NULL),
1465  m_pItem(VMA_NULL)
1466  {
1467  }
1468 
1469  T& operator*() const
1470  {
1471  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
1472  return m_pItem->Value;
1473  }
1474  T* operator->() const
1475  {
1476  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
1477  return &m_pItem->Value;
1478  }
1479 
1480  iterator& operator++()
1481  {
1482  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
1483  m_pItem = m_pItem->pNext;
1484  return *this;
1485  }
1486  iterator& operator--()
1487  {
1488  if(m_pItem != VMA_NULL)
1489  m_pItem = m_pItem->pPrev;
1490  else
1491  {
1492  VMA_HEAVY_ASSERT(!m_pList.IsEmpty());
1493  m_pItem = m_pList->Back();
1494  }
1495  return *this;
1496  }
1497 
1498  iterator operator++(int)
1499  {
1500  iterator result = *this;
1501  ++*this;
1502  return result;
1503  }
1504  iterator operator--(int)
1505  {
1506  iterator result = *this;
1507  --*this;
1508  return result;
1509  }
1510 
1511  bool operator==(const iterator& rhs) const
1512  {
1513  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
1514  return m_pItem == rhs.m_pItem;
1515  }
1516  bool operator!=(const iterator& rhs) const
1517  {
1518  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
1519  return m_pItem != rhs.m_pItem;
1520  }
1521 
1522  private:
1523  VmaRawList<T>* m_pList;
1524  VmaListItem<T>* m_pItem;
1525 
1526  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
1527  m_pList(pList),
1528  m_pItem(pItem)
1529  {
1530  }
1531 
1532  friend class VmaList<T, AllocatorT>;
1533  friend class VmaList<T, AllocatorT>:: const_iterator;
1534  };
1535 
1536  class const_iterator
1537  {
1538  public:
1539  const_iterator() :
1540  m_pList(VMA_NULL),
1541  m_pItem(VMA_NULL)
1542  {
1543  }
1544 
1545  const_iterator(const iterator& src) :
1546  m_pList(src.m_pList),
1547  m_pItem(src.m_pItem)
1548  {
1549  }
1550 
1551  const T& operator*() const
1552  {
1553  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
1554  return m_pItem->Value;
1555  }
1556  const T* operator->() const
1557  {
1558  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
1559  return &m_pItem->Value;
1560  }
1561 
1562  const_iterator& operator++()
1563  {
1564  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
1565  m_pItem = m_pItem->pNext;
1566  return *this;
1567  }
1568  const_iterator& operator--()
1569  {
1570  if(m_pItem != VMA_NULL)
1571  m_pItem = m_pItem->pPrev;
1572  else
1573  {
1574  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
1575  m_pItem = m_pList->Back();
1576  }
1577  return *this;
1578  }
1579 
1580  const_iterator operator++(int)
1581  {
1582  const_iterator result = *this;
1583  ++*this;
1584  return result;
1585  }
1586  const_iterator operator--(int)
1587  {
1588  const_iterator result = *this;
1589  --*this;
1590  return result;
1591  }
1592 
1593  bool operator==(const const_iterator& rhs) const
1594  {
1595  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
1596  return m_pItem == rhs.m_pItem;
1597  }
1598  bool operator!=(const const_iterator& rhs) const
1599  {
1600  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
1601  return m_pItem != rhs.m_pItem;
1602  }
1603 
1604  private:
1605  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
1606  m_pList(pList),
1607  m_pItem(pItem)
1608  {
1609  }
1610 
1611  const VmaRawList<T>* m_pList;
1612  const VmaListItem<T>* m_pItem;
1613 
1614  friend class VmaList<T, AllocatorT>;
1615  };
1616 
1617  VmaList(AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
1618 
1619  bool empty() const { return m_RawList.IsEmpty(); }
1620  size_t size() const { return m_RawList.GetCount(); }
1621 
1622  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
1623  iterator end() { return iterator(&m_RawList, VMA_NULL); }
1624 
1625  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
1626  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
1627 
1628  void clear() { m_RawList.Clear(); }
1629  void push_back(const T& value) { m_RawList.PushBack(value); }
1630  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
1631  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
1632 
1633 private:
1634  VmaRawList<T> m_RawList;
1635 };
1636 
1637 #endif // #if VMA_USE_STL_LIST
1638 
1640 // class VmaMap
1641 
1642 #if VMA_USE_STL_UNORDERED_MAP
1643 
1644 #define VmaPair std::pair
1645 
1646 #define VMA_MAP_TYPE(KeyT, ValueT) \
1647  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
1648 
1649 #else // #if VMA_USE_STL_UNORDERED_MAP
1650 
// Minimal stand-in for std::pair, used when VMA_USE_STL_UNORDERED_MAP is 0.
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;   // Key, in VmaMap usage.
    T2 second;  // Value, in VmaMap usage.

    // Value-initializes both members.
    VmaPair() : first(), second() { }
    // Copy-initializes both members from the given values.
    VmaPair(const T1& f, const T2& s) : first(f), second(s) { }
};
1660 
/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
Despite the name, it is implemented as a vector of pairs kept sorted by key
(see insert/find below), not as a hash table.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    // Iterator is a raw pointer into the underlying vector.
    typedef PairType* iterator;

    VmaMap(VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    // Inserts pair at its sorted position (duplicates are not rejected).
    void insert(const PairType& pair);
    // Binary search by key; returns end() if not found.
    iterator find(const KeyT& key);
    // Removes the element pointed to by it; shifts later elements down.
    void erase(iterator it);

private:
    // Pairs kept sorted by `first` via VmaPairFirstLess.
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
1683 
1684 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
1685 
1686 template<typename FirstT, typename SecondT>
1687 struct VmaPairFirstLess
1688 {
1689  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
1690  {
1691  return lhs.first < rhs.first;
1692  }
1693  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
1694  {
1695  return lhs.first < rhsFirst;
1696  }
1697 };
1698 
1699 template<typename KeyT, typename ValueT>
1700 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
1701 {
1702  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
1703  m_Vector.data(),
1704  m_Vector.data() + m_Vector.size(),
1705  pair,
1706  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
1707  VectorInsert(m_Vector, indexToInsert, pair);
1708 }
1709 
1710 template<typename KeyT, typename ValueT>
1711 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
1712 {
1713  PairType* it = VmaBinaryFindFirstNotLess(
1714  m_Vector.data(),
1715  m_Vector.data() + m_Vector.size(),
1716  key,
1717  VmaPairFirstLess<KeyT, ValueT>());
1718  if((it != m_Vector.end()) && (it->first == key))
1719  return it;
1720  else
1721  return m_Vector.end();
1722 }
1723 
1724 template<typename KeyT, typename ValueT>
1725 void VmaMap<KeyT, ValueT>::erase(iterator it)
1726 {
1727  VectorRemove(m_Vector, it - m_Vector.begin());
1728 }
1729 
1730 #endif // #if VMA_USE_STL_UNORDERED_MAP
1731 
/*
Represents a region of VmaAllocation that is either assigned and returned as
allocated memory block or free.
*/
struct VmaSuballocation
{
    VkDeviceSize offset;       // Byte offset of this region within the VkDeviceMemory block.
    VkDeviceSize size;         // Size of the region in bytes.
    VmaSuballocationType type; // VMA_SUBALLOCATION_TYPE_FREE, or the resource kind bound here.
};

// List of suballocations ordered by offset (offsets are contiguous; see Validate()).
typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;

// Parameters of an allocation.
struct VmaAllocationRequest
{
    // The free suballocation the new allocation will be carved from.
    VmaSuballocationList::iterator freeSuballocationItem;
    // The final, aligned offset chosen for the new allocation.
    VkDeviceSize offset;
};
1751 
/* Single block of memory - VkDeviceMemory with all the data about its regions
assigned or free. */
class VmaAllocation
{
public:
    VkDeviceMemory m_hMemory;   // Underlying Vulkan memory handle (VK_NULL_HANDLE until Init()).
    VkDeviceSize m_Size;        // Total size of the block in bytes.
    uint32_t m_FreeCount;       // Number of free suballocations in m_Suballocations.
    VkDeviceSize m_SumFreeSize; // Sum of sizes of all free suballocations.
    // All suballocations (free and taken), ordered by offset.
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    VmaAllocation(VmaAllocator hAllocator);

    ~VmaAllocation()
    {
        // Destroy() must be called before destruction - it frees m_hMemory.
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(VkDeviceMemory newMemory, VkDeviceSize newSize);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    // Tries to find a place for suballocation with given parameters inside this allocation.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    bool CreateAllocationRequest(
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaAllocationRequest* pAllocationRequest);

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator freeSuballocItem,
        VkDeviceSize* pOffset) const;

    // Returns true if this allocation is empty - contains only single free suballocation.
    bool IsEmpty() const;

    // Makes actual allocation based on request. Request must already be checked
    // and valid.
    void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize);

    // Frees suballocation assigned to given memory region.
    void Free(const VkMappedMemoryRange* pMemory);

#if VMA_STATS_STRING_ENABLED
    // Dumps this block's suballocations to sb (JSON format).
    void PrintDetailedMap(class VmaStringBuilder& sb) const;
#endif

private:
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free. Merges it with adjacent free
    // suballocations if applicable.
    void FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
1831 
// Allocation for an object that has its own private VkDeviceMemory.
struct VmaOwnAllocation
{
    VkDeviceMemory m_hMemory;    // Dedicated memory handle.
    VkDeviceSize m_Size;         // Size of that memory in bytes.
    VmaSuballocationType m_Type; // Resource kind it was allocated for.
};

// Orders VmaOwnAllocation objects by memory handle value, so the per-type
// own-allocation vectors can be binary-searched by VkDeviceMemory.
struct VmaOwnAllocationMemoryHandleLess
{
    bool operator()(const VmaOwnAllocation& lhs, const VmaOwnAllocation& rhs) const
    {
        return lhs.m_hMemory < rhs.m_hMemory;
    }
    // Overload for searching by a bare handle.
    bool operator()(const VmaOwnAllocation& lhs, VkDeviceMemory rhsMem) const
    {
        return lhs.m_hMemory < rhsMem;
    }
};
1851 
/* Sequence of VmaAllocation. Represents memory blocks allocated for a specific
Vulkan memory type. */
struct VmaAllocationVector
{
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaAllocation*, VmaStlAllocator<VmaAllocation*> > m_Allocations;

    VmaAllocationVector(VmaAllocator hAllocator);
    ~VmaAllocationVector();

    bool IsEmpty() const { return m_Allocations.empty(); }

    // Tries to free memory from any of its Allocations.
    // Returns index of Allocation that the memory was freed from, or -1 if not found.
    // (Note: return type is size_t, so "-1" is really (size_t)-1.)
    size_t Free(const VkMappedMemoryRange* pMemory);

    // Performs single step in sorting m_Allocations. They may not be fully sorted
    // after this call.
    void IncrementallySortAllocations();

    // Adds statistics of this AllocationVector to pStats.
    void AddStats(VmaStats* pStats, uint32_t memTypeIndex, uint32_t memHeapIndex) const;

#if VMA_STATS_STRING_ENABLED
    // Dumps all blocks of this vector to sb (JSON format).
    void PrintDetailedMap(class VmaStringBuilder& sb) const;
#endif

private:
    VmaAllocator m_hAllocator;  // Owning allocator; provides callbacks and device.
};
1882 
// Main allocator object.
struct VmaAllocator_T
{
    VkDevice m_hDevice;
    // True if the user supplied non-null pAllocationCallbacks at creation.
    bool m_AllocationCallbacksSpecified;
    VkAllocationCallbacks m_AllocationCallbacks;
    // Preferred VkDeviceMemory block sizes for large and small heaps.
    VkDeviceSize m_PreferredLargeHeapBlockSize;
    VkDeviceSize m_PreferredSmallHeapBlockSize;

    VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    VkPhysicalDeviceMemoryProperties m_MemProps;

    // One vector of memory blocks per Vulkan memory type.
    VmaAllocationVector* m_pAllocations[VK_MAX_MEMORY_TYPES];
    /* There can be at most one allocation that is completely empty - a
    hysteresis to avoid pessimistic case of alternating creation and destruction
    of a VkDeviceMemory. */
    bool m_HasEmptyAllocation[VK_MAX_MEMORY_TYPES];
    VmaMutex m_AllocationsMutex[VK_MAX_MEMORY_TYPES];

    // Each vector is sorted by memory (handle value).
    typedef VmaVector< VmaOwnAllocation, VmaStlAllocator<VmaOwnAllocation> > OwnAllocationVectorType;
    OwnAllocationVectorType* m_pOwnAllocations[VK_MAX_MEMORY_TYPES];
    VmaMutex m_OwnAllocationsMutex[VK_MAX_MEMORY_TYPES];

    // Sorted by first (VkBuffer handle value).
    VMA_MAP_TYPE(VkBuffer, VkMappedMemoryRange) m_BufferToMemoryMap;
    VmaMutex m_BufferToMemoryMapMutex;
    // Sorted by first (VkImage handle value).
    VMA_MAP_TYPE(VkImage, VkMappedMemoryRange) m_ImageToMemoryMap;
    VmaMutex m_ImageToMemoryMapMutex;

    VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    ~VmaAllocator_T();

    // Returns user callbacks if specified, null otherwise (Vulkan then uses
    // its default allocator).
    const VkAllocationCallbacks* GetAllocationCallbacks() const
    {
        return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    }

    VkDeviceSize GetPreferredBlockSize(uint32_t memTypeIndex) const;

    // Device bufferImageGranularity limit, clamped up to the debug minimum.
    VkDeviceSize GetBufferImageGranularity() const
    {
        return VMA_MAX(
            VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,
            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    }

    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }

    // Main allocation function.
    VkResult AllocateMemory(
        const VkMemoryRequirements& vkMemReq,
        const VmaMemoryRequirements& vmaMemReq,
        VmaSuballocationType suballocType,
        VkMappedMemoryRange* pMemory,
        uint32_t* pMemoryTypeIndex);

    // Main deallocation function.
    void FreeMemory(const VkMappedMemoryRange* pMemory);

    // Fills pStats with statistics of this whole allocator.
    void CalculateStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    // Dumps detailed state of the whole allocator to sb (JSON format).
    void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    VkPhysicalDevice m_PhysicalDevice;

    // Allocation restricted to a single, already-chosen memory type index.
    VkResult AllocateMemoryOfType(
        const VkMemoryRequirements& vkMemReq,
        const VmaMemoryRequirements& vmaMemReq,
        uint32_t memTypeIndex,
        VmaSuballocationType suballocType,
        VkMappedMemoryRange* pMemory);

    // Allocates and registers new VkDeviceMemory specifically for single allocation.
    VkResult AllocateOwnMemory(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        VkMappedMemoryRange* pMemory);

    // Tries to free pMemory as Own Memory. Returns true if found and freed.
    bool FreeOwnMemory(const VkMappedMemoryRange* pMemory);
};
1971 
// Memory allocation #2 after VmaAllocator_T definition

// Forwards to the callbacks-taking VmaMalloc overload using this
// allocator's VkAllocationCallbacks.
static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
{
    return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
}

// Frees CPU memory previously obtained from VmaMalloc(hAllocator, ...).
static void VmaFree(VmaAllocator hAllocator, void* ptr)
{
    VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
}

// Allocates uninitialized, properly aligned storage for a single T.
// Does NOT run T's constructor.
template<typename T>
static T* VmaAllocate(VmaAllocator hAllocator)
{
    return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
}

// Allocates uninitialized storage for `count` objects of type T.
// NOTE(review): no overflow check on sizeof(T) * count - assumes callers
// pass small, trusted counts; confirm at call sites.
template<typename T>
static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
{
    return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
}
1996 
1997 template<typename T>
1998 static void vma_delete(VmaAllocator hAllocator, T* ptr)
1999 {
2000  if(ptr != VMA_NULL)
2001  {
2002  ptr->~T();
2003  VmaFree(hAllocator, ptr);
2004  }
2005 }
2006 
2007 template<typename T>
2008 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
2009 {
2010  if(ptr != VMA_NULL)
2011  {
2012  for(size_t i = count; i--; )
2013  ptr[i].~T();
2014  VmaFree(hAllocator, ptr);
2015  }
2016 }
2017 
2019 // VmaStringBuilder
2020 
2021 #if VMA_STATS_STRING_ENABLED
2022 
// Incremental string builder used to dump allocator state as JSON
// (see VMA_STATS_STRING_ENABLED).
class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    // Note: the buffer is NOT null-terminated; pair with GetLength().
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    void AddBool(bool b) { Add(b ? "true" : "false"); }
    void AddNull() { Add("null"); }
    // Appends pStr as a double-quoted, escaped JSON string.
    void AddString(const char* pStr);

private:
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
2042 
2043 void VmaStringBuilder::Add(const char* pStr)
2044 {
2045  const size_t strLen = strlen(pStr);
2046  if(strLen > 0)
2047  {
2048  const size_t oldCount = m_Data.size();
2049  m_Data.resize(oldCount + strLen);
2050  memcpy(m_Data.data() + oldCount, pStr, strLen);
2051  }
2052 }
2053 
// Appends the decimal representation of a 32-bit unsigned number.
void VmaStringBuilder::AddNumber(uint32_t num)
{
    // Up to 10 decimal digits for uint32_t, plus terminating null.
    char buf[11];
    VmaUint32ToStr(buf, sizeof(buf), num);
    Add(buf);
}

// Appends the decimal representation of a 64-bit unsigned number.
void VmaStringBuilder::AddNumber(uint64_t num)
{
    // Up to 20 decimal digits for uint64_t, plus terminating null.
    char buf[21];
    VmaUint64ToStr(buf, sizeof(buf), num);
    Add(buf);
}
2067 
2068 void VmaStringBuilder::AddString(const char* pStr)
2069 {
2070  Add('"');
2071  const size_t strLen = strlen(pStr);
2072  for(size_t i = 0; i < strLen; ++i)
2073  {
2074  char ch = pStr[i];
2075  if(ch == '\'')
2076  Add("\\\\");
2077  else if(ch == '"')
2078  Add("\\\"");
2079  else if(ch >= 32)
2080  Add(ch);
2081  else switch(ch)
2082  {
2083  case '\n':
2084  Add("\\n");
2085  break;
2086  case '\r':
2087  Add("\\r");
2088  break;
2089  case '\t':
2090  Add("\\t");
2091  break;
2092  default:
2093  VMA_ASSERT(0 && "Character not currently supported.");
2094  break;
2095  }
2096  }
2097  Add('"');
2098 }
2099 
2101 
// Correspond to values of enum VmaSuballocationType.
// Keep the order in sync with that enum - entries are indexed by its values.
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};
2111 
// Writes a single VmaStatInfo as one JSON object into sb.
static void VmaPrintStatInfo(VmaStringBuilder& sb, const VmaStatInfo& stat)
{
    sb.Add("{ \"Allocations\": ");
    sb.AddNumber(stat.AllocationCount);
    sb.Add(", \"Suballocations\": ");
    sb.AddNumber(stat.SuballocationCount);
    sb.Add(", \"UnusedRanges\": ");
    sb.AddNumber(stat.UnusedRangeCount);
    sb.Add(", \"UsedBytes\": ");
    sb.AddNumber(stat.UsedBytes);
    sb.Add(", \"UnusedBytes\": ");
    sb.AddNumber(stat.UnusedBytes);
    // Min/Avg/Max of suballocation sizes, as a nested object.
    sb.Add(", \"SuballocationSize\": { \"Min\": ");
    sb.AddNumber(stat.SuballocationSizeMin);
    sb.Add(", \"Avg\": ");
    sb.AddNumber(stat.SuballocationSizeAvg);
    sb.Add(", \"Max\": ");
    sb.AddNumber(stat.SuballocationSizeMax);
    // Min/Avg/Max of unused (free) range sizes, as a nested object.
    sb.Add(" }, \"UnusedRangeSize\": { \"Min\": ");
    sb.AddNumber(stat.UnusedRangeSizeMin);
    sb.Add(", \"Avg\": ");
    sb.AddNumber(stat.UnusedRangeSizeAvg);
    sb.Add(", \"Max\": ");
    sb.AddNumber(stat.UnusedRangeSizeMax);
    sb.Add(" } }");
}
2138 
2139 #endif // #if VMA_STATS_STRING_ENABLED
2140 
2141 struct VmaSuballocationItemSizeLess
2142 {
2143  bool operator()(
2144  const VmaSuballocationList::iterator lhs,
2145  const VmaSuballocationList::iterator rhs) const
2146  {
2147  return lhs->size < rhs->size;
2148  }
2149  bool operator()(
2150  const VmaSuballocationList::iterator lhs,
2151  VkDeviceSize rhsSize) const
2152  {
2153  return lhs->size < rhsSize;
2154  }
2155 };
2156 
// Creates the metadata in an uninitialized state - Init() must be called
// before use. Both containers use the allocator's CPU allocation callbacks.
VmaAllocation::VmaAllocation(VmaAllocator hAllocator) :
    m_hMemory(VK_NULL_HANDLE),
    m_Size(0),
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}
2166 
// Always call after construction. Adopts newMemory and resets the metadata
// to a single free suballocation covering the whole block.
void VmaAllocation::Init(VkDeviceMemory newMemory, VkDeviceSize newSize)
{
    // Must not already own a memory block.
    VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);

    m_hMemory = newMemory;
    m_Size = newSize;
    m_FreeCount = 1;
    m_SumFreeSize = newSize;

    m_Suballocations.clear();
    m_FreeSuballocationsBySize.clear();

    // One free suballocation spanning the entire block.
    VmaSuballocation suballoc = {};
    suballoc.offset = 0;
    suballoc.size = newSize;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;

    m_Suballocations.push_back(suballoc);
    // Register the newly pushed (last) element in the by-size index.
    VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    --suballocItem;
    m_FreeSuballocationsBySize.push_back(suballocItem);
}
2189 
// Always call before destruction. Frees the underlying VkDeviceMemory and
// clears the handle so the destructor's assert passes.
void VmaAllocation::Destroy(VmaAllocator allocator)
{
    VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    vkFreeMemory(allocator->m_hDevice, m_hMemory, allocator->GetAllocationCallbacks());
    m_hMemory = VK_NULL_HANDLE;
}
2196 
// Validates all internal invariants of this block's metadata:
// contiguous offsets, no adjacent free regions, consistent free counts/sums,
// and a correctly sorted m_FreeSuballocationsBySize index.
bool VmaAllocation::Validate() const
{
    if((m_hMemory == VK_NULL_HANDLE) ||
        (m_Size == 0) ||
        m_Suballocations.empty())
    {
        return false;
    }

    // Expected offset of new suballocation as calculated from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visited suballocation was free.
    bool prevFree = false;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        if(subAlloc.offset != calculatedOffset)
            return false;

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        if(prevFree && currFree)
            return false;
        prevFree = currFree;

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            // Only free regions at or above the threshold belong in the by-size index.
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
                ++freeSuballocationsToRegister;
        }

        calculatedOffset += subAlloc.size;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
        return false;

    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
            return false;
        // They must be sorted by size ascending.
        if(suballocItem->size < lastSize)
            return false;

        lastSize = suballocItem->size;
    }

    // Check if totals match calculated values.
    return
        (calculatedOffset == m_Size) &&
        (calculatedSumFreeSize == m_SumFreeSize) &&
        (calculatedFreeCount == m_FreeCount);
}
2271 
/*
How many suitable free suballocations to analyze before choosing the best one.
- Set to 1 to use First-Fit algorithm - first suitable free suballocation will
  be chosen.
- Set to UINT_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
  suballocations will be analyzed and the best one will be chosen.
- Any other value is also acceptable.
*/
2280 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
2281 
// Tries to find a place for a suballocation with the given size, alignment
// and type inside this block. On success fills *pAllocationRequest and
// returns true; returns false if nothing suitable fits.
bool VmaAllocation::CreateAllocationRequest(
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(pAllocationRequest != VMA_NULL);
    VMA_HEAVY_ASSERT(Validate());

    // There is not enough total free space in this allocation to fulfill the request: Early return.
    if(m_SumFreeSize < allocSize)
        return false;

    bool found = false;

    // Old brute-force algorithm, linearly searching suballocations.
    /*
    uint32_t suitableSuballocationsFound = 0;
    for(VmaSuballocationList::iterator suballocItem = suballocations.Front();
        suballocItem != VMA_NULL &&
            suitableSuballocationsFound < MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK;
        suballocItem = suballocItem->Next)
    {
        if(suballocItem->Value.type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            VkDeviceSize offset = 0, cost = 0;
            if(CheckAllocation(bufferImageGranularity, allocSize, allocAlignment, allocType, suballocItem, &offset, &cost))
            {
                ++suitableSuballocationsFound;
                if(cost < costLimit)
                {
                    pAllocationRequest->freeSuballocationItem = suballocItem;
                    pAllocationRequest->offset = offset;
                    pAllocationRequest->cost = cost;
                    if(cost == 0)
                        return true;
                    costLimit = cost;
                    betterSuballocationFound = true;
                }
            }
        }
    }
    */

    // New algorithm, efficiently searching freeSuballocationsBySize.
    const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    if(freeSuballocCount > 0)
    {
        if(VMA_BEST_FIT)
        {
            // Find first free suballocation with size not less than allocSize.
            VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
                m_FreeSuballocationsBySize.data(),
                m_FreeSuballocationsBySize.data() + freeSuballocCount,
                allocSize,
                VmaSuballocationItemSizeLess());
            size_t index = it - m_FreeSuballocationsBySize.data();
            // Scan upward: candidates may fail CheckAllocation due to
            // alignment or granularity even though their raw size fits.
            for(; index < freeSuballocCount; ++index)
            {
                VkDeviceSize offset = 0;
                const VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[index];
                if(CheckAllocation(bufferImageGranularity, allocSize, allocAlignment, allocType, suballocItem, &offset))
                {
                    pAllocationRequest->freeSuballocationItem = suballocItem;
                    pAllocationRequest->offset = offset;
                    return true;
                }
            }
        }
        else
        {
            // Search starting from biggest suballocations.
            for(size_t index = freeSuballocCount; index--; )
            {
                VkDeviceSize offset = 0;
                const VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[index];
                if(CheckAllocation(bufferImageGranularity, allocSize, allocAlignment, allocType, suballocItem, &offset))
                {
                    pAllocationRequest->freeSuballocationItem = suballocItem;
                    pAllocationRequest->offset = offset;
                    return true;
                }
            }
        }
    }

    return false;
}
2373 
// Checks whether an allocation of allocSize / allocAlignment / allocType fits
// inside the free suballocation pointed to by freeSuballocItem.
// On success returns true and writes the final, aligned offset to *pOffset.
// Honors VMA_DEBUG_MARGIN, VMA_DEBUG_ALIGNMENT and Vulkan bufferImageGranularity
// (linear and optimal resources must not share a "page" of that granularity).
bool VmaAllocation::CheckAllocation(
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator freeSuballocItem,
    VkDeviceSize* pOffset) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(freeSuballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    const VmaSuballocation& suballoc = *freeSuballocItem;
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

    // Size of this suballocation is too small for this request: Early return.
    if(suballoc.size < allocSize)
        return false;

    // Start from offset equal to beginning of this suballocation.
    *pOffset = suballoc.offset;

    // Apply VMA_DEBUG_MARGIN at the beginning (not needed before the very first suballocation).
    if((VMA_DEBUG_MARGIN > 0) && freeSuballocItem != m_Suballocations.cbegin())
        *pOffset += VMA_DEBUG_MARGIN;

    // Apply alignment.
    const VkDeviceSize alignment = VMA_MAX(allocAlignment, VMA_DEBUG_ALIGNMENT);
    *pOffset = VmaAlignUp(*pOffset, alignment);

    // Check previous suballocations for BufferImageGranularity conflicts.
    // Make bigger alignment if necessary.
    if(bufferImageGranularity > 1)
    {
        bool bufferImageGranularityConflict = false;
        VmaSuballocationList::const_iterator prevSuballocItem = freeSuballocItem;
        // Walk backward through all suballocations that share the same
        // bufferImageGranularity page as the candidate offset.
        while(prevSuballocItem != m_Suballocations.cbegin())
        {
            --prevSuballocItem;
            const VmaSuballocation& prevSuballoc = *prevSuballocItem;
            if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
            {
                if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                {
                    bufferImageGranularityConflict = true;
                    break;
                }
            }
            else
                // Already on previous page.
                break;
        }
        // Resolve the conflict by pushing the offset to the next granularity page.
        if(bufferImageGranularityConflict)
            *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
    }

    // Calculate padding at the beginning based on current offset.
    const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

    // Calculate required margin at the end if this is not last suballocation.
    VmaSuballocationList::const_iterator next = freeSuballocItem;
    ++next;
    const VkDeviceSize requiredEndMargin =
        (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;

    // Fail if requested size plus margin before and after is bigger than size of this suballocation.
    if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        return false;

    // Check next suballocations for BufferImageGranularity conflicts.
    // If conflict exists, allocation cannot be made here.
    if(bufferImageGranularity > 1)
    {
        VmaSuballocationList::const_iterator nextSuballocItem = freeSuballocItem;
        ++nextSuballocItem;
        while(nextSuballocItem != m_Suballocations.cend())
        {
            const VmaSuballocation& nextSuballoc = *nextSuballocItem;
            if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
            {
                if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    return false;
            }
            else
                // Already on next page.
                break;
            ++nextSuballocItem;
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
2468 
2469 bool VmaAllocation::IsEmpty() const
2470 {
2471  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
2472 }
2473 
// Carves an allocation of allocSize bytes and the given type out of the free
// suballocation chosen in 'request'. Leftover space before and/or after the
// new allocation becomes new free suballocations, and free-space bookkeeping
// (m_FreeCount, m_SumFreeSize, m_FreeSuballocationsBySize) is updated.
void VmaAllocation::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize)
{
    VMA_ASSERT(request.freeSuballocationItem != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.freeSuballocationItem;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used.
    UnregisterFreeSuballocation(request.freeSuballocationItem);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.freeSuballocationItem;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.freeSuballocationItem, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals: one free suballocation consumed, paddings may add up to two back.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
        ++m_FreeCount;
    if(paddingEnd > 0)
        ++m_FreeCount;
    m_SumFreeSize -= allocSize;
}
2531 
// Marks the given (used) suballocation as free and coalesces it with its free
// neighbors. The item itself is only registered in m_FreeSuballocationsBySize
// at the end (or via prevItem after a merge) — consistent with it having been
// a used suballocation that was never registered there.
void VmaAllocation::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
        mergeWithNext = true;

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
            mergeWithPrev = true;
    }

    // Merge forward first so that a prev-merge (below) absorbs the combined range.
    if(mergeWithNext)
    {
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        // prevItem's size changes, so it must be unregistered and re-registered.
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        RegisterFreeSuballocation(prevItem);
    }
    else
        RegisterFreeSuballocation(suballocItem);
}
2574 
2575 void VmaAllocation::Free(const VkMappedMemoryRange* pMemory)
2576 {
2577  // If suballocation to free has offset smaller than half of allocation size, search forward.
2578  // Otherwise search backward.
2579  const bool forwardDirection = pMemory->offset < (m_Size / 2);
2580  if(forwardDirection)
2581  {
2582  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
2583  suballocItem != m_Suballocations.end();
2584  ++suballocItem)
2585  {
2586  VmaSuballocation& suballoc = *suballocItem;
2587  if(suballoc.offset == pMemory->offset)
2588  {
2589  FreeSuballocation(suballocItem);
2590  VMA_HEAVY_ASSERT(Validate());
2591  return;
2592  }
2593  }
2594  VMA_ASSERT(0 && "Not found!");
2595  }
2596  else
2597  {
2598  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
2599  suballocItem != m_Suballocations.end();
2600  ++suballocItem)
2601  {
2602  VmaSuballocation& suballoc = *suballocItem;
2603  if(suballoc.offset == pMemory->offset)
2604  {
2605  FreeSuballocation(suballocItem);
2606  VMA_HEAVY_ASSERT(Validate());
2607  return;
2608  }
2609  }
2610  VMA_ASSERT(0 && "Not found!");
2611  }
2612 }
2613 
2614 #if VMA_STATS_STRING_ENABLED
2615 
2616 void VmaAllocation::PrintDetailedMap(class VmaStringBuilder& sb) const
2617 {
2618  sb.Add("{\n\t\t\t\"Bytes\": ");
2619  sb.AddNumber(m_Size);
2620  sb.Add(",\n\t\t\t\"FreeBytes\": ");
2621  sb.AddNumber(m_SumFreeSize);
2622  sb.Add(",\n\t\t\t\"Suballocations\": ");
2623  sb.AddNumber(m_Suballocations.size());
2624  sb.Add(",\n\t\t\t\"FreeSuballocations\": ");
2625  sb.AddNumber(m_FreeCount);
2626  sb.Add(",\n\t\t\t\"SuballocationList\": [");
2627 
2628  size_t i = 0;
2629  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
2630  suballocItem != m_Suballocations.cend();
2631  ++suballocItem, ++i)
2632  {
2633  if(i > 0)
2634  sb.Add(",\n\t\t\t\t{ \"Type\": ");
2635  else
2636  sb.Add("\n\t\t\t\t{ \"Type\": ");
2637  sb.AddString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
2638  sb.Add(", \"Size\": ");
2639  sb.AddNumber(suballocItem->size);
2640  sb.Add(", \"Offset\": ");
2641  sb.AddNumber(suballocItem->offset);
2642  sb.Add(" }");
2643  }
2644 
2645  sb.Add("\n\t\t\t]\n\t\t}");
2646 }
2647 
2648 #endif // #if VMA_STATS_STRING_ENABLED
2649 
2650 void VmaAllocation::MergeFreeWithNext(VmaSuballocationList::iterator item)
2651 {
2652  VMA_ASSERT(item != m_Suballocations.end());
2653  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
2654 
2655  VmaSuballocationList::iterator nextItem = item;
2656  ++nextItem;
2657  VMA_ASSERT(nextItem != m_Suballocations.end());
2658  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
2659 
2660  item->size += nextItem->size;
2661  --m_FreeCount;
2662  m_Suballocations.erase(nextItem);
2663 }
2664 
2665 void VmaAllocation::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
2666 {
2667  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
2668  VMA_ASSERT(item->size > 0);
2669 
2670  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
2671  {
2672  if(m_FreeSuballocationsBySize.empty())
2673  m_FreeSuballocationsBySize.push_back(item);
2674  else
2675  {
2676  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
2677  m_FreeSuballocationsBySize.data(),
2678  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
2679  item,
2680  VmaSuballocationItemSizeLess());
2681  size_t index = it - m_FreeSuballocationsBySize.data();
2682  VectorInsert(m_FreeSuballocationsBySize, index, item);
2683  }
2684  }
2685 }
2686 
// Removes 'item' from m_FreeSuballocationsBySize, if it was large enough to
// have been registered there. Binary search finds the first entry of equal
// size; a linear scan through that run of equal-sized entries locates the
// exact iterator (sizes are the sort key, so equal sizes are adjacent).
void VmaAllocation::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
{
    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(item->size > 0);

    if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    {
        VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
            m_FreeSuballocationsBySize.data(),
            m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
            item,
            VmaSuballocationItemSizeLess());
        for(size_t index = it - m_FreeSuballocationsBySize.data();
            index < m_FreeSuballocationsBySize.size();
            ++index)
        {
            if(m_FreeSuballocationsBySize[index] == item)
            {
                VectorRemove(m_FreeSuballocationsBySize, index);
                return;
            }
            // Once the size changes we have walked past the run of candidates.
            VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
        }
        VMA_ASSERT(0 && "Not found.");
    }
}
2713 
2714 static void InitStatInfo(VmaStatInfo& outInfo)
2715 {
2716  memset(&outInfo, 0, sizeof(outInfo));
2717  outInfo.SuballocationSizeMin = UINT64_MAX;
2718  outInfo.UnusedRangeSizeMin = UINT64_MAX;
2719 }
2720 
// Computes statistics of a single VmaAllocation block into outInfo.
// The Avg fields are left unset here; VmaPostprocessCalcStatInfo() derives
// them after accumulation. Min fields stay UINT64_MAX when no matching
// suballocation exists.
static void CalcAllocationStatInfo(VmaStatInfo& outInfo, const VmaAllocation& alloc)
{
    // One block = one "allocation" in these statistics.
    outInfo.AllocationCount = 1;

    const uint32_t rangeCount = (uint32_t)alloc.m_Suballocations.size();
    outInfo.SuballocationCount = rangeCount - alloc.m_FreeCount;
    outInfo.UnusedRangeCount = alloc.m_FreeCount;

    outInfo.UnusedBytes = alloc.m_SumFreeSize;
    outInfo.UsedBytes = alloc.m_Size - outInfo.UnusedBytes;

    outInfo.SuballocationSizeMin = UINT64_MAX;
    outInfo.SuballocationSizeMax = 0;
    outInfo.UnusedRangeSizeMin = UINT64_MAX;
    outInfo.UnusedRangeSizeMax = 0;

    // Fold each suballocation's size into the used or unused min/max.
    for(VmaSuballocationList::const_iterator suballocItem = alloc.m_Suballocations.cbegin();
        suballocItem != alloc.m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& suballoc = *suballocItem;
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            outInfo.SuballocationSizeMin = VMA_MIN(outInfo.SuballocationSizeMin, suballoc.size);
            outInfo.SuballocationSizeMax = VMA_MAX(outInfo.SuballocationSizeMax, suballoc.size);
        }
        else
        {
            outInfo.UnusedRangeSizeMin = VMA_MIN(outInfo.UnusedRangeSizeMin, suballoc.size);
            outInfo.UnusedRangeSizeMax = VMA_MAX(outInfo.UnusedRangeSizeMax, suballoc.size);
        }
    }
}
2754 
2755 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
2756 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
2757 {
2758  inoutInfo.AllocationCount += srcInfo.AllocationCount;
2759  inoutInfo.SuballocationCount += srcInfo.SuballocationCount;
2760  inoutInfo.UnusedRangeCount += srcInfo.UnusedRangeCount;
2761  inoutInfo.UsedBytes += srcInfo.UsedBytes;
2762  inoutInfo.UnusedBytes += srcInfo.UnusedBytes;
2763  inoutInfo.SuballocationSizeMin = VMA_MIN(inoutInfo.SuballocationSizeMin, srcInfo.SuballocationSizeMin);
2764  inoutInfo.SuballocationSizeMax = VMA_MAX(inoutInfo.SuballocationSizeMax, srcInfo.SuballocationSizeMax);
2765  inoutInfo.UnusedRangeSizeMin = VMA_MIN(inoutInfo.UnusedRangeSizeMin, srcInfo.UnusedRangeSizeMin);
2766  inoutInfo.UnusedRangeSizeMax = VMA_MAX(inoutInfo.UnusedRangeSizeMax, srcInfo.UnusedRangeSizeMax);
2767 }
2768 
2769 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
2770 {
2771  inoutInfo.SuballocationSizeAvg = (inoutInfo.SuballocationCount > 0) ?
2772  VmaRoundDiv<VkDeviceSize>(inoutInfo.UsedBytes, inoutInfo.SuballocationCount) : 0;
2773  inoutInfo.UnusedRangeSizeAvg = (inoutInfo.UnusedRangeCount > 0) ?
2774  VmaRoundDiv<VkDeviceSize>(inoutInfo.UnusedBytes, inoutInfo.UnusedRangeCount) : 0;
2775 }
2776 
// Constructs an empty vector of allocation blocks. Internal CPU allocations
// of the vector go through the owning allocator's allocation callbacks.
VmaAllocationVector::VmaAllocationVector(VmaAllocator hAllocator) :
    m_hAllocator(hAllocator),
    m_Allocations(VmaStlAllocator<VmaAllocation*>(hAllocator->GetAllocationCallbacks()))
{
}
2782 
2783 VmaAllocationVector::~VmaAllocationVector()
2784 {
2785  for(size_t i = m_Allocations.size(); i--; )
2786  {
2787  m_Allocations[i]->Destroy(m_hAllocator);
2788  vma_delete(m_hAllocator, m_Allocations[i]);
2789  }
2790 }
2791 
2792 size_t VmaAllocationVector::Free(const VkMappedMemoryRange* pMemory)
2793 {
2794  for(uint32_t allocIndex = 0; allocIndex < m_Allocations.size(); ++allocIndex)
2795  {
2796  VmaAllocation* const pAlloc = m_Allocations[allocIndex];
2797  VMA_ASSERT(pAlloc);
2798  if(pAlloc->m_hMemory == pMemory->memory)
2799  {
2800  pAlloc->Free(pMemory);
2801  VMA_HEAVY_ASSERT(pAlloc->Validate());
2802  return allocIndex;
2803  }
2804  }
2805 
2806  return (size_t)-1;
2807 }
2808 
2809 void VmaAllocationVector::IncrementallySortAllocations()
2810 {
2811  // Bubble sort only until first swap.
2812  for(size_t i = 1; i < m_Allocations.size(); ++i)
2813  {
2814  if(m_Allocations[i - 1]->m_SumFreeSize > m_Allocations[i]->m_SumFreeSize)
2815  {
2816  VMA_SWAP(m_Allocations[i - 1], m_Allocations[i]);
2817  return;
2818  }
2819  }
2820 }
2821 
2822 #if VMA_STATS_STRING_ENABLED
2823 
2824 void VmaAllocationVector::PrintDetailedMap(class VmaStringBuilder& sb) const
2825 {
2826  for(size_t i = 0; i < m_Allocations.size(); ++i)
2827  {
2828  if(i > 0)
2829  sb.Add(",\n\t\t");
2830  else
2831  sb.Add("\n\t\t");
2832  m_Allocations[i]->PrintDetailedMap(sb);
2833  }
2834 }
2835 
2836 #endif // #if VMA_STATS_STRING_ENABLED
2837 
2838 void VmaAllocationVector::AddStats(VmaStats* pStats, uint32_t memTypeIndex, uint32_t memHeapIndex) const
2839 {
2840  for(uint32_t allocIndex = 0; allocIndex < m_Allocations.size(); ++allocIndex)
2841  {
2842  const VmaAllocation* const pAlloc = m_Allocations[allocIndex];
2843  VMA_ASSERT(pAlloc);
2844  VMA_HEAVY_ASSERT(pAlloc->Validate());
2845  VmaStatInfo allocationStatInfo;
2846  CalcAllocationStatInfo(allocationStatInfo, *pAlloc);
2847  VmaAddStatInfo(pStats->total, allocationStatInfo);
2848  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
2849  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
2850  }
2851 }
2852 
2854 // VmaAllocator_T
2855 
// Initializes the allocator: caches physical-device and memory properties,
// resolves preferred block sizes (library defaults when the user passed 0),
// and creates the per-memory-type block vectors and own-allocation vectors.
VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    m_PhysicalDevice(pCreateInfo->physicalDevice),
    m_hDevice(pCreateInfo->device),
    m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
        *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    m_PreferredLargeHeapBlockSize(0),
    m_PreferredSmallHeapBlockSize(0),
    m_BufferToMemoryMap(VmaStlAllocator< VmaPair<VkBuffer, VkMappedMemoryRange> >(pCreateInfo->pAllocationCallbacks)),
    m_ImageToMemoryMap(VmaStlAllocator< VmaPair<VkImage, VkMappedMemoryRange> >(pCreateInfo->pAllocationCallbacks))
{
    VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);

    memset(&m_MemProps, 0, sizeof(m_MemProps));
    memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));

    // Zero the per-memory-type arrays before filling them below.
    memset(&m_pAllocations, 0, sizeof(m_pAllocations));
    memset(&m_HasEmptyAllocation, 0, sizeof(m_HasEmptyAllocation));
    memset(&m_pOwnAllocations, 0, sizeof(m_pOwnAllocations));

    // 0 from the user means "use the library default".
    m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
        pCreateInfo->preferredLargeHeapBlockSize : VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE;
    m_PreferredSmallHeapBlockSize = (pCreateInfo->preferredSmallHeapBlockSize != 0) ?
        pCreateInfo->preferredSmallHeapBlockSize : VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE;

    vkGetPhysicalDeviceProperties(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    vkGetPhysicalDeviceMemoryProperties(m_PhysicalDevice, &m_MemProps);

    for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    {
        m_pAllocations[i] = vma_new(this, VmaAllocationVector)(this);
        m_pOwnAllocations[i] = vma_new(this, OwnAllocationVectorType)(VmaStlAllocator<VmaOwnAllocation>(GetAllocationCallbacks()));
    }
}
2890 
// Destroys all Vulkan objects still tracked by the allocator: images and
// buffers the user never destroyed, every own (dedicated) memory block, and
// finally the per-memory-type vectors themselves (in reverse order).
VmaAllocator_T::~VmaAllocator_T()
{
    for(VMA_MAP_TYPE(VkImage, VkMappedMemoryRange)::iterator it = m_ImageToMemoryMap.begin();
        it != m_ImageToMemoryMap.end();
        ++it)
    {
        vkDestroyImage(m_hDevice, it->first, GetAllocationCallbacks());
    }

    for(VMA_MAP_TYPE(VkBuffer, VkMappedMemoryRange)::iterator it = m_BufferToMemoryMap.begin();
        it != m_BufferToMemoryMap.end();
        ++it)
    {
        vkDestroyBuffer(m_hDevice, it->first, GetAllocationCallbacks());
    }

    // Free every dedicated ("own") VkDeviceMemory block.
    for(uint32_t typeIndex = 0; typeIndex < GetMemoryTypeCount(); ++typeIndex)
    {
        OwnAllocationVectorType* pOwnAllocations = m_pOwnAllocations[typeIndex];
        VMA_ASSERT(pOwnAllocations);
        for(size_t allocationIndex = 0; allocationIndex < pOwnAllocations->size(); ++allocationIndex)
        {
            const VmaOwnAllocation& ownAlloc = (*pOwnAllocations)[allocationIndex];
            vkFreeMemory(m_hDevice, ownAlloc.m_hMemory, GetAllocationCallbacks());
        }
    }

    for(size_t i = GetMemoryTypeCount(); i--; )
    {
        vma_delete(this, m_pAllocations[i]);
        vma_delete(this, m_pOwnAllocations[i]);
    }
}
2924 
2925 VkDeviceSize VmaAllocator_T::GetPreferredBlockSize(uint32_t memTypeIndex) const
2926 {
2927  VkDeviceSize heapSize = m_MemProps.memoryHeaps[m_MemProps.memoryTypes[memTypeIndex].heapIndex].size;
2928  return (heapSize <= VMA_SMALL_HEAP_MAX_SIZE) ?
2929  m_PreferredSmallHeapBlockSize : m_PreferredLargeHeapBlockSize;
2930 }
2931 
// Allocates pMemory from the given Vulkan memory type, either as a
// suballocation inside an existing/new block or as a dedicated ("own")
// allocation. Strategy: own memory when requested or when the size exceeds
// half the preferred block size; otherwise try existing blocks, then create a
// new block (retrying with half and quarter size on failure), and finally fall
// back to own memory.
VkResult VmaAllocator_T::AllocateMemoryOfType(
    const VkMemoryRequirements& vkMemReq,
    const VmaMemoryRequirements& vmaMemReq,
    uint32_t memTypeIndex,
    VmaSuballocationType suballocType,
    VkMappedMemoryRange* pMemory)
{
    VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);

    pMemory->sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
    pMemory->pNext = VMA_NULL;
    pMemory->size = vkMemReq.size;

    const VkDeviceSize preferredBlockSize = GetPreferredBlockSize(memTypeIndex);
    // Heuristics: Allocate own memory if requested size is greater than half of preferred block size.
    const bool ownMemory =
        vmaMemReq.ownMemory ||
        VMA_DEBUG_ALWAYS_OWN_MEMORY ||
        ((vmaMemReq.neverAllocate == false) && (vkMemReq.size > preferredBlockSize / 2));

    if(ownMemory)
    {
        // Own memory requires a fresh vkAllocateMemory, which neverAllocate forbids.
        if(vmaMemReq.neverAllocate)
            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
        else
            return AllocateOwnMemory(vkMemReq.size, suballocType, memTypeIndex, pMemory);
    }
    else
    {
        VmaMutexLock lock(m_AllocationsMutex[memTypeIndex]);
        VmaAllocationVector* const allocationVector = m_pAllocations[memTypeIndex];
        VMA_ASSERT(allocationVector);

        // 1. Search existing allocations.
        // Forward order - prefer blocks with smallest amount of free space.
        for(size_t allocIndex = 0; allocIndex < allocationVector->m_Allocations.size(); ++allocIndex )
        {
            VmaAllocation* const pAlloc = allocationVector->m_Allocations[allocIndex];
            VMA_ASSERT(pAlloc);
            VmaAllocationRequest allocRequest = {};
            // Check if can allocate from pAlloc.
            if(pAlloc->CreateAllocationRequest(
                GetBufferImageGranularity(),
                vkMemReq.size,
                vkMemReq.alignment,
                suballocType,
                &allocRequest))
            {
                // We no longer have an empty Allocation.
                if(pAlloc->IsEmpty())
                    m_HasEmptyAllocation[memTypeIndex] = false;
                // Allocate from this pAlloc.
                pAlloc->Alloc(allocRequest, suballocType, vkMemReq.size);
                // Return VkDeviceMemory and offset (size already filled above).
                pMemory->memory = pAlloc->m_hMemory;
                pMemory->offset = allocRequest.offset;
                VMA_HEAVY_ASSERT(pAlloc->Validate());
                VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)allocIndex);
                return VK_SUCCESS;
            }
        }

        // 2. Create new Allocation.
        if(vmaMemReq.neverAllocate)
        {
            VMA_DEBUG_LOG(" FAILED due to VmaMemoryRequirements::neverAllocate");
            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
        else
        {
            // Start with full preferredBlockSize.
            VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
            allocInfo.memoryTypeIndex = memTypeIndex;
            allocInfo.allocationSize = preferredBlockSize;
            VkDeviceMemory mem = VK_NULL_HANDLE;
            VkResult res = vkAllocateMemory(m_hDevice, &allocInfo, GetAllocationCallbacks(), &mem);
            if(res < 0)
            {
                // 3. Try half the size.
                allocInfo.allocationSize /= 2;
                if(allocInfo.allocationSize >= vkMemReq.size)
                {
                    res = vkAllocateMemory(m_hDevice, &allocInfo, GetAllocationCallbacks(), &mem);
                    if(res < 0)
                    {
                        // 4. Try quarter the size.
                        allocInfo.allocationSize /= 2;
                        if(allocInfo.allocationSize >= vkMemReq.size)
                        {
                            res = vkAllocateMemory(m_hDevice, &allocInfo, GetAllocationCallbacks(), &mem);
                        }
                    }
                }
            }
            if(res < 0)
            {
                // 5. Try OwnAlloc.
                res = AllocateOwnMemory(vkMemReq.size, suballocType, memTypeIndex, pMemory);
                if(res == VK_SUCCESS)
                {
                    // Succeeded: AllocateOwnMemory function already filled pMemory, nothing more to do here.
                    VMA_DEBUG_LOG(" Allocated as OwnMemory");
                    return VK_SUCCESS;
                }
                else
                {
                    // Everything failed: Return error code.
                    VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
                    return res;
                }
            }

            // New VkDeviceMemory successfully created. Create new Allocation for it.
            VmaAllocation* const pAlloc = vma_new(this, VmaAllocation)(this);
            pAlloc->Init(mem, allocInfo.allocationSize);

            allocationVector->m_Allocations.push_back(pAlloc);

            // Allocate from pAlloc. Because it is empty, allocRequest can be trivially filled.
            VmaAllocationRequest allocRequest = {};
            allocRequest.freeSuballocationItem = pAlloc->m_Suballocations.begin();
            allocRequest.offset = 0;
            pAlloc->Alloc(allocRequest, suballocType, vkMemReq.size);
            pMemory->memory = mem;
            pMemory->offset = allocRequest.offset;
            VMA_HEAVY_ASSERT(pAlloc->Validate());
            VMA_DEBUG_LOG(" Created new allocation Size=%llu", allocInfo.allocationSize);
            return VK_SUCCESS;
        }
    }
}
3063 
// Allocates a dedicated VkDeviceMemory block of exactly 'size' bytes for a
// single resource, registers it in m_pOwnAllocations[memTypeIndex] (kept
// sorted by memory handle for binary search in FreeOwnMemory), and fills
// *pMemory with the allocation's parameters (offset is always 0).
VkResult VmaAllocator_T::AllocateOwnMemory(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    VkMappedMemoryRange* pMemory)
{
    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;

    // Allocate VkDeviceMemory.
    VmaOwnAllocation ownAlloc = {};
    ownAlloc.m_Size = size;
    ownAlloc.m_Type = suballocType;
    VkResult res = vkAllocateMemory(m_hDevice, &allocInfo, GetAllocationCallbacks(), &ownAlloc.m_hMemory);
    if(res < 0)
    {
        VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
        return res;
    }

    // Register it in m_pOwnAllocations.
    VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex]);
    OwnAllocationVectorType* ownAllocations = m_pOwnAllocations[memTypeIndex];
    VMA_ASSERT(ownAllocations);
    VmaOwnAllocation* const pOwnAllocationsBeg = ownAllocations->data();
    VmaOwnAllocation* const pOwnAllocationsEnd = pOwnAllocationsBeg + ownAllocations->size();
    // Binary search for the position that keeps the vector sorted by handle.
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        pOwnAllocationsBeg,
        pOwnAllocationsEnd,
        ownAlloc,
        VmaOwnAllocationMemoryHandleLess()) - pOwnAllocationsBeg;
    VectorInsert(*ownAllocations, indexToInsert, ownAlloc);

    // Return parameters of the allocation.
    pMemory->sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
    pMemory->pNext = VMA_NULL;
    pMemory->memory = ownAlloc.m_hMemory;
    pMemory->offset = 0;
    pMemory->size = size;

    VMA_DEBUG_LOG(" Allocated OwnMemory MemoryTypeIndex=#%u", memTypeIndex);

    return VK_SUCCESS;
}
3109 
// Allocates memory satisfying both Vulkan requirements (vkMemReq) and VMA
// requirements (vmaMemReq). Finds the best memory type first; if allocation
// from it fails, removes it from the candidate bit mask and retries with the
// next compatible type until the mask is exhausted. Optionally reports the
// chosen memory type index via pMemoryTypeIndex.
VkResult VmaAllocator_T::AllocateMemory(
    const VkMemoryRequirements& vkMemReq,
    const VmaMemoryRequirements& vmaMemReq,
    VmaSuballocationType suballocType,
    VkMappedMemoryRange* pMemory,
    uint32_t* pMemoryTypeIndex)
{
    if(vmaMemReq.ownMemory && vmaMemReq.neverAllocate)
    {
        VMA_ASSERT(0 && "Specifying VmaMemoryRequirements::ownMemory && VmaMemoryRequirements::neverAllocate makes no sense.");
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }

    // Bit mask of memory Vulkan types acceptable for this allocation.
    uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    uint32_t memTypeIndex = UINT_MAX;
    VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &vmaMemReq, &memTypeIndex);
    if(res == VK_SUCCESS)
    {
        res = AllocateMemoryOfType(vkMemReq, vmaMemReq, memTypeIndex, suballocType, pMemory);
        // Succeeded on first try.
        if(res == VK_SUCCESS)
        {
            if(pMemoryTypeIndex != VMA_NULL)
                *pMemoryTypeIndex = memTypeIndex;
            return res;
        }
        // Allocation from this memory type failed. Try other compatible memory types.
        else
        {
            // Terminates: each iteration clears one bit from memoryTypeBits.
            for(;;)
            {
                // Remove old memTypeIndex from list of possibilities.
                memoryTypeBits &= ~(1u << memTypeIndex);
                // Find alternative memTypeIndex.
                res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &vmaMemReq, &memTypeIndex);
                if(res == VK_SUCCESS)
                {
                    res = AllocateMemoryOfType(vkMemReq, vmaMemReq, memTypeIndex, suballocType, pMemory);
                    // Allocation from this alternative memory type succeeded.
                    if(res == VK_SUCCESS)
                    {
                        if(pMemoryTypeIndex != VMA_NULL)
                            *pMemoryTypeIndex = memTypeIndex;
                        return res;
                    }
                    // else: Allocation from this memory type failed. Try next one - next loop iteration.
                }
                // No other matching memory type index could be found.
                else
                    // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
                    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
            }
        }
    }
    // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    else
        return res;
}
3169 
// Returns pMemory to the allocator. Searches every memory type's block vector
// first; if not found there, tries the own-memory lists. A block that becomes
// empty is kept around (at most one empty block per memory type, tracked by
// m_HasEmptyAllocation) to avoid churning VkDeviceMemory; a second empty block
// is removed under the lock but destroyed afterwards, outside it.
void VmaAllocator_T::FreeMemory(const VkMappedMemoryRange* pMemory)
{
    uint32_t memTypeIndex = 0;
    bool found = false;
    VmaAllocation* allocationToDelete = VMA_NULL;
    // Check all memory types because we don't know which one does pMemory come from.
    for(; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaMutexLock lock(m_AllocationsMutex[memTypeIndex]);
        VmaAllocationVector* const pAllocationVector = m_pAllocations[memTypeIndex];
        VMA_ASSERT(pAllocationVector);
        // Try to free pMemory from pAllocationVector.
        const size_t allocIndex = pAllocationVector->Free(pMemory);
        if(allocIndex != (size_t)-1)
        {
            VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
            found = true;
            VmaAllocation* const pAlloc = pAllocationVector->m_Allocations[allocIndex];
            VMA_ASSERT(pAlloc);
            // pAlloc became empty after this deallocation.
            if(pAlloc->IsEmpty())
            {
                // Already has empty Allocation. We don't want to have two, so delete this one.
                if(m_HasEmptyAllocation[memTypeIndex])
                {
                    allocationToDelete = pAlloc;
                    VectorRemove(pAllocationVector->m_Allocations, allocIndex);
                    break;
                }
                // We now have first empty Allocation.
                else
                    m_HasEmptyAllocation[memTypeIndex] = true;
            }
            // Must be called after allocIndex is used, because later it may become invalid!
            pAllocationVector->IncrementallySortAllocations();
            break;
        }
    }
    if(found)
    {
        // Destruction of a free Allocation. Deferred until this point, outside of mutex
        // lock, for performance reason.
        if(allocationToDelete != VMA_NULL)
        {
            VMA_DEBUG_LOG(" Deleted empty allocation");
            allocationToDelete->Destroy(this);
            vma_delete(this, allocationToDelete);
        }
        return;
    }

    // pMemory not found in allocations. Try free it as Own Memory.
    if(FreeOwnMemory(pMemory))
        return;

    // pMemory not found as Own Memory either.
    VMA_ASSERT(0 && "Not found. Trying to free memory not allocated using this allocator (or some other bug).");
}
3228 
// Fills *pStats with current statistics: initializes all buckets, accumulates
// every allocation block per memory type (mapped to its heap), then derives
// the average fields. Note: only block allocations (m_pAllocations) are
// scanned here; own allocations (m_pOwnAllocations) are not included.
void VmaAllocator_T::CalculateStats(VmaStats* pStats)
{
    InitStatInfo(pStats->total);
    for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
        InitStatInfo(pStats->memoryType[i]);
    for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
        InitStatInfo(pStats->memoryHeap[i]);

    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaMutexLock allocationsLock(m_AllocationsMutex[memTypeIndex]);
        const uint32_t heapIndex = m_MemProps.memoryTypes[memTypeIndex].heapIndex;
        const VmaAllocationVector* const allocVector = m_pAllocations[memTypeIndex];
        VMA_ASSERT(allocVector);
        allocVector->AddStats(pStats, memTypeIndex, heapIndex);
    }

    // Postprocess only the buckets actually in use (memory type/heap counts).
    VmaPostprocessCalcStatInfo(pStats->total);
    for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
        VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
        VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
}
3252 
3253 bool VmaAllocator_T::FreeOwnMemory(const VkMappedMemoryRange* pMemory)
3254 {
3255  VkDeviceMemory vkMemory = VK_NULL_HANDLE;
3256 
3257  // Check all memory types because we don't know which one does pMemory come from.
3258  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
3259  {
3260  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex]);
3261  OwnAllocationVectorType* const pOwnAllocations = m_pOwnAllocations[memTypeIndex];
3262  VMA_ASSERT(pOwnAllocations);
3263  VmaOwnAllocation* const pOwnAllocationsBeg = pOwnAllocations->data();
3264  VmaOwnAllocation* const pOwnAllocationsEnd = pOwnAllocationsBeg + pOwnAllocations->size();
3265  VmaOwnAllocation* const pOwnAllocationIt = VmaBinaryFindFirstNotLess(
3266  pOwnAllocationsBeg,
3267  pOwnAllocationsEnd,
3268  pMemory->memory,
3269  VmaOwnAllocationMemoryHandleLess());
3270  if((pOwnAllocationIt != pOwnAllocationsEnd) &&
3271  (pOwnAllocationIt->m_hMemory == pMemory->memory))
3272  {
3273  VMA_ASSERT(pMemory->size == pOwnAllocationIt->m_Size && pMemory->offset == 0);
3274  vkMemory = pOwnAllocationIt->m_hMemory;
3275  const size_t ownAllocationIndex = pOwnAllocationIt - pOwnAllocationsBeg;
3276  VectorRemove(*pOwnAllocations, ownAllocationIndex);
3277  VMA_DEBUG_LOG(" Freed OwnMemory MemoryTypeIndex=%u", memTypeIndex);
3278  break;
3279  }
3280  }
3281 
3282  // Found. Free VkDeviceMemory deferred until this point, outside of mutex lock,
3283  // for performance reason.
3284  if(vkMemory != VK_NULL_HANDLE)
3285  {
3286  vkFreeMemory(m_hDevice, vkMemory, GetAllocationCallbacks());
3287  return true;
3288  }
3289  else
3290  return false;
3291 }
3292 
3293 #if VMA_STATS_STRING_ENABLED
3294 
3295 void VmaAllocator_T::PrintDetailedMap(VmaStringBuilder& sb)
3296 {
3297  bool ownAllocationsStarted = false;
3298  for(size_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
3299  {
3300  VmaMutexLock ownAllocationsLock(m_OwnAllocationsMutex[memTypeIndex]);
3301  OwnAllocationVectorType* const pOwnAllocVector = m_pOwnAllocations[memTypeIndex];
3302  VMA_ASSERT(pOwnAllocVector);
3303  if(pOwnAllocVector->empty() == false)
3304  {
3305  if(ownAllocationsStarted)
3306  sb.Add(",\n\t\"Type ");
3307  else
3308  {
3309  sb.Add(",\n\"OwnAllocations\": {\n\t\"Type ");
3310  ownAllocationsStarted = true;
3311  }
3312  sb.AddNumber(memTypeIndex);
3313  sb.Add("\": [");
3314 
3315  for(size_t i = 0; i < pOwnAllocVector->size(); ++i)
3316  {
3317  const VmaOwnAllocation& ownAlloc = (*pOwnAllocVector)[i];
3318  if(i > 0)
3319  sb.Add(",\n\t\t{ \"Size\": ");
3320  else
3321  sb.Add("\n\t\t{ \"Size\": ");
3322  sb.AddNumber(ownAlloc.m_Size);
3323  sb.Add(", \"Type\": ");
3324  sb.AddString(VMA_SUBALLOCATION_TYPE_NAMES[ownAlloc.m_Type]);
3325  sb.Add(" }");
3326  }
3327 
3328  sb.Add("\n\t]");
3329  }
3330  }
3331  if(ownAllocationsStarted)
3332  sb.Add("\n}");
3333 
3334  {
3335  bool allocationsStarted = false;
3336  for(size_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
3337  {
3338  VmaMutexLock globalAllocationsLock(m_AllocationsMutex[memTypeIndex]);
3339  if(m_pAllocations[memTypeIndex]->IsEmpty() == false)
3340  {
3341  if(allocationsStarted)
3342  sb.Add(",\n\t\"Type ");
3343  else
3344  {
3345  sb.Add(",\n\"Allocations\": {\n\t\"Type ");
3346  allocationsStarted = true;
3347  }
3348  sb.AddNumber(memTypeIndex);
3349  sb.Add("\": [");
3350 
3351  m_pAllocations[memTypeIndex]->PrintDetailedMap(sb);
3352 
3353  sb.Add("\n\t]");
3354  }
3355  }
3356  if(allocationsStarted)
3357  sb.Add("\n}");
3358  }
3359 }
3360 
3361 #endif // #if VMA_STATS_STRING_ENABLED
3362 
3363 static VkResult AllocateMemoryForImage(
3364  VmaAllocator allocator,
3365  VkImage image,
3366  const VmaMemoryRequirements* pMemoryRequirements,
3367  VmaSuballocationType suballocType,
3368  VkMappedMemoryRange* pMemory,
3369  uint32_t* pMemoryTypeIndex)
3370 {
3371  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pMemoryRequirements && pMemory);
3372 
3373  VkMemoryRequirements vkMemReq = {};
3374  vkGetImageMemoryRequirements(allocator->m_hDevice, image, &vkMemReq);
3375 
3376  return allocator->AllocateMemory(
3377  vkMemReq,
3378  *pMemoryRequirements,
3379  suballocType,
3380  pMemory,
3381  pMemoryTypeIndex);
3382 }
3383 
3385 // Public interface
3386 
3387 VkResult vmaCreateAllocator(
3388  const VmaAllocatorCreateInfo* pCreateInfo,
3389  VmaAllocator* pAllocator)
3390 {
3391  VMA_ASSERT(pCreateInfo && pAllocator);
3392  VMA_DEBUG_LOG("vmaCreateAllocator");
3393  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
3394  return VK_SUCCESS;
3395 }
3396 
3397 void vmaDestroyAllocator(
3398  VmaAllocator allocator)
3399 {
3400  if(allocator != VK_NULL_HANDLE)
3401  {
3402  VMA_DEBUG_LOG("vmaDestroyAllocator");
3403  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
3404  vma_delete(&allocationCallbacks, allocator);
3405  }
3406 }
3407 
3409  VmaAllocator allocator,
3410  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
3411 {
3412  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
3413  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
3414 }
3415 
3417  VmaAllocator allocator,
3418  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
3419 {
3420  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
3421  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
3422 }
3423 
3425  VmaAllocator allocator,
3426  uint32_t memoryTypeIndex,
3427  VkMemoryPropertyFlags* pFlags)
3428 {
3429  VMA_ASSERT(allocator && pFlags);
3430  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
3431  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
3432 }
3433 
3434 void vmaCalculateStats(
3435  VmaAllocator allocator,
3436  VmaStats* pStats)
3437 {
3438  VMA_ASSERT(allocator && pStats);
3439  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3440  allocator->CalculateStats(pStats);
3441 }
3442 
3443 #if VMA_STATS_STRING_ENABLED
3444 
3445 void vmaBuildStatsString(
3446  VmaAllocator allocator,
3447  char** ppStatsString,
3448  VkBool32 detailedMap)
3449 {
3450  VMA_ASSERT(allocator && ppStatsString);
3451  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3452 
3453  VmaStringBuilder sb(allocator);
3454  {
3455  VmaStats stats;
3456  allocator->CalculateStats(&stats);
3457 
3458  sb.Add("{\n\"Total\": ");
3459  VmaPrintStatInfo(sb, stats.total);
3460 
3461  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
3462  {
3463  sb.Add(",\n\"Heap ");
3464  sb.AddNumber(heapIndex);
3465  sb.Add("\": {\n\t\"Size\": ");
3466  sb.AddNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
3467  sb.Add(",\n\t\"Flags\": ");
3468  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
3469  sb.AddString("DEVICE_LOCAL");
3470  else
3471  sb.AddString("");
3472  if(stats.memoryHeap[heapIndex].AllocationCount > 0)
3473  {
3474  sb.Add(",\n\t\"Stats:\": ");
3475  VmaPrintStatInfo(sb, stats.memoryHeap[heapIndex]);
3476  }
3477 
3478  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
3479  {
3480  if(allocator->m_MemProps.memoryTypes[typeIndex].heapIndex == heapIndex)
3481  {
3482  sb.Add(",\n\t\"Type ");
3483  sb.AddNumber(typeIndex);
3484  sb.Add("\": {\n\t\t\"Flags\": \"");
3485  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
3486  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
3487  sb.Add(" DEVICE_LOCAL");
3488  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
3489  sb.Add(" HOST_VISIBLE");
3490  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
3491  sb.Add(" HOST_COHERENT");
3492  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
3493  sb.Add(" HOST_CACHED");
3494  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
3495  sb.Add(" LAZILY_ALLOCATED");
3496  sb.Add("\"");
3497  if(stats.memoryType[typeIndex].AllocationCount > 0)
3498  {
3499  sb.Add(",\n\t\t\"Stats\": ");
3500  VmaPrintStatInfo(sb, stats.memoryType[typeIndex]);
3501  }
3502  sb.Add("\n\t}");
3503  }
3504  }
3505  sb.Add("\n}");
3506  }
3507  if(detailedMap == VK_TRUE)
3508  allocator->PrintDetailedMap(sb);
3509  sb.Add("\n}\n");
3510  }
3511 
3512  const size_t len = sb.GetLength();
3513  char* const pChars = vma_new_array(allocator, char, len + 1);
3514  if(len > 0)
3515  memcpy(pChars, sb.GetData(), len);
3516  pChars[len] = '\0';
3517  *ppStatsString = pChars;
3518 }
3519 
3520 void vmaFreeStatsString(
3521  VmaAllocator allocator,
3522  char* pStatsString)
3523 {
3524  if(pStatsString != VMA_NULL)
3525  {
3526  VMA_ASSERT(allocator);
3527  size_t len = strlen(pStatsString);
3528  vma_delete_array(allocator, pStatsString, len + 1);
3529  }
3530 }
3531 
3532 #endif // #if VMA_STATS_STRING_ENABLED
3533 
3536 VkResult vmaFindMemoryTypeIndex(
3537  VmaAllocator allocator,
3538  uint32_t memoryTypeBits,
3539  const VmaMemoryRequirements* pMemoryRequirements,
3540  uint32_t* pMemoryTypeIndex)
3541 {
3542  VMA_ASSERT(allocator != VK_NULL_HANDLE);
3543  VMA_ASSERT(pMemoryRequirements != VMA_NULL);
3544  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
3545 
3546  uint32_t requiredFlags = pMemoryRequirements->requiredFlags;
3547  uint32_t preferredFlags = pMemoryRequirements->preferredFlags;
3548  if(preferredFlags == 0)
3549  preferredFlags = requiredFlags;
3550  // preferredFlags, if not 0, must be subset of requiredFlags.
3551  VMA_ASSERT((requiredFlags & ~preferredFlags) == 0);
3552 
3553  // Convert usage to requiredFlags and preferredFlags.
3554  switch(pMemoryRequirements->usage)
3555  {
3557  break;
3559  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3560  break;
3562  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
3563  break;
3565  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
3566  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3567  break;
3569  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
3570  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
3571  break;
3572  default:
3573  break;
3574  }
3575 
3576  *pMemoryTypeIndex = UINT_MAX;
3577  uint32_t minCost = UINT_MAX;
3578  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
3579  memTypeIndex < allocator->GetMemoryTypeCount();
3580  ++memTypeIndex, memTypeBit <<= 1)
3581  {
3582  // This memory type is acceptable according to memoryTypeBits bitmask.
3583  if((memTypeBit & memoryTypeBits) != 0)
3584  {
3585  const VkMemoryPropertyFlags currFlags =
3586  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
3587  // This memory type contains requiredFlags.
3588  if((requiredFlags & ~currFlags) == 0)
3589  {
3590  // Calculate cost as number of bits from preferredFlags not present in this memory type.
3591  uint32_t currCost = CountBitsSet(preferredFlags & ~currFlags);
3592  // Remember memory type with lowest cost.
3593  if(currCost < minCost)
3594  {
3595  *pMemoryTypeIndex = memTypeIndex;
3596  if(currCost == 0)
3597  return VK_SUCCESS;
3598  minCost = currCost;
3599  }
3600  }
3601  }
3602  }
3603  return (*pMemoryTypeIndex != UINT_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
3604 }
3605 
3606 VkResult vmaAllocateMemory(
3607  VmaAllocator allocator,
3608  const VkMemoryRequirements* pVkMemoryRequirements,
3609  const VmaMemoryRequirements* pVmaMemoryRequirements,
3610  VkMappedMemoryRange* pMemory,
3611  uint32_t* pMemoryTypeIndex)
3612 {
3613  VMA_ASSERT(allocator && pVkMemoryRequirements && pVmaMemoryRequirements && pMemory);
3614 
3615  VMA_DEBUG_LOG("vmaAllocateMemory");
3616 
3617  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3618 
3619  return allocator->AllocateMemory(
3620  *pVkMemoryRequirements,
3621  *pVmaMemoryRequirements,
3622  VMA_SUBALLOCATION_TYPE_UNKNOWN,
3623  pMemory,
3624  pMemoryTypeIndex);
3625 }
3626 
3628  VmaAllocator allocator,
3629  VkBuffer buffer,
3630  const VmaMemoryRequirements* pMemoryRequirements,
3631  VkMappedMemoryRange* pMemory,
3632  uint32_t* pMemoryTypeIndex)
3633 {
3634  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pMemoryRequirements && pMemory);
3635 
3636  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
3637 
3638  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3639 
3640  VkMemoryRequirements vkMemReq = {};
3641  vkGetBufferMemoryRequirements(allocator->m_hDevice, buffer, &vkMemReq);
3642 
3643  return allocator->AllocateMemory(
3644  vkMemReq,
3645  *pMemoryRequirements,
3646  VMA_SUBALLOCATION_TYPE_BUFFER,
3647  pMemory,
3648  pMemoryTypeIndex);
3649 }
3650 
3651 VkResult vmaAllocateMemoryForImage(
3652  VmaAllocator allocator,
3653  VkImage image,
3654  const VmaMemoryRequirements* pMemoryRequirements,
3655  VkMappedMemoryRange* pMemory,
3656  uint32_t* pMemoryTypeIndex)
3657 {
3658  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pMemoryRequirements);
3659 
3660  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
3661 
3662  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3663 
3664  return AllocateMemoryForImage(
3665  allocator,
3666  image,
3667  pMemoryRequirements,
3668  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
3669  pMemory,
3670  pMemoryTypeIndex);
3671 }
3672 
3673 void vmaFreeMemory(
3674  VmaAllocator allocator,
3675  const VkMappedMemoryRange* pMemory)
3676 {
3677  VMA_ASSERT(allocator && pMemory);
3678 
3679  VMA_DEBUG_LOG("vmaFreeMemory");
3680 
3681  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3682 
3683  allocator->FreeMemory(pMemory);
3684 }
3685 
3686 VkResult vmaMapMemory(
3687  VmaAllocator allocator,
3688  const VkMappedMemoryRange* pMemory,
3689  void** ppData)
3690 {
3691  VMA_ASSERT(allocator && pMemory && ppData);
3692 
3693  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3694 
3695  return vkMapMemory(allocator->m_hDevice, pMemory->memory,
3696  pMemory->offset, pMemory->size, 0, ppData);
3697 }
3698 
3699 void vmaUnmapMemory(
3700  VmaAllocator allocator,
3701  const VkMappedMemoryRange* pMemory)
3702 {
3703  VMA_ASSERT(allocator && pMemory);
3704 
3705  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3706 
3707  vkUnmapMemory(allocator->m_hDevice, pMemory->memory);
3708 }
3709 
3710 VkResult vmaCreateBuffer(
3711  VmaAllocator allocator,
3712  const VkBufferCreateInfo* pCreateInfo,
3713  const VmaMemoryRequirements* pMemoryRequirements,
3714  VkBuffer* pBuffer,
3715  VkMappedMemoryRange* pMemory,
3716  uint32_t* pMemoryTypeIndex)
3717 {
3718  VMA_ASSERT(allocator && pCreateInfo && pMemoryRequirements);
3719 
3720  VMA_DEBUG_LOG("vmaCreateBuffer");
3721 
3722  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3723 
3724  // 1. Create VkBuffer.
3725  VkResult res = vkCreateBuffer(allocator->m_hDevice, pCreateInfo, allocator->GetAllocationCallbacks(), pBuffer);
3726  if(res >= 0)
3727  {
3728  VkMappedMemoryRange mem = {};
3729 
3730  // 2. vkGetBufferMemoryRequirements.
3731  VkMemoryRequirements vkMemReq = {};
3732  vkGetBufferMemoryRequirements(allocator->m_hDevice, *pBuffer, &vkMemReq);
3733 
3734  // 3. Allocate memory using allocator.
3735  res = allocator->AllocateMemory(
3736  vkMemReq,
3737  *pMemoryRequirements,
3738  VMA_SUBALLOCATION_TYPE_BUFFER,
3739  &mem,
3740  pMemoryTypeIndex);
3741  if(res >= 0)
3742  {
3743  if(pMemory != VMA_NULL)
3744  {
3745  *pMemory = mem;
3746  }
3747  // 3. Bind buffer with memory.
3748  res = vkBindBufferMemory(allocator->m_hDevice, *pBuffer, mem.memory, mem.offset);
3749  if(res >= 0)
3750  {
3751  // All steps succeeded.
3752  VmaMutexLock lock(allocator->m_BufferToMemoryMapMutex);
3753  allocator->m_BufferToMemoryMap.insert(VmaPair<VkBuffer, VkMappedMemoryRange>(*pBuffer, mem));
3754  return VK_SUCCESS;
3755  }
3756  allocator->FreeMemory(&mem);
3757  return res;
3758  }
3759  vkDestroyBuffer(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
3760  return res;
3761  }
3762  return res;
3763 }
3764 
3765 void vmaDestroyBuffer(
3766  VmaAllocator allocator,
3767  VkBuffer buffer)
3768 {
3769  if(buffer != VK_NULL_HANDLE)
3770  {
3771  VMA_ASSERT(allocator);
3772 
3773  VMA_DEBUG_LOG("vmaDestroyBuffer");
3774 
3775  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3776 
3777  VkMappedMemoryRange mem = {};
3778  {
3779  VmaMutexLock lock(allocator->m_BufferToMemoryMapMutex);
3780  VMA_MAP_TYPE(VkBuffer, VkMappedMemoryRange)::iterator it = allocator->m_BufferToMemoryMap.find(buffer);
3781  if(it == allocator->m_BufferToMemoryMap.end())
3782  {
3783  VMA_ASSERT(0 && "Trying to destroy buffer that was not created using vmaCreateBuffer or already freed.");
3784  return;
3785  }
3786  mem = it->second;
3787  allocator->m_BufferToMemoryMap.erase(it);
3788  }
3789 
3790  vkDestroyBuffer(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
3791 
3792  allocator->FreeMemory(&mem);
3793  }
3794 }
3795 
3796 VkResult vmaCreateImage(
3797  VmaAllocator allocator,
3798  const VkImageCreateInfo* pCreateInfo,
3799  const VmaMemoryRequirements* pMemoryRequirements,
3800  VkImage* pImage,
3801  VkMappedMemoryRange* pMemory,
3802  uint32_t* pMemoryTypeIndex)
3803 {
3804  VMA_ASSERT(allocator && pCreateInfo && pMemoryRequirements);
3805 
3806  VMA_DEBUG_LOG("vmaCreateImage");
3807 
3808  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3809 
3810  // 1. Create VkImage.
3811  VkResult res = vkCreateImage(allocator->m_hDevice, pCreateInfo, allocator->GetAllocationCallbacks(), pImage);
3812  if(res >= 0)
3813  {
3814  VkMappedMemoryRange mem = {};
3815  VmaSuballocationType suballocType = pCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
3816  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
3817  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
3818 
3819  // 2. Allocate memory using allocator.
3820  res = AllocateMemoryForImage(allocator, *pImage, pMemoryRequirements, suballocType, &mem, pMemoryTypeIndex);
3821  if(res >= 0)
3822  {
3823  if(pMemory != VMA_NULL)
3824  *pMemory = mem;
3825  // 3. Bind image with memory.
3826  res = vkBindImageMemory(allocator->m_hDevice, *pImage, mem.memory, mem.offset);
3827  if(res >= 0)
3828  {
3829  // All steps succeeded.
3830  VmaMutexLock lock(allocator->m_ImageToMemoryMapMutex);
3831  allocator->m_ImageToMemoryMap.insert(VmaPair<VkImage, VkMappedMemoryRange>(*pImage, mem));
3832  return VK_SUCCESS;
3833  }
3834  allocator->FreeMemory(&mem);
3835  return res;
3836  }
3837  vkDestroyImage(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
3838  return res;
3839  }
3840  return res;
3841 }
3842 
3843 void vmaDestroyImage(
3844  VmaAllocator allocator,
3845  VkImage image)
3846 {
3847  if(image != VK_NULL_HANDLE)
3848  {
3849  VMA_ASSERT(allocator);
3850 
3851  VMA_DEBUG_LOG("vmaDestroyImage");
3852 
3853  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3854 
3855  VkMappedMemoryRange mem = {};
3856  {
3857  VmaMutexLock lock(allocator->m_ImageToMemoryMapMutex);
3858  VMA_MAP_TYPE(VkImage, VkMappedMemoryRange)::iterator it = allocator->m_ImageToMemoryMap.find(image);
3859  if(it == allocator->m_ImageToMemoryMap.end())
3860  {
3861  VMA_ASSERT(0 && "Trying to destroy buffer that was not created using vmaCreateBuffer or already freed.");
3862  return;
3863  }
3864  mem = it->second;
3865  allocator->m_ImageToMemoryMap.erase(it);
3866  }
3867 
3868  vkDestroyImage(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
3869 
3870  allocator->FreeMemory(&mem);
3871  }
3872 }
3873 
3874 #endif // #ifdef VMA_IMPLEMENTATION
3875 
3876 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
struct VmaMemoryRequirements VmaMemoryRequirements
+Go to the documentation of this file.
1 //
2 // Copyright (c) 2017 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
155 #include <vulkan/vulkan.h>
156 
158 
162 VK_DEFINE_HANDLE(VmaAllocator)
163 
164 typedef struct VmaAllocatorCreateInfo
166 {
168 
169  VkPhysicalDevice physicalDevice;
171 
172  VkDevice device;
174 
177 
180 
181  const VkAllocationCallbacks* pAllocationCallbacks;
183 
185 VkResult vmaCreateAllocator(
186  const VmaAllocatorCreateInfo* pCreateInfo,
187  VmaAllocator* pAllocator);
188 
191  VmaAllocator allocator);
192 
198  VmaAllocator allocator,
199  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
200 
206  VmaAllocator allocator,
207  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
208 
216  VmaAllocator allocator,
217  uint32_t memoryTypeIndex,
218  VkMemoryPropertyFlags* pFlags);
219 
220 typedef struct VmaStatInfo
221 {
222  uint32_t AllocationCount;
225  VkDeviceSize UsedBytes;
226  VkDeviceSize UnusedBytes;
227  VkDeviceSize SuballocationSizeMin, SuballocationSizeAvg, SuballocationSizeMax;
228  VkDeviceSize UnusedRangeSizeMin, UnusedRangeSizeAvg, UnusedRangeSizeMax;
229 } VmaStatInfo;
230 
232 struct VmaStats
233 {
234  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
235  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
237 };
238 
240 void vmaCalculateStats(
241  VmaAllocator allocator,
242  VmaStats* pStats);
243 
244 #define VMA_STATS_STRING_ENABLED 1
245 
246 #if VMA_STATS_STRING_ENABLED
247 
249 
252  VmaAllocator allocator,
253  char** ppStatsString,
254  VkBool32 detailedMap);
255 
256 void vmaFreeStatsString(
257  VmaAllocator allocator,
258  char* pStatsString);
259 
260 #endif // #if VMA_STATS_STRING_ENABLED
261 
264 
269 typedef enum VmaMemoryUsage
270 {
283 
284 typedef struct VmaMemoryRequirements
285 {
294  VkBool32 ownMemory;
303  VkMemoryPropertyFlags requiredFlags;
308  VkMemoryPropertyFlags preferredFlags;
315  VkBool32 neverAllocate;
317 
332 VkResult vmaFindMemoryTypeIndex(
333  VmaAllocator allocator,
334  uint32_t memoryTypeBits,
335  const VmaMemoryRequirements* pMemoryRequirements,
336  uint32_t* pMemoryTypeIndex);
337 
340 
357 VkResult vmaAllocateMemory(
358  VmaAllocator allocator,
359  const VkMemoryRequirements* pVkMemoryRequirements,
360  const VmaMemoryRequirements* pVmaMemoryRequirements,
361  VkMappedMemoryRange* pMemory,
362  uint32_t* pMemoryTypeIndex);
363 
372  VmaAllocator allocator,
373  VkBuffer buffer,
374  const VmaMemoryRequirements* pMemoryRequirements,
375  VkMappedMemoryRange* pMemory,
376  uint32_t* pMemoryTypeIndex);
377 
380  VmaAllocator allocator,
381  VkImage image,
382  const VmaMemoryRequirements* pMemoryRequirements,
383  VkMappedMemoryRange* pMemory,
384  uint32_t* pMemoryTypeIndex);
385 
387 void vmaFreeMemory(
388  VmaAllocator allocator,
389  const VkMappedMemoryRange* pMemory);
390 
396 VkResult vmaMapMemory(
397  VmaAllocator allocator,
398  const VkMappedMemoryRange* pMemory,
399  void** ppData);
400 
401 void vmaUnmapMemory(
402  VmaAllocator allocator,
403  const VkMappedMemoryRange* pMemory);
404 
407 
429 VkResult vmaCreateBuffer(
430  VmaAllocator allocator,
431  const VkBufferCreateInfo* pCreateInfo,
432  const VmaMemoryRequirements* pMemoryRequirements,
433  VkBuffer* pBuffer,
434  VkMappedMemoryRange* pMemory,
435  uint32_t* pMemoryTypeIndex);
436 
437 void vmaDestroyBuffer(
438  VmaAllocator allocator,
439  VkBuffer buffer);
440 
442 VkResult vmaCreateImage(
443  VmaAllocator allocator,
444  const VkImageCreateInfo* pCreateInfo,
445  const VmaMemoryRequirements* pMemoryRequirements,
446  VkImage* pImage,
447  VkMappedMemoryRange* pMemory,
448  uint32_t* pMemoryTypeIndex);
449 
450 void vmaDestroyImage(
451  VmaAllocator allocator,
452  VkImage image);
453 
456 #ifdef VMA_IMPLEMENTATION
457 
458 #include <cstdlib>
459 
460 /*******************************************************************************
461 CONFIGURATION
462 
463 Change these definitions depending on your environment.
464 */
465 
466 #define VMA_USE_STL_CONTAINERS 0
467 
468 /* Set this macro to 1 to make the library including and using STL containers:
469 std::pair, std::vector, std::list, std::unordered_map.
470 
471 Set it to 0 or undefined to make the library using its own implementation of
472 the containers.
473 */
474 #if VMA_USE_STL_CONTAINERS
475 #define VMA_USE_STL_VECTOR 1
476 #define VMA_USE_STL_UNORDERED_MAP 1
477 #define VMA_USE_STL_LIST 1
478 #endif
479 
480 #if VMA_USE_STL_VECTOR
481 #include <vector>
482 #endif
483 
484 #if VMA_USE_STL_UNORDERED_MAP
485 #include <unordered_map>
486 #endif
487 
488 #if VMA_USE_STL_LIST
489 #include <list>
490 #endif
491 
492 /*
493 Following headers are used in this CONFIGURATION section only, so feel free to
494 remove them if not needed.
495 */
496 #include <cassert> // for assert
497 #include <algorithm> // for min, max
498 #include <mutex> // for std::mutex
499 
500 #ifdef _DEBUG
501  // Normal assert to check for programmer's errors, especially in Debug configuration.
502  #define VMA_ASSERT(expr) assert(expr)
503  // Assert that will be called very often, like inside data structures e.g. operator[].
504  // Making it non-empty can make program slow.
505  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
506 #else
507  #define VMA_ASSERT(expr)
508  #define VMA_HEAVY_ASSERT(expr)
509 #endif
510 
511 // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
512 #define VMA_NULL nullptr
513 
514 #define VMA_ALIGN_OF(type) (__alignof(type))
515 #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
516 #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
517 
518 #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
519 #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
520 #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
521 
522 #define VMA_DEBUG_LOG(format, ...)
523 /*
524 #define VMA_DEBUG_LOG(format, ...) do { \
525  printf(format, __VA_ARGS__); \
526  printf("\n"); \
527 } while(false)
528 */
529 
530 #if VMA_STATS_STRING_ENABLED
531 
532 static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
533 {
534  _ultoa_s(num, outStr, strLen, 10);
535 }
536 static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
537 {
538  _ui64toa_s(num, outStr, strLen, 10);
539 }
540 
541 #endif // #if VMA_STATS_STRING_ENABLED
542 
543 class VmaMutex
544 {
545 public:
546  VmaMutex() { }
547  ~VmaMutex() { }
548  void Lock() { m_Mutex.lock(); }
549  void Unlock() { m_Mutex.unlock(); }
550 private:
551  std::mutex m_Mutex;
552 };
553 
554 /*
555 Main parameter for function assessing how good is a free suballocation for a new
556 allocation request.
557 
558 - Set to true to use Best-Fit algorithm - prefer smaller blocks, as close to the
559  size of requested allocations as possible.
560 - Set to false to use Worst-Fit algorithm - prefer larger blocks, as large as
561  possible.
562 
563 Experiments in special testing environment showed that Best-Fit algorithm is
564 better.
565 */
566 static const bool VMA_BEST_FIT = true;
567 
568 /*
569 Every object will have its own allocation.
570 Enable for debugging purposes only.
571 */
572 static const bool VMA_DEBUG_ALWAYS_OWN_MEMORY = false;
573 
574 /*
575 Minimum alignment of all suballocations, in bytes.
576 Set to more than 1 for debugging purposes only. Must be power of two.
577 */
578 static const VkDeviceSize VMA_DEBUG_ALIGNMENT = 1;
579 
580 /*
581 Minimum margin between suballocations, in bytes.
582 Set nonzero for debugging purposes only.
583 */
584 static const VkDeviceSize VMA_DEBUG_MARGIN = 0;
585 
586 /*
587 Set this to 1 for debugging purposes only, to enable single mutex protecting all
588 entry calls to the library. Can be useful for debugging multithreading issues.
589 */
590 #define VMA_DEBUG_GLOBAL_MUTEX 0
591 
592 /*
593 Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity.
594 Set to more than 1 for debugging purposes only. Must be power of two.
595 */
596 static const VkDeviceSize VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY = 1;
597 
598 // Maximum size of a memory heap in Vulkan to consider it "small".
599 static const VkDeviceSize VMA_SMALL_HEAP_MAX_SIZE = 512 * 1024 * 1024;
600 // Default size of a block allocated as single VkDeviceMemory from a "large" heap.
601 static const VkDeviceSize VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE = 256 * 1024 * 1024;
602 // Default size of a block allocated as single VkDeviceMemory from a "small" heap.
603 static const VkDeviceSize VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE = 64 * 1024 * 1024;
604 
605 /*******************************************************************************
606 END OF CONFIGURATION
607 */
608 
609 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
610  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
611 
612 // Returns number of bits set to 1 in (v).
613 static inline uint32_t CountBitsSet(uint32_t v)
614 {
615  uint32_t c = v - ((v >> 1) & 0x55555555);
616  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
617  c = ((c >> 4) + c) & 0x0F0F0F0F;
618  c = ((c >> 8) + c) & 0x00FF00FF;
619  c = ((c >> 16) + c) & 0x0000FFFF;
620  return c;
621 }
622 
623 // Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
624 // Use types like uint32_t, uint64_t as T.
625 template <typename T>
626 static inline T VmaAlignUp(T val, T align)
627 {
628  return (val + align - 1) / align * align;
629 }
630 
631 // Division with mathematical rounding to nearest number.
632 template <typename T>
633 inline T VmaRoundDiv(T x, T y)
634 {
635  return (x + (y / (T)2)) / y;
636 }
637 /*
638 Returns true if two memory blocks occupy overlapping pages.
639 ResourceA must be in less memory offset than ResourceB.
640 
641 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
642 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
643 */
644 static inline bool VmaBlocksOnSamePage(
645  VkDeviceSize resourceAOffset,
646  VkDeviceSize resourceASize,
647  VkDeviceSize resourceBOffset,
648  VkDeviceSize pageSize)
649 {
650  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
651  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
652  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
653  VkDeviceSize resourceBStart = resourceBOffset;
654  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
655  return resourceAEndPage == resourceBStartPage;
656 }
657 
// Type of a suballocation inside a VkDeviceMemory block. Used to decide
// whether two neighboring suballocations must respect bufferImageGranularity
// (see VmaIsBufferImageGranularityConflict).
enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,           // Region is unused.
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,        // Resource kind not known: treated conservatively.
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,  // Image with unknown tiling.
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF // Forces 32-bit underlying storage.
};
668 
669 /*
670 Returns true if given suballocation types could conflict and must respect
671 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
672 or linear image and another one is optimal image. If type is unknown, behave
673 conservatively.
674 */
675 static inline bool VmaIsBufferImageGranularityConflict(
676  VmaSuballocationType suballocType1,
677  VmaSuballocationType suballocType2)
678 {
679  if(suballocType1 > suballocType2)
680  VMA_SWAP(suballocType1, suballocType2);
681 
682  switch(suballocType1)
683  {
684  case VMA_SUBALLOCATION_TYPE_FREE:
685  return false;
686  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
687  return true;
688  case VMA_SUBALLOCATION_TYPE_BUFFER:
689  return
690  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
691  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
692  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
693  return
694  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
695  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
696  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
697  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
698  return
699  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
700  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
701  return false;
702  default:
703  VMA_ASSERT(0);
704  return true;
705  }
706 }
707 
708 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
709 struct VmaMutexLock
710 {
711 public:
712  VmaMutexLock(VmaMutex& mutex) : m_Mutex(mutex) { mutex.Lock(); }
713  ~VmaMutexLock() { m_Mutex.Unlock(); }
714 
715 private:
716  VmaMutex& m_Mutex;
717 };
718 
719 #if VMA_DEBUG_GLOBAL_MUTEX
720  static VmaMutex gDebugGlobalMutex;
721  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex);
722 #else
723  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
724 #endif
725 
// Minimum size of a free suballocation to register it in the free suballocation collection.
// Smaller free regions are skipped to keep m_FreeSuballocationsBySize compact.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
728 
/*
Performs binary search and returns iterator to first element that is greater or
equal to (key), according to comparison (cmp).

Cmp should return true if first argument is less than second argument.

Returned value is the found element, if present in the collection or place where
new element with value (key) should be inserted.
*/
template <typename IterT, typename KeyT, typename CmpT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
{
    size_t lo = 0;
    size_t hi = (size_t)(end - beg);
    // Classic lower_bound: invariant is that elements before lo compare
    // less than key, and elements at or after hi do not.
    while(lo < hi)
    {
        const size_t mid = lo + (hi - lo) / 2;
        if(cmp(*(beg + mid), key))
            lo = mid + 1;
        else
            hi = mid;
    }
    return beg + lo;
}
752 
754 // Memory allocation
755 
756 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
757 {
758  if((pAllocationCallbacks != VMA_NULL) &&
759  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
760  {
761  return (*pAllocationCallbacks->pfnAllocation)(
762  pAllocationCallbacks->pUserData,
763  size,
764  alignment,
765  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
766  }
767  else
768  {
769  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
770  }
771 }
772 
773 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
774 {
775  if((pAllocationCallbacks != VMA_NULL) &&
776  (pAllocationCallbacks->pfnFree != VMA_NULL))
777  {
778  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
779  }
780  else
781  {
782  VMA_SYSTEM_FREE(ptr);
783  }
784 }
785 
// Allocates raw, uninitialized storage for a single T. Caller must construct
// the object (see vma_new) and later release it with vma_delete.
template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}
791 
// Allocates raw, uninitialized storage for (count) objects of type T.
// NOTE(review): sizeof(T) * count is not checked for overflow — callers pass
// internally-derived counts, but verify if ever exposed to external sizes.
template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}
797 
// Allocates storage via the given callbacks and value-initializes a T there
// (placement new). Pair with vma_delete.
#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

// Array counterpart of vma_new. Pair with vma_delete_array.
#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
801 
802 template<typename T>
803 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
804 {
805  ptr->~T();
806  VmaFree(pAllocationCallbacks, ptr);
807 }
808 
// Destroys (count) objects created with vma_new_array and frees their storage.
// Safe to call with null ptr.
template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        // Destroy in reverse construction order, mirroring delete[] semantics.
        for(size_t i = count; i--; )
            ptr[i].~T();
        VmaFree(pAllocationCallbacks, ptr);
    }
}
819 
// STL-compatible allocator.
// Adapts VkAllocationCallbacks to the minimal allocator interface required by
// the containers in this file (and std containers when VMA_USE_STL_* is on).
// NOTE(review): m_pCallbacks is const, so copy-assignment is implicitly
// unavailable; containers here only copy-construct allocators.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    // Rebinding copy constructor: shares the same callbacks across element types.
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    // Allocators compare equal iff they free through the same callbacks.
    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }
};
845 
#if VMA_USE_STL_VECTOR

#define VmaVector std::vector

// Free-function shims so the rest of the code can insert/remove by index
// uniformly, whether VmaVector is std::vector or the custom class below.
template<typename T, typename allocatorT>
static void VectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}

template<typename T, typename allocatorT>
static void VectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}
861 
862 #else // #if VMA_USE_STL_VECTOR
863 
864 /* Class with interface compatible with subset of std::vector.
865 T must be POD because constructors and destructors are not called and memcpy is
866 used for these objects. */
867 template<typename T, typename AllocatorT>
868 class VmaVector
869 {
870 public:
871  VmaVector(AllocatorT& allocator) :
872  m_Allocator(allocator),
873  m_pArray(VMA_NULL),
874  m_Count(0),
875  m_Capacity(0)
876  {
877  }
878 
879  VmaVector(size_t count, AllocatorT& allocator) :
880  m_Allocator(allocator),
881  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator->m_pCallbacks, count) : VMA_NULL),
882  m_Count(count),
883  m_Capacity(count)
884  {
885  }
886 
887  VmaVector(const VmaVector<T, AllocatorT>& src) :
888  m_Allocator(src.m_Allocator),
889  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(allocator->m_pCallbacks, src.m_Count) : VMA_NULL),
890  m_Count(src.m_Count),
891  m_Capacity(src.m_Count)
892  {
893  if(m_Count != 0)
894  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
895  }
896 
897  ~VmaVector()
898  {
899  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
900  }
901 
902  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
903  {
904  if(&rhs != this)
905  {
906  Resize(rhs.m_Count);
907  if(m_Count != 0)
908  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
909  }
910  return *this;
911  }
912 
913  bool empty() const { return m_Count == 0; }
914  size_t size() const { return m_Count; }
915  T* data() { return m_pArray; }
916  const T* data() const { return m_pArray; }
917 
918  T& operator[](size_t index)
919  {
920  VMA_HEAVY_ASSERT(index < m_Count);
921  return m_pArray[index];
922  }
923  const T& operator[](size_t index) const
924  {
925  VMA_HEAVY_ASSERT(index < m_Count);
926  return m_pArray[index];
927  }
928 
929  T& front()
930  {
931  VMA_HEAVY_ASSERT(m_Count > 0);
932  return m_pArray[0];
933  }
934  const T& front() const
935  {
936  VMA_HEAVY_ASSERT(m_Count > 0);
937  return m_pArray[0];
938  }
939  T& back()
940  {
941  VMA_HEAVY_ASSERT(m_Count > 0);
942  return m_pArray[m_Count - 1];
943  }
944  const T& back() const
945  {
946  VMA_HEAVY_ASSERT(m_Count > 0);
947  return m_pArray[m_Count - 1];
948  }
949 
950  void reserve(size_t newCapacity, bool freeMemory = false)
951  {
952  newCapacity = VMA_MAX(newCapacity, m_Count);
953 
954  if((newCapacity < m_Capacity) && !freeMemory)
955  newCapacity = m_Capacity;
956 
957  if(newCapacity != m_Capacity)
958  {
959  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_hAllocator, newCapacity) : VMA_NULL;
960  if(m_Count != 0)
961  memcpy(newArray, m_pArray, m_Count * sizeof(T));
962  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
963  m_Capacity = newCapacity;
964  m_pArray = newArray;
965  }
966  }
967 
968  void resize(size_t newCount, bool freeMemory = false)
969  {
970  size_t newCapacity = m_Capacity;
971  if(newCount > m_Capacity)
972  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
973  else if(freeMemory)
974  newCapacity = newCount;
975 
976  if(newCapacity != m_Capacity)
977  {
978  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
979  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
980  if(elementsToCopy != 0)
981  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
982  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
983  m_Capacity = newCapacity;
984  m_pArray = newArray;
985  }
986 
987  m_Count = newCount;
988  }
989 
990  void clear(bool freeMemory = false)
991  {
992  resize(0, freeMemory);
993  }
994 
995  void insert(size_t index, const T& src)
996  {
997  VMA_HEAVY_ASSERT(index <= m_Count);
998  const size_t oldCount = size();
999  resize(oldCount + 1);
1000  if(index < oldCount)
1001  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
1002  m_pArray[index] = src;
1003  }
1004 
1005  void remove(size_t index)
1006  {
1007  VMA_HEAVY_ASSERT(index < m_Count);
1008  const size_t oldCount = size();
1009  if(index < oldCount - 1)
1010  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
1011  resize(oldCount - 1);
1012  }
1013 
1014  void push_back(const T& src)
1015  {
1016  const size_t newIndex = size();
1017  resize(newIndex + 1);
1018  m_pArray[newIndex] = src;
1019  }
1020 
1021  void pop_back()
1022  {
1023  VMA_HEAVY_ASSERT(m_Count > 0);
1024  resize(size() - 1);
1025  }
1026 
1027  void push_front(const T& src)
1028  {
1029  insert(0, src);
1030  }
1031 
1032  void pop_front()
1033  {
1034  VMA_HEAVY_ASSERT(m_Count > 0);
1035  remove(0);
1036  }
1037 
1038  typedef T* iterator;
1039 
1040  iterator begin() { return m_pArray; }
1041  iterator end() { return m_pArray + m_Count; }
1042 
1043 private:
1044  AllocatorT m_Allocator;
1045  T* m_pArray;
1046  size_t m_Count;
1047  size_t m_Capacity;
1048 };
1049 
// Index-based insert/remove shims matching the std::vector versions above,
// so callers are agnostic of which vector implementation is in use.
template<typename T, typename allocatorT>
static void VectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}

template<typename T, typename allocatorT>
static void VectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
1061 
1062 #endif // #if VMA_USE_STL_VECTOR
1063 
1065 // class VmaPoolAllocator
1066 
/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    // Releases all blocks. Outstanding pointers become invalid.
    void Clear();
    // Returns uninitialized storage for one T (no constructor is run here).
    T* Alloc();
    // Returns an item previously obtained from Alloc back to its block's free list.
    void Free(T* ptr);

private:
    // Each slot either holds a live T or, while free, the index of the next
    // free slot in the same block (intrusive singly-linked free list).
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex; // UINT_MAX when the block is full.
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};
1101 
// Constructs an empty pool; blocks are created lazily on first Alloc.
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}
1110 
// Frees all blocks; does not run destructors of live items (T storage only).
template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}
1116 
// Releases every block's item array, then drops the block list itself.
template<typename T>
void VmaPoolAllocator<T>::Clear()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    m_ItemBlocks.clear();
}
1124 
// Pops a slot from the first block (searched newest-first) that has a free
// item; creates a new block when all are full.
template<typename T>
T* VmaPoolAllocator<T>::Alloc()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if(block.FirstFreeIndex != UINT_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            block.FirstFreeIndex = pItem->NextFreeIndex;
            return &pItem->Value;
        }
    }

    // No block has free item: Create new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    return &pItem->Value;
}
1146 
// Returns ptr's slot to its owning block's free list. Linear scan over
// blocks; asserts if ptr was not allocated from this pool.
template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        // memcpy instead of a pointer cast avoids strict-aliasing issues
        // when reinterpreting T* as Item*.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}
1170 
// Allocates a fresh block of m_ItemsPerBlock items and threads all of them
// onto the block's free list.
template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    // Writing through the local copy is fine: pItems points to the same
    // array as the element just pushed into m_ItemBlocks.
    for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT_MAX;
    return m_ItemBlocks.back();
}
1185 
1187 // class VmaRawList, VmaList
1188 
1189 #if VMA_USE_STL_LIST
1190 
1191 #define VmaList std::list
1192 
1193 #else // #if VMA_USE_STL_LIST
1194 
// Node of the doubly linked list below; allocated from a VmaPoolAllocator.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};
1202 
// Doubly linked list.
// Low-level list operating directly on VmaListItem nodes; VmaList below
// wraps it with an STL-like iterator interface.
template<typename T>
class VmaRawList
{
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // Variants without a value leave Value uninitialized for the caller to fill.
    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator; // Node storage; recycles freed nodes.
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;

    // Declared not defined, to block copy constructor and assignment operator.
    VmaRawList(const VmaRawList<T>& src);
    VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
};
1250 
// Constructs an empty list; nodes come from a pool of 128 items per block.
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
1260 
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
    // m_ItemAllocator's own destructor releases all node storage at once.
}
1267 
1268 template<typename T>
1269 void VmaRawList<T>::Clear()
1270 {
1271  if(IsEmpty() == false)
1272  {
1273  ItemType* pItem = m_pBack;
1274  while(pItem != VMA_NULL)
1275  {
1276  ItemType* const pPrevItem = pItem->pPrev;
1277  m_ItemAllocator.Free(pItem);
1278  pItem = pPrevItem;
1279  }
1280  m_pFront = VMA_NULL;
1281  m_pBack = VMA_NULL;
1282  m_Count = 0;
1283  }
1284 }
1285 
// Appends a new node (Value left uninitialized) and returns it.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pNext = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pPrev = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pPrev = m_pBack;
        m_pBack->pNext = pNewItem;
        m_pBack = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}
1307 
// Prepends a new node (Value left uninitialized) and returns it.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pPrev = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pNext = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pNext = m_pFront;
        m_pFront->pPrev = pNewItem;
        m_pFront = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}
1329 
// Appends a new node carrying a copy of (value) and returns it.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}
1337 
1338 template<typename T>
1339 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
1340 {
1341  ItemType* const pNewItem = PushFront();
1342  pNewItem->Value = value;
1343  return newItem;
1344 }
1345 
1346 template<typename T>
1347 void VmaRawList<T>::PopBack()
1348 {
1349  VMA_HEAVY_ASSERT(m_Count > 0);
1350  ItemType* const pBackItem = m_pBack;
1351  ItemType* const pPrevItem = pBackItem->pPrev;
1352  if(pPrevItem != VMA_NULL)
1353  pPrevItem->pNext = VMA_NULL;
1354  m_pBack = pPrevItem;
1355  m_ItemAllocator.Free(pBackItem);
1356  --m_Count;
1357 }
1358 
1359 template<typename T>
1360 void VmaRawList<T>::PopFront()
1361 {
1362  VMA_HEAVY_ASSERT(m_Count > 0);
1363  ItemType* const pFrontItem = m_pFront;
1364  ItemType* const pNextItem = pFrontItem->pNext;
1365  if(pNextItem != VMA_NULL)
1366  pNextItem->pPrev = VMA_NULL;
1367  m_pFront = pNextItem;
1368  m_ItemAllocator.Free(pFrontItem);
1369  --m_Count;
1370 }
1371 
// Unlinks pItem from the list (updating front/back as needed) and frees it.
template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    if(pItem->pPrev != VMA_NULL)
        pItem->pPrev->pNext = pItem->pNext;
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    if(pItem->pNext != VMA_NULL)
        pItem->pNext->pPrev = pItem->pPrev;
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}
1397 
1398 template<typename T>
1399 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
1400 {
1401  if(pItem != VMA_NULL)
1402  {
1403  ItemType* const prevItem = pItem->pPrev;
1404  ItemType* const newItem = m_ItemAllocator.Alloc();
1405  newItem->pPrev = prevItem;
1406  newItem->pNext = pItem;
1407  pItem->pPrev = newItem;
1408  if(prevItem != VMA_NULL)
1409  prevItem->pNext = newItem;
1410  else
1411  {
1412  VMA_HEAVY_ASSERT(m_pFront = pItem);
1413  m_pFront = newItem;
1414  }
1415  ++m_Count;
1416  return newItem;
1417  }
1418  else
1419  return PushBack();
1420 }
1421 
1422 template<typename T>
1423 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
1424 {
1425  if(pItem != VMA_NULL)
1426  {
1427  ItemType* const nextItem = pItem->pNext;
1428  ItemType* const newItem = m_ItemAllocator.Alloc();
1429  newItem->pNext = nextItem;
1430  newItem->pPrev = pItem;
1431  pItem->pNext = newItem;
1432  if(nextItem != VMA_NULL)
1433  nextItem->pPrev = newItem;
1434  else
1435  {
1436  VMA_HEAVY_ASSERT(m_pBack = pItem);
1437  m_pBack = newItem;
1438  }
1439  ++m_Count;
1440  return newItem;
1441  }
1442  else
1443  return PushFront();
1444 }
1445 
// Inserts a copy of (value) before pItem (null pItem = append) and returns the node.
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}
1453 
// Inserts a copy of (value) after pItem (null pItem = prepend) and returns the node.
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}
1461 
1462 template<typename T, typename AllocatorT>
1463 class VmaList
1464 {
1465 public:
1466  class iterator
1467  {
1468  public:
1469  iterator() :
1470  m_pList(VMA_NULL),
1471  m_pItem(VMA_NULL)
1472  {
1473  }
1474 
1475  T& operator*() const
1476  {
1477  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
1478  return m_pItem->Value;
1479  }
1480  T* operator->() const
1481  {
1482  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
1483  return &m_pItem->Value;
1484  }
1485 
1486  iterator& operator++()
1487  {
1488  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
1489  m_pItem = m_pItem->pNext;
1490  return *this;
1491  }
1492  iterator& operator--()
1493  {
1494  if(m_pItem != VMA_NULL)
1495  m_pItem = m_pItem->pPrev;
1496  else
1497  {
1498  VMA_HEAVY_ASSERT(!m_pList.IsEmpty());
1499  m_pItem = m_pList->Back();
1500  }
1501  return *this;
1502  }
1503 
1504  iterator operator++(int)
1505  {
1506  iterator result = *this;
1507  ++*this;
1508  return result;
1509  }
1510  iterator operator--(int)
1511  {
1512  iterator result = *this;
1513  --*this;
1514  return result;
1515  }
1516 
1517  bool operator==(const iterator& rhs) const
1518  {
1519  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
1520  return m_pItem == rhs.m_pItem;
1521  }
1522  bool operator!=(const iterator& rhs) const
1523  {
1524  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
1525  return m_pItem != rhs.m_pItem;
1526  }
1527 
1528  private:
1529  VmaRawList<T>* m_pList;
1530  VmaListItem<T>* m_pItem;
1531 
1532  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
1533  m_pList(pList),
1534  m_pItem(pItem)
1535  {
1536  }
1537 
1538  friend class VmaList<T, AllocatorT>;
1539  friend class VmaList<T, AllocatorT>:: const_iterator;
1540  };
1541 
1542  class const_iterator
1543  {
1544  public:
1545  const_iterator() :
1546  m_pList(VMA_NULL),
1547  m_pItem(VMA_NULL)
1548  {
1549  }
1550 
1551  const_iterator(const iterator& src) :
1552  m_pList(src.m_pList),
1553  m_pItem(src.m_pItem)
1554  {
1555  }
1556 
1557  const T& operator*() const
1558  {
1559  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
1560  return m_pItem->Value;
1561  }
1562  const T* operator->() const
1563  {
1564  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
1565  return &m_pItem->Value;
1566  }
1567 
1568  const_iterator& operator++()
1569  {
1570  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
1571  m_pItem = m_pItem->pNext;
1572  return *this;
1573  }
1574  const_iterator& operator--()
1575  {
1576  if(m_pItem != VMA_NULL)
1577  m_pItem = m_pItem->pPrev;
1578  else
1579  {
1580  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
1581  m_pItem = m_pList->Back();
1582  }
1583  return *this;
1584  }
1585 
1586  const_iterator operator++(int)
1587  {
1588  const_iterator result = *this;
1589  ++*this;
1590  return result;
1591  }
1592  const_iterator operator--(int)
1593  {
1594  const_iterator result = *this;
1595  --*this;
1596  return result;
1597  }
1598 
1599  bool operator==(const const_iterator& rhs) const
1600  {
1601  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
1602  return m_pItem == rhs.m_pItem;
1603  }
1604  bool operator!=(const const_iterator& rhs) const
1605  {
1606  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
1607  return m_pItem != rhs.m_pItem;
1608  }
1609 
1610  private:
1611  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
1612  m_pList(pList),
1613  m_pItem(pItem)
1614  {
1615  }
1616 
1617  const VmaRawList<T>* m_pList;
1618  const VmaListItem<T>* m_pItem;
1619 
1620  friend class VmaList<T, AllocatorT>;
1621  };
1622 
1623  VmaList(AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
1624 
1625  bool empty() const { return m_RawList.IsEmpty(); }
1626  size_t size() const { return m_RawList.GetCount(); }
1627 
1628  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
1629  iterator end() { return iterator(&m_RawList, VMA_NULL); }
1630 
1631  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
1632  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
1633 
1634  void clear() { m_RawList.Clear(); }
1635  void push_back(const T& value) { m_RawList.PushBack(value); }
1636  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
1637  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
1638 
1639 private:
1640  VmaRawList<T> m_RawList;
1641 };
1642 
1643 #endif // #if VMA_USE_STL_LIST
1644 
1646 // class VmaMap
1647 
1648 #if VMA_USE_STL_UNORDERED_MAP
1649 
1650 #define VmaPair std::pair
1651 
1652 #define VMA_MAP_TYPE(KeyT, ValueT) \
1653  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
1654 
1655 #else // #if VMA_USE_STL_UNORDERED_MAP
1656 
// Minimal stand-in for std::pair, used as the element type of VmaMap.
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};
1666 
/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
Implemented as a vector of pairs kept sorted by key (lookup is O(log n),
insert/erase O(n)), unlike the hash-based std::unordered_map.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    // Returns end() when key is absent.
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
1689 
1690 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
1691 
// Comparator ordering VmaPairs by their first member; the second overload
// allows binary searching by a bare key without constructing a pair.
template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};
1704 
// Inserts pair at the position that keeps m_Vector sorted by key.
// NOTE(review): duplicate keys are not rejected here — presumably callers
// never insert an existing key; verify at call sites.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VectorInsert(m_Vector, indexToInsert, pair);
}
1715 
// Binary-searches the sorted vector; returns the matching pair or end().
template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
        return it;
    else
        return m_Vector.end();
}
1729 
// Removes the element at (it); iterators past it are invalidated by the shift.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VectorRemove(m_Vector, it - m_Vector.begin());
}
1735 
1736 #endif // #if VMA_USE_STL_UNORDERED_MAP
1737 
/*
Represents a region of VmaAllocation that is either assigned and returned as
allocated memory block or free.
*/
struct VmaSuballocation
{
    VkDeviceSize offset; // Start of the region within the VkDeviceMemory block.
    VkDeviceSize size;
    VmaSuballocationType type; // VMA_SUBALLOCATION_TYPE_FREE when unused.
};

// Ordered list of all suballocations covering one VkDeviceMemory block.
typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
1750 
// Parameters of an allocation.
// Produced by VmaAllocation::CreateAllocationRequest and consumed by Alloc:
// identifies the free suballocation to carve from and the aligned offset.
struct VmaAllocationRequest
{
    VmaSuballocationList::iterator freeSuballocationItem;
    VkDeviceSize offset;
};
1757 
/* Single block of memory - VkDeviceMemory with all the data about its regions
assigned or free. */
class VmaAllocation
{
public:
    VkDeviceMemory m_hMemory;
    VkDeviceSize m_Size;
    // Number of free suballocations in m_Suballocations.
    uint32_t m_FreeCount;
    // Total size of all free suballocations (cached for quick rejection).
    VkDeviceSize m_SumFreeSize;
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    VmaAllocation(VmaAllocator hAllocator);

    ~VmaAllocation()
    {
        // Destroy() must have been called to release the VkDeviceMemory.
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(VkDeviceMemory newMemory, VkDeviceSize newSize);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    // Tries to find a place for suballocation with given parameters inside this allocation.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    bool CreateAllocationRequest(
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaAllocationRequest* pAllocationRequest);

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator freeSuballocItem,
        VkDeviceSize* pOffset) const;

    // Returns true if this allocation is empty - contains only single free suballocation.
    bool IsEmpty() const;

    // Makes actual allocation based on request. Request must already be checked
    // and valid.
    void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize);

    // Frees suballocation assigned to given memory region.
    void Free(const VkMappedMemoryRange* pMemory);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaStringBuilder& sb) const;
#endif

private:
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free. Merges it with adjacent free
    // suballocations if applicable.
    void FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
1837 
// Allocation for an object that has its own private VkDeviceMemory.
struct VmaOwnAllocation
{
    VkDeviceMemory m_hMemory;    // Dedicated memory object owned by this allocation.
    VkDeviceSize m_Size;         // Size of m_hMemory, in bytes.
    VmaSuballocationType m_Type; // Kind of resource recorded at allocation time.
};
1845 
// Strict weak ordering of VmaOwnAllocation by VkDeviceMemory handle value.
// The heterogeneous overload allows binary search by a bare handle.
struct VmaOwnAllocationMemoryHandleLess
{
    bool operator()(const VmaOwnAllocation& lhs, const VmaOwnAllocation& rhs) const
    {
        return lhs.m_hMemory < rhs.m_hMemory;
    }
    bool operator()(const VmaOwnAllocation& lhs, VkDeviceMemory rhsMem) const
    {
        return lhs.m_hMemory < rhsMem;
    }
};
1857 
/* Sequence of VmaAllocation. Represents memory blocks allocated for a specific
Vulkan memory type. */
struct VmaAllocationVector
{
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaAllocation*, VmaStlAllocator<VmaAllocation*> > m_Allocations;

    VmaAllocationVector(VmaAllocator hAllocator);
    ~VmaAllocationVector();

    bool IsEmpty() const { return m_Allocations.empty(); }

    // Tries to free memory from any of its Allocations.
    // Returns index of Allocation that the memory was freed from, or -1 if not found.
    size_t Free(const VkMappedMemoryRange* pMemory);

    // Performs single step in sorting m_Allocations. They may not be fully sorted
    // after this call.
    void IncrementallySortAllocations();

    // Adds statistics of this AllocationVector to pStats.
    void AddStats(VmaStats* pStats, uint32_t memTypeIndex, uint32_t memHeapIndex) const;

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaStringBuilder& sb) const;
#endif

private:
    // Owning allocator, used for allocation callbacks.
    VmaAllocator m_hAllocator;
};
1888 
// Main allocator object.
struct VmaAllocator_T
{
    VkDevice m_hDevice;
    // True if the user supplied custom VkAllocationCallbacks at creation.
    bool m_AllocationCallbacksSpecified;
    VkAllocationCallbacks m_AllocationCallbacks;
    VkDeviceSize m_PreferredLargeHeapBlockSize;
    VkDeviceSize m_PreferredSmallHeapBlockSize;

    VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    VkPhysicalDeviceMemoryProperties m_MemProps;

    // One block vector per Vulkan memory type (index = memory type index).
    VmaAllocationVector* m_pAllocations[VK_MAX_MEMORY_TYPES];
    /* There can be at most one allocation that is completely empty - a
    hysteresis to avoid pessimistic case of alternating creation and destruction
    of a VkDeviceMemory. */
    bool m_HasEmptyAllocation[VK_MAX_MEMORY_TYPES];
    VmaMutex m_AllocationsMutex[VK_MAX_MEMORY_TYPES];

    // Each vector is sorted by memory (handle value).
    typedef VmaVector< VmaOwnAllocation, VmaStlAllocator<VmaOwnAllocation> > OwnAllocationVectorType;
    OwnAllocationVectorType* m_pOwnAllocations[VK_MAX_MEMORY_TYPES];
    VmaMutex m_OwnAllocationsMutex[VK_MAX_MEMORY_TYPES];

    // Sorted by first (VkBuffer handle value).
    VMA_MAP_TYPE(VkBuffer, VkMappedMemoryRange) m_BufferToMemoryMap;
    VmaMutex m_BufferToMemoryMapMutex;
    // Sorted by first (VkImage handle value).
    VMA_MAP_TYPE(VkImage, VkMappedMemoryRange) m_ImageToMemoryMap;
    VmaMutex m_ImageToMemoryMapMutex;

    VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    ~VmaAllocator_T();

    // Returns the user-provided callbacks, or null when Vulkan's default
    // allocator should be used.
    const VkAllocationCallbacks* GetAllocationCallbacks() const
    {
        return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    }

    VkDeviceSize GetPreferredBlockSize(uint32_t memTypeIndex) const;

    // Granularity to keep between adjacent buffer and image suballocations,
    // never smaller than the debug-configured minimum.
    VkDeviceSize GetBufferImageGranularity() const
    {
        return VMA_MAX(
            VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,
            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    }

    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }

    // Main allocation function.
    VkResult AllocateMemory(
        const VkMemoryRequirements& vkMemReq,
        const VmaMemoryRequirements& vmaMemReq,
        VmaSuballocationType suballocType,
        VkMappedMemoryRange* pMemory,
        uint32_t* pMemoryTypeIndex);

    // Main deallocation function.
    void FreeMemory(const VkMappedMemoryRange* pMemory);

    void CalculateStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    VkPhysicalDevice m_PhysicalDevice;

    VkResult AllocateMemoryOfType(
        const VkMemoryRequirements& vkMemReq,
        const VmaMemoryRequirements& vmaMemReq,
        uint32_t memTypeIndex,
        VmaSuballocationType suballocType,
        VkMappedMemoryRange* pMemory);

    // Allocates and registers new VkDeviceMemory specifically for single allocation.
    VkResult AllocateOwnMemory(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        VkMappedMemoryRange* pMemory);

    // Tries to free pMemory as Own Memory. Returns true if found and freed.
    bool FreeOwnMemory(const VkMappedMemoryRange* pMemory);
};
1977 
1979 // Memory allocation #2 after VmaAllocator_T definition
1980 
1981 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
1982 {
1983  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
1984 }
1985 
1986 static void VmaFree(VmaAllocator hAllocator, void* ptr)
1987 {
1988  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
1989 }
1990 
1991 template<typename T>
1992 static T* VmaAllocate(VmaAllocator hAllocator)
1993 {
1994  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
1995 }
1996 
1997 template<typename T>
1998 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
1999 {
2000  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
2001 }
2002 
2003 template<typename T>
2004 static void vma_delete(VmaAllocator hAllocator, T* ptr)
2005 {
2006  if(ptr != VMA_NULL)
2007  {
2008  ptr->~T();
2009  VmaFree(hAllocator, ptr);
2010  }
2011 }
2012 
2013 template<typename T>
2014 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
2015 {
2016  if(ptr != VMA_NULL)
2017  {
2018  for(size_t i = count; i--; )
2019  ptr[i].~T();
2020  VmaFree(hAllocator, ptr);
2021  }
2022 }
2023 
2025 // VmaStringBuilder
2026 
2027 #if VMA_STATS_STRING_ENABLED
2028 
// Helper for building a string (used for the JSON statistics dump) in a
// buffer that uses the allocator's allocation callbacks.
class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    // Pointer to the accumulated characters. The buffer is not
    // null-terminated - use GetLength() for its size.
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    void AddBool(bool b) { Add(b ? "true" : "false"); }
    void AddNull() { Add("null"); }
    // Appends pStr as a quoted string with special characters escaped.
    void AddString(const char* pStr);

private:
    // Growable character buffer; allocates via the allocator's callbacks.
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
2048 
2049 void VmaStringBuilder::Add(const char* pStr)
2050 {
2051  const size_t strLen = strlen(pStr);
2052  if(strLen > 0)
2053  {
2054  const size_t oldCount = m_Data.size();
2055  m_Data.resize(oldCount + strLen);
2056  memcpy(m_Data.data() + oldCount, pStr, strLen);
2057  }
2058 }
2059 
2060 void VmaStringBuilder::AddNumber(uint32_t num)
2061 {
2062  char buf[11];
2063  VmaUint32ToStr(buf, sizeof(buf), num);
2064  Add(buf);
2065 }
2066 
2067 void VmaStringBuilder::AddNumber(uint64_t num)
2068 {
2069  char buf[21];
2070  VmaUint64ToStr(buf, sizeof(buf), num);
2071  Add(buf);
2072 }
2073 
2074 void VmaStringBuilder::AddString(const char* pStr)
2075 {
2076  Add('"');
2077  const size_t strLen = strlen(pStr);
2078  for(size_t i = 0; i < strLen; ++i)
2079  {
2080  char ch = pStr[i];
2081  if(ch == '\'')
2082  Add("\\\\");
2083  else if(ch == '"')
2084  Add("\\\"");
2085  else if(ch >= 32)
2086  Add(ch);
2087  else switch(ch)
2088  {
2089  case '\n':
2090  Add("\\n");
2091  break;
2092  case '\r':
2093  Add("\\r");
2094  break;
2095  case '\t':
2096  Add("\\t");
2097  break;
2098  default:
2099  VMA_ASSERT(0 && "Character not currently supported.");
2100  break;
2101  }
2102  }
2103  Add('"');
2104 }
2105 
2107 
// Correspond to values of enum VmaSuballocationType.
// Indexed directly by the enum's numeric value when printing detailed maps.
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};
2117 
// Writes stat as a single JSON object into sb.
// Key order and punctuation are part of the dump format - do not reorder.
static void VmaPrintStatInfo(VmaStringBuilder& sb, const VmaStatInfo& stat)
{
    sb.Add("{ \"Allocations\": ");
    sb.AddNumber(stat.AllocationCount);
    sb.Add(", \"Suballocations\": ");
    sb.AddNumber(stat.SuballocationCount);
    sb.Add(", \"UnusedRanges\": ");
    sb.AddNumber(stat.UnusedRangeCount);
    sb.Add(", \"UsedBytes\": ");
    sb.AddNumber(stat.UsedBytes);
    sb.Add(", \"UnusedBytes\": ");
    sb.AddNumber(stat.UnusedBytes);
    sb.Add(", \"SuballocationSize\": { \"Min\": ");
    sb.AddNumber(stat.SuballocationSizeMin);
    sb.Add(", \"Avg\": ");
    sb.AddNumber(stat.SuballocationSizeAvg);
    sb.Add(", \"Max\": ");
    sb.AddNumber(stat.SuballocationSizeMax);
    sb.Add(" }, \"UnusedRangeSize\": { \"Min\": ");
    sb.AddNumber(stat.UnusedRangeSizeMin);
    sb.Add(", \"Avg\": ");
    sb.AddNumber(stat.UnusedRangeSizeAvg);
    sb.Add(", \"Max\": ");
    sb.AddNumber(stat.UnusedRangeSizeMax);
    sb.Add(" } }");
}
2144 
2145 #endif // #if VMA_STATS_STRING_ENABLED
2146 
// Orders suballocation-list iterators by the size of the suballocation they
// point to. The heterogeneous overload enables binary search by a bare size
// (used on m_FreeSuballocationsBySize).
struct VmaSuballocationItemSizeLess
{
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        const VmaSuballocationList::iterator rhs) const
    {
        return lhs->size < rhs->size;
    }
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        VkDeviceSize rhsSize) const
    {
        return lhs->size < rhsSize;
    }
};
2162 
// Constructs an unbacked block; call Init() to attach a VkDeviceMemory.
VmaAllocation::VmaAllocation(VmaAllocator hAllocator) :
    m_hMemory(VK_NULL_HANDLE),
    m_Size(0),
    m_FreeCount(0),
    m_SumFreeSize(0),
    // Both containers allocate through the owning allocator's callbacks.
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}
2172 
2173 void VmaAllocation::Init(VkDeviceMemory newMemory, VkDeviceSize newSize)
2174 {
2175  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
2176 
2177  m_hMemory = newMemory;
2178  m_Size = newSize;
2179  m_FreeCount = 1;
2180  m_SumFreeSize = newSize;
2181 
2182  m_Suballocations.clear();
2183  m_FreeSuballocationsBySize.clear();
2184 
2185  VmaSuballocation suballoc = {};
2186  suballoc.offset = 0;
2187  suballoc.size = newSize;
2188  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
2189 
2190  m_Suballocations.push_back(suballoc);
2191  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
2192  --suballocItem;
2193  m_FreeSuballocationsBySize.push_back(suballocItem);
2194 }
2195 
// Frees the underlying VkDeviceMemory and resets the handle, satisfying the
// destructor's assertion. Always call before destruction.
void VmaAllocation::Destroy(VmaAllocator allocator)
{
    VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    vkFreeMemory(allocator->m_hDevice, m_hMemory, allocator->GetAllocationCallbacks());
    m_hMemory = VK_NULL_HANDLE;
}
2202 
// Validates all data structures inside this object: suballocations must be
// contiguous, adjacent free ranges merged, the by-size index consistent and
// sorted, and the cached totals correct. Returns false on the first violation.
bool VmaAllocation::Validate() const
{
    if((m_hMemory == VK_NULL_HANDLE) ||
        (m_Size == 0) ||
        m_Suballocations.empty())
    {
        return false;
    }

    // Expected offset of new suballocation as calculated from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visited suballocation was free.
    bool prevFree = false;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        if(subAlloc.offset != calculatedOffset)
            return false;

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        if(prevFree && currFree)
            return false;
        prevFree = currFree;

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            // Only free ranges at or above the threshold belong in the by-size index.
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
                ++freeSuballocationsToRegister;
        }

        calculatedOffset += subAlloc.size;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
        return false;

    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
            return false;
        // They must be sorted by size ascending.
        if(suballocItem->size < lastSize)
            return false;

        lastSize = suballocItem->size;
    }

    // Check if totals match calculated values.
    return
        (calculatedOffset == m_Size) &&
        (calculatedSumFreeSize == m_SumFreeSize) &&
        (calculatedFreeCount == m_FreeCount);
}
2277 
2278 /*
2279 How many suitable free suballocations to analyze before choosing best one.
2280 - Set to 1 to use First-Fit algorithm - first suitable free suballocation will
2281  be chosen.
2282 - Set to UINT_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
 suballocations will be analyzed and the best one will be chosen.
2284 - Any other value is also acceptable.
2285 */
2286 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
2287 
// Tries to find a place for a suballocation with the given parameters inside
// this block. On success fills pAllocationRequest and returns true; the actual
// allocation is performed later by Alloc(). On failure returns false.
bool VmaAllocation::CreateAllocationRequest(
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(pAllocationRequest != VMA_NULL);
    VMA_HEAVY_ASSERT(Validate());

    // There is not enough total free space in this allocation to fulfill the request: Early return.
    if(m_SumFreeSize < allocSize)
        return false;

    bool found = false;

    // Old brute-force algorithm, linearly searching suballocations.
    /*
    uint32_t suitableSuballocationsFound = 0;
    for(VmaSuballocationList::iterator suballocItem = suballocations.Front();
        suballocItem != VMA_NULL &&
            suitableSuballocationsFound < MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK;
        suballocItem = suballocItem->Next)
    {
        if(suballocItem->Value.type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            VkDeviceSize offset = 0, cost = 0;
            if(CheckAllocation(bufferImageGranularity, allocSize, allocAlignment, allocType, suballocItem, &offset, &cost))
            {
                ++suitableSuballocationsFound;
                if(cost < costLimit)
                {
                    pAllocationRequest->freeSuballocationItem = suballocItem;
                    pAllocationRequest->offset = offset;
                    pAllocationRequest->cost = cost;
                    if(cost == 0)
                        return true;
                    costLimit = cost;
                    betterSuballocationFound = true;
                }
            }
        }
    }
    */

    // New algorithm, efficiently searching freeSuballocationsBySize.
    const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    if(freeSuballocCount > 0)
    {
        if(VMA_BEST_FIT)
        {
            // Find first free suballocation with size not less than allocSize.
            VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
                m_FreeSuballocationsBySize.data(),
                m_FreeSuballocationsBySize.data() + freeSuballocCount,
                allocSize,
                VmaSuballocationItemSizeLess());
            // Walk the remaining (large-enough) candidates from smallest up; the
            // first that passes CheckAllocation (alignment/granularity) wins.
            size_t index = it - m_FreeSuballocationsBySize.data();
            for(; index < freeSuballocCount; ++index)
            {
                VkDeviceSize offset = 0;
                const VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[index];
                if(CheckAllocation(bufferImageGranularity, allocSize, allocAlignment, allocType, suballocItem, &offset))
                {
                    pAllocationRequest->freeSuballocationItem = suballocItem;
                    pAllocationRequest->offset = offset;
                    return true;
                }
            }
        }
        else
        {
            // Search starting from biggest suballocations.
            for(size_t index = freeSuballocCount; index--; )
            {
                VkDeviceSize offset = 0;
                const VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[index];
                if(CheckAllocation(bufferImageGranularity, allocSize, allocAlignment, allocType, suballocItem, &offset))
                {
                    pAllocationRequest->freeSuballocationItem = suballocItem;
                    pAllocationRequest->offset = offset;
                    return true;
                }
            }
        }
    }

    return false;
}
2379 
// Checks if a suballocation with the given size/alignment/type can be placed
// inside the free suballocation freeSuballocItem, honoring VMA_DEBUG_MARGIN
// and Vulkan's bufferImageGranularity. If yes, fills *pOffset with the chosen
// offset and returns true; otherwise returns false.
bool VmaAllocation::CheckAllocation(
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator freeSuballocItem,
    VkDeviceSize* pOffset) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(freeSuballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    const VmaSuballocation& suballoc = *freeSuballocItem;
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

    // Size of this suballocation is too small for this request: Early return.
    if(suballoc.size < allocSize)
        return false;

    // Start from offset equal to beginning of this suballocation.
    *pOffset = suballoc.offset;

    // Apply VMA_DEBUG_MARGIN at the beginning.
    if((VMA_DEBUG_MARGIN > 0) && freeSuballocItem != m_Suballocations.cbegin())
        *pOffset += VMA_DEBUG_MARGIN;

    // Apply alignment.
    const VkDeviceSize alignment = VMA_MAX(allocAlignment, VMA_DEBUG_ALIGNMENT);
    *pOffset = VmaAlignUp(*pOffset, alignment);

    // Check previous suballocations for BufferImageGranularity conflicts.
    // Make bigger alignment if necessary.
    if(bufferImageGranularity > 1)
    {
        bool bufferImageGranularityConflict = false;
        VmaSuballocationList::const_iterator prevSuballocItem = freeSuballocItem;
        // Walk backward while earlier suballocations share the candidate's page.
        while(prevSuballocItem != m_Suballocations.cbegin())
        {
            --prevSuballocItem;
            const VmaSuballocation& prevSuballoc = *prevSuballocItem;
            if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
            {
                if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                {
                    bufferImageGranularityConflict = true;
                    break;
                }
            }
            else
                // Already on previous page.
                break;
        }
        if(bufferImageGranularityConflict)
            *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
    }

    // Calculate padding at the beginning based on current offset.
    const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

    // Calculate required margin at the end if this is not last suballocation.
    VmaSuballocationList::const_iterator next = freeSuballocItem;
    ++next;
    const VkDeviceSize requiredEndMargin =
        (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;

    // Fail if requested size plus margin before and after is bigger than size of this suballocation.
    if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        return false;

    // Check next suballocations for BufferImageGranularity conflicts.
    // If conflict exists, allocation cannot be made here.
    if(bufferImageGranularity > 1)
    {
        VmaSuballocationList::const_iterator nextSuballocItem = freeSuballocItem;
        ++nextSuballocItem;
        while(nextSuballocItem != m_Suballocations.cend())
        {
            const VmaSuballocation& nextSuballoc = *nextSuballocItem;
            if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
            {
                if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    return false;
            }
            else
                // Already on next page.
                break;
            ++nextSuballocItem;
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
2474 
2475 bool VmaAllocation::IsEmpty() const
2476 {
2477  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
2478 }
2479 
// Makes actual allocation based on request. Request must already be checked
// and valid (produced by CreateAllocationRequest). Carves the requested range
// out of the free suballocation, inserting new free ranges for any leftover
// padding before/after it.
void VmaAllocation::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize)
{
    VMA_ASSERT(request.freeSuballocationItem != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.freeSuballocationItem;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used.
    UnregisterFreeSuballocation(request.freeSuballocationItem);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.freeSuballocationItem;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.freeSuballocationItem, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals.
    // One free range became used; up to two new free padding ranges appeared.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
        ++m_FreeCount;
    if(paddingEnd > 0)
        ++m_FreeCount;
    m_SumFreeSize -= allocSize;
}
2537 
// Releases the given suballocation, making it free, then merges it with any
// adjacent free suballocations and keeps m_FreeSuballocationsBySize in sync.
void VmaAllocation::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
        mergeWithNext = true;

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
            mergeWithPrev = true;
    }

    // Neighbors are unregistered before merging because merging changes their size,
    // which is the sort key of m_FreeSuballocationsBySize.
    if(mergeWithNext)
    {
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        // prevItem now covers the merged range; register it under its new size.
        RegisterFreeSuballocation(prevItem);
    }
    else
        RegisterFreeSuballocation(suballocItem);
}
2580 
2581 void VmaAllocation::Free(const VkMappedMemoryRange* pMemory)
2582 {
2583  // If suballocation to free has offset smaller than half of allocation size, search forward.
2584  // Otherwise search backward.
2585  const bool forwardDirection = pMemory->offset < (m_Size / 2);
2586  if(forwardDirection)
2587  {
2588  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
2589  suballocItem != m_Suballocations.end();
2590  ++suballocItem)
2591  {
2592  VmaSuballocation& suballoc = *suballocItem;
2593  if(suballoc.offset == pMemory->offset)
2594  {
2595  FreeSuballocation(suballocItem);
2596  VMA_HEAVY_ASSERT(Validate());
2597  return;
2598  }
2599  }
2600  VMA_ASSERT(0 && "Not found!");
2601  }
2602  else
2603  {
2604  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
2605  suballocItem != m_Suballocations.end();
2606  ++suballocItem)
2607  {
2608  VmaSuballocation& suballoc = *suballocItem;
2609  if(suballoc.offset == pMemory->offset)
2610  {
2611  FreeSuballocation(suballocItem);
2612  VMA_HEAVY_ASSERT(Validate());
2613  return;
2614  }
2615  }
2616  VMA_ASSERT(0 && "Not found!");
2617  }
2618 }
2619 
2620 #if VMA_STATS_STRING_ENABLED
2621 
// Dumps this block's totals and full suballocation list as a JSON object into sb.
// String literals define the dump format - do not alter or reorder them.
void VmaAllocation::PrintDetailedMap(class VmaStringBuilder& sb) const
{
    sb.Add("{\n\t\t\t\"Bytes\": ");
    sb.AddNumber(m_Size);
    sb.Add(",\n\t\t\t\"FreeBytes\": ");
    sb.AddNumber(m_SumFreeSize);
    sb.Add(",\n\t\t\t\"Suballocations\": ");
    sb.AddNumber(m_Suballocations.size());
    sb.Add(",\n\t\t\t\"FreeSuballocations\": ");
    sb.AddNumber(m_FreeCount);
    sb.Add(",\n\t\t\t\"SuballocationList\": [");

    size_t i = 0;
    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem, ++i)
    {
        // Every element after the first needs a separating comma.
        if(i > 0)
            sb.Add(",\n\t\t\t\t{ \"Type\": ");
        else
            sb.Add("\n\t\t\t\t{ \"Type\": ");
        sb.AddString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
        sb.Add(", \"Size\": ");
        sb.AddNumber(suballocItem->size);
        sb.Add(", \"Offset\": ");
        sb.AddNumber(suballocItem->offset);
        sb.Add(" }");
    }

    sb.Add("\n\t\t\t]\n\t\t}");
}
2653 
2654 #endif // #if VMA_STATS_STRING_ENABLED
2655 
2656 void VmaAllocation::MergeFreeWithNext(VmaSuballocationList::iterator item)
2657 {
2658  VMA_ASSERT(item != m_Suballocations.end());
2659  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
2660 
2661  VmaSuballocationList::iterator nextItem = item;
2662  ++nextItem;
2663  VMA_ASSERT(nextItem != m_Suballocations.end());
2664  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
2665 
2666  item->size += nextItem->size;
2667  --m_FreeCount;
2668  m_Suballocations.erase(nextItem);
2669 }
2670 
2671 void VmaAllocation::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
2672 {
2673  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
2674  VMA_ASSERT(item->size > 0);
2675 
2676  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
2677  {
2678  if(m_FreeSuballocationsBySize.empty())
2679  m_FreeSuballocationsBySize.push_back(item);
2680  else
2681  {
2682  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
2683  m_FreeSuballocationsBySize.data(),
2684  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
2685  item,
2686  VmaSuballocationItemSizeLess());
2687  size_t index = it - m_FreeSuballocationsBySize.data();
2688  VectorInsert(m_FreeSuballocationsBySize, index, item);
2689  }
2690  }
2691 }
2692 
2693 void VmaAllocation::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
2694 {
2695  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
2696  VMA_ASSERT(item->size > 0);
2697 
2698  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
2699  {
2700  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
2701  m_FreeSuballocationsBySize.data(),
2702  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
2703  item,
2704  VmaSuballocationItemSizeLess());
2705  for(size_t index = it - m_FreeSuballocationsBySize.data();
2706  index < m_FreeSuballocationsBySize.size();
2707  ++index)
2708  {
2709  if(m_FreeSuballocationsBySize[index] == item)
2710  {
2711  VectorRemove(m_FreeSuballocationsBySize, index);
2712  return;
2713  }
2714  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
2715  }
2716  VMA_ASSERT(0 && "Not found.");
2717  }
2718 }
2719 
2720 static void InitStatInfo(VmaStatInfo& outInfo)
2721 {
2722  memset(&outInfo, 0, sizeof(outInfo));
2723  outInfo.SuballocationSizeMin = UINT64_MAX;
2724  outInfo.UnusedRangeSizeMin = UINT64_MAX;
2725 }
2726 
2727 static void CalcAllocationStatInfo(VmaStatInfo& outInfo, const VmaAllocation& alloc)
2728 {
2729  outInfo.AllocationCount = 1;
2730 
2731  const uint32_t rangeCount = (uint32_t)alloc.m_Suballocations.size();
2732  outInfo.SuballocationCount = rangeCount - alloc.m_FreeCount;
2733  outInfo.UnusedRangeCount = alloc.m_FreeCount;
2734 
2735  outInfo.UnusedBytes = alloc.m_SumFreeSize;
2736  outInfo.UsedBytes = alloc.m_Size - outInfo.UnusedBytes;
2737 
2738  outInfo.SuballocationSizeMin = UINT64_MAX;
2739  outInfo.SuballocationSizeMax = 0;
2740  outInfo.UnusedRangeSizeMin = UINT64_MAX;
2741  outInfo.UnusedRangeSizeMax = 0;
2742 
2743  for(VmaSuballocationList::const_iterator suballocItem = alloc.m_Suballocations.cbegin();
2744  suballocItem != alloc.m_Suballocations.cend();
2745  ++suballocItem)
2746  {
2747  const VmaSuballocation& suballoc = *suballocItem;
2748  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
2749  {
2750  outInfo.SuballocationSizeMin = VMA_MIN(outInfo.SuballocationSizeMin, suballoc.size);
2751  outInfo.SuballocationSizeMax = VMA_MAX(outInfo.SuballocationSizeMax, suballoc.size);
2752  }
2753  else
2754  {
2755  outInfo.UnusedRangeSizeMin = VMA_MIN(outInfo.UnusedRangeSizeMin, suballoc.size);
2756  outInfo.UnusedRangeSizeMax = VMA_MAX(outInfo.UnusedRangeSizeMax, suballoc.size);
2757  }
2758  }
2759 }
2760 
2761 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
2762 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
2763 {
2764  inoutInfo.AllocationCount += srcInfo.AllocationCount;
2765  inoutInfo.SuballocationCount += srcInfo.SuballocationCount;
2766  inoutInfo.UnusedRangeCount += srcInfo.UnusedRangeCount;
2767  inoutInfo.UsedBytes += srcInfo.UsedBytes;
2768  inoutInfo.UnusedBytes += srcInfo.UnusedBytes;
2769  inoutInfo.SuballocationSizeMin = VMA_MIN(inoutInfo.SuballocationSizeMin, srcInfo.SuballocationSizeMin);
2770  inoutInfo.SuballocationSizeMax = VMA_MAX(inoutInfo.SuballocationSizeMax, srcInfo.SuballocationSizeMax);
2771  inoutInfo.UnusedRangeSizeMin = VMA_MIN(inoutInfo.UnusedRangeSizeMin, srcInfo.UnusedRangeSizeMin);
2772  inoutInfo.UnusedRangeSizeMax = VMA_MAX(inoutInfo.UnusedRangeSizeMax, srcInfo.UnusedRangeSizeMax);
2773 }
2774 
2775 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
2776 {
2777  inoutInfo.SuballocationSizeAvg = (inoutInfo.SuballocationCount > 0) ?
2778  VmaRoundDiv<VkDeviceSize>(inoutInfo.UsedBytes, inoutInfo.SuballocationCount) : 0;
2779  inoutInfo.UnusedRangeSizeAvg = (inoutInfo.UnusedRangeCount > 0) ?
2780  VmaRoundDiv<VkDeviceSize>(inoutInfo.UnusedBytes, inoutInfo.UnusedRangeCount) : 0;
2781 }
2782 
2783 VmaAllocationVector::VmaAllocationVector(VmaAllocator hAllocator) :
2784  m_hAllocator(hAllocator),
2785  m_Allocations(VmaStlAllocator<VmaAllocation*>(hAllocator->GetAllocationCallbacks()))
2786 {
2787 }
2788 
2789 VmaAllocationVector::~VmaAllocationVector()
2790 {
2791  for(size_t i = m_Allocations.size(); i--; )
2792  {
2793  m_Allocations[i]->Destroy(m_hAllocator);
2794  vma_delete(m_hAllocator, m_Allocations[i]);
2795  }
2796 }
2797 
2798 size_t VmaAllocationVector::Free(const VkMappedMemoryRange* pMemory)
2799 {
2800  for(uint32_t allocIndex = 0; allocIndex < m_Allocations.size(); ++allocIndex)
2801  {
2802  VmaAllocation* const pAlloc = m_Allocations[allocIndex];
2803  VMA_ASSERT(pAlloc);
2804  if(pAlloc->m_hMemory == pMemory->memory)
2805  {
2806  pAlloc->Free(pMemory);
2807  VMA_HEAVY_ASSERT(pAlloc->Validate());
2808  return allocIndex;
2809  }
2810  }
2811 
2812  return (size_t)-1;
2813 }
2814 
2815 void VmaAllocationVector::IncrementallySortAllocations()
2816 {
2817  // Bubble sort only until first swap.
2818  for(size_t i = 1; i < m_Allocations.size(); ++i)
2819  {
2820  if(m_Allocations[i - 1]->m_SumFreeSize > m_Allocations[i]->m_SumFreeSize)
2821  {
2822  VMA_SWAP(m_Allocations[i - 1], m_Allocations[i]);
2823  return;
2824  }
2825  }
2826 }
2827 
#if VMA_STATS_STRING_ENABLED

// Appends a comma-separated JSON fragment describing every allocation to sb.
void VmaAllocationVector::PrintDetailedMap(class VmaStringBuilder& sb) const
{
    for(size_t i = 0; i < m_Allocations.size(); ++i)
    {
        // Separator before every entry except the first.
        sb.Add(i > 0 ? ",\n\t\t" : "\n\t\t");
        m_Allocations[i]->PrintDetailedMap(sb);
    }
}

#endif // #if VMA_STATS_STRING_ENABLED
2843 
2844 void VmaAllocationVector::AddStats(VmaStats* pStats, uint32_t memTypeIndex, uint32_t memHeapIndex) const
2845 {
2846  for(uint32_t allocIndex = 0; allocIndex < m_Allocations.size(); ++allocIndex)
2847  {
2848  const VmaAllocation* const pAlloc = m_Allocations[allocIndex];
2849  VMA_ASSERT(pAlloc);
2850  VMA_HEAVY_ASSERT(pAlloc->Validate());
2851  VmaStatInfo allocationStatInfo;
2852  CalcAllocationStatInfo(allocationStatInfo, *pAlloc);
2853  VmaAddStatInfo(pStats->total, allocationStatInfo);
2854  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
2855  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
2856  }
2857 }
2858 
2860 // VmaAllocator_T
2861 
2862 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
2863  m_PhysicalDevice(pCreateInfo->physicalDevice),
2864  m_hDevice(pCreateInfo->device),
2865  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
2866  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
2867  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
2868  m_PreferredLargeHeapBlockSize(0),
2869  m_PreferredSmallHeapBlockSize(0),
2870  m_BufferToMemoryMap(VmaStlAllocator< VmaPair<VkBuffer, VkMappedMemoryRange> >(pCreateInfo->pAllocationCallbacks)),
2871  m_ImageToMemoryMap(VmaStlAllocator< VmaPair<VkImage, VkMappedMemoryRange> >(pCreateInfo->pAllocationCallbacks))
2872 {
2873  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
2874 
2875  memset(&m_MemProps, 0, sizeof(m_MemProps));
2876  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
2877 
2878  memset(&m_pAllocations, 0, sizeof(m_pAllocations));
2879  memset(&m_HasEmptyAllocation, 0, sizeof(m_HasEmptyAllocation));
2880  memset(&m_pOwnAllocations, 0, sizeof(m_pOwnAllocations));
2881 
2882  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
2883  pCreateInfo->preferredLargeHeapBlockSize : VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE;
2884  m_PreferredSmallHeapBlockSize = (pCreateInfo->preferredSmallHeapBlockSize != 0) ?
2885  pCreateInfo->preferredSmallHeapBlockSize : VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE;
2886 
2887  vkGetPhysicalDeviceProperties(m_PhysicalDevice, &m_PhysicalDeviceProperties);
2888  vkGetPhysicalDeviceMemoryProperties(m_PhysicalDevice, &m_MemProps);
2889 
2890  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
2891  {
2892  m_pAllocations[i] = vma_new(this, VmaAllocationVector)(this);
2893  m_pOwnAllocations[i] = vma_new(this, OwnAllocationVectorType)(VmaStlAllocator<VmaOwnAllocation>(GetAllocationCallbacks()));
2894  }
2895 }
2896 
2897 VmaAllocator_T::~VmaAllocator_T()
2898 {
2899  for(VMA_MAP_TYPE(VkImage, VkMappedMemoryRange)::iterator it = m_ImageToMemoryMap.begin();
2900  it != m_ImageToMemoryMap.end();
2901  ++it)
2902  {
2903  vkDestroyImage(m_hDevice, it->first, GetAllocationCallbacks());
2904  }
2905 
2906  for(VMA_MAP_TYPE(VkBuffer, VkMappedMemoryRange)::iterator it = m_BufferToMemoryMap.begin();
2907  it != m_BufferToMemoryMap.end();
2908  ++it)
2909  {
2910  vkDestroyBuffer(m_hDevice, it->first, GetAllocationCallbacks());
2911  }
2912 
2913  for(uint32_t typeIndex = 0; typeIndex < GetMemoryTypeCount(); ++typeIndex)
2914  {
2915  OwnAllocationVectorType* pOwnAllocations = m_pOwnAllocations[typeIndex];
2916  VMA_ASSERT(pOwnAllocations);
2917  for(size_t allocationIndex = 0; allocationIndex < pOwnAllocations->size(); ++allocationIndex)
2918  {
2919  const VmaOwnAllocation& ownAlloc = (*pOwnAllocations)[allocationIndex];
2920  vkFreeMemory(m_hDevice, ownAlloc.m_hMemory, GetAllocationCallbacks());
2921  }
2922  }
2923 
2924  for(size_t i = GetMemoryTypeCount(); i--; )
2925  {
2926  vma_delete(this, m_pAllocations[i]);
2927  vma_delete(this, m_pOwnAllocations[i]);
2928  }
2929 }
2930 
2931 VkDeviceSize VmaAllocator_T::GetPreferredBlockSize(uint32_t memTypeIndex) const
2932 {
2933  VkDeviceSize heapSize = m_MemProps.memoryHeaps[m_MemProps.memoryTypes[memTypeIndex].heapIndex].size;
2934  return (heapSize <= VMA_SMALL_HEAP_MAX_SIZE) ?
2935  m_PreferredSmallHeapBlockSize : m_PreferredLargeHeapBlockSize;
2936 }
2937 
2938 VkResult VmaAllocator_T::AllocateMemoryOfType(
2939  const VkMemoryRequirements& vkMemReq,
2940  const VmaMemoryRequirements& vmaMemReq,
2941  uint32_t memTypeIndex,
2942  VmaSuballocationType suballocType,
2943  VkMappedMemoryRange* pMemory)
2944 {
2945  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
2946 
2947  pMemory->sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
2948  pMemory->pNext = VMA_NULL;
2949  pMemory->size = vkMemReq.size;
2950 
2951  const VkDeviceSize preferredBlockSize = GetPreferredBlockSize(memTypeIndex);
2952  // Heuristics: Allocate own memory if requested size if greater than half of preferred block size.
2953  const bool ownMemory =
2954  vmaMemReq.ownMemory ||
2955  VMA_DEBUG_ALWAYS_OWN_MEMORY ||
2956  ((vmaMemReq.neverAllocate == false) && (vkMemReq.size > preferredBlockSize / 2));
2957 
2958  if(ownMemory)
2959  {
2960  if(vmaMemReq.neverAllocate)
2961  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2962  else
2963  return AllocateOwnMemory(vkMemReq.size, suballocType, memTypeIndex, pMemory);
2964  }
2965  else
2966  {
2967  VmaMutexLock lock(m_AllocationsMutex[memTypeIndex]);
2968  VmaAllocationVector* const allocationVector = m_pAllocations[memTypeIndex];
2969  VMA_ASSERT(allocationVector);
2970 
2971  // 1. Search existing allocations.
2972  // Forward order - prefer blocks with smallest amount of free space.
2973  for(size_t allocIndex = 0; allocIndex < allocationVector->m_Allocations.size(); ++allocIndex )
2974  {
2975  VmaAllocation* const pAlloc = allocationVector->m_Allocations[allocIndex];
2976  VMA_ASSERT(pAlloc);
2977  VmaAllocationRequest allocRequest = {};
2978  // Check if can allocate from pAlloc.
2979  if(pAlloc->CreateAllocationRequest(
2980  GetBufferImageGranularity(),
2981  vkMemReq.size,
2982  vkMemReq.alignment,
2983  suballocType,
2984  &allocRequest))
2985  {
2986  // We no longer have an empty Allocation.
2987  if(pAlloc->IsEmpty())
2988  m_HasEmptyAllocation[memTypeIndex] = false;
2989  // Allocate from this pAlloc.
2990  pAlloc->Alloc(allocRequest, suballocType, vkMemReq.size);
2991  // Return VkDeviceMemory and offset (size already filled above).
2992  pMemory->memory = pAlloc->m_hMemory;
2993  pMemory->offset = allocRequest.offset;
2994  VMA_HEAVY_ASSERT(pAlloc->Validate());
2995  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)allocIndex);
2996  return VK_SUCCESS;
2997  }
2998  }
2999 
3000  // 2. Create new Allocation.
3001  if(vmaMemReq.neverAllocate)
3002  {
3003  VMA_DEBUG_LOG(" FAILED due to VmaMemoryRequirements::neverAllocate");
3004  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3005  }
3006  else
3007  {
3008  // Start with full preferredBlockSize.
3009  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
3010  allocInfo.memoryTypeIndex = memTypeIndex;
3011  allocInfo.allocationSize = preferredBlockSize;
3012  VkDeviceMemory mem = VK_NULL_HANDLE;
3013  VkResult res = vkAllocateMemory(m_hDevice, &allocInfo, GetAllocationCallbacks(), &mem);
3014  if(res < 0)
3015  {
3016  // 3. Try half the size.
3017  allocInfo.allocationSize /= 2;
3018  if(allocInfo.allocationSize >= vkMemReq.size)
3019  {
3020  res = vkAllocateMemory(m_hDevice, &allocInfo, GetAllocationCallbacks(), &mem);
3021  if(res < 0)
3022  {
3023  // 4. Try quarter the size.
3024  allocInfo.allocationSize /= 2;
3025  if(allocInfo.allocationSize >= vkMemReq.size)
3026  {
3027  res = vkAllocateMemory(m_hDevice, &allocInfo, GetAllocationCallbacks(), &mem);
3028  }
3029  }
3030  }
3031  }
3032  if(res < 0)
3033  {
3034  // 5. Try OwnAlloc.
3035  res = AllocateOwnMemory(vkMemReq.size, suballocType, memTypeIndex, pMemory);
3036  if(res == VK_SUCCESS)
3037  {
3038  // Succeeded: AllocateOwnMemory function already filld pMemory, nothing more to do here.
3039  VMA_DEBUG_LOG(" Allocated as OwnMemory");
3040  return VK_SUCCESS;
3041  }
3042  else
3043  {
3044  // Everything failed: Return error code.
3045  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
3046  return res;
3047  }
3048  }
3049 
3050  // New VkDeviceMemory successfully created. Create new Allocation for it.
3051  VmaAllocation* const pAlloc = vma_new(this, VmaAllocation)(this);
3052  pAlloc->Init(mem, allocInfo.allocationSize);
3053 
3054  allocationVector->m_Allocations.push_back(pAlloc);
3055 
3056  // Allocate from pAlloc. Because it is empty, allocRequest can be trivially filled.
3057  VmaAllocationRequest allocRequest = {};
3058  allocRequest.freeSuballocationItem = pAlloc->m_Suballocations.begin();
3059  allocRequest.offset = 0;
3060  pAlloc->Alloc(allocRequest, suballocType, vkMemReq.size);
3061  pMemory->memory = mem;
3062  pMemory->offset = allocRequest.offset;
3063  VMA_HEAVY_ASSERT(pAlloc->Validate());
3064  VMA_DEBUG_LOG(" Created new allocation Size=%llu", allocInfo.allocationSize);
3065  return VK_SUCCESS;
3066  }
3067  }
3068 }
3069 
3070 VkResult VmaAllocator_T::AllocateOwnMemory(
3071  VkDeviceSize size,
3072  VmaSuballocationType suballocType,
3073  uint32_t memTypeIndex,
3074  VkMappedMemoryRange* pMemory)
3075 {
3076  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
3077  allocInfo.memoryTypeIndex = memTypeIndex;
3078  allocInfo.allocationSize = size;
3079 
3080  // Allocate VkDeviceMemory.
3081  VmaOwnAllocation ownAlloc = {};
3082  ownAlloc.m_Size = size;
3083  ownAlloc.m_Type = suballocType;
3084  VkResult res = vkAllocateMemory(m_hDevice, &allocInfo, GetAllocationCallbacks(), &ownAlloc.m_hMemory);
3085  if(res < 0)
3086  {
3087  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
3088  return res;
3089  }
3090 
3091  // Register it in m_pOwnAllocations.
3092  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex]);
3093  OwnAllocationVectorType* ownAllocations = m_pOwnAllocations[memTypeIndex];
3094  VMA_ASSERT(ownAllocations);
3095  VmaOwnAllocation* const pOwnAllocationsBeg = ownAllocations->data();
3096  VmaOwnAllocation* const pOwnAllocationsEnd = pOwnAllocationsBeg + ownAllocations->size();
3097  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
3098  pOwnAllocationsBeg,
3099  pOwnAllocationsEnd,
3100  ownAlloc,
3101  VmaOwnAllocationMemoryHandleLess()) - pOwnAllocationsBeg;
3102  VectorInsert(*ownAllocations, indexToInsert, ownAlloc);
3103 
3104  // Return parameters of the allocation.
3105  pMemory->sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
3106  pMemory->pNext = VMA_NULL;
3107  pMemory->memory = ownAlloc.m_hMemory;
3108  pMemory->offset = 0;
3109  pMemory->size = size;
3110 
3111  VMA_DEBUG_LOG(" Allocated OwnMemory MemoryTypeIndex=#%u", memTypeIndex);
3112 
3113  return VK_SUCCESS;
3114 }
3115 
3116 VkResult VmaAllocator_T::AllocateMemory(
3117  const VkMemoryRequirements& vkMemReq,
3118  const VmaMemoryRequirements& vmaMemReq,
3119  VmaSuballocationType suballocType,
3120  VkMappedMemoryRange* pMemory,
3121  uint32_t* pMemoryTypeIndex)
3122 {
3123  if(vmaMemReq.ownMemory && vmaMemReq.neverAllocate)
3124  {
3125  VMA_ASSERT(0 && "Specifying VmaMemoryRequirements::ownMemory && VmaMemoryRequirements::neverAllocate makes no sense.");
3126  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3127  }
3128 
3129  // Bit mask of memory Vulkan types acceptable for this allocation.
3130  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
3131  uint32_t memTypeIndex = UINT_MAX;
3132  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &vmaMemReq, &memTypeIndex);
3133  if(res == VK_SUCCESS)
3134  {
3135  res = AllocateMemoryOfType(vkMemReq, vmaMemReq, memTypeIndex, suballocType, pMemory);
3136  // Succeeded on first try.
3137  if(res == VK_SUCCESS)
3138  {
3139  if(pMemoryTypeIndex != VMA_NULL)
3140  *pMemoryTypeIndex = memTypeIndex;
3141  return res;
3142  }
3143  // Allocation from this memory type failed. Try other compatible memory types.
3144  else
3145  {
3146  for(;;)
3147  {
3148  // Remove old memTypeIndex from list of possibilities.
3149  memoryTypeBits &= ~(1u << memTypeIndex);
3150  // Find alternative memTypeIndex.
3151  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &vmaMemReq, &memTypeIndex);
3152  if(res == VK_SUCCESS)
3153  {
3154  res = AllocateMemoryOfType(vkMemReq, vmaMemReq, memTypeIndex, suballocType, pMemory);
3155  // Allocation from this alternative memory type succeeded.
3156  if(res == VK_SUCCESS)
3157  {
3158  if(pMemoryTypeIndex != VMA_NULL)
3159  *pMemoryTypeIndex = memTypeIndex;
3160  return res;
3161  }
3162  // else: Allocation from this memory type failed. Try next one - next loop iteration.
3163  }
3164  // No other matching memory type index could be found.
3165  else
3166  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
3167  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3168  }
3169  }
3170  }
3171  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
3172  else
3173  return res;
3174 }
3175 
3176 void VmaAllocator_T::FreeMemory(const VkMappedMemoryRange* pMemory)
3177 {
3178  uint32_t memTypeIndex = 0;
3179  bool found = false;
3180  VmaAllocation* allocationToDelete = VMA_NULL;
3181  // Check all memory types because we don't know which one does pMemory come from.
3182  for(; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
3183  {
3184  VmaMutexLock lock(m_AllocationsMutex[memTypeIndex]);
3185  VmaAllocationVector* const pAllocationVector = m_pAllocations[memTypeIndex];
3186  VMA_ASSERT(pAllocationVector);
3187  // Try to free pMemory from pAllocationVector.
3188  const size_t allocIndex = pAllocationVector->Free(pMemory);
3189  if(allocIndex != (size_t)-1)
3190  {
3191  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
3192  found = true;
3193  VmaAllocation* const pAlloc = pAllocationVector->m_Allocations[allocIndex];
3194  VMA_ASSERT(pAlloc);
3195  // pAlloc became empty after this deallocation.
3196  if(pAlloc->IsEmpty())
3197  {
3198  // Already has empty Allocation. We don't want to have two, so delete this one.
3199  if(m_HasEmptyAllocation[memTypeIndex])
3200  {
3201  allocationToDelete = pAlloc;
3202  VectorRemove(pAllocationVector->m_Allocations, allocIndex);
3203  break;
3204  }
3205  // We now have first empty Allocation.
3206  else
3207  m_HasEmptyAllocation[memTypeIndex] = true;
3208  }
3209  // Must be called after allocIndex is used, because later it may become invalid!
3210  pAllocationVector->IncrementallySortAllocations();
3211  break;
3212  }
3213  }
3214  if(found)
3215  {
3216  // Destruction of a free Allocation. Deferred until this point, outside of mutex
3217  // lock, for performance reason.
3218  if(allocationToDelete != VMA_NULL)
3219  {
3220  VMA_DEBUG_LOG(" Deleted empty allocation");
3221  allocationToDelete->Destroy(this);
3222  vma_delete(this, allocationToDelete);
3223  }
3224  return;
3225  }
3226 
3227  // pMemory not found in allocations. Try free it as Own Memory.
3228  if(FreeOwnMemory(pMemory))
3229  return;
3230 
3231  // pMemory not found as Own Memory either.
3232  VMA_ASSERT(0 && "Not found. Trying to free memory not allocated using this allocator (or some other bug).");
3233 }
3234 
3235 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
3236 {
3237  InitStatInfo(pStats->total);
3238  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
3239  InitStatInfo(pStats->memoryType[i]);
3240  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
3241  InitStatInfo(pStats->memoryHeap[i]);
3242 
3243  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
3244  {
3245  VmaMutexLock allocationsLock(m_AllocationsMutex[memTypeIndex]);
3246  const uint32_t heapIndex = m_MemProps.memoryTypes[memTypeIndex].heapIndex;
3247  const VmaAllocationVector* const allocVector = m_pAllocations[memTypeIndex];
3248  VMA_ASSERT(allocVector);
3249  allocVector->AddStats(pStats, memTypeIndex, heapIndex);
3250  }
3251 
3252  VmaPostprocessCalcStatInfo(pStats->total);
3253  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
3254  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
3255  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
3256  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
3257 }
3258 
3259 bool VmaAllocator_T::FreeOwnMemory(const VkMappedMemoryRange* pMemory)
3260 {
3261  VkDeviceMemory vkMemory = VK_NULL_HANDLE;
3262 
3263  // Check all memory types because we don't know which one does pMemory come from.
3264  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
3265  {
3266  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex]);
3267  OwnAllocationVectorType* const pOwnAllocations = m_pOwnAllocations[memTypeIndex];
3268  VMA_ASSERT(pOwnAllocations);
3269  VmaOwnAllocation* const pOwnAllocationsBeg = pOwnAllocations->data();
3270  VmaOwnAllocation* const pOwnAllocationsEnd = pOwnAllocationsBeg + pOwnAllocations->size();
3271  VmaOwnAllocation* const pOwnAllocationIt = VmaBinaryFindFirstNotLess(
3272  pOwnAllocationsBeg,
3273  pOwnAllocationsEnd,
3274  pMemory->memory,
3275  VmaOwnAllocationMemoryHandleLess());
3276  if((pOwnAllocationIt != pOwnAllocationsEnd) &&
3277  (pOwnAllocationIt->m_hMemory == pMemory->memory))
3278  {
3279  VMA_ASSERT(pMemory->size == pOwnAllocationIt->m_Size && pMemory->offset == 0);
3280  vkMemory = pOwnAllocationIt->m_hMemory;
3281  const size_t ownAllocationIndex = pOwnAllocationIt - pOwnAllocationsBeg;
3282  VectorRemove(*pOwnAllocations, ownAllocationIndex);
3283  VMA_DEBUG_LOG(" Freed OwnMemory MemoryTypeIndex=%u", memTypeIndex);
3284  break;
3285  }
3286  }
3287 
3288  // Found. Free VkDeviceMemory deferred until this point, outside of mutex lock,
3289  // for performance reason.
3290  if(vkMemory != VK_NULL_HANDLE)
3291  {
3292  vkFreeMemory(m_hDevice, vkMemory, GetAllocationCallbacks());
3293  return true;
3294  }
3295  else
3296  return false;
3297 }
3298 
#if VMA_STATS_STRING_ENABLED

// Appends the "OwnAllocations" and "Allocations" JSON sections (one entry per
// non-empty memory type) to sb. Called by vmaBuildStatsString when a detailed
// map was requested.
void VmaAllocator_T::PrintDetailedMap(VmaStringBuilder& sb)
{
    // Section 1: own (dedicated) allocations.
    bool ownAllocationsStarted = false;
    for(size_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaMutexLock ownAllocationsLock(m_OwnAllocationsMutex[memTypeIndex]);
        OwnAllocationVectorType* const pOwnAllocVector = m_pOwnAllocations[memTypeIndex];
        VMA_ASSERT(pOwnAllocVector);
        if(pOwnAllocVector->empty())
            continue;

        // Open the section at the first non-empty type, otherwise separate.
        if(ownAllocationsStarted)
            sb.Add(",\n\t\"Type ");
        else
        {
            sb.Add(",\n\"OwnAllocations\": {\n\t\"Type ");
            ownAllocationsStarted = true;
        }
        sb.AddNumber(memTypeIndex);
        sb.Add("\": [");

        for(size_t i = 0; i < pOwnAllocVector->size(); ++i)
        {
            const VmaOwnAllocation& ownAlloc = (*pOwnAllocVector)[i];
            sb.Add(i > 0 ? ",\n\t\t{ \"Size\": " : "\n\t\t{ \"Size\": ");
            sb.AddNumber(ownAlloc.m_Size);
            sb.Add(", \"Type\": ");
            sb.AddString(VMA_SUBALLOCATION_TYPE_NAMES[ownAlloc.m_Type]);
            sb.Add(" }");
        }

        sb.Add("\n\t]");
    }
    if(ownAllocationsStarted)
        sb.Add("\n}");

    // Section 2: regular block allocations.
    {
        bool allocationsStarted = false;
        for(size_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            VmaMutexLock globalAllocationsLock(m_AllocationsMutex[memTypeIndex]);
            if(m_pAllocations[memTypeIndex]->IsEmpty())
                continue;

            if(allocationsStarted)
                sb.Add(",\n\t\"Type ");
            else
            {
                sb.Add(",\n\"Allocations\": {\n\t\"Type ");
                allocationsStarted = true;
            }
            sb.AddNumber(memTypeIndex);
            sb.Add("\": [");

            m_pAllocations[memTypeIndex]->PrintDetailedMap(sb);

            sb.Add("\n\t]");
        }
        if(allocationsStarted)
            sb.Add("\n}");
    }
}

#endif // #if VMA_STATS_STRING_ENABLED
3368 
3369 static VkResult AllocateMemoryForImage(
3370  VmaAllocator allocator,
3371  VkImage image,
3372  const VmaMemoryRequirements* pMemoryRequirements,
3373  VmaSuballocationType suballocType,
3374  VkMappedMemoryRange* pMemory,
3375  uint32_t* pMemoryTypeIndex)
3376 {
3377  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pMemoryRequirements && pMemory);
3378 
3379  VkMemoryRequirements vkMemReq = {};
3380  vkGetImageMemoryRequirements(allocator->m_hDevice, image, &vkMemReq);
3381 
3382  return allocator->AllocateMemory(
3383  vkMemReq,
3384  *pMemoryRequirements,
3385  suballocType,
3386  pMemory,
3387  pMemoryTypeIndex);
3388 }
3389 
3391 // Public interface
3392 
3393 VkResult vmaCreateAllocator(
3394  const VmaAllocatorCreateInfo* pCreateInfo,
3395  VmaAllocator* pAllocator)
3396 {
3397  VMA_ASSERT(pCreateInfo && pAllocator);
3398  VMA_DEBUG_LOG("vmaCreateAllocator");
3399  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
3400  return VK_SUCCESS;
3401 }
3402 
3403 void vmaDestroyAllocator(
3404  VmaAllocator allocator)
3405 {
3406  if(allocator != VK_NULL_HANDLE)
3407  {
3408  VMA_DEBUG_LOG("vmaDestroyAllocator");
3409  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
3410  vma_delete(&allocationCallbacks, allocator);
3411  }
3412 }
3413 
3415  VmaAllocator allocator,
3416  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
3417 {
3418  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
3419  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
3420 }
3421 
3423  VmaAllocator allocator,
3424  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
3425 {
3426  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
3427  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
3428 }
3429 
3431  VmaAllocator allocator,
3432  uint32_t memoryTypeIndex,
3433  VkMemoryPropertyFlags* pFlags)
3434 {
3435  VMA_ASSERT(allocator && pFlags);
3436  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
3437  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
3438 }
3439 
3440 void vmaCalculateStats(
3441  VmaAllocator allocator,
3442  VmaStats* pStats)
3443 {
3444  VMA_ASSERT(allocator && pStats);
3445  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3446  allocator->CalculateStats(pStats);
3447 }
3448 
3449 #if VMA_STATS_STRING_ENABLED
3450 
3451 void vmaBuildStatsString(
3452  VmaAllocator allocator,
3453  char** ppStatsString,
3454  VkBool32 detailedMap)
3455 {
3456  VMA_ASSERT(allocator && ppStatsString);
3457  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3458 
3459  VmaStringBuilder sb(allocator);
3460  {
3461  VmaStats stats;
3462  allocator->CalculateStats(&stats);
3463 
3464  sb.Add("{\n\"Total\": ");
3465  VmaPrintStatInfo(sb, stats.total);
3466 
3467  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
3468  {
3469  sb.Add(",\n\"Heap ");
3470  sb.AddNumber(heapIndex);
3471  sb.Add("\": {\n\t\"Size\": ");
3472  sb.AddNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
3473  sb.Add(",\n\t\"Flags\": ");
3474  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
3475  sb.AddString("DEVICE_LOCAL");
3476  else
3477  sb.AddString("");
3478  if(stats.memoryHeap[heapIndex].AllocationCount > 0)
3479  {
3480  sb.Add(",\n\t\"Stats:\": ");
3481  VmaPrintStatInfo(sb, stats.memoryHeap[heapIndex]);
3482  }
3483 
3484  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
3485  {
3486  if(allocator->m_MemProps.memoryTypes[typeIndex].heapIndex == heapIndex)
3487  {
3488  sb.Add(",\n\t\"Type ");
3489  sb.AddNumber(typeIndex);
3490  sb.Add("\": {\n\t\t\"Flags\": \"");
3491  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
3492  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
3493  sb.Add(" DEVICE_LOCAL");
3494  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
3495  sb.Add(" HOST_VISIBLE");
3496  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
3497  sb.Add(" HOST_COHERENT");
3498  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
3499  sb.Add(" HOST_CACHED");
3500  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
3501  sb.Add(" LAZILY_ALLOCATED");
3502  sb.Add("\"");
3503  if(stats.memoryType[typeIndex].AllocationCount > 0)
3504  {
3505  sb.Add(",\n\t\t\"Stats\": ");
3506  VmaPrintStatInfo(sb, stats.memoryType[typeIndex]);
3507  }
3508  sb.Add("\n\t}");
3509  }
3510  }
3511  sb.Add("\n}");
3512  }
3513  if(detailedMap == VK_TRUE)
3514  allocator->PrintDetailedMap(sb);
3515  sb.Add("\n}\n");
3516  }
3517 
3518  const size_t len = sb.GetLength();
3519  char* const pChars = vma_new_array(allocator, char, len + 1);
3520  if(len > 0)
3521  memcpy(pChars, sb.GetData(), len);
3522  pChars[len] = '\0';
3523  *ppStatsString = pChars;
3524 }
3525 
3526 void vmaFreeStatsString(
3527  VmaAllocator allocator,
3528  char* pStatsString)
3529 {
3530  if(pStatsString != VMA_NULL)
3531  {
3532  VMA_ASSERT(allocator);
3533  size_t len = strlen(pStatsString);
3534  vma_delete_array(allocator, pStatsString, len + 1);
3535  }
3536 }
3537 
3538 #endif // #if VMA_STATS_STRING_ENABLED
3539 
3542 VkResult vmaFindMemoryTypeIndex(
3543  VmaAllocator allocator,
3544  uint32_t memoryTypeBits,
3545  const VmaMemoryRequirements* pMemoryRequirements,
3546  uint32_t* pMemoryTypeIndex)
3547 {
3548  VMA_ASSERT(allocator != VK_NULL_HANDLE);
3549  VMA_ASSERT(pMemoryRequirements != VMA_NULL);
3550  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
3551 
3552  uint32_t requiredFlags = pMemoryRequirements->requiredFlags;
3553  uint32_t preferredFlags = pMemoryRequirements->preferredFlags;
3554  if(preferredFlags == 0)
3555  preferredFlags = requiredFlags;
3556  // preferredFlags, if not 0, must be subset of requiredFlags.
3557  VMA_ASSERT((requiredFlags & ~preferredFlags) == 0);
3558 
3559  // Convert usage to requiredFlags and preferredFlags.
3560  switch(pMemoryRequirements->usage)
3561  {
3563  break;
3565  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3566  break;
3568  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
3569  break;
3571  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
3572  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3573  break;
3575  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
3576  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
3577  break;
3578  default:
3579  break;
3580  }
3581 
3582  *pMemoryTypeIndex = UINT_MAX;
3583  uint32_t minCost = UINT_MAX;
3584  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
3585  memTypeIndex < allocator->GetMemoryTypeCount();
3586  ++memTypeIndex, memTypeBit <<= 1)
3587  {
3588  // This memory type is acceptable according to memoryTypeBits bitmask.
3589  if((memTypeBit & memoryTypeBits) != 0)
3590  {
3591  const VkMemoryPropertyFlags currFlags =
3592  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
3593  // This memory type contains requiredFlags.
3594  if((requiredFlags & ~currFlags) == 0)
3595  {
3596  // Calculate cost as number of bits from preferredFlags not present in this memory type.
3597  uint32_t currCost = CountBitsSet(preferredFlags & ~currFlags);
3598  // Remember memory type with lowest cost.
3599  if(currCost < minCost)
3600  {
3601  *pMemoryTypeIndex = memTypeIndex;
3602  if(currCost == 0)
3603  return VK_SUCCESS;
3604  minCost = currCost;
3605  }
3606  }
3607  }
3608  }
3609  return (*pMemoryTypeIndex != UINT_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
3610 }
3611 
3612 VkResult vmaAllocateMemory(
3613  VmaAllocator allocator,
3614  const VkMemoryRequirements* pVkMemoryRequirements,
3615  const VmaMemoryRequirements* pVmaMemoryRequirements,
3616  VkMappedMemoryRange* pMemory,
3617  uint32_t* pMemoryTypeIndex)
3618 {
3619  VMA_ASSERT(allocator && pVkMemoryRequirements && pVmaMemoryRequirements && pMemory);
3620 
3621  VMA_DEBUG_LOG("vmaAllocateMemory");
3622 
3623  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3624 
3625  return allocator->AllocateMemory(
3626  *pVkMemoryRequirements,
3627  *pVmaMemoryRequirements,
3628  VMA_SUBALLOCATION_TYPE_UNKNOWN,
3629  pMemory,
3630  pMemoryTypeIndex);
3631 }
3632 
3634  VmaAllocator allocator,
3635  VkBuffer buffer,
3636  const VmaMemoryRequirements* pMemoryRequirements,
3637  VkMappedMemoryRange* pMemory,
3638  uint32_t* pMemoryTypeIndex)
3639 {
3640  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pMemoryRequirements && pMemory);
3641 
3642  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
3643 
3644  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3645 
3646  VkMemoryRequirements vkMemReq = {};
3647  vkGetBufferMemoryRequirements(allocator->m_hDevice, buffer, &vkMemReq);
3648 
3649  return allocator->AllocateMemory(
3650  vkMemReq,
3651  *pMemoryRequirements,
3652  VMA_SUBALLOCATION_TYPE_BUFFER,
3653  pMemory,
3654  pMemoryTypeIndex);
3655 }
3656 
3657 VkResult vmaAllocateMemoryForImage(
3658  VmaAllocator allocator,
3659  VkImage image,
3660  const VmaMemoryRequirements* pMemoryRequirements,
3661  VkMappedMemoryRange* pMemory,
3662  uint32_t* pMemoryTypeIndex)
3663 {
3664  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pMemoryRequirements);
3665 
3666  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
3667 
3668  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3669 
3670  return AllocateMemoryForImage(
3671  allocator,
3672  image,
3673  pMemoryRequirements,
3674  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
3675  pMemory,
3676  pMemoryTypeIndex);
3677 }
3678 
3679 void vmaFreeMemory(
3680  VmaAllocator allocator,
3681  const VkMappedMemoryRange* pMemory)
3682 {
3683  VMA_ASSERT(allocator && pMemory);
3684 
3685  VMA_DEBUG_LOG("vmaFreeMemory");
3686 
3687  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3688 
3689  allocator->FreeMemory(pMemory);
3690 }
3691 
3692 VkResult vmaMapMemory(
3693  VmaAllocator allocator,
3694  const VkMappedMemoryRange* pMemory,
3695  void** ppData)
3696 {
3697  VMA_ASSERT(allocator && pMemory && ppData);
3698 
3699  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3700 
3701  return vkMapMemory(allocator->m_hDevice, pMemory->memory,
3702  pMemory->offset, pMemory->size, 0, ppData);
3703 }
3704 
3705 void vmaUnmapMemory(
3706  VmaAllocator allocator,
3707  const VkMappedMemoryRange* pMemory)
3708 {
3709  VMA_ASSERT(allocator && pMemory);
3710 
3711  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3712 
3713  vkUnmapMemory(allocator->m_hDevice, pMemory->memory);
3714 }
3715 
3716 VkResult vmaCreateBuffer(
3717  VmaAllocator allocator,
3718  const VkBufferCreateInfo* pCreateInfo,
3719  const VmaMemoryRequirements* pMemoryRequirements,
3720  VkBuffer* pBuffer,
3721  VkMappedMemoryRange* pMemory,
3722  uint32_t* pMemoryTypeIndex)
3723 {
3724  VMA_ASSERT(allocator && pCreateInfo && pMemoryRequirements);
3725 
3726  VMA_DEBUG_LOG("vmaCreateBuffer");
3727 
3728  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3729 
3730  // 1. Create VkBuffer.
3731  VkResult res = vkCreateBuffer(allocator->m_hDevice, pCreateInfo, allocator->GetAllocationCallbacks(), pBuffer);
3732  if(res >= 0)
3733  {
3734  VkMappedMemoryRange mem = {};
3735 
3736  // 2. vkGetBufferMemoryRequirements.
3737  VkMemoryRequirements vkMemReq = {};
3738  vkGetBufferMemoryRequirements(allocator->m_hDevice, *pBuffer, &vkMemReq);
3739 
3740  // 3. Allocate memory using allocator.
3741  res = allocator->AllocateMemory(
3742  vkMemReq,
3743  *pMemoryRequirements,
3744  VMA_SUBALLOCATION_TYPE_BUFFER,
3745  &mem,
3746  pMemoryTypeIndex);
3747  if(res >= 0)
3748  {
3749  if(pMemory != VMA_NULL)
3750  {
3751  *pMemory = mem;
3752  }
3753  // 3. Bind buffer with memory.
3754  res = vkBindBufferMemory(allocator->m_hDevice, *pBuffer, mem.memory, mem.offset);
3755  if(res >= 0)
3756  {
3757  // All steps succeeded.
3758  VmaMutexLock lock(allocator->m_BufferToMemoryMapMutex);
3759  allocator->m_BufferToMemoryMap.insert(VmaPair<VkBuffer, VkMappedMemoryRange>(*pBuffer, mem));
3760  return VK_SUCCESS;
3761  }
3762  allocator->FreeMemory(&mem);
3763  return res;
3764  }
3765  vkDestroyBuffer(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
3766  return res;
3767  }
3768  return res;
3769 }
3770 
3771 void vmaDestroyBuffer(
3772  VmaAllocator allocator,
3773  VkBuffer buffer)
3774 {
3775  if(buffer != VK_NULL_HANDLE)
3776  {
3777  VMA_ASSERT(allocator);
3778 
3779  VMA_DEBUG_LOG("vmaDestroyBuffer");
3780 
3781  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3782 
3783  VkMappedMemoryRange mem = {};
3784  {
3785  VmaMutexLock lock(allocator->m_BufferToMemoryMapMutex);
3786  VMA_MAP_TYPE(VkBuffer, VkMappedMemoryRange)::iterator it = allocator->m_BufferToMemoryMap.find(buffer);
3787  if(it == allocator->m_BufferToMemoryMap.end())
3788  {
3789  VMA_ASSERT(0 && "Trying to destroy buffer that was not created using vmaCreateBuffer or already freed.");
3790  return;
3791  }
3792  mem = it->second;
3793  allocator->m_BufferToMemoryMap.erase(it);
3794  }
3795 
3796  vkDestroyBuffer(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
3797 
3798  allocator->FreeMemory(&mem);
3799  }
3800 }
3801 
3802 VkResult vmaCreateImage(
3803  VmaAllocator allocator,
3804  const VkImageCreateInfo* pCreateInfo,
3805  const VmaMemoryRequirements* pMemoryRequirements,
3806  VkImage* pImage,
3807  VkMappedMemoryRange* pMemory,
3808  uint32_t* pMemoryTypeIndex)
3809 {
3810  VMA_ASSERT(allocator && pCreateInfo && pMemoryRequirements);
3811 
3812  VMA_DEBUG_LOG("vmaCreateImage");
3813 
3814  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3815 
3816  // 1. Create VkImage.
3817  VkResult res = vkCreateImage(allocator->m_hDevice, pCreateInfo, allocator->GetAllocationCallbacks(), pImage);
3818  if(res >= 0)
3819  {
3820  VkMappedMemoryRange mem = {};
3821  VmaSuballocationType suballocType = pCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
3822  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
3823  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
3824 
3825  // 2. Allocate memory using allocator.
3826  res = AllocateMemoryForImage(allocator, *pImage, pMemoryRequirements, suballocType, &mem, pMemoryTypeIndex);
3827  if(res >= 0)
3828  {
3829  if(pMemory != VMA_NULL)
3830  *pMemory = mem;
3831  // 3. Bind image with memory.
3832  res = vkBindImageMemory(allocator->m_hDevice, *pImage, mem.memory, mem.offset);
3833  if(res >= 0)
3834  {
3835  // All steps succeeded.
3836  VmaMutexLock lock(allocator->m_ImageToMemoryMapMutex);
3837  allocator->m_ImageToMemoryMap.insert(VmaPair<VkImage, VkMappedMemoryRange>(*pImage, mem));
3838  return VK_SUCCESS;
3839  }
3840  allocator->FreeMemory(&mem);
3841  return res;
3842  }
3843  vkDestroyImage(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
3844  return res;
3845  }
3846  return res;
3847 }
3848 
3849 void vmaDestroyImage(
3850  VmaAllocator allocator,
3851  VkImage image)
3852 {
3853  if(image != VK_NULL_HANDLE)
3854  {
3855  VMA_ASSERT(allocator);
3856 
3857  VMA_DEBUG_LOG("vmaDestroyImage");
3858 
3859  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3860 
3861  VkMappedMemoryRange mem = {};
3862  {
3863  VmaMutexLock lock(allocator->m_ImageToMemoryMapMutex);
3864  VMA_MAP_TYPE(VkImage, VkMappedMemoryRange)::iterator it = allocator->m_ImageToMemoryMap.find(image);
3865  if(it == allocator->m_ImageToMemoryMap.end())
3866  {
3867  VMA_ASSERT(0 && "Trying to destroy buffer that was not created using vmaCreateBuffer or already freed.");
3868  return;
3869  }
3870  mem = it->second;
3871  allocator->m_ImageToMemoryMap.erase(it);
3872  }
3873 
3874  vkDestroyImage(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
3875 
3876  allocator->FreeMemory(&mem);
3877  }
3878 }
3879 
3880 #endif // #ifdef VMA_IMPLEMENTATION
3881 
3882 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
struct VmaMemoryRequirements VmaMemoryRequirements
void vmaUnmapMemory(VmaAllocator allocator, const VkMappedMemoryRange *pMemory)
-
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:163
+
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:169
VkResult vmaMapMemory(VmaAllocator allocator, const VkMappedMemoryRange *pMemory, void **ppData)
-
Memory will be used for writing on device and readback on host.
Definition: vk_mem_alloc.h:274
-
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:293
+
Memory will be used for writing on device and readback on host.
Definition: vk_mem_alloc.h:280
+
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:299
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaMemoryRequirements *pMemoryRequirements, VkMappedMemoryRange *pMemory, uint32_t *pMemoryTypeIndex)
Function similar to vmaAllocateMemoryForBuffer().
-
const VkAllocationCallbacks * pAllocationCallbacks
Custom allocation callbacks.
Definition: vk_mem_alloc.h:175
-
Description of a Allocator to be created.
Definition: vk_mem_alloc.h:159
-
VkDeviceSize preferredSmallHeapBlockSize
Size of a single memory block to allocate for resources from a small heap <= 512 MB.
Definition: vk_mem_alloc.h:172
-
VmaStatInfo total
Definition: vk_mem_alloc.h:230
-
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:166
+
const VkAllocationCallbacks * pAllocationCallbacks
Custom allocation callbacks.
Definition: vk_mem_alloc.h:181
+
Description of a Allocator to be created.
Definition: vk_mem_alloc.h:165
+
VkDeviceSize preferredSmallHeapBlockSize
Size of a single memory block to allocate for resources from a small heap <= 512 MB.
Definition: vk_mem_alloc.h:178
+
VmaStatInfo total
Definition: vk_mem_alloc.h:236
+
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:172
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaMemoryRequirements *pVmaMemoryRequirements, VkMappedMemoryRange *pMemory, uint32_t *pMemoryTypeIndex)
General purpose memory allocation.
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer)
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pCreateInfo, const VmaMemoryRequirements *pMemoryRequirements, VkImage *pImage, VkMappedMemoryRange *pMemory, uint32_t *pMemoryTypeIndex)
Function similar to vmaCreateBuffer().
-
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:226
+
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:232
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
-
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:302
-
VmaMemoryUsage
Definition: vk_mem_alloc.h:263
+
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:308
+
VmaMemoryUsage
Definition: vk_mem_alloc.h:269
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pCreateInfo, const VmaMemoryRequirements *pMemoryRequirements, VkBuffer *pBuffer, VkMappedMemoryRange *pMemory, uint32_t *pMemoryTypeIndex)
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
-
Definition: vk_mem_alloc.h:214
-
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:297
-
Definition: vk_mem_alloc.h:278
-
VkBool32 neverAllocate
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:309
-
VkDeviceSize UnusedRangeSizeMax
Definition: vk_mem_alloc.h:222
-
VkDeviceSize SuballocationSizeMax
Definition: vk_mem_alloc.h:221
+
Definition: vk_mem_alloc.h:220
+
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:303
+
Definition: vk_mem_alloc.h:284
+
VkBool32 neverAllocate
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:315
+
VkDeviceSize UnusedRangeSizeMax
Definition: vk_mem_alloc.h:228
+
VkDeviceSize SuballocationSizeMax
Definition: vk_mem_alloc.h:227
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of a Allocator to be created.
-
VkBool32 ownMemory
Set to true if this allocation should have its own memory block.
Definition: vk_mem_alloc.h:288
-
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:228
+
VkBool32 ownMemory
Set to true if this allocation should have its own memory block.
Definition: vk_mem_alloc.h:294
+
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:234
void vmaDestroyImage(VmaAllocator allocator, VkImage image)
-
uint32_t AllocationCount
Definition: vk_mem_alloc.h:216
+
uint32_t AllocationCount
Definition: vk_mem_alloc.h:222
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
-
VkDeviceSize UsedBytes
Definition: vk_mem_alloc.h:219
-
VkDeviceSize preferredLargeHeapBlockSize
Size of a single memory block to allocate for resources.
Definition: vk_mem_alloc.h:169
-
uint32_t UnusedRangeCount
Definition: vk_mem_alloc.h:218
-
Memory will be mapped on host. Could be used for transfer to device.
Definition: vk_mem_alloc.h:270
+
VkDeviceSize UsedBytes
Definition: vk_mem_alloc.h:225
+
VkDeviceSize preferredLargeHeapBlockSize
Size of a single memory block to allocate for resources.
Definition: vk_mem_alloc.h:175
+
uint32_t UnusedRangeCount
Definition: vk_mem_alloc.h:224
+
Memory will be mapped on host. Could be used for transfer to device.
Definition: vk_mem_alloc.h:276
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
-
uint32_t SuballocationCount
Definition: vk_mem_alloc.h:217
-
VkDeviceSize UnusedRangeSizeAvg
Definition: vk_mem_alloc.h:222
-
VkDeviceSize SuballocationSizeMin
Definition: vk_mem_alloc.h:221
+
uint32_t SuballocationCount
Definition: vk_mem_alloc.h:223
+
VkDeviceSize UnusedRangeSizeAvg
Definition: vk_mem_alloc.h:228
+
VkDeviceSize SuballocationSizeMin
Definition: vk_mem_alloc.h:227
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaMemoryRequirements *pMemoryRequirements, VkMappedMemoryRange *pMemory, uint32_t *pMemoryTypeIndex)
-
VkDeviceSize SuballocationSizeAvg
Definition: vk_mem_alloc.h:221
+
VkDeviceSize SuballocationSizeAvg
Definition: vk_mem_alloc.h:227
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
-
No intended memory usage specified.
Definition: vk_mem_alloc.h:266
-
Definition: vk_mem_alloc.h:275
-
Memory will be used for frequent (dynamic) updates from host and reads on device. ...
Definition: vk_mem_alloc.h:272
+
No intended memory usage specified.
Definition: vk_mem_alloc.h:272
+
Definition: vk_mem_alloc.h:281
+
Memory will be used for frequent (dynamic) updates from host and reads on device. ...
Definition: vk_mem_alloc.h:278
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
-
Memory will be used on device only, no need to be mapped on host.
Definition: vk_mem_alloc.h:268
+
Memory will be used on device only, no need to be mapped on host.
Definition: vk_mem_alloc.h:274
struct VmaStatInfo VmaStatInfo
-
VkDeviceSize UnusedBytes
Definition: vk_mem_alloc.h:220
-
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:229
+
VkDeviceSize UnusedBytes
Definition: vk_mem_alloc.h:226
+
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:235
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaMemoryRequirements *pMemoryRequirements, uint32_t *pMemoryTypeIndex)
void vmaFreeMemory(VmaAllocator allocator, const VkMappedMemoryRange *pMemory)
Frees memory previously allocated using vmaAllocateMemoryForBuffer() or vmaAllocateMemoryForImage().
-
VkDeviceSize UnusedRangeSizeMin
Definition: vk_mem_alloc.h:222
+
VkDeviceSize UnusedRangeSizeMin
Definition: vk_mem_alloc.h:228