diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7d69f97..87ad08d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,4 +1,4 @@
-# 2.3.0-alpha.1 (2019-11-25)
+# 2.3.0 (2019-12-04)
 
 Major release after a year of development in "master" branch and feature branches. Notable new features: supporting Vulkan 1.1, supporting query for memory budget.
 
diff --git a/bin/VmaReplay_Release_vs2019.exe b/bin/VmaReplay_Release_vs2019.exe
index afb2137..59552c5 100644
Binary files a/bin/VmaReplay_Release_vs2019.exe and b/bin/VmaReplay_Release_vs2019.exe differ
diff --git a/bin/VulkanSample_Release_vs2019.exe b/bin/VulkanSample_Release_vs2019.exe
index 31fe0d3..4940505 100644
Binary files a/bin/VulkanSample_Release_vs2019.exe and b/bin/VulkanSample_Release_vs2019.exe differ
diff --git a/docs/html/index.html b/docs/html/index.html
index 0ff700a..bbfb810 100644
--- a/docs/html/index.html
+++ b/docs/html/index.html
@@ -65,7 +65,7 @@ $(function() {
 Vulkan Memory Allocator
-Version 2.3.0-alpha.1 (2019-11-25)
+Version 2.3.0 (2019-12-04)
 Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
 License: MIT
 
 Documentation of all members: vk_mem_alloc.h
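
Regarding the "query for memory budget" feature called out in the changelog entry above: in this release it is exposed through vmaGetBudget() and the VmaBudget struct. A minimal sketch of polling it, assuming a valid VmaAllocator named allocator created elsewhere, with error handling omitted:

#include <cstdio>

// Print the current per-heap budget. vmaGetBudget() fills one VmaBudget
// entry per memory heap of the physical device.
static void PrintBudgets(VmaAllocator allocator)
{
    VmaBudget budgets[VK_MAX_MEMORY_HEAPS] = {};
    vmaGetBudget(allocator, budgets);

    const VkPhysicalDeviceMemoryProperties* memProps = VMA_NULL;
    vmaGetMemoryProperties(allocator, &memProps);

    for(uint32_t heapIndex = 0; heapIndex < memProps->memoryHeapCount; ++heapIndex)
    {
        // usage  = this process's current usage of the heap, as reported/estimated;
        // budget = how much this process may use without degrading performance.
        printf("heap %u: usage %llu / budget %llu bytes\n",
            heapIndex,
            (unsigned long long)budgets[heapIndex].usage,
            (unsigned long long)budgets[heapIndex].budget);
    }
}

When the allocator is created with the VK_EXT_memory_budget extension enabled, these numbers come from the driver; otherwise VMA falls back to an internal estimate.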

diff --git a/docs/html/vk__mem__alloc_8h_source.html b/docs/html/vk__mem__alloc_8h_source.html
index 4c50d1c..ec5240e 100644
--- a/docs/html/vk__mem__alloc_8h_source.html
+++ b/docs/html/vk__mem__alloc_8h_source.html
@@ -3455,7 +3455,7 @@ $(function() {
 6278 
 6279  void GetPoolStats(VmaPoolStats* pStats);
 6280 
-6281  bool IsEmpty() const { return m_Blocks.empty(); }
+6281  bool IsEmpty();
 6282  bool IsCorruptionDetectionEnabled() const;
 6283 
 6284  VkResult Allocate(
@@ -8994,5927 +8994,5933 @@ $(function() {
 11827  }
 11828 }
 11829 
-11830 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
-11831 {
-11832  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
-11833  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
-11834  (VMA_DEBUG_MARGIN > 0) &&
-11835  (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
-11836  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
-11837 }
-11838 
-11839 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
-11840 
-11841 VkResult VmaBlockVector::Allocate(
-11842  uint32_t currentFrameIndex,
-11843  VkDeviceSize size,
-11844  VkDeviceSize alignment,
-11845  const VmaAllocationCreateInfo& createInfo,
-11846  VmaSuballocationType suballocType,
-11847  size_t allocationCount,
-11848  VmaAllocation* pAllocations)
-11849 {
-11850  size_t allocIndex;
-11851  VkResult res = VK_SUCCESS;
-11852 
-11853  if(IsCorruptionDetectionEnabled())
-11854  {
-11855  size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
-11856  alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
-11857  }
+11830 bool VmaBlockVector::IsEmpty()
+11831 {
+11832  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
+11833  return m_Blocks.empty();
+11834 }
+11835 
+11836 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
+11837 {
+11838  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
+11839  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
+11840  (VMA_DEBUG_MARGIN > 0) &&
+11841  (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
+11842  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
+11843 }
+11844 
+11845 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
+11846 
+11847 VkResult VmaBlockVector::Allocate(
+11848  uint32_t currentFrameIndex,
+11849  VkDeviceSize size,
+11850  VkDeviceSize alignment,
+11851  const VmaAllocationCreateInfo& createInfo,
+11852  VmaSuballocationType suballocType,
+11853  size_t allocationCount,
+11854  VmaAllocation* pAllocations)
+11855 {
+11856  size_t allocIndex;
+11857  VkResult res = VK_SUCCESS;
 11858 
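The hunk above is the substance of the change: IsEmpty() previously read m_Blocks with no synchronization, while every writer (Allocate(), Free(), and the defragmentation paths) mutates m_Blocks under m_Mutex, so the 2.3.0 definition takes VMA's RAII read-lock guard first. It also stops being const, presumably because the mutex member is not declared mutable. A standalone analogue of the pattern using the standard library — the class and member names below are illustrative, not VMA's:

#include <mutex>         // std::unique_lock
#include <shared_mutex>  // std::shared_mutex, std::shared_lock (C++17)
#include <vector>

// Sketch of the reader/writer-lock discipline VmaBlockVector follows:
// queries take a shared (read) lock, mutations take an exclusive (write) lock.
class BlockList
{
public:
    bool IsEmpty() const
    {
        std::shared_lock<std::shared_mutex> lock(m_Mutex); // read lock, like VmaMutexLockRead
        return m_Blocks.empty();
    }
    void Add(int block)
    {
        std::unique_lock<std::shared_mutex> lock(m_Mutex); // write lock, like VmaMutexLockWrite
        m_Blocks.push_back(block);
    }
private:
    mutable std::shared_mutex m_Mutex; // 'mutable' is what lets IsEmpty() stay const here
    std::vector<int> m_Blocks;
};

VMA additionally threads m_hAllocator->m_UseMutex through its guards so that locking degrades to a no-op when the allocator is created with VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT.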
-
11859  {
-
11860  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
-
11861  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
-
11862  {
-
11863  res = AllocatePage(
-
11864  currentFrameIndex,
-
11865  size,
-
11866  alignment,
-
11867  createInfo,
-
11868  suballocType,
-
11869  pAllocations + allocIndex);
-
11870  if(res != VK_SUCCESS)
-
11871  {
-
11872  break;
-
11873  }
-
11874  }
-
11875  }
-
11876 
-
11877  if(res != VK_SUCCESS)
-
11878  {
-
11879  // Free all already created allocations.
-
11880  while(allocIndex--)
-
11881  {
-
11882  Free(pAllocations[allocIndex]);
-
11883  }
-
11884  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
-
11885  }
-
11886 
-
11887  return res;
-
11888 }
-
11889 
-
11890 VkResult VmaBlockVector::AllocatePage(
-
11891  uint32_t currentFrameIndex,
-
11892  VkDeviceSize size,
-
11893  VkDeviceSize alignment,
-
11894  const VmaAllocationCreateInfo& createInfo,
-
11895  VmaSuballocationType suballocType,
-
11896  VmaAllocation* pAllocation)
-
11897 {
-
11898  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
-
11899  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
-
11900  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
-
11901  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
-
11902 
-
11903  const bool withinBudget = (createInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0;
-
11904  VkDeviceSize freeMemory;
-
11905  {
-
11906  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
-
11907  VmaBudget heapBudget = {};
-
11908  m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
-
11909  freeMemory = (heapBudget.usage < heapBudget.budget) ? (heapBudget.budget - heapBudget.usage) : 0;
-
11910  }
-
11911 
-
11912  const bool canFallbackToDedicated = !IsCustomPool();
-
11913  const bool canCreateNewBlock =
-
11914  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
-
11915  (m_Blocks.size() < m_MaxBlockCount) &&
-
11916  (freeMemory >= size || !canFallbackToDedicated);
-
11917  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
-
11918 
-
11919  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
-
11920  // Which in turn is available only when maxBlockCount = 1.
-
11921  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
-
11922  {
-
11923  canMakeOtherLost = false;
-
11924  }
-
11925 
-
11926  // Upper address can only be used with linear allocator and within single memory block.
-
11927  if(isUpperAddress &&
-
11928  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
-
11929  {
-
11930  return VK_ERROR_FEATURE_NOT_PRESENT;
-
11931  }
-
11932 
-
11933  // Validate strategy.
-
11934  switch(strategy)
+
11859  if(IsCorruptionDetectionEnabled())
+
11860  {
+
11861  size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
+
11862  alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
+
11863  }
+
11864 
+
11865  {
+
11866  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
+
11867  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
+
11868  {
+
11869  res = AllocatePage(
+
11870  currentFrameIndex,
+
11871  size,
+
11872  alignment,
+
11873  createInfo,
+
11874  suballocType,
+
11875  pAllocations + allocIndex);
+
11876  if(res != VK_SUCCESS)
+
11877  {
+
11878  break;
+
11879  }
+
11880  }
+
11881  }
+
11882 
+
11883  if(res != VK_SUCCESS)
+
11884  {
+
11885  // Free all already created allocations.
+
11886  while(allocIndex--)
+
11887  {
+
11888  Free(pAllocations[allocIndex]);
+
11889  }
+
11890  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
+
11891  }
+
11892 
+
11893  return res;
+
11894 }
+
11895 
+
11896 VkResult VmaBlockVector::AllocatePage(
+
11897  uint32_t currentFrameIndex,
+
11898  VkDeviceSize size,
+
11899  VkDeviceSize alignment,
+
11900  const VmaAllocationCreateInfo& createInfo,
+
11901  VmaSuballocationType suballocType,
+
11902  VmaAllocation* pAllocation)
+
11903 {
+
11904  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
+
11905  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
+
11906  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
+
11907  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
+
11908 
+
11909  const bool withinBudget = (createInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0;
+
11910  VkDeviceSize freeMemory;
+
11911  {
+
11912  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
+
11913  VmaBudget heapBudget = {};
+
11914  m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
+
11915  freeMemory = (heapBudget.usage < heapBudget.budget) ? (heapBudget.budget - heapBudget.usage) : 0;
+
11916  }
+
11917 
+
11918  const bool canFallbackToDedicated = !IsCustomPool();
+
11919  const bool canCreateNewBlock =
+
11920  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
+
11921  (m_Blocks.size() < m_MaxBlockCount) &&
+
11922  (freeMemory >= size || !canFallbackToDedicated);
+
11923  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
+
11924 
+
11925  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
+
11926  // Which in turn is available only when maxBlockCount = 1.
+
11927  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
+
11928  {
+
11929  canMakeOtherLost = false;
+
11930  }
+
11931 
+
11932  // Upper address can only be used with linear allocator and within single memory block.
+
11933  if(isUpperAddress &&
+
11934  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
11935  {
-
11936  case 0:
- -
11938  break;
- - - -
11942  break;
-
11943  default:
-
11944  return VK_ERROR_FEATURE_NOT_PRESENT;
-
11945  }
-
11946 
-
11947  // Early reject: requested allocation size is larger that maximum block size for this block vector.
-
11948  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
-
11949  {
-
11950  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
11936  return VK_ERROR_FEATURE_NOT_PRESENT;
+
11937  }
+
11938 
+
11939  // Validate strategy.
+
11940  switch(strategy)
+
11941  {
+
11942  case 0:
+ +
11944  break;
+ + + +
11948  break;
+
11949  default:
+
11950  return VK_ERROR_FEATURE_NOT_PRESENT;
11951  }
11952 
-
11953  /*
-
11954  Under certain condition, this whole section can be skipped for optimization, so
-
11955  we move on directly to trying to allocate with canMakeOtherLost. That's the case
-
11956  e.g. for custom pools with linear algorithm.
-
11957  */
-
11958  if(!canMakeOtherLost || canCreateNewBlock)
-
11959  {
-
11960  // 1. Search existing allocations. Try to allocate without making other allocations lost.
-
11961  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
- -
11963 
-
11964  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
-
11965  {
-
11966  // Use only last block.
-
11967  if(!m_Blocks.empty())
-
11968  {
-
11969  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
-
11970  VMA_ASSERT(pCurrBlock);
-
11971  VkResult res = AllocateFromBlock(
-
11972  pCurrBlock,
-
11973  currentFrameIndex,
-
11974  size,
-
11975  alignment,
-
11976  allocFlagsCopy,
-
11977  createInfo.pUserData,
-
11978  suballocType,
-
11979  strategy,
-
11980  pAllocation);
-
11981  if(res == VK_SUCCESS)
-
11982  {
-
11983  VMA_DEBUG_LOG(" Returned from last block #%u", pCurrBlock->GetId());
-
11984  return VK_SUCCESS;
-
11985  }
-
11986  }
-
11987  }
-
11988  else
-
11989  {
- -
11991  {
-
11992  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
-
11993  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
-
11994  {
-
11995  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
-
11996  VMA_ASSERT(pCurrBlock);
-
11997  VkResult res = AllocateFromBlock(
-
11998  pCurrBlock,
-
11999  currentFrameIndex,
-
12000  size,
-
12001  alignment,
-
12002  allocFlagsCopy,
-
12003  createInfo.pUserData,
-
12004  suballocType,
-
12005  strategy,
-
12006  pAllocation);
-
12007  if(res == VK_SUCCESS)
-
12008  {
-
12009  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
-
12010  return VK_SUCCESS;
-
12011  }
-
12012  }
-
12013  }
-
12014  else // WORST_FIT, FIRST_FIT
-
12015  {
-
12016  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
-
12017  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
-
12018  {
-
12019  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
-
12020  VMA_ASSERT(pCurrBlock);
-
12021  VkResult res = AllocateFromBlock(
-
12022  pCurrBlock,
-
12023  currentFrameIndex,
-
12024  size,
-
12025  alignment,
-
12026  allocFlagsCopy,
-
12027  createInfo.pUserData,
-
12028  suballocType,
-
12029  strategy,
-
12030  pAllocation);
-
12031  if(res == VK_SUCCESS)
-
12032  {
-
12033  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
-
12034  return VK_SUCCESS;
-
12035  }
-
12036  }
-
12037  }
-
12038  }
-
12039 
-
12040  // 2. Try to create new block.
-
12041  if(canCreateNewBlock)
-
12042  {
-
12043  // Calculate optimal size for new block.
-
12044  VkDeviceSize newBlockSize = m_PreferredBlockSize;
-
12045  uint32_t newBlockSizeShift = 0;
-
12046  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
-
12047 
-
12048  if(!m_ExplicitBlockSize)
-
12049  {
-
12050  // Allocate 1/8, 1/4, 1/2 as first blocks.
-
12051  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
-
12052  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
-
12053  {
-
12054  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
-
12055  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
-
12056  {
-
12057  newBlockSize = smallerNewBlockSize;
-
12058  ++newBlockSizeShift;
-
12059  }
-
12060  else
-
12061  {
-
12062  break;
-
12063  }
-
12064  }
-
12065  }
-
12066 
-
12067  size_t newBlockIndex = 0;
-
12068  VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
-
12069  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
-
12070  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
-
12071  if(!m_ExplicitBlockSize)
-
12072  {
-
12073  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
-
12074  {
-
12075  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
-
12076  if(smallerNewBlockSize >= size)
-
12077  {
-
12078  newBlockSize = smallerNewBlockSize;
-
12079  ++newBlockSizeShift;
-
12080  res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
-
12081  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
-
12082  }
-
12083  else
-
12084  {
-
12085  break;
-
12086  }
-
12087  }
-
12088  }
-
12089 
-
12090  if(res == VK_SUCCESS)
-
12091  {
-
12092  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
-
12093  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
-
12094 
-
12095  res = AllocateFromBlock(
-
12096  pBlock,
-
12097  currentFrameIndex,
-
12098  size,
-
12099  alignment,
-
12100  allocFlagsCopy,
-
12101  createInfo.pUserData,
-
12102  suballocType,
-
12103  strategy,
-
12104  pAllocation);
-
12105  if(res == VK_SUCCESS)
-
12106  {
-
12107  VMA_DEBUG_LOG(" Created new block #%u Size=%llu", pBlock->GetId(), newBlockSize);
-
12108  return VK_SUCCESS;
-
12109  }
-
12110  else
-
12111  {
-
12112  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
-
12113  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-
12114  }
-
12115  }
-
12116  }
-
12117  }
-
12118 
-
12119  // 3. Try to allocate from existing blocks with making other allocations lost.
-
12120  if(canMakeOtherLost)
-
12121  {
-
12122  uint32_t tryIndex = 0;
-
12123  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
-
12124  {
-
12125  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
-
12126  VmaAllocationRequest bestRequest = {};
-
12127  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
-
12128 
-
12129  // 1. Search existing allocations.
- -
12131  {
-
12132  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
-
12133  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
-
12134  {
-
12135  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
-
12136  VMA_ASSERT(pCurrBlock);
-
12137  VmaAllocationRequest currRequest = {};
-
12138  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
-
12139  currentFrameIndex,
-
12140  m_FrameInUseCount,
-
12141  m_BufferImageGranularity,
-
12142  size,
-
12143  alignment,
-
12144  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
-
12145  suballocType,
-
12146  canMakeOtherLost,
-
12147  strategy,
-
12148  &currRequest))
-
12149  {
-
12150  const VkDeviceSize currRequestCost = currRequest.CalcCost();
-
12151  if(pBestRequestBlock == VMA_NULL ||
-
12152  currRequestCost < bestRequestCost)
-
12153  {
-
12154  pBestRequestBlock = pCurrBlock;
-
12155  bestRequest = currRequest;
-
12156  bestRequestCost = currRequestCost;
-
12157 
-
12158  if(bestRequestCost == 0)
-
12159  {
-
12160  break;
-
12161  }
-
12162  }
-
12163  }
-
12164  }
-
12165  }
-
12166  else // WORST_FIT, FIRST_FIT
-
12167  {
-
12168  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
-
12169  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
-
12170  {
-
12171  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
-
12172  VMA_ASSERT(pCurrBlock);
-
12173  VmaAllocationRequest currRequest = {};
-
12174  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
-
12175  currentFrameIndex,
-
12176  m_FrameInUseCount,
-
12177  m_BufferImageGranularity,
-
12178  size,
-
12179  alignment,
-
12180  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
-
12181  suballocType,
-
12182  canMakeOtherLost,
-
12183  strategy,
-
12184  &currRequest))
-
12185  {
-
12186  const VkDeviceSize currRequestCost = currRequest.CalcCost();
-
12187  if(pBestRequestBlock == VMA_NULL ||
-
12188  currRequestCost < bestRequestCost ||
- -
12190  {
-
12191  pBestRequestBlock = pCurrBlock;
-
12192  bestRequest = currRequest;
-
12193  bestRequestCost = currRequestCost;
-
12194 
-
12195  if(bestRequestCost == 0 ||
- -
12197  {
-
12198  break;
-
12199  }
-
12200  }
-
12201  }
-
12202  }
-
12203  }
-
12204 
-
12205  if(pBestRequestBlock != VMA_NULL)
-
12206  {
-
12207  if(mapped)
-
12208  {
-
12209  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
-
12210  if(res != VK_SUCCESS)
-
12211  {
-
12212  return res;
-
12213  }
-
12214  }
-
12215 
-
12216  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
-
12217  currentFrameIndex,
-
12218  m_FrameInUseCount,
-
12219  &bestRequest))
-
12220  {
-
12221  // Allocate from this pBlock.
-
12222  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
-
12223  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
-
12224  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
-
12225  UpdateHasEmptyBlock();
-
12226  (*pAllocation)->InitBlockAllocation(
-
12227  pBestRequestBlock,
-
12228  bestRequest.offset,
-
12229  alignment,
-
12230  size,
-
12231  m_MemoryTypeIndex,
-
12232  suballocType,
-
12233  mapped,
-
12234  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
-
12235  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
-
12236  VMA_DEBUG_LOG(" Returned from existing block");
-
12237  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
-
12238  m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
-
12239  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
-
12240  {
-
12241  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
-
12242  }
-
12243  if(IsCorruptionDetectionEnabled())
-
12244  {
-
12245  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
-
12246  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
-
12247  }
-
12248  return VK_SUCCESS;
-
12249  }
-
12250  // else: Some allocations must have been touched while we are here. Next try.
-
12251  }
-
12252  else
-
12253  {
-
12254  // Could not find place in any of the blocks - break outer loop.
-
12255  break;
-
12256  }
-
12257  }
-
12258  /* Maximum number of tries exceeded - a very unlike event when many other
-
12259  threads are simultaneously touching allocations making it impossible to make
-
12260  lost at the same time as we try to allocate. */
-
12261  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
-
12262  {
-
12263  return VK_ERROR_TOO_MANY_OBJECTS;
-
12264  }
-
12265  }
-
12266 
-
12267  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-
12268 }
-
12269 
-
12270 void VmaBlockVector::Free(
-
12271  const VmaAllocation hAllocation)
-
12272 {
-
12273  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
-
12274 
-
12275  bool budgetExceeded = false;
-
12276  {
-
12277  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
-
12278  VmaBudget heapBudget = {};
-
12279  m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
-
12280  budgetExceeded = heapBudget.usage >= heapBudget.budget;
-
12281  }
-
12282 
-
12283  // Scope for lock.
-
12284  {
-
12285  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
-
12286 
-
12287  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
+
11953  // Early reject: requested allocation size is larger that maximum block size for this block vector.
+
11954  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
+
11955  {
+
11956  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
11957  }
+
11958 
+
11959  /*
+
11960  Under certain condition, this whole section can be skipped for optimization, so
+
11961  we move on directly to trying to allocate with canMakeOtherLost. That's the case
+
11962  e.g. for custom pools with linear algorithm.
+
11963  */
+
11964  if(!canMakeOtherLost || canCreateNewBlock)
+
11965  {
+
11966  // 1. Search existing allocations. Try to allocate without making other allocations lost.
+
11967  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
+ +
11969 
+
11970  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
+
11971  {
+
11972  // Use only last block.
+
11973  if(!m_Blocks.empty())
+
11974  {
+
11975  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
+
11976  VMA_ASSERT(pCurrBlock);
+
11977  VkResult res = AllocateFromBlock(
+
11978  pCurrBlock,
+
11979  currentFrameIndex,
+
11980  size,
+
11981  alignment,
+
11982  allocFlagsCopy,
+
11983  createInfo.pUserData,
+
11984  suballocType,
+
11985  strategy,
+
11986  pAllocation);
+
11987  if(res == VK_SUCCESS)
+
11988  {
+
11989  VMA_DEBUG_LOG(" Returned from last block #%u", pCurrBlock->GetId());
+
11990  return VK_SUCCESS;
+
11991  }
+
11992  }
+
11993  }
+
11994  else
+
11995  {
+ +
11997  {
+
11998  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
+
11999  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
+
12000  {
+
12001  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
+
12002  VMA_ASSERT(pCurrBlock);
+
12003  VkResult res = AllocateFromBlock(
+
12004  pCurrBlock,
+
12005  currentFrameIndex,
+
12006  size,
+
12007  alignment,
+
12008  allocFlagsCopy,
+
12009  createInfo.pUserData,
+
12010  suballocType,
+
12011  strategy,
+
12012  pAllocation);
+
12013  if(res == VK_SUCCESS)
+
12014  {
+
12015  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
+
12016  return VK_SUCCESS;
+
12017  }
+
12018  }
+
12019  }
+
12020  else // WORST_FIT, FIRST_FIT
+
12021  {
+
12022  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
+
12023  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
+
12024  {
+
12025  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
+
12026  VMA_ASSERT(pCurrBlock);
+
12027  VkResult res = AllocateFromBlock(
+
12028  pCurrBlock,
+
12029  currentFrameIndex,
+
12030  size,
+
12031  alignment,
+
12032  allocFlagsCopy,
+
12033  createInfo.pUserData,
+
12034  suballocType,
+
12035  strategy,
+
12036  pAllocation);
+
12037  if(res == VK_SUCCESS)
+
12038  {
+
12039  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
+
12040  return VK_SUCCESS;
+
12041  }
+
12042  }
+
12043  }
+
12044  }
+
12045 
+
12046  // 2. Try to create new block.
+
12047  if(canCreateNewBlock)
+
12048  {
+
12049  // Calculate optimal size for new block.
+
12050  VkDeviceSize newBlockSize = m_PreferredBlockSize;
+
12051  uint32_t newBlockSizeShift = 0;
+
12052  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
+
12053 
+
12054  if(!m_ExplicitBlockSize)
+
12055  {
+
12056  // Allocate 1/8, 1/4, 1/2 as first blocks.
+
12057  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
+
12058  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
+
12059  {
+
12060  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
+
12061  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
+
12062  {
+
12063  newBlockSize = smallerNewBlockSize;
+
12064  ++newBlockSizeShift;
+
12065  }
+
12066  else
+
12067  {
+
12068  break;
+
12069  }
+
12070  }
+
12071  }
+
12072 
+
12073  size_t newBlockIndex = 0;
+
12074  VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
+
12075  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
12076  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
+
12077  if(!m_ExplicitBlockSize)
+
12078  {
+
12079  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
+
12080  {
+
12081  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
+
12082  if(smallerNewBlockSize >= size)
+
12083  {
+
12084  newBlockSize = smallerNewBlockSize;
+
12085  ++newBlockSizeShift;
+
12086  res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
+
12087  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
12088  }
+
12089  else
+
12090  {
+
12091  break;
+
12092  }
+
12093  }
+
12094  }
+
12095 
+
12096  if(res == VK_SUCCESS)
+
12097  {
+
12098  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
+
12099  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
+
12100 
+
12101  res = AllocateFromBlock(
+
12102  pBlock,
+
12103  currentFrameIndex,
+
12104  size,
+
12105  alignment,
+
12106  allocFlagsCopy,
+
12107  createInfo.pUserData,
+
12108  suballocType,
+
12109  strategy,
+
12110  pAllocation);
+
12111  if(res == VK_SUCCESS)
+
12112  {
+
12113  VMA_DEBUG_LOG(" Created new block #%u Size=%llu", pBlock->GetId(), newBlockSize);
+
12114  return VK_SUCCESS;
+
12115  }
+
12116  else
+
12117  {
+
12118  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
+
12119  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
12120  }
+
12121  }
+
12122  }
+
12123  }
+
12124 
+
12125  // 3. Try to allocate from existing blocks with making other allocations lost.
+
12126  if(canMakeOtherLost)
+
12127  {
+
12128  uint32_t tryIndex = 0;
+
12129  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
+
12130  {
+
12131  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
+
12132  VmaAllocationRequest bestRequest = {};
+
12133  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
+
12134 
+
12135  // 1. Search existing allocations.
+ +
12137  {
+
12138  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
+
12139  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
+
12140  {
+
12141  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
+
12142  VMA_ASSERT(pCurrBlock);
+
12143  VmaAllocationRequest currRequest = {};
+
12144  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
+
12145  currentFrameIndex,
+
12146  m_FrameInUseCount,
+
12147  m_BufferImageGranularity,
+
12148  size,
+
12149  alignment,
+
12150  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
+
12151  suballocType,
+
12152  canMakeOtherLost,
+
12153  strategy,
+
12154  &currRequest))
+
12155  {
+
12156  const VkDeviceSize currRequestCost = currRequest.CalcCost();
+
12157  if(pBestRequestBlock == VMA_NULL ||
+
12158  currRequestCost < bestRequestCost)
+
12159  {
+
12160  pBestRequestBlock = pCurrBlock;
+
12161  bestRequest = currRequest;
+
12162  bestRequestCost = currRequestCost;
+
12163 
+
12164  if(bestRequestCost == 0)
+
12165  {
+
12166  break;
+
12167  }
+
12168  }
+
12169  }
+
12170  }
+
12171  }
+
12172  else // WORST_FIT, FIRST_FIT
+
12173  {
+
12174  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
+
12175  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
+
12176  {
+
12177  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
+
12178  VMA_ASSERT(pCurrBlock);
+
12179  VmaAllocationRequest currRequest = {};
+
12180  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
+
12181  currentFrameIndex,
+
12182  m_FrameInUseCount,
+
12183  m_BufferImageGranularity,
+
12184  size,
+
12185  alignment,
+
12186  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
+
12187  suballocType,
+
12188  canMakeOtherLost,
+
12189  strategy,
+
12190  &currRequest))
+
12191  {
+
12192  const VkDeviceSize currRequestCost = currRequest.CalcCost();
+
12193  if(pBestRequestBlock == VMA_NULL ||
+
12194  currRequestCost < bestRequestCost ||
+ +
12196  {
+
12197  pBestRequestBlock = pCurrBlock;
+
12198  bestRequest = currRequest;
+
12199  bestRequestCost = currRequestCost;
+
12200 
+
12201  if(bestRequestCost == 0 ||
+ +
12203  {
+
12204  break;
+
12205  }
+
12206  }
+
12207  }
+
12208  }
+
12209  }
+
12210 
+
12211  if(pBestRequestBlock != VMA_NULL)
+
12212  {
+
12213  if(mapped)
+
12214  {
+
12215  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
+
12216  if(res != VK_SUCCESS)
+
12217  {
+
12218  return res;
+
12219  }
+
12220  }
+
12221 
+
12222  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
+
12223  currentFrameIndex,
+
12224  m_FrameInUseCount,
+
12225  &bestRequest))
+
12226  {
+
12227  // Allocate from this pBlock.
+
12228  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
+
12229  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
+
12230  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
+
12231  UpdateHasEmptyBlock();
+
12232  (*pAllocation)->InitBlockAllocation(
+
12233  pBestRequestBlock,
+
12234  bestRequest.offset,
+
12235  alignment,
+
12236  size,
+
12237  m_MemoryTypeIndex,
+
12238  suballocType,
+
12239  mapped,
+
12240  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
+
12241  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
+
12242  VMA_DEBUG_LOG(" Returned from existing block");
+
12243  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
+
12244  m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
+
12245  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
+
12246  {
+
12247  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
+
12248  }
+
12249  if(IsCorruptionDetectionEnabled())
+
12250  {
+
12251  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
+
12252  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
+
12253  }
+
12254  return VK_SUCCESS;
+
12255  }
+
12256  // else: Some allocations must have been touched while we are here. Next try.
+
12257  }
+
12258  else
+
12259  {
+
12260  // Could not find place in any of the blocks - break outer loop.
+
12261  break;
+
12262  }
+
12263  }
+
12264  /* Maximum number of tries exceeded - a very unlike event when many other
+
12265  threads are simultaneously touching allocations making it impossible to make
+
12266  lost at the same time as we try to allocate. */
+
12267  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
+
12268  {
+
12269  return VK_ERROR_TOO_MANY_OBJECTS;
+
12270  }
+
12271  }
+
12272 
+
12273  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
12274 }
+
12275 
+
12276 void VmaBlockVector::Free(
+
12277  const VmaAllocation hAllocation)
+
12278 {
+
12279  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
+
12280 
+
12281  bool budgetExceeded = false;
+
12282  {
+
12283  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
+
12284  VmaBudget heapBudget = {};
+
12285  m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
+
12286  budgetExceeded = heapBudget.usage >= heapBudget.budget;
+
12287  }
12288 
-
12289  if(IsCorruptionDetectionEnabled())
-
12290  {
-
12291  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
-
12292  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
-
12293  }
+
12289  // Scope for lock.
+
12290  {
+
12291  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
+
12292 
+
12293  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
12294 
-
12295  if(hAllocation->IsPersistentMap())
+
12295  if(IsCorruptionDetectionEnabled())
12296  {
-
12297  pBlock->Unmap(m_hAllocator, 1);
-
12298  }
-
12299 
-
12300  pBlock->m_pMetadata->Free(hAllocation);
-
12301  VMA_HEAVY_ASSERT(pBlock->Validate());
-
12302 
-
12303  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
-
12304 
-
12305  const bool canDeleteBlock = m_Blocks.size() > m_MinBlockCount;
-
12306  // pBlock became empty after this deallocation.
-
12307  if(pBlock->m_pMetadata->IsEmpty())
-
12308  {
-
12309  // Already has empty block. We don't want to have two, so delete this one.
-
12310  if((m_HasEmptyBlock || budgetExceeded) && canDeleteBlock)
-
12311  {
-
12312  pBlockToDelete = pBlock;
-
12313  Remove(pBlock);
-
12314  }
-
12315  // else: We now have an empty block - leave it.
-
12316  }
-
12317  // pBlock didn't become empty, but we have another empty block - find and free that one.
-
12318  // (This is optional, heuristics.)
-
12319  else if(m_HasEmptyBlock && canDeleteBlock)
-
12320  {
-
12321  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
-
12322  if(pLastBlock->m_pMetadata->IsEmpty())
-
12323  {
-
12324  pBlockToDelete = pLastBlock;
-
12325  m_Blocks.pop_back();
-
12326  }
-
12327  }
-
12328 
-
12329  UpdateHasEmptyBlock();
-
12330  IncrementallySortBlocks();
-
12331  }
-
12332 
-
12333  // Destruction of a free block. Deferred until this point, outside of mutex
-
12334  // lock, for performance reason.
-
12335  if(pBlockToDelete != VMA_NULL)
-
12336  {
-
12337  VMA_DEBUG_LOG(" Deleted empty block");
-
12338  pBlockToDelete->Destroy(m_hAllocator);
-
12339  vma_delete(m_hAllocator, pBlockToDelete);
-
12340  }
-
12341 }
-
12342 
-
12343 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
-
12344 {
-
12345  VkDeviceSize result = 0;
-
12346  for(size_t i = m_Blocks.size(); i--; )
-
12347  {
-
12348  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
-
12349  if(result >= m_PreferredBlockSize)
-
12350  {
-
12351  break;
-
12352  }
-
12353  }
-
12354  return result;
-
12355 }
-
12356 
-
12357 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
-
12358 {
-
12359  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
-
12360  {
-
12361  if(m_Blocks[blockIndex] == pBlock)
-
12362  {
-
12363  VmaVectorRemove(m_Blocks, blockIndex);
-
12364  return;
-
12365  }
-
12366  }
-
12367  VMA_ASSERT(0);
-
12368 }
-
12369 
-
12370 void VmaBlockVector::IncrementallySortBlocks()
-
12371 {
-
12372  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
-
12373  {
-
12374  // Bubble sort only until first swap.
-
12375  for(size_t i = 1; i < m_Blocks.size(); ++i)
-
12376  {
-
12377  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
-
12378  {
-
12379  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
-
12380  return;
-
12381  }
-
12382  }
-
12383  }
-
12384 }
-
12385 
-
12386 VkResult VmaBlockVector::AllocateFromBlock(
-
12387  VmaDeviceMemoryBlock* pBlock,
-
12388  uint32_t currentFrameIndex,
-
12389  VkDeviceSize size,
-
12390  VkDeviceSize alignment,
-
12391  VmaAllocationCreateFlags allocFlags,
-
12392  void* pUserData,
-
12393  VmaSuballocationType suballocType,
-
12394  uint32_t strategy,
-
12395  VmaAllocation* pAllocation)
-
12396 {
-
12397  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
-
12398  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
-
12399  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
-
12400  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
-
12401 
-
12402  VmaAllocationRequest currRequest = {};
-
12403  if(pBlock->m_pMetadata->CreateAllocationRequest(
-
12404  currentFrameIndex,
-
12405  m_FrameInUseCount,
-
12406  m_BufferImageGranularity,
-
12407  size,
-
12408  alignment,
-
12409  isUpperAddress,
-
12410  suballocType,
-
12411  false, // canMakeOtherLost
-
12412  strategy,
-
12413  &currRequest))
-
12414  {
-
12415  // Allocate from pCurrBlock.
-
12416  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
-
12417 
-
12418  if(mapped)
-
12419  {
-
12420  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
-
12421  if(res != VK_SUCCESS)
-
12422  {
-
12423  return res;
-
12424  }
-
12425  }
-
12426 
-
12427  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
-
12428  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
-
12429  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
-
12430  UpdateHasEmptyBlock();
-
12431  (*pAllocation)->InitBlockAllocation(
-
12432  pBlock,
-
12433  currRequest.offset,
-
12434  alignment,
-
12435  size,
-
12436  m_MemoryTypeIndex,
-
12437  suballocType,
-
12438  mapped,
-
12439  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
-
12440  VMA_HEAVY_ASSERT(pBlock->Validate());
-
12441  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
-
12442  m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
-
12443  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
-
12444  {
-
12445  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
-
12446  }
-
12447  if(IsCorruptionDetectionEnabled())
-
12448  {
-
12449  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
-
12450  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
-
12451  }
-
12452  return VK_SUCCESS;
-
12453  }
-
12454  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-
12455 }
-
12456 
-
12457 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
-
12458 {
-
12459  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
-
12460  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
-
12461  allocInfo.allocationSize = blockSize;
-
12462  VkDeviceMemory mem = VK_NULL_HANDLE;
-
12463  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
-
12464  if(res < 0)
-
12465  {
-
12466  return res;
-
12467  }
-
12468 
-
12469  // New VkDeviceMemory successfully created.
-
12470 
-
12471  // Create new Allocation for it.
-
12472  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
-
12473  pBlock->Init(
-
12474  m_hAllocator,
-
12475  m_hParentPool,
-
12476  m_MemoryTypeIndex,
-
12477  mem,
-
12478  allocInfo.allocationSize,
-
12479  m_NextBlockId++,
-
12480  m_Algorithm);
-
12481 
-
12482  m_Blocks.push_back(pBlock);
-
12483  if(pNewBlockIndex != VMA_NULL)
-
12484  {
-
12485  *pNewBlockIndex = m_Blocks.size() - 1;
-
12486  }
+
12297  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
+
12298  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
+
12299  }
+
12300 
+
12301  if(hAllocation->IsPersistentMap())
+
12302  {
+
12303  pBlock->Unmap(m_hAllocator, 1);
+
12304  }
+
12305 
+
12306  pBlock->m_pMetadata->Free(hAllocation);
+
12307  VMA_HEAVY_ASSERT(pBlock->Validate());
+
12308 
+
12309  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
+
12310 
+
12311  const bool canDeleteBlock = m_Blocks.size() > m_MinBlockCount;
+
12312  // pBlock became empty after this deallocation.
+
12313  if(pBlock->m_pMetadata->IsEmpty())
+
12314  {
+
12315  // Already has empty block. We don't want to have two, so delete this one.
+
12316  if((m_HasEmptyBlock || budgetExceeded) && canDeleteBlock)
+
12317  {
+
12318  pBlockToDelete = pBlock;
+
12319  Remove(pBlock);
+
12320  }
+
12321  // else: We now have an empty block - leave it.
+
12322  }
+
12323  // pBlock didn't become empty, but we have another empty block - find and free that one.
+
12324  // (This is optional, heuristics.)
+
12325  else if(m_HasEmptyBlock && canDeleteBlock)
+
12326  {
+
12327  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
+
12328  if(pLastBlock->m_pMetadata->IsEmpty())
+
12329  {
+
12330  pBlockToDelete = pLastBlock;
+
12331  m_Blocks.pop_back();
+
12332  }
+
12333  }
+
12334 
+
12335  UpdateHasEmptyBlock();
+
12336  IncrementallySortBlocks();
+
12337  }
+
12338 
+
12339  // Destruction of a free block. Deferred until this point, outside of mutex
+
12340  // lock, for performance reason.
+
12341  if(pBlockToDelete != VMA_NULL)
+
12342  {
+
12343  VMA_DEBUG_LOG(" Deleted empty block");
+
12344  pBlockToDelete->Destroy(m_hAllocator);
+
12345  vma_delete(m_hAllocator, pBlockToDelete);
+
12346  }
+
12347 }
+
12348 
+
12349 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
+
12350 {
+
12351  VkDeviceSize result = 0;
+
12352  for(size_t i = m_Blocks.size(); i--; )
+
12353  {
+
12354  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
+
12355  if(result >= m_PreferredBlockSize)
+
12356  {
+
12357  break;
+
12358  }
+
12359  }
+
12360  return result;
+
12361 }
+
12362 
+
12363 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
+
12364 {
+
12365  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
+
12366  {
+
12367  if(m_Blocks[blockIndex] == pBlock)
+
12368  {
+
12369  VmaVectorRemove(m_Blocks, blockIndex);
+
12370  return;
+
12371  }
+
12372  }
+
12373  VMA_ASSERT(0);
+
12374 }
+
12375 
+
12376 void VmaBlockVector::IncrementallySortBlocks()
+
12377 {
+
12378  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
+
12379  {
+
12380  // Bubble sort only until first swap.
+
12381  for(size_t i = 1; i < m_Blocks.size(); ++i)
+
12382  {
+
12383  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
+
12384  {
+
12385  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
+
12386  return;
+
12387  }
+
12388  }
+
12389  }
+
12390 }
+
12391 
+
12392 VkResult VmaBlockVector::AllocateFromBlock(
+
12393  VmaDeviceMemoryBlock* pBlock,
+
12394  uint32_t currentFrameIndex,
+
12395  VkDeviceSize size,
+
12396  VkDeviceSize alignment,
+
12397  VmaAllocationCreateFlags allocFlags,
+
12398  void* pUserData,
+
12399  VmaSuballocationType suballocType,
+
12400  uint32_t strategy,
+
12401  VmaAllocation* pAllocation)
+
12402 {
+
12403  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
+
12404  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
+
12405  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
+
12406  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
+
12407 
+
12408  VmaAllocationRequest currRequest = {};
+
12409  if(pBlock->m_pMetadata->CreateAllocationRequest(
+
12410  currentFrameIndex,
+
12411  m_FrameInUseCount,
+
12412  m_BufferImageGranularity,
+
12413  size,
+
12414  alignment,
+
12415  isUpperAddress,
+
12416  suballocType,
+
12417  false, // canMakeOtherLost
+
12418  strategy,
+
12419  &currRequest))
+
12420  {
+
12421  // Allocate from pCurrBlock.
+
12422  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
+
12423 
+
12424  if(mapped)
+
12425  {
+
12426  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
+
12427  if(res != VK_SUCCESS)
+
12428  {
+
12429  return res;
+
12430  }
+
12431  }
+
12432 
+
12433  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
+
12434  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
+
12435  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
+
12436  UpdateHasEmptyBlock();
+
12437  (*pAllocation)->InitBlockAllocation(
+
12438  pBlock,
+
12439  currRequest.offset,
+
12440  alignment,
+
12441  size,
+
12442  m_MemoryTypeIndex,
+
12443  suballocType,
+
12444  mapped,
+
12445  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
+
12446  VMA_HEAVY_ASSERT(pBlock->Validate());
+
12447  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
+
12448  m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
+
12449  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
+
12450  {
+
12451  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
+
12452  }
+
12453  if(IsCorruptionDetectionEnabled())
+
12454  {
+
12455  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
+
12456  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
+
12457  }
+
12458  return VK_SUCCESS;
+
12459  }
+
12460  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
12461 }
+
12462 
+
12463 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
+
12464 {
+
12465  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
+
12466  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
+
12467  allocInfo.allocationSize = blockSize;
+
12468  VkDeviceMemory mem = VK_NULL_HANDLE;
+
12469  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
+
12470  if(res < 0)
+
12471  {
+
12472  return res;
+
12473  }
+
12474 
+
12475  // New VkDeviceMemory successfully created.
+
12476 
+
12477  // Create new Allocation for it.
+
12478  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
+
12479  pBlock->Init(
+
12480  m_hAllocator,
+
12481  m_hParentPool,
+
12482  m_MemoryTypeIndex,
+
12483  mem,
+
12484  allocInfo.allocationSize,
+
12485  m_NextBlockId++,
+
12486  m_Algorithm);
12487 
-
12488  return VK_SUCCESS;
-
12489 }
-
12490 
-
12491 void VmaBlockVector::ApplyDefragmentationMovesCpu(
-
12492  class VmaBlockVectorDefragmentationContext* pDefragCtx,
-
12493  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
-
12494 {
-
12495  const size_t blockCount = m_Blocks.size();
-
12496  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
-
12497 
-
12498  enum BLOCK_FLAG
-
12499  {
-
12500  BLOCK_FLAG_USED = 0x00000001,
-
12501  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
-
12502  };
+
12488  m_Blocks.push_back(pBlock);
+
12489  if(pNewBlockIndex != VMA_NULL)
+
12490  {
+
12491  *pNewBlockIndex = m_Blocks.size() - 1;
+
12492  }
+
12493 
+
12494  return VK_SUCCESS;
+
12495 }
+
12496 
+
12497 void VmaBlockVector::ApplyDefragmentationMovesCpu(
+
12498  class VmaBlockVectorDefragmentationContext* pDefragCtx,
+
12499  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
+
12500 {
+
12501  const size_t blockCount = m_Blocks.size();
+
12502  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
12503 
-
12504  struct BlockInfo
+
12504  enum BLOCK_FLAG
12505  {
-
12506  uint32_t flags;
-
12507  void* pMappedData;
+
12506  BLOCK_FLAG_USED = 0x00000001,
+
12507  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
12508  };
-
12509  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
-
12510  blockInfo(blockCount, BlockInfo(), VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
-
12511  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
-
12512 
-
12513  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
-
12514  const size_t moveCount = moves.size();
-
12515  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
-
12516  {
-
12517  const VmaDefragmentationMove& move = moves[moveIndex];
-
12518  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
-
12519  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
-
12520  }
-
12521 
-
12522  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
-
12523 
-
12524  // Go over all blocks. Get mapped pointer or map if necessary.
-
12525  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
-
12526  {
-
12527  BlockInfo& currBlockInfo = blockInfo[blockIndex];
-
12528  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
-
12529  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
-
12530  {
-
12531  currBlockInfo.pMappedData = pBlock->GetMappedData();
-
12532  // It is not originally mapped - map it.
-
12533  if(currBlockInfo.pMappedData == VMA_NULL)
-
12534  {
-
12535  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
-
12536  if(pDefragCtx->res == VK_SUCCESS)
-
12537  {
-
12538  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
-
12539  }
-
12540  }
-
12541  }
-
12542  }
-
12543 
-
12544  // Go over all moves. Do actual data transfer.
-
12545  if(pDefragCtx->res == VK_SUCCESS)
-
12546  {
-
12547  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
-
12548  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
+
12509 
+
12510  struct BlockInfo
+
12511  {
+
12512  uint32_t flags;
+
12513  void* pMappedData;
+
12514  };
+
12515  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
+
12516  blockInfo(blockCount, BlockInfo(), VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
+
12517  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
+
12518 
+
12519  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
+
12520  const size_t moveCount = moves.size();
+
12521  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
+
12522  {
+
12523  const VmaDefragmentationMove& move = moves[moveIndex];
+
12524  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
+
12525  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
+
12526  }
+
12527 
+
12528  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
+
12529 
+
12530  // Go over all blocks. Get mapped pointer or map if necessary.
+
12531  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
+
12532  {
+
12533  BlockInfo& currBlockInfo = blockInfo[blockIndex];
+
12534  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
+
12535  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
+
12536  {
+
12537  currBlockInfo.pMappedData = pBlock->GetMappedData();
+
12538  // It is not originally mapped - map it.
+
12539  if(currBlockInfo.pMappedData == VMA_NULL)
+
12540  {
+
12541  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
+
12542  if(pDefragCtx->res == VK_SUCCESS)
+
12543  {
+
12544  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
+
12545  }
+
12546  }
+
12547  }
+
12548  }
12549 
-
12550  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
-
12551  {
-
12552  const VmaDefragmentationMove& move = moves[moveIndex];
-
12553 
-
12554  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
-
12555  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
-
12556 
-
12557  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
-
12558 
-
12559  // Invalidate source.
-
12560  if(isNonCoherent)
-
12561  {
-
12562  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
-
12563  memRange.memory = pSrcBlock->GetDeviceMemory();
-
12564  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
-
12565  memRange.size = VMA_MIN(
-
12566  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
-
12567  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
-
12568  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
-
12569  }
-
12570 
-
12571  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
-
12572  memmove(
-
12573  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
-
12574  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
-
12575  static_cast<size_t>(move.size));
+
12550  // Go over all moves. Do actual data transfer.
+
12551  if(pDefragCtx->res == VK_SUCCESS)
+
12552  {
+
12553  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
+
12554  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
+
12555 
+
12556  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
+
12557  {
+
12558  const VmaDefragmentationMove& move = moves[moveIndex];
+
12559 
+
12560  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
+
12561  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
+
12562 
+
12563  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
+
12564 
+
12565  // Invalidate source.
+
12566  if(isNonCoherent)
+
12567  {
+
12568  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
+
12569  memRange.memory = pSrcBlock->GetDeviceMemory();
+
12570  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
+
12571  memRange.size = VMA_MIN(
+
12572  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
+
12573  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
+
12574  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
+
12575  }
12576 
-
12577  if(IsCorruptionDetectionEnabled())
-
12578  {
-
12579  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
-
12580  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
-
12581  }
+
12577  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
+
12578  memmove(
+
12579  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
+
12580  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
+
12581  static_cast<size_t>(move.size));
12582 
-
12583  // Flush destination.
-
12584  if(isNonCoherent)
-
12585  {
-
12586  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
-
12587  memRange.memory = pDstBlock->GetDeviceMemory();
-
12588  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
-
12589  memRange.size = VMA_MIN(
-
12590  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
-
12591  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
-
12592  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
-
12593  }
-
12594  }
-
12595  }
-
12596 
-
12597  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
-
12598  // Regardless of pCtx->res == VK_SUCCESS.
-
12599  for(size_t blockIndex = blockCount; blockIndex--; )
-
12600  {
-
12601  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
-
12602  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
-
12603  {
-
12604  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
-
12605  pBlock->Unmap(m_hAllocator, 1);
-
12606  }
-
12607  }
-
12608 }
-
12609 
-
12610 void VmaBlockVector::ApplyDefragmentationMovesGpu(
-
12611  class VmaBlockVectorDefragmentationContext* pDefragCtx,
-
12612  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
-
12613  VkCommandBuffer commandBuffer)
-
12614 {
-
12615  const size_t blockCount = m_Blocks.size();
-
12616 
-
12617  pDefragCtx->blockContexts.resize(blockCount);
-
12618  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
-
12619 
-
12620  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
-
12621  const size_t moveCount = moves.size();
-
12622  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
-
12623  {
-
12624  const VmaDefragmentationMove& move = moves[moveIndex];
-
12625  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
-
12626  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
-
12627  }
-
12628 
-
12629  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
-
12630 
-
12631  // Go over all blocks. Create and bind buffer for whole block if necessary.
-
12632  {
-
12633  VkBufferCreateInfo bufCreateInfo;
-
12634  VmaFillGpuDefragmentationBufferCreateInfo(bufCreateInfo);
-
12635 
-
12636  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
-
12637  {
-
12638  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
-
12639  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
-
12640  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
-
12641  {
-
12642  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
-
12643  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
-
12644  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
-
12645  if(pDefragCtx->res == VK_SUCCESS)
-
12646  {
-
12647  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
-
12648  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
-
12649  }
-
12650  }
-
12651  }
-
12652  }
-
12653 
-
12654  // Go over all moves. Post data transfer commands to command buffer.
-
12655  if(pDefragCtx->res == VK_SUCCESS)
-
12656  {
-
12657  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
-
12658  {
-
12659  const VmaDefragmentationMove& move = moves[moveIndex];
-
12660 
-
12661  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
-
12662  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
-
12663 
-
12664  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
-
12665 
-
12666  VkBufferCopy region = {
-
12667  move.srcOffset,
-
12668  move.dstOffset,
-
12669  move.size };
-
12670  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
-
12671  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
-
12672  }
-
12673  }
-
12674 
-
12675  // Save buffers to defrag context for later destruction.
-
12676  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
-
12677  {
-
12678  pDefragCtx->res = VK_NOT_READY;
+
12583  if(IsCorruptionDetectionEnabled())
+
12584  {
+
12585  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
+
12586  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
+
12587  }
+
12588 
+
12589  // Flush destination.
+
12590  if(isNonCoherent)
+
12591  {
+
12592  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
+
12593  memRange.memory = pDstBlock->GetDeviceMemory();
+
12594  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
+
12595  memRange.size = VMA_MIN(
+
12596  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
+
12597  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
+
12598  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
+
12599  }
+
12600  }
+
12601  }
+
12602 
+
12603  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
+
12604  // This is done regardless of whether pCtx->res == VK_SUCCESS.
+
12605  for(size_t blockIndex = blockCount; blockIndex--; )
+
12606  {
+
12607  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
+
12608  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
+
12609  {
+
12610  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
+
12611  pBlock->Unmap(m_hAllocator, 1);
+
12612  }
+
12613  }
+
12614 }
+
12615 
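// Illustrative sketch (editorial, not part of vk_mem_alloc.h): the invalidate
// and flush ranges above are rounded outward to nonCoherentAtomSize and then
// clamped to the block size, as Vulkan requires for non-coherent memory.
// Assuming VmaAlignDown/VmaAlignUp/VMA_MIN as used above, the computation is
// equivalent to:
//
// static VkDeviceSize VmaRoundedRangeSize_Sketch(
//     VkDeviceSize offset, VkDeviceSize size,
//     VkDeviceSize atomSize, VkDeviceSize blockSize)
// {
//     const VkDeviceSize alignedOffset = VmaAlignDown(offset, atomSize);
//     return VMA_MIN(
//         VmaAlignUp(size + (offset - alignedOffset), atomSize),
//         blockSize - alignedOffset);
// }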
+
12616 void VmaBlockVector::ApplyDefragmentationMovesGpu(
+
12617  class VmaBlockVectorDefragmentationContext* pDefragCtx,
+
12618  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
+
12619  VkCommandBuffer commandBuffer)
+
12620 {
+
12621  const size_t blockCount = m_Blocks.size();
+
12622 
+
12623  pDefragCtx->blockContexts.resize(blockCount);
+
12624  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
+
12625 
+
12626  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
+
12627  const size_t moveCount = moves.size();
+
12628  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
+
12629  {
+
12630  const VmaDefragmentationMove& move = moves[moveIndex];
+
12631  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
+
12632  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
+
12633  }
+
12634 
+
12635  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
+
12636 
+
12637  // Go over all blocks. Create and bind buffer for whole block if necessary.
+
12638  {
+
12639  VkBufferCreateInfo bufCreateInfo;
+
12640  VmaFillGpuDefragmentationBufferCreateInfo(bufCreateInfo);
+
12641 
+
12642  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
+
12643  {
+
12644  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
+
12645  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
+
12646  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
+
12647  {
+
12648  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
+
12649  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
+
12650  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
+
12651  if(pDefragCtx->res == VK_SUCCESS)
+
12652  {
+
12653  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
+
12654  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
+
12655  }
+
12656  }
+
12657  }
+
12658  }
+
12659 
+
12660  // Go over all moves. Post data transfer commands to command buffer.
+
12661  if(pDefragCtx->res == VK_SUCCESS)
+
12662  {
+
12663  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
+
12664  {
+
12665  const VmaDefragmentationMove& move = moves[moveIndex];
+
12666 
+
12667  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
+
12668  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
+
12669 
+
12670  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
+
12671 
+
12672  VkBufferCopy region = {
+
12673  move.srcOffset,
+
12674  move.dstOffset,
+
12675  move.size };
+
12676  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
+
12677  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
+
12678  }
12679  }
-
12680 }
-
12681 
-
12682 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
-
12683 {
-
12684  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
-
12685  {
-
12686  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
-
12687  if(pBlock->m_pMetadata->IsEmpty())
-
12688  {
-
12689  if(m_Blocks.size() > m_MinBlockCount)
-
12690  {
-
12691  if(pDefragmentationStats != VMA_NULL)
-
12692  {
-
12693  ++pDefragmentationStats->deviceMemoryBlocksFreed;
-
12694  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
-
12695  }
-
12696 
-
12697  VmaVectorRemove(m_Blocks, blockIndex);
-
12698  pBlock->Destroy(m_hAllocator);
-
12699  vma_delete(m_hAllocator, pBlock);
-
12700  }
-
12701  else
-
12702  {
-
12703  break;
-
12704  }
-
12705  }
-
12706  }
-
12707  UpdateHasEmptyBlock();
-
12708 }
-
12709 
-
12710 void VmaBlockVector::UpdateHasEmptyBlock()
-
12711 {
-
12712  m_HasEmptyBlock = false;
-
12713  for(size_t index = 0, count = m_Blocks.size(); index < count; ++index)
-
12714  {
-
12715  VmaDeviceMemoryBlock* const pBlock = m_Blocks[index];
-
12716  if(pBlock->m_pMetadata->IsEmpty())
-
12717  {
-
12718  m_HasEmptyBlock = true;
-
12719  break;
-
12720  }
-
12721  }
-
12722 }
-
12723 
-
12724 #if VMA_STATS_STRING_ENABLED
-
12725 
-
12726 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
-
12727 {
-
12728  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
+
12680 
+
12681  // Save buffers to defrag context for later destruction.
+
12682  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
+
12683  {
+
12684  pDefragCtx->res = VK_NOT_READY;
+
12685  }
+
12686 }
+
12687 
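// Note (editorial, not part of vk_mem_alloc.h): vkCmdCopyBuffer cannot read or
// write raw VkDeviceMemory, which is why a temporary transfer buffer spanning
// each used block is created and bound at offset 0 above; block-relative move
// offsets then serve directly as buffer offsets in VkBufferCopy. Setting
// pDefragCtx->res to VK_NOT_READY when moves were recorded signals that
// commandBuffer still has to be submitted and completed before
// DefragmentationEnd() destroys those buffers.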
+
12688 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
+
12689 {
+
12690  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
+
12691  {
+
12692  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
+
12693  if(pBlock->m_pMetadata->IsEmpty())
+
12694  {
+
12695  if(m_Blocks.size() > m_MinBlockCount)
+
12696  {
+
12697  if(pDefragmentationStats != VMA_NULL)
+
12698  {
+
12699  ++pDefragmentationStats->deviceMemoryBlocksFreed;
+
12700  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
+
12701  }
+
12702 
+
12703  VmaVectorRemove(m_Blocks, blockIndex);
+
12704  pBlock->Destroy(m_hAllocator);
+
12705  vma_delete(m_hAllocator, pBlock);
+
12706  }
+
12707  else
+
12708  {
+
12709  break;
+
12710  }
+
12711  }
+
12712  }
+
12713  UpdateHasEmptyBlock();
+
12714 }
+
12715 
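// Note (editorial, not part of vk_mem_alloc.h): the loop above iterates in
// reverse so that VmaVectorRemove() does not shift the indices still to be
// visited, and it breaks as soon as m_Blocks.size() reaches m_MinBlockCount,
// since no further blocks may be freed below that limit.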
+
12716 void VmaBlockVector::UpdateHasEmptyBlock()
+
12717 {
+
12718  m_HasEmptyBlock = false;
+
12719  for(size_t index = 0, count = m_Blocks.size(); index < count; ++index)
+
12720  {
+
12721  VmaDeviceMemoryBlock* const pBlock = m_Blocks[index];
+
12722  if(pBlock->m_pMetadata->IsEmpty())
+
12723  {
+
12724  m_HasEmptyBlock = true;
+
12725  break;
+
12726  }
+
12727  }
+
12728 }
12729 
-
12730  json.BeginObject();
+
12730 #if VMA_STATS_STRING_ENABLED
12731 
-
12732  if(IsCustomPool())
-
12733  {
-
12734  const char* poolName = m_hParentPool->GetName();
-
12735  if(poolName != VMA_NULL && poolName[0] != '\0')
-
12736  {
-
12737  json.WriteString("Name");
-
12738  json.WriteString(poolName);
-
12739  }
-
12740 
-
12741  json.WriteString("MemoryTypeIndex");
-
12742  json.WriteNumber(m_MemoryTypeIndex);
-
12743 
-
12744  json.WriteString("BlockSize");
-
12745  json.WriteNumber(m_PreferredBlockSize);
+
12732 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
+
12733 {
+
12734  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
+
12735 
+
12736  json.BeginObject();
+
12737 
+
12738  if(IsCustomPool())
+
12739  {
+
12740  const char* poolName = m_hParentPool->GetName();
+
12741  if(poolName != VMA_NULL && poolName[0] != '\0')
+
12742  {
+
12743  json.WriteString("Name");
+
12744  json.WriteString(poolName);
+
12745  }
12746 
-
12747  json.WriteString("BlockCount");
-
12748  json.BeginObject(true);
-
12749  if(m_MinBlockCount > 0)
-
12750  {
-
12751  json.WriteString("Min");
-
12752  json.WriteNumber((uint64_t)m_MinBlockCount);
-
12753  }
-
12754  if(m_MaxBlockCount < SIZE_MAX)
-
12755  {
-
12756  json.WriteString("Max");
-
12757  json.WriteNumber((uint64_t)m_MaxBlockCount);
-
12758  }
-
12759  json.WriteString("Cur");
-
12760  json.WriteNumber((uint64_t)m_Blocks.size());
-
12761  json.EndObject();
-
12762 
-
12763  if(m_FrameInUseCount > 0)
-
12764  {
-
12765  json.WriteString("FrameInUseCount");
-
12766  json.WriteNumber(m_FrameInUseCount);
-
12767  }
+
12747  json.WriteString("MemoryTypeIndex");
+
12748  json.WriteNumber(m_MemoryTypeIndex);
+
12749 
+
12750  json.WriteString("BlockSize");
+
12751  json.WriteNumber(m_PreferredBlockSize);
+
12752 
+
12753  json.WriteString("BlockCount");
+
12754  json.BeginObject(true);
+
12755  if(m_MinBlockCount > 0)
+
12756  {
+
12757  json.WriteString("Min");
+
12758  json.WriteNumber((uint64_t)m_MinBlockCount);
+
12759  }
+
12760  if(m_MaxBlockCount < SIZE_MAX)
+
12761  {
+
12762  json.WriteString("Max");
+
12763  json.WriteNumber((uint64_t)m_MaxBlockCount);
+
12764  }
+
12765  json.WriteString("Cur");
+
12766  json.WriteNumber((uint64_t)m_Blocks.size());
+
12767  json.EndObject();
12768 
-
12769  if(m_Algorithm != 0)
+
12769  if(m_FrameInUseCount > 0)
12770  {
-
12771  json.WriteString("Algorithm");
-
12772  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
+
12771  json.WriteString("FrameInUseCount");
+
12772  json.WriteNumber(m_FrameInUseCount);
12773  }
-
12774  }
-
12775  else
-
12776  {
-
12777  json.WriteString("PreferredBlockSize");
-
12778  json.WriteNumber(m_PreferredBlockSize);
-
12779  }
-
12780 
-
12781  json.WriteString("Blocks");
-
12782  json.BeginObject();
-
12783  for(size_t i = 0; i < m_Blocks.size(); ++i)
-
12784  {
-
12785  json.BeginString();
-
12786  json.ContinueString(m_Blocks[i]->GetId());
-
12787  json.EndString();
-
12788 
-
12789  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
-
12790  }
-
12791  json.EndObject();
-
12792 
-
12793  json.EndObject();
-
12794 }
-
12795 
-
12796 #endif // #if VMA_STATS_STRING_ENABLED
-
12797 
-
12798 void VmaBlockVector::Defragment(
-
12799  class VmaBlockVectorDefragmentationContext* pCtx,
-
12800  VmaDefragmentationStats* pStats,
-
12801  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
-
12802  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
-
12803  VkCommandBuffer commandBuffer)
-
12804 {
-
12805  pCtx->res = VK_SUCCESS;
-
12806 
-
12807  const VkMemoryPropertyFlags memPropFlags =
-
12808  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
-
12809  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
-
12810 
-
12811  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
-
12812  isHostVisible;
-
12813  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
-
12814  !IsCorruptionDetectionEnabled() &&
-
12815  ((1u << m_MemoryTypeIndex) & m_hAllocator->GetGpuDefragmentationMemoryTypeBits()) != 0;
+
12774 
+
12775  if(m_Algorithm != 0)
+
12776  {
+
12777  json.WriteString("Algorithm");
+
12778  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
+
12779  }
+
12780  }
+
12781  else
+
12782  {
+
12783  json.WriteString("PreferredBlockSize");
+
12784  json.WriteNumber(m_PreferredBlockSize);
+
12785  }
+
12786 
+
12787  json.WriteString("Blocks");
+
12788  json.BeginObject();
+
12789  for(size_t i = 0; i < m_Blocks.size(); ++i)
+
12790  {
+
12791  json.BeginString();
+
12792  json.ContinueString(m_Blocks[i]->GetId());
+
12793  json.EndString();
+
12794 
+
12795  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
+
12796  }
+
12797  json.EndObject();
+
12798 
+
12799  json.EndObject();
+
12800 }
+
12801 
+
12802 #endif // #if VMA_STATS_STRING_ENABLED
+
12803 
+
12804 void VmaBlockVector::Defragment(
+
12805  class VmaBlockVectorDefragmentationContext* pCtx,
+
12806  VmaDefragmentationStats* pStats,
+
12807  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
+
12808  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
+
12809  VkCommandBuffer commandBuffer)
+
12810 {
+
12811  pCtx->res = VK_SUCCESS;
+
12812 
+
12813  const VkMemoryPropertyFlags memPropFlags =
+
12814  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
+
12815  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
12816 
-
12817  // There are options to defragment this memory type.
-
12818  if(canDefragmentOnCpu || canDefragmentOnGpu)
-
12819  {
-
12820  bool defragmentOnGpu;
-
12821  // There is only one option to defragment this memory type.
-
12822  if(canDefragmentOnGpu != canDefragmentOnCpu)
-
12823  {
-
12824  defragmentOnGpu = canDefragmentOnGpu;
-
12825  }
-
12826  // Both options are available: Heuristics to choose the best one.
-
12827  else
-
12828  {
-
12829  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
-
12830  m_hAllocator->IsIntegratedGpu();
+
12817  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
+
12818  isHostVisible;
+
12819  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
+
12820  !IsCorruptionDetectionEnabled() &&
+
12821  ((1u << m_MemoryTypeIndex) & m_hAllocator->GetGpuDefragmentationMemoryTypeBits()) != 0;
+
12822 
+
12823  // There are options to defragment this memory type.
+
12824  if(canDefragmentOnCpu || canDefragmentOnGpu)
+
12825  {
+
12826  bool defragmentOnGpu;
+
12827  // There is only one option to defragment this memory type.
+
12828  if(canDefragmentOnGpu != canDefragmentOnCpu)
+
12829  {
+
12830  defragmentOnGpu = canDefragmentOnGpu;
12831  }
-
12832 
-
12833  bool overlappingMoveSupported = !defragmentOnGpu;
-
12834 
-
12835  if(m_hAllocator->m_UseMutex)
-
12836  {
-
12837  m_Mutex.LockWrite();
-
12838  pCtx->mutexLocked = true;
-
12839  }
+
12832  // Both options are available: Heuristics to choose the best one.
+
12833  else
+
12834  {
+
12835  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
+
12836  m_hAllocator->IsIntegratedGpu();
+
12837  }
+
12838 
+
12839  bool overlappingMoveSupported = !defragmentOnGpu;
12840 
-
12841  pCtx->Begin(overlappingMoveSupported);
-
12842 
-
12843  // Defragment.
-
12844 
-
12845  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
-
12846  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
-
12847  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
-
12848  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
-
12849  pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
+
12841  if(m_hAllocator->m_UseMutex)
+
12842  {
+
12843  m_Mutex.LockWrite();
+
12844  pCtx->mutexLocked = true;
+
12845  }
+
12846 
+
12847  pCtx->Begin(overlappingMoveSupported);
+
12848 
+
12849  // Defragment.
12850 
-
12851  // Accumulate statistics.
-
12852  if(pStats != VMA_NULL)
-
12853  {
-
12854  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
-
12855  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
-
12856  pStats->bytesMoved += bytesMoved;
-
12857  pStats->allocationsMoved += allocationsMoved;
-
12858  VMA_ASSERT(bytesMoved <= maxBytesToMove);
-
12859  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
-
12860  if(defragmentOnGpu)
-
12861  {
-
12862  maxGpuBytesToMove -= bytesMoved;
-
12863  maxGpuAllocationsToMove -= allocationsMoved;
-
12864  }
-
12865  else
-
12866  {
-
12867  maxCpuBytesToMove -= bytesMoved;
-
12868  maxCpuAllocationsToMove -= allocationsMoved;
-
12869  }
-
12870  }
-
12871 
-
12872  if(pCtx->res >= VK_SUCCESS)
-
12873  {
-
12874  if(defragmentOnGpu)
-
12875  {
-
12876  ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
-
12877  }
-
12878  else
-
12879  {
-
12880  ApplyDefragmentationMovesCpu(pCtx, moves);
-
12881  }
-
12882  }
-
12883  }
-
12884 }
-
12885 
-
12886 void VmaBlockVector::DefragmentationEnd(
-
12887  class VmaBlockVectorDefragmentationContext* pCtx,
-
12888  VmaDefragmentationStats* pStats)
-
12889 {
-
12890  // Destroy buffers.
-
12891  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
-
12892  {
-
12893  VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
-
12894  if(blockCtx.hBuffer)
-
12895  {
-
12896  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
-
12897  m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
-
12898  }
-
12899  }
-
12900 
-
12901  if(pCtx->res >= VK_SUCCESS)
-
12902  {
-
12903  FreeEmptyBlocks(pStats);
-
12904  }
-
12905 
-
12906  if(pCtx->mutexLocked)
-
12907  {
-
12908  VMA_ASSERT(m_hAllocator->m_UseMutex);
-
12909  m_Mutex.UnlockWrite();
+
12851  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
+
12852  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
+
12853  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
+
12854  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
+
12855  pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
+
12856 
+
12857  // Accumulate statistics.
+
12858  if(pStats != VMA_NULL)
+
12859  {
+
12860  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
+
12861  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
+
12862  pStats->bytesMoved += bytesMoved;
+
12863  pStats->allocationsMoved += allocationsMoved;
+
12864  VMA_ASSERT(bytesMoved <= maxBytesToMove);
+
12865  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
+
12866  if(defragmentOnGpu)
+
12867  {
+
12868  maxGpuBytesToMove -= bytesMoved;
+
12869  maxGpuAllocationsToMove -= allocationsMoved;
+
12870  }
+
12871  else
+
12872  {
+
12873  maxCpuBytesToMove -= bytesMoved;
+
12874  maxCpuAllocationsToMove -= allocationsMoved;
+
12875  }
+
12876  }
+
12877 
+
12878  if(pCtx->res >= VK_SUCCESS)
+
12879  {
+
12880  if(defragmentOnGpu)
+
12881  {
+
12882  ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
+
12883  }
+
12884  else
+
12885  {
+
12886  ApplyDefragmentationMovesCpu(pCtx, moves);
+
12887  }
+
12888  }
+
12889  }
+
12890 }
+
12891 
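// Summary of the heuristic above (editorial, not part of vk_mem_alloc.h):
//
//   canDefragmentOnCpu  canDefragmentOnGpu  -> chosen path
//   true                false               -> CPU (memmove)
//   false               true                -> GPU (vkCmdCopyBuffer)
//   true                true                -> GPU if the memory type is
//                                              DEVICE_LOCAL or the GPU is
//                                              integrated, otherwise CPU
//
// overlappingMoveSupported is true only on the CPU path: memmove() handles
// overlapping ranges, while vkCmdCopyBuffer requires source and destination
// regions not to overlap.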
+
12892 void VmaBlockVector::DefragmentationEnd(
+
12893  class VmaBlockVectorDefragmentationContext* pCtx,
+
12894  VmaDefragmentationStats* pStats)
+
12895 {
+
12896  // Destroy buffers.
+
12897  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
+
12898  {
+
12899  VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
+
12900  if(blockCtx.hBuffer)
+
12901  {
+
12902  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
+
12903  m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
+
12904  }
+
12905  }
+
12906 
+
12907  if(pCtx->res >= VK_SUCCESS)
+
12908  {
+
12909  FreeEmptyBlocks(pStats);
12910  }
-
12911 }
-
12912 
-
12913 size_t VmaBlockVector::CalcAllocationCount() const
-
12914 {
-
12915  size_t result = 0;
-
12916  for(size_t i = 0; i < m_Blocks.size(); ++i)
-
12917  {
-
12918  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
-
12919  }
-
12920  return result;
-
12921 }
-
12922 
-
12923 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
-
12924 {
-
12925  if(m_BufferImageGranularity == 1)
-
12926  {
-
12927  return false;
-
12928  }
-
12929  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
-
12930  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
-
12931  {
-
12932  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
-
12933  VMA_ASSERT(m_Algorithm == 0);
-
12934  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
-
12935  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
-
12936  {
-
12937  return true;
-
12938  }
-
12939  }
-
12940  return false;
-
12941 }
-
12942 
-
12943 void VmaBlockVector::MakePoolAllocationsLost(
-
12944  uint32_t currentFrameIndex,
-
12945  size_t* pLostAllocationCount)
-
12946 {
-
12947  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
-
12948  size_t lostAllocationCount = 0;
-
12949  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
-
12950  {
-
12951  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
-
12952  VMA_ASSERT(pBlock);
-
12953  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
-
12954  }
-
12955  if(pLostAllocationCount != VMA_NULL)
+
12911 
+
12912  if(pCtx->mutexLocked)
+
12913  {
+
12914  VMA_ASSERT(m_hAllocator->m_UseMutex);
+
12915  m_Mutex.UnlockWrite();
+
12916  }
+
12917 }
+
12918 
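// Note (editorial, not part of vk_mem_alloc.h): this teardown is the
// counterpart of ApplyDefragmentationMovesGpu(): the per-block transfer
// buffers are destroyed in reverse order, empty blocks are freed only if the
// whole operation succeeded, and the write lock taken in Defragment() is
// released last.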
+
12919 size_t VmaBlockVector::CalcAllocationCount() const
+
12920 {
+
12921  size_t result = 0;
+
12922  for(size_t i = 0; i < m_Blocks.size(); ++i)
+
12923  {
+
12924  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
+
12925  }
+
12926  return result;
+
12927 }
+
12928 
+
12929 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
+
12930 {
+
12931  if(m_BufferImageGranularity == 1)
+
12932  {
+
12933  return false;
+
12934  }
+
12935  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
+
12936  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
+
12937  {
+
12938  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
+
12939  VMA_ASSERT(m_Algorithm == 0);
+
12940  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
+
12941  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
+
12942  {
+
12943  return true;
+
12944  }
+
12945  }
+
12946  return false;
+
12947 }
+
12948 
+
12949 void VmaBlockVector::MakePoolAllocationsLost(
+
12950  uint32_t currentFrameIndex,
+
12951  size_t* pLostAllocationCount)
+
12952 {
+
12953  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
+
12954  size_t lostAllocationCount = 0;
+
12955  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12956  {
-
12957  *pLostAllocationCount = lostAllocationCount;
-
12958  }
-
12959 }
-
12960 
-
12961 VkResult VmaBlockVector::CheckCorruption()
-
12962 {
-
12963  if(!IsCorruptionDetectionEnabled())
-
12964  {
-
12965  return VK_ERROR_FEATURE_NOT_PRESENT;
-
12966  }
-
12967 
-
12968  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
-
12969  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
+
12957  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
+
12958  VMA_ASSERT(pBlock);
+
12959  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
+
12960  }
+
12961  if(pLostAllocationCount != VMA_NULL)
+
12962  {
+
12963  *pLostAllocationCount = lostAllocationCount;
+
12964  }
+
12965 }
+
12966 
+
12967 VkResult VmaBlockVector::CheckCorruption()
+
12968 {
+
12969  if(!IsCorruptionDetectionEnabled())
12970  {
-
12971  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
-
12972  VMA_ASSERT(pBlock);
-
12973  VkResult res = pBlock->CheckCorruption(m_hAllocator);
-
12974  if(res != VK_SUCCESS)
-
12975  {
-
12976  return res;
-
12977  }
-
12978  }
-
12979  return VK_SUCCESS;
-
12980 }
-
12981 
-
12982 void VmaBlockVector::AddStats(VmaStats* pStats)
-
12983 {
-
12984  const uint32_t memTypeIndex = m_MemoryTypeIndex;
-
12985  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
-
12986 
-
12987  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
-
12988 
-
12989  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
-
12990  {
-
12991  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
-
12992  VMA_ASSERT(pBlock);
-
12993  VMA_HEAVY_ASSERT(pBlock->Validate());
-
12994  VmaStatInfo allocationStatInfo;
-
12995  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
-
12996  VmaAddStatInfo(pStats->total, allocationStatInfo);
-
12997  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
-
12998  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
-
12999  }
-
13000 }
-
13001 
-
13003 // VmaDefragmentationAlgorithm_Generic members definition
-
13004 
-
13005 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
-
13006  VmaAllocator hAllocator,
-
13007  VmaBlockVector* pBlockVector,
-
13008  uint32_t currentFrameIndex,
-
13009  bool overlappingMoveSupported) :
-
13010  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
-
13011  m_AllocationCount(0),
-
13012  m_AllAllocations(false),
-
13013  m_BytesMoved(0),
-
13014  m_AllocationsMoved(0),
-
13015  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
-
13016 {
-
13017  // Create block info for each block.
-
13018  const size_t blockCount = m_pBlockVector->m_Blocks.size();
-
13019  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
-
13020  {
-
13021  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
-
13022  pBlockInfo->m_OriginalBlockIndex = blockIndex;
-
13023  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
-
13024  m_Blocks.push_back(pBlockInfo);
-
13025  }
-
13026 
-
13027  // Sort them by m_pBlock pointer value.
-
13028  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
-
13029 }
-
13030 
-
13031 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
-
13032 {
-
13033  for(size_t i = m_Blocks.size(); i--; )
-
13034  {
-
13035  vma_delete(m_hAllocator, m_Blocks[i]);
-
13036  }
-
13037 }
-
13038 
-
13039 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
-
13040 {
-
13041  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check that this allocation was not lost.
-
13042  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
-
13043  {
-
13044  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
-
13045  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
-
13046  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
-
13047  {
-
13048  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
-
13049  (*it)->m_Allocations.push_back(allocInfo);
-
13050  }
-
13051  else
-
13052  {
-
13053  VMA_ASSERT(0);
-
13054  }
-
13055 
-
13056  ++m_AllocationCount;
-
13057  }
-
13058 }
-
13059 
-
13060 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
-
13061  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
-
13062  VkDeviceSize maxBytesToMove,
-
13063  uint32_t maxAllocationsToMove)
-
13064 {
-
13065  if(m_Blocks.empty())
-
13066  {
-
13067  return VK_SUCCESS;
-
13068  }
-
13069 
-
13070  // This is a choice based on research.
-
13071  // Option 1:
-
13072  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
-
13073  // Option 2:
-
13074  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
-
13075  // Option 3:
-
13076  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
-
13077 
-
13078  size_t srcBlockMinIndex = 0;
-
13079  // When FAST_ALGORITHM is used, move allocations only from the last of the blocks that contain non-movable allocations.
-
13080  /*
-
13081  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
-
13082  {
-
13083  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
-
13084  if(blocksWithNonMovableCount > 0)
-
13085  {
-
13086  srcBlockMinIndex = blocksWithNonMovableCount - 1;
-
13087  }
-
13088  }
-
13089  */
-
13090 
-
13091  size_t srcBlockIndex = m_Blocks.size() - 1;
-
13092  size_t srcAllocIndex = SIZE_MAX;
-
13093  for(;;)
-
13094  {
-
13095  // 1. Find next allocation to move.
-
13096  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
-
13097  // 1.2. Then start from last to first m_Allocations.
-
13098  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
-
13099  {
-
13100  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
-
13101  {
-
13102  // Finished: no more allocations to process.
-
13103  if(srcBlockIndex == srcBlockMinIndex)
-
13104  {
-
13105  return VK_SUCCESS;
-
13106  }
-
13107  else
-
13108  {
-
13109  --srcBlockIndex;
-
13110  srcAllocIndex = SIZE_MAX;
-
13111  }
-
13112  }
-
13113  else
-
13114  {
-
13115  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
-
13116  }
-
13117  }
-
13118 
-
13119  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
-
13120  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
-
13121 
-
13122  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
-
13123  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
-
13124  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
-
13125  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
-
13126 
-
13127  // 2. Try to find new place for this allocation in preceding or current block.
-
13128  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
-
13129  {
-
13130  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
-
13131  VmaAllocationRequest dstAllocRequest;
-
13132  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
-
13133  m_CurrentFrameIndex,
-
13134  m_pBlockVector->GetFrameInUseCount(),
-
13135  m_pBlockVector->GetBufferImageGranularity(),
-
13136  size,
-
13137  alignment,
-
13138  false, // upperAddress
-
13139  suballocType,
-
13140  false, // canMakeOtherLost
-
13141  strategy,
-
13142  &dstAllocRequest) &&
-
13143  MoveMakesSense(
-
13144  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
-
13145  {
-
13146  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
-
13147 
-
13148  // Reached limit on number of allocations or bytes to move.
-
13149  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
-
13150  (m_BytesMoved + size > maxBytesToMove))
-
13151  {
-
13152  return VK_SUCCESS;
-
13153  }
-
13154 
-
13155  VmaDefragmentationMove move;
-
13156  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
-
13157  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
-
13158  move.srcOffset = srcOffset;
-
13159  move.dstOffset = dstAllocRequest.offset;
-
13160  move.size = size;
-
13161  moves.push_back(move);
-
13162 
-
13163  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
-
13164  dstAllocRequest,
-
13165  suballocType,
-
13166  size,
-
13167  allocInfo.m_hAllocation);
-
13168  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
-
13169 
-
13170  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
-
13171 
-
13172  if(allocInfo.m_pChanged != VMA_NULL)
-
13173  {
-
13174  *allocInfo.m_pChanged = VK_TRUE;
-
13175  }
-
13176 
-
13177  ++m_AllocationsMoved;
-
13178  m_BytesMoved += size;
-
13179 
-
13180  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
-
13181 
-
13182  break;
-
13183  }
-
13184  }
+
12971  return VK_ERROR_FEATURE_NOT_PRESENT;
+
12972  }
+
12973 
+
12974  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
+
12975  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
+
12976  {
+
12977  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
+
12978  VMA_ASSERT(pBlock);
+
12979  VkResult res = pBlock->CheckCorruption(m_hAllocator);
+
12980  if(res != VK_SUCCESS)
+
12981  {
+
12982  return res;
+
12983  }
+
12984  }
+
12985  return VK_SUCCESS;
+
12986 }
+
12987 
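// Illustrative sketch (assumed helper semantics, editorial, not part of this
// diff): corruption detection relies on a magic value written into the
// VMA_DEBUG_MARGIN on both sides of every allocation (see VmaWriteMagicValue
// in ApplyDefragmentationMovesCpu above); CheckCorruption() revalidates it,
// conceptually per allocation:
//
// static bool VmaMarginIntact_Sketch(
//     const void* pBlockData, VkDeviceSize allocOffset, VkDeviceSize allocSize)
// {
//     return VmaValidateMagicValue(pBlockData, allocOffset - VMA_DEBUG_MARGIN)
//         && VmaValidateMagicValue(pBlockData, allocOffset + allocSize);
// }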
+
12988 void VmaBlockVector::AddStats(VmaStats* pStats)
+
12989 {
+
12990  const uint32_t memTypeIndex = m_MemoryTypeIndex;
+
12991  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
+
12992 
+
12993  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
+
12994 
+
12995  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
+
12996  {
+
12997  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
+
12998  VMA_ASSERT(pBlock);
+
12999  VMA_HEAVY_ASSERT(pBlock->Validate());
+
13000  VmaStatInfo allocationStatInfo;
+
13001  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
+
13002  VmaAddStatInfo(pStats->total, allocationStatInfo);
+
13003  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
+
13004  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
+
13005  }
+
13006 }
+
13007 
+
13009 // VmaDefragmentationAlgorithm_Generic members definition
+
13010 
+
13011 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
+
13012  VmaAllocator hAllocator,
+
13013  VmaBlockVector* pBlockVector,
+
13014  uint32_t currentFrameIndex,
+
13015  bool overlappingMoveSupported) :
+
13016  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
+
13017  m_AllocationCount(0),
+
13018  m_AllAllocations(false),
+
13019  m_BytesMoved(0),
+
13020  m_AllocationsMoved(0),
+
13021  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
+
13022 {
+
13023  // Create block info for each block.
+
13024  const size_t blockCount = m_pBlockVector->m_Blocks.size();
+
13025  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
+
13026  {
+
13027  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
+
13028  pBlockInfo->m_OriginalBlockIndex = blockIndex;
+
13029  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
+
13030  m_Blocks.push_back(pBlockInfo);
+
13031  }
+
13032 
+
13033  // Sort them by m_pBlock pointer value.
+
13034  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
+
13035 }
+
13036 
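// Note (editorial, not part of vk_mem_alloc.h): sorting m_Blocks by the
// m_pBlock pointer value is what allows AddAllocation() below to find the
// BlockInfo for a given allocation in O(log n), using
// VmaBinaryFindFirstNotLess with the same BlockPointerLess comparator.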
+
13037 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
+
13038 {
+
13039  for(size_t i = m_Blocks.size(); i--; )
+
13040  {
+
13041  vma_delete(m_hAllocator, m_Blocks[i]);
+
13042  }
+
13043 }
+
13044 
+
13045 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
+
13046 {
+
13047  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check that this allocation was not lost.
+
13048  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
+
13049  {
+
13050  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
+
13051  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
+
13052  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
+
13053  {
+
13054  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
+
13055  (*it)->m_Allocations.push_back(allocInfo);
+
13056  }
+
13057  else
+
13058  {
+
13059  VMA_ASSERT(0);
+
13060  }
+
13061 
+
13062  ++m_AllocationCount;
+
13063  }
+
13064 }
+
13065 
+
13066 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
+
13067  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
+
13068  VkDeviceSize maxBytesToMove,
+
13069  uint32_t maxAllocationsToMove)
+
13070 {
+
13071  if(m_Blocks.empty())
+
13072  {
+
13073  return VK_SUCCESS;
+
13074  }
+
13075 
+
13076  // This is a choice based on research.
+
13077  // Option 1:
+
13078  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
+
13079  // Option 2:
+
13080  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
+
13081  // Option 3:
+
13082  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
+
13083 
+
13084  size_t srcBlockMinIndex = 0;
+
13085  // When FAST_ALGORITHM is used, move allocations only from the last of the blocks that contain non-movable allocations.
+
13086  /*
+
13087  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
+
13088  {
+
13089  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
+
13090  if(blocksWithNonMovableCount > 0)
+
13091  {
+
13092  srcBlockMinIndex = blocksWithNonMovableCount - 1;
+
13093  }
+
13094  }
+
13095  */
+
13096 
+
13097  size_t srcBlockIndex = m_Blocks.size() - 1;
+
13098  size_t srcAllocIndex = SIZE_MAX;
+
13099  for(;;)
+
13100  {
+
13101  // 1. Find next allocation to move.
+
13102  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
+
13103  // 1.2. Then start from last to first m_Allocations.
+
13104  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
+
13105  {
+
13106  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
+
13107  {
+
13108  // Finished: no more allocations to process.
+
13109  if(srcBlockIndex == srcBlockMinIndex)
+
13110  {
+
13111  return VK_SUCCESS;
+
13112  }
+
13113  else
+
13114  {
+
13115  --srcBlockIndex;
+
13116  srcAllocIndex = SIZE_MAX;
+
13117  }
+
13118  }
+
13119  else
+
13120  {
+
13121  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
+
13122  }
+
13123  }
+
13124 
+
13125  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
+
13126  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
+
13127 
+
13128  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
+
13129  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
+
13130  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
+
13131  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
+
13132 
+
13133  // 2. Try to find new place for this allocation in preceding or current block.
+
13134  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
+
13135  {
+
13136  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
+
13137  VmaAllocationRequest dstAllocRequest;
+
13138  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
+
13139  m_CurrentFrameIndex,
+
13140  m_pBlockVector->GetFrameInUseCount(),
+
13141  m_pBlockVector->GetBufferImageGranularity(),
+
13142  size,
+
13143  alignment,
+
13144  false, // upperAddress
+
13145  suballocType,
+
13146  false, // canMakeOtherLost
+
13147  strategy,
+
13148  &dstAllocRequest) &&
+
13149  MoveMakesSense(
+
13150  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
+
13151  {
+
13152  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
+
13153 
+
13154  // Reached limit on number of allocations or bytes to move.
+
13155  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
+
13156  (m_BytesMoved + size > maxBytesToMove))
+
13157  {
+
13158  return VK_SUCCESS;
+
13159  }
+
13160 
+
13161  VmaDefragmentationMove move;
+
13162  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
+
13163  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
+
13164  move.srcOffset = srcOffset;
+
13165  move.dstOffset = dstAllocRequest.offset;
+
13166  move.size = size;
+
13167  moves.push_back(move);
+
13168 
+
13169  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
+
13170  dstAllocRequest,
+
13171  suballocType,
+
13172  size,
+
13173  allocInfo.m_hAllocation);
+
13174  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
+
13175 
+
13176  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
+
13177 
+
13178  if(allocInfo.m_pChanged != VMA_NULL)
+
13179  {
+
13180  *allocInfo.m_pChanged = VK_TRUE;
+
13181  }
+
13182 
+
13183  ++m_AllocationsMoved;
+
13184  m_BytesMoved += size;
13185 
-
13186  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for the next round.
+
13186  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
13187 
-
13188  if(srcAllocIndex > 0)
-
13189  {
-
13190  --srcAllocIndex;
-
13191  }
-
13192  else
-
13193  {
-
13194  if(srcBlockIndex > 0)
-
13195  {
-
13196  --srcBlockIndex;
-
13197  srcAllocIndex = SIZE_MAX;
-
13198  }
-
13199  else
-
13200  {
-
13201  return VK_SUCCESS;
-
13202  }
-
13203  }
-
13204  }
-
13205 }
-
13206 
-
13207 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
-
13208 {
-
13209  size_t result = 0;
-
13210  for(size_t i = 0; i < m_Blocks.size(); ++i)
-
13211  {
-
13212  if(m_Blocks[i]->m_HasNonMovableAllocations)
-
13213  {
-
13214  ++result;
-
13215  }
-
13216  }
-
13217  return result;
-
13218 }
-
13219 
-
13220 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
-
13221  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
-
13222  VkDeviceSize maxBytesToMove,
-
13223  uint32_t maxAllocationsToMove)
-
13224 {
-
13225  if(!m_AllAllocations && m_AllocationCount == 0)
-
13226  {
-
13227  return VK_SUCCESS;
-
13228  }
-
13229 
-
13230  const size_t blockCount = m_Blocks.size();
-
13231  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
+
13188  break;
+
13189  }
+
13190  }
+
13191 
+
13192  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for the next round.
+
13193 
+
13194  if(srcAllocIndex > 0)
+
13195  {
+
13196  --srcAllocIndex;
+
13197  }
+
13198  else
+
13199  {
+
13200  if(srcBlockIndex > 0)
+
13201  {
+
13202  --srcBlockIndex;
+
13203  srcAllocIndex = SIZE_MAX;
+
13204  }
+
13205  else
+
13206  {
+
13207  return VK_SUCCESS;
+
13208  }
+
13209  }
+
13210  }
+
13211 }
+
13212 
+
13213 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
+
13214 {
+
13215  size_t result = 0;
+
13216  for(size_t i = 0; i < m_Blocks.size(); ++i)
+
13217  {
+
13218  if(m_Blocks[i]->m_HasNonMovableAllocations)
+
13219  {
+
13220  ++result;
+
13221  }
+
13222  }
+
13223  return result;
+
13224 }
+
13225 
+
13226 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
+
13227  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
+
13228  VkDeviceSize maxBytesToMove,
+
13229  uint32_t maxAllocationsToMove)
+
13230 {
+
13231  if(!m_AllAllocations && m_AllocationCount == 0)
13232  {
-
13233  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
-
13234 
-
13235  if(m_AllAllocations)
-
13236  {
-
13237  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
-
13238  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
-
13239  it != pMetadata->m_Suballocations.end();
-
13240  ++it)
-
13241  {
-
13242  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
-
13243  {
-
13244  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
-
13245  pBlockInfo->m_Allocations.push_back(allocInfo);
-
13246  }
-
13247  }
-
13248  }
-
13249 
-
13250  pBlockInfo->CalcHasNonMovableAllocations();
-
13251 
-
13252  // This is a choice based on research.
-
13253  // Option 1:
-
13254  pBlockInfo->SortAllocationsByOffsetDescending();
-
13255  // Option 2:
-
13256  //pBlockInfo->SortAllocationsBySizeDescending();
-
13257  }
-
13258 
-
13259  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
-
13260  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
-
13261 
-
13262  // This is a choice based on research.
-
13263  const uint32_t roundCount = 2;
+
13233  return VK_SUCCESS;
+
13234  }
+
13235 
+
13236  const size_t blockCount = m_Blocks.size();
+
13237  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
+
13238  {
+
13239  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
+
13240 
+
13241  if(m_AllAllocations)
+
13242  {
+
13243  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
+
13244  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
+
13245  it != pMetadata->m_Suballocations.end();
+
13246  ++it)
+
13247  {
+
13248  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
+
13249  {
+
13250  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
+
13251  pBlockInfo->m_Allocations.push_back(allocInfo);
+
13252  }
+
13253  }
+
13254  }
+
13255 
+
13256  pBlockInfo->CalcHasNonMovableAllocations();
+
13257 
+
13258  // This is a choice based on research.
+
13259  // Option 1:
+
13260  pBlockInfo->SortAllocationsByOffsetDescending();
+
13261  // Option 2:
+
13262  //pBlockInfo->SortAllocationsBySizeDescending();
+
13263  }
13264 
-
13265  // Execute defragmentation rounds (the main part).
-
13266  VkResult result = VK_SUCCESS;
-
13267  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
-
13268  {
-
13269  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
-
13270  }
-
13271 
-
13272  return result;
-
13273 }
-
13274 
-
13275 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
-
13276  size_t dstBlockIndex, VkDeviceSize dstOffset,
-
13277  size_t srcBlockIndex, VkDeviceSize srcOffset)
-
13278 {
-
13279  if(dstBlockIndex < srcBlockIndex)
-
13280  {
-
13281  return true;
-
13282  }
-
13283  if(dstBlockIndex > srcBlockIndex)
-
13284  {
-
13285  return false;
-
13286  }
-
13287  if(dstOffset < srcOffset)
-
13288  {
-
13289  return true;
-
13290  }
-
13291  return false;
-
13292 }
-
13293 
-
13295 // VmaDefragmentationAlgorithm_Fast
-
13296 
-
13297 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
-
13298  VmaAllocator hAllocator,
-
13299  VmaBlockVector* pBlockVector,
-
13300  uint32_t currentFrameIndex,
-
13301  bool overlappingMoveSupported) :
-
13302  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
-
13303  m_OverlappingMoveSupported(overlappingMoveSupported),
-
13304  m_AllocationCount(0),
-
13305  m_AllAllocations(false),
-
13306  m_BytesMoved(0),
-
13307  m_AllocationsMoved(0),
-
13308  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
-
13309 {
-
13310  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
-
13311 
-
13312 }
-
13313 
-
13314 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
+
13265  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
+
13266  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
+
13267 
+
13268  // This is a choice based on research.
+
13269  const uint32_t roundCount = 2;
+
13270 
+
13271  // Execute defragmentation rounds (the main part).
+
13272  VkResult result = VK_SUCCESS;
+
13273  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
+
13274  {
+
13275  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
+
13276  }
+
13277 
+
13278  return result;
+
13279 }
+
13280 
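// Note (editorial, not part of vk_mem_alloc.h): running two rounds is a
// deliberate trade-off: space vacated by moves in the first round can be
// reused as a destination by the second, while both rounds still respect the
// shared maxBytesToMove / maxAllocationsToMove budgets, because m_BytesMoved
// and m_AllocationsMoved accumulate across rounds.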
+
13281 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
+
13282  size_t dstBlockIndex, VkDeviceSize dstOffset,
+
13283  size_t srcBlockIndex, VkDeviceSize srcOffset)
+
13284 {
+
13285  if(dstBlockIndex < srcBlockIndex)
+
13286  {
+
13287  return true;
+
13288  }
+
13289  if(dstBlockIndex > srcBlockIndex)
+
13290  {
+
13291  return false;
+
13292  }
+
13293  if(dstOffset < srcOffset)
+
13294  {
+
13295  return true;
+
13296  }
+
13297  return false;
+
13298 }
+
13299 
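// Illustrative equivalent (editorial, not part of vk_mem_alloc.h):
// MoveMakesSense() accepts a move only if it shifts the allocation strictly
// earlier in the lexicographic order (blockIndex, offset), i.e.:
//
// #include <tuple>
// static bool VmaMovesForward_Sketch(
//     size_t dstBlockIndex, VkDeviceSize dstOffset,
//     size_t srcBlockIndex, VkDeviceSize srcOffset)
// {
//     return std::tie(dstBlockIndex, dstOffset) <
//            std::tie(srcBlockIndex, srcOffset);
// }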
+
13301 // VmaDefragmentationAlgorithm_Fast
+
13302 
+
13303 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
+
13304  VmaAllocator hAllocator,
+
13305  VmaBlockVector* pBlockVector,
+
13306  uint32_t currentFrameIndex,
+
13307  bool overlappingMoveSupported) :
+
13308  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
+
13309  m_OverlappingMoveSupported(overlappingMoveSupported),
+
13310  m_AllocationCount(0),
+
13311  m_AllAllocations(false),
+
13312  m_BytesMoved(0),
+
13313  m_AllocationsMoved(0),
+
13314  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
13315 {
-
13316 }
+
13316  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
13317 
-
13318 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
-
13319  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
-
13320  VkDeviceSize maxBytesToMove,
-
13321  uint32_t maxAllocationsToMove)
-
13322 {
-
13323  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
-
13324 
-
13325  const size_t blockCount = m_pBlockVector->GetBlockCount();
-
13326  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
-
13327  {
-
13328  return VK_SUCCESS;
-
13329  }
+
13318 }
+
13319 
+
13320 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
+
13321 {
+
13322 }
+
13323 
+
13324 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
+
13325  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
+
13326  VkDeviceSize maxBytesToMove,
+
13327  uint32_t maxAllocationsToMove)
+
13328 {
+
13329  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
13330 
-
13331  PreprocessMetadata();
-
13332 
-
13333  // Sort blocks in order from most "destination" (least free space) to most "source".
-
13334 
-
13335  m_BlockInfos.resize(blockCount);
-
13336  for(size_t i = 0; i < blockCount; ++i)
-
13337  {
-
13338  m_BlockInfos[i].origBlockIndex = i;
-
13339  }
+
13331  const size_t blockCount = m_pBlockVector->GetBlockCount();
+
13332  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
+
13333  {
+
13334  return VK_SUCCESS;
+
13335  }
+
13336 
+
13337  PreprocessMetadata();
+
13338 
+
13339  // Sort blocks in order from most "destination" (least free space) to most "source".
13340 
-
13341  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
-
13342  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
-
13343  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
-
13344  });
-
13345 
-
13346  // THE MAIN ALGORITHM
-
13347 
-
13348  FreeSpaceDatabase freeSpaceDb;
-
13349 
-
13350  size_t dstBlockInfoIndex = 0;
-
13351  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
-
13352  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
-
13353  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
-
13354  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
-
13355  VkDeviceSize dstOffset = 0;
-
13356 
-
13357  bool end = false;
-
13358  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
-
13359  {
-
13360  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
-
13361  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
-
13362  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
-
13363  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
-
13364  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
-
13365  {
-
13366  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
-
13367  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
-
13368  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
-
13369  if(m_AllocationsMoved == maxAllocationsToMove ||
-
13370  m_BytesMoved + srcAllocSize > maxBytesToMove)
-
13371  {
-
13372  end = true;
-
13373  break;
-
13374  }
-
13375  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
-
13376 
-
13377  // Try to place it in one of the free spaces from the database.
-
13378  size_t freeSpaceInfoIndex;
-
13379  VkDeviceSize dstAllocOffset;
-
13380  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
-
13381  freeSpaceInfoIndex, dstAllocOffset))
-
13382  {
-
13383  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
-
13384  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
-
13385  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
-
13386 
-
13387  // Same block
-
13388  if(freeSpaceInfoIndex == srcBlockInfoIndex)
-
13389  {
-
13390  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
-
13391 
-
13392  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
-
13393 
-
13394  VmaSuballocation suballoc = *srcSuballocIt;
-
13395  suballoc.offset = dstAllocOffset;
-
13396  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
-
13397  m_BytesMoved += srcAllocSize;
-
13398  ++m_AllocationsMoved;
-
13399 
-
13400  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
-
13401  ++nextSuballocIt;
-
13402  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
-
13403  srcSuballocIt = nextSuballocIt;
-
13404 
-
13405  InsertSuballoc(pFreeSpaceMetadata, suballoc);
-
13406 
-
13407  VmaDefragmentationMove move = {
-
13408  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
-
13409  srcAllocOffset, dstAllocOffset,
-
13410  srcAllocSize };
-
13411  moves.push_back(move);
-
13412  }
-
13413  // Different block
-
13414  else
-
13415  {
-
13416  // MOVE OPTION 2: Move the allocation to a different block.
-
13417 
-
13418  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
-
13419 
-
13420  VmaSuballocation suballoc = *srcSuballocIt;
-
13421  suballoc.offset = dstAllocOffset;
-
13422  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
-
13423  m_BytesMoved += srcAllocSize;
-
13424  ++m_AllocationsMoved;
+
13341  m_BlockInfos.resize(blockCount);
+
13342  for(size_t i = 0; i < blockCount; ++i)
+
13343  {
+
13344  m_BlockInfos[i].origBlockIndex = i;
+
13345  }
+
13346 
+
13347  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
+
13348  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
+
13349  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
+
13350  });
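The sort above orders block infos by ascending total free space, so the fullest blocks come first: they serve as move destinations, while the emptiest blocks at the tail are drained and can later be freed entirely. A minimal standalone sketch of the same ordering, with an illustrative BlockInfo that caches the free-byte count instead of querying block metadata:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    struct BlockInfo { size_t origBlockIndex; uint64_t freeBytes; };

    // Ascending free space: the fullest blocks come first and receive moves;
    // the emptiest blocks end up last and get emptied out.
    void SortByFreeSpace(std::vector<BlockInfo>& infos)
    {
        std::sort(infos.begin(), infos.end(),
            [](const BlockInfo& lhs, const BlockInfo& rhs)
            { return lhs.freeBytes < rhs.freeBytes; });
    }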
+
13351 
+
13352  // THE MAIN ALGORITHM
+
13353 
+
13354  FreeSpaceDatabase freeSpaceDb;
+
13355 
+
13356  size_t dstBlockInfoIndex = 0;
+
13357  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
+
13358  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
+
13359  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
+
13360  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
+
13361  VkDeviceSize dstOffset = 0;
+
13362 
+
13363  bool end = false;
+
13364  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
+
13365  {
+
13366  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
+
13367  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
+
13368  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
+
13369  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
+
13370  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
+
13371  {
+
13372  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
+
13373  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
+
13374  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
+
13375  if(m_AllocationsMoved == maxAllocationsToMove ||
+
13376  m_BytesMoved + srcAllocSize > maxBytesToMove)
+
13377  {
+
13378  end = true;
+
13379  break;
+
13380  }
+
13381  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
+
13382 
+
13383  // Try to place it in one of the free spaces from the database.
+
13384  size_t freeSpaceInfoIndex;
+
13385  VkDeviceSize dstAllocOffset;
+
13386  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
+
13387  freeSpaceInfoIndex, dstAllocOffset))
+
13388  {
+
13389  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
+
13390  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
+
13391  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
+
13392 
+
13393  // Same block
+
13394  if(freeSpaceInfoIndex == srcBlockInfoIndex)
+
13395  {
+
13396  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
+
13397 
+
13398  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
+
13399 
+
13400  VmaSuballocation suballoc = *srcSuballocIt;
+
13401  suballoc.offset = dstAllocOffset;
+
13402  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
+
13403  m_BytesMoved += srcAllocSize;
+
13404  ++m_AllocationsMoved;
+
13405 
+
13406  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
+
13407  ++nextSuballocIt;
+
13408  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
+
13409  srcSuballocIt = nextSuballocIt;
+
13410 
+
13411  InsertSuballoc(pFreeSpaceMetadata, suballoc);
+
13412 
+
13413  VmaDefragmentationMove move = {
+
13414  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
+
13415  srcAllocOffset, dstAllocOffset,
+
13416  srcAllocSize };
+
13417  moves.push_back(move);
+
13418  }
+
13419  // Different block
+
13420  else
+
13421  {
+
13422  // MOVE OPTION 2: Move the allocation to a different block.
+
13423 
+
13424  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
13425 
-
13426  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
-
13427  ++nextSuballocIt;
-
13428  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
-
13429  srcSuballocIt = nextSuballocIt;
-
13430 
-
13431  InsertSuballoc(pFreeSpaceMetadata, suballoc);
-
13432 
-
13433  VmaDefragmentationMove move = {
-
13434  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
-
13435  srcAllocOffset, dstAllocOffset,
-
13436  srcAllocSize };
-
13437  moves.push_back(move);
-
13438  }
-
13439  }
-
13440  else
-
13441  {
-
13442  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
-
13443 
-
13444  // If the allocation doesn't fit before the end of dstBlock, advance to the next block.
-
13445  while(dstBlockInfoIndex < srcBlockInfoIndex &&
-
13446  dstAllocOffset + srcAllocSize > dstBlockSize)
-
13447  {
-
13448  // But before that, register the remaining free space at the end of the dst block.
-
13449  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
-
13450 
-
13451  ++dstBlockInfoIndex;
-
13452  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
-
13453  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
-
13454  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
-
13455  dstBlockSize = pDstMetadata->GetSize();
-
13456  dstOffset = 0;
-
13457  dstAllocOffset = 0;
-
13458  }
-
13459 
-
13460  // Same block
-
13461  if(dstBlockInfoIndex == srcBlockInfoIndex)
-
13462  {
-
13463  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
-
13464 
-
13465  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
-
13466 
-
13467  bool skipOver = overlap;
-
13468  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
-
13469  {
-
13470  // If the destination and source regions overlap, skip the move when it
-
13471  // would shift the allocation by less than 1/64 of its size.
-
13472  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
-
13473  }
-
13474 
-
13475  if(skipOver)
-
13476  {
-
13477  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
-
13478 
-
13479  dstOffset = srcAllocOffset + srcAllocSize;
-
13480  ++srcSuballocIt;
-
13481  }
-
13482  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
-
13483  else
-
13484  {
-
13485  srcSuballocIt->offset = dstAllocOffset;
-
13486  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
-
13487  dstOffset = dstAllocOffset + srcAllocSize;
-
13488  m_BytesMoved += srcAllocSize;
-
13489  ++m_AllocationsMoved;
-
13490  ++srcSuballocIt;
-
13491  VmaDefragmentationMove move = {
-
13492  srcOrigBlockIndex, dstOrigBlockIndex,
-
13493  srcAllocOffset, dstAllocOffset,
-
13494  srcAllocSize };
-
13495  moves.push_back(move);
-
13496  }
-
13497  }
-
13498  // Different block
-
13499  else
-
13500  {
-
13501  // MOVE OPTION 2: Move the allocation to a different block.
-
13502 
-
13503  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
-
13504  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
-
13505 
-
13506  VmaSuballocation suballoc = *srcSuballocIt;
-
13507  suballoc.offset = dstAllocOffset;
-
13508  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
-
13509  dstOffset = dstAllocOffset + srcAllocSize;
-
13510  m_BytesMoved += srcAllocSize;
-
13511  ++m_AllocationsMoved;
-
13512 
-
13513  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
-
13514  ++nextSuballocIt;
-
13515  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
-
13516  srcSuballocIt = nextSuballocIt;
-
13517 
-
13518  pDstMetadata->m_Suballocations.push_back(suballoc);
-
13519 
-
13520  VmaDefragmentationMove move = {
-
13521  srcOrigBlockIndex, dstOrigBlockIndex,
-
13522  srcAllocOffset, dstAllocOffset,
-
13523  srcAllocSize };
-
13524  moves.push_back(move);
-
13525  }
-
13526  }
-
13527  }
-
13528  }
-
13529 
-
13530  m_BlockInfos.clear();
-
13531 
-
13532  PostprocessMetadata();
-
13533 
-
13534  return VK_SUCCESS;
-
13535 }
-
13536 
-
13537 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
-
13538 {
-
13539  const size_t blockCount = m_pBlockVector->GetBlockCount();
-
13540  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
-
13541  {
-
13542  VmaBlockMetadata_Generic* const pMetadata =
-
13543  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
-
13544  pMetadata->m_FreeCount = 0;
-
13545  pMetadata->m_SumFreeSize = pMetadata->GetSize();
-
13546  pMetadata->m_FreeSuballocationsBySize.clear();
-
13547  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
-
13548  it != pMetadata->m_Suballocations.end(); )
-
13549  {
-
13550  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
-
13551  {
-
13552  VmaSuballocationList::iterator nextIt = it;
-
13553  ++nextIt;
-
13554  pMetadata->m_Suballocations.erase(it);
-
13555  it = nextIt;
-
13556  }
-
13557  else
-
13558  {
-
13559  ++it;
-
13560  }
-
13561  }
-
13562  }
-
13563 }
-
13564 
-
13565 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
-
13566 {
-
13567  const size_t blockCount = m_pBlockVector->GetBlockCount();
-
13568  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
-
13569  {
-
13570  VmaBlockMetadata_Generic* const pMetadata =
-
13571  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
-
13572  const VkDeviceSize blockSize = pMetadata->GetSize();
-
13573 
-
13574  // No allocations in this block - entire area is free.
-
13575  if(pMetadata->m_Suballocations.empty())
-
13576  {
-
13577  pMetadata->m_FreeCount = 1;
-
13578  //pMetadata->m_SumFreeSize is already set to blockSize.
-
13579  VmaSuballocation suballoc = {
-
13580  0, // offset
-
13581  blockSize, // size
-
13582  VMA_NULL, // hAllocation
-
13583  VMA_SUBALLOCATION_TYPE_FREE };
-
13584  pMetadata->m_Suballocations.push_back(suballoc);
-
13585  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
-
13586  }
-
13587  // There are some allocations in this block.
-
13588  else
-
13589  {
-
13590  VkDeviceSize offset = 0;
-
13591  VmaSuballocationList::iterator it;
-
13592  for(it = pMetadata->m_Suballocations.begin();
-
13593  it != pMetadata->m_Suballocations.end();
-
13594  ++it)
-
13595  {
-
13596  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
-
13597  VMA_ASSERT(it->offset >= offset);
-
13598 
-
13599  // Need to insert preceding free space.
-
13600  if(it->offset > offset)
-
13601  {
-
13602  ++pMetadata->m_FreeCount;
-
13603  const VkDeviceSize freeSize = it->offset - offset;
-
13604  VmaSuballocation suballoc = {
-
13605  offset, // offset
-
13606  freeSize, // size
-
13607  VMA_NULL, // hAllocation
-
13608  VMA_SUBALLOCATION_TYPE_FREE };
-
13609  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
-
13610  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
-
13611  {
-
13612  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
-
13613  }
-
13614  }
-
13615 
-
13616  pMetadata->m_SumFreeSize -= it->size;
-
13617  offset = it->offset + it->size;
-
13618  }
-
13619 
-
13620  // Need to insert trailing free space.
-
13621  if(offset < blockSize)
-
13622  {
-
13623  ++pMetadata->m_FreeCount;
-
13624  const VkDeviceSize freeSize = blockSize - offset;
-
13625  VmaSuballocation suballoc = {
-
13626  offset, // offset
-
13627  freeSize, // size
-
13628  VMA_NULL, // hAllocation
-
13629  VMA_SUBALLOCATION_TYPE_FREE };
-
13630  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
-
13631  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
-
13632  if(freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
-
13633  {
-
13634  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
-
13635  }
-
13636  }
-
13637 
-
13638  VMA_SORT(
-
13639  pMetadata->m_FreeSuballocationsBySize.begin(),
-
13640  pMetadata->m_FreeSuballocationsBySize.end(),
-
13641  VmaSuballocationItemSizeLess());
-
13642  }
+
13426  VmaSuballocation suballoc = *srcSuballocIt;
+
13427  suballoc.offset = dstAllocOffset;
+
13428  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
+
13429  m_BytesMoved += srcAllocSize;
+
13430  ++m_AllocationsMoved;
+
13431 
+
13432  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
+
13433  ++nextSuballocIt;
+
13434  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
+
13435  srcSuballocIt = nextSuballocIt;
+
13436 
+
13437  InsertSuballoc(pFreeSpaceMetadata, suballoc);
+
13438 
+
13439  VmaDefragmentationMove move = {
+
13440  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
+
13441  srcAllocOffset, dstAllocOffset,
+
13442  srcAllocSize };
+
13443  moves.push_back(move);
+
13444  }
+
13445  }
+
13446  else
+
13447  {
+
13448  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
+
13449 
+
13450  // If the allocation doesn't fit before the end of dstBlock, advance to the next block.
+
13451  while(dstBlockInfoIndex < srcBlockInfoIndex &&
+
13452  dstAllocOffset + srcAllocSize > dstBlockSize)
+
13453  {
+
13454  // But before that, register the remaining free space at the end of the dst block.
+
13455  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
+
13456 
+
13457  ++dstBlockInfoIndex;
+
13458  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
+
13459  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
+
13460  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
+
13461  dstBlockSize = pDstMetadata->GetSize();
+
13462  dstOffset = 0;
+
13463  dstAllocOffset = 0;
+
13464  }
+
13465 
+
13466  // Same block
+
13467  if(dstBlockInfoIndex == srcBlockInfoIndex)
+
13468  {
+
13469  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
+
13470 
+
13471  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
+
13472 
+
13473  bool skipOver = overlap;
+
13474  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
+
13475  {
+
13476  // If the destination and source regions overlap, skip the move when it
+
13477  // would shift the allocation by less than 1/64 of its size.
+
13478  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
+
13479  }
+
13480 
+
13481  if(skipOver)
+
13482  {
+
13483  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
+
13484 
+
13485  dstOffset = srcAllocOffset + srcAllocSize;
+
13486  ++srcSuballocIt;
+
13487  }
+
13488  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
+
13489  else
+
13490  {
+
13491  srcSuballocIt->offset = dstAllocOffset;
+
13492  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
+
13493  dstOffset = dstAllocOffset + srcAllocSize;
+
13494  m_BytesMoved += srcAllocSize;
+
13495  ++m_AllocationsMoved;
+
13496  ++srcSuballocIt;
+
13497  VmaDefragmentationMove move = {
+
13498  srcOrigBlockIndex, dstOrigBlockIndex,
+
13499  srcAllocOffset, dstAllocOffset,
+
13500  srcAllocSize };
+
13501  moves.push_back(move);
+
13502  }
+
13503  }
+
13504  // Different block
+
13505  else
+
13506  {
+
13507  // MOVE OPTION 2: Move the allocation to a different block.
+
13508 
+
13509  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
+
13510  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
+
13511 
+
13512  VmaSuballocation suballoc = *srcSuballocIt;
+
13513  suballoc.offset = dstAllocOffset;
+
13514  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
+
13515  dstOffset = dstAllocOffset + srcAllocSize;
+
13516  m_BytesMoved += srcAllocSize;
+
13517  ++m_AllocationsMoved;
+
13518 
+
13519  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
+
13520  ++nextSuballocIt;
+
13521  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
+
13522  srcSuballocIt = nextSuballocIt;
+
13523 
+
13524  pDstMetadata->m_Suballocations.push_back(suballoc);
+
13525 
+
13526  VmaDefragmentationMove move = {
+
13527  srcOrigBlockIndex, dstOrigBlockIndex,
+
13528  srcAllocOffset, dstAllocOffset,
+
13529  srcAllocSize };
+
13530  moves.push_back(move);
+
13531  }
+
13532  }
+
13533  }
+
13534  }
+
13535 
+
13536  m_BlockInfos.clear();
+
13537 
+
13538  PostprocessMetadata();
+
13539 
+
13540  return VK_SUCCESS;
+
13541 }
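Worth spelling out the skip-over heuristic used in the same-block case above: when the destination overlaps the source, the move is performed only if it shifts the allocation by at least 1/64 of its own size, since copying a large allocation for a tiny gain costs more than it saves. A self-contained sketch of that test, with illustrative numbers:

    #include <cassert>
    #include <cstdint>

    // Mirrors the in-place test above: a same-block move is skipped when it
    // would shift the allocation by less than 1/64 of its own size.
    bool WouldSkipMove(uint64_t srcOffset, uint64_t dstOffset, uint64_t size)
    {
        return (srcOffset - dstOffset) * 64 < size;
    }

    int main()
    {
        const uint64_t MiB = 1024ull * 1024ull;
        // A 32 MiB allocation moving down by 256 KiB: 256 KiB * 64 = 16 MiB < 32 MiB -> skip.
        assert(WouldSkipMove(32 * MiB, 32 * MiB - 256 * 1024, 32 * MiB));
        // Moving down by 1 MiB: 1 MiB * 64 = 64 MiB >= 32 MiB -> perform the move.
        assert(!WouldSkipMove(32 * MiB, 31 * MiB, 32 * MiB));
        return 0;
    }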
+
13542 
+
13543 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
+
13544 {
+
13545  const size_t blockCount = m_pBlockVector->GetBlockCount();
+
13546  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
+
13547  {
+
13548  VmaBlockMetadata_Generic* const pMetadata =
+
13549  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
+
13550  pMetadata->m_FreeCount = 0;
+
13551  pMetadata->m_SumFreeSize = pMetadata->GetSize();
+
13552  pMetadata->m_FreeSuballocationsBySize.clear();
+
13553  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
+
13554  it != pMetadata->m_Suballocations.end(); )
+
13555  {
+
13556  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
+
13557  {
+
13558  VmaSuballocationList::iterator nextIt = it;
+
13559  ++nextIt;
+
13560  pMetadata->m_Suballocations.erase(it);
+
13561  it = nextIt;
+
13562  }
+
13563  else
+
13564  {
+
13565  ++it;
+
13566  }
+
13567  }
+
13568  }
+
13569 }
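PreprocessMetadata strips every FREE suballocation so that, while the fast algorithm runs, each block's list holds only live allocations; the counters are reset here and rebuilt later in PostprocessMetadata. The copy-advance-erase dance is the standard idiom for erasing from a linked list mid-traversal; shown here on a plain std::list, whose erase conveniently returns the next iterator:

    #include <list>

    // Same erase-while-iterating idiom as above, on std::list for brevity.
    void EraseIfNegative(std::list<int>& values)
    {
        for(std::list<int>::iterator it = values.begin(); it != values.end(); )
        {
            if(*it < 0)
                it = values.erase(it); // erase returns the next valid iterator
            else
                ++it;
        }
    }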
+
13570 
+
13571 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
+
13572 {
+
13573  const size_t blockCount = m_pBlockVector->GetBlockCount();
+
13574  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
+
13575  {
+
13576  VmaBlockMetadata_Generic* const pMetadata =
+
13577  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
+
13578  const VkDeviceSize blockSize = pMetadata->GetSize();
+
13579 
+
13580  // No allocations in this block - entire area is free.
+
13581  if(pMetadata->m_Suballocations.empty())
+
13582  {
+
13583  pMetadata->m_FreeCount = 1;
+
13584  //pMetadata->m_SumFreeSize is already set to blockSize.
+
13585  VmaSuballocation suballoc = {
+
13586  0, // offset
+
13587  blockSize, // size
+
13588  VMA_NULL, // hAllocation
+
13589  VMA_SUBALLOCATION_TYPE_FREE };
+
13590  pMetadata->m_Suballocations.push_back(suballoc);
+
13591  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
+
13592  }
+
13593  // There are some allocations in this block.
+
13594  else
+
13595  {
+
13596  VkDeviceSize offset = 0;
+
13597  VmaSuballocationList::iterator it;
+
13598  for(it = pMetadata->m_Suballocations.begin();
+
13599  it != pMetadata->m_Suballocations.end();
+
13600  ++it)
+
13601  {
+
13602  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
+
13603  VMA_ASSERT(it->offset >= offset);
+
13604 
+
13605  // Need to insert preceding free space.
+
13606  if(it->offset > offset)
+
13607  {
+
13608  ++pMetadata->m_FreeCount;
+
13609  const VkDeviceSize freeSize = it->offset - offset;
+
13610  VmaSuballocation suballoc = {
+
13611  offset, // offset
+
13612  freeSize, // size
+
13613  VMA_NULL, // hAllocation
+
13614  VMA_SUBALLOCATION_TYPE_FREE };
+
13615  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
+
13616  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
+
13617  {
+
13618  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
+
13619  }
+
13620  }
+
13621 
+
13622  pMetadata->m_SumFreeSize -= it->size;
+
13623  offset = it->offset + it->size;
+
13624  }
+
13625 
+
13626  // Need to insert trailing free space.
+
13627  if(offset < blockSize)
+
13628  {
+
13629  ++pMetadata->m_FreeCount;
+
13630  const VkDeviceSize freeSize = blockSize - offset;
+
13631  VmaSuballocation suballoc = {
+
13632  offset, // offset
+
13633  freeSize, // size
+
13634  VMA_NULL, // hAllocation
+
13635  VMA_SUBALLOCATION_TYPE_FREE };
+
13636  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
+
13637  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
+
13638  if(freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
+
13639  {
+
13640  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
+
13641  }
+
13642  }
13643 
-
13644  VMA_HEAVY_ASSERT(pMetadata->Validate());
-
13645  }
-
13646 }
-
13647 
-
13648 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
-
13649 {
-
13650  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
-
13651  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
-
13652  while(it != pMetadata->m_Suballocations.end())
-
13653  {
-
13654  if(it->offset < suballoc.offset)
-
13655  {
-
13656  ++it;
-
13657  }
-
13658  }
-
13659  pMetadata->m_Suballocations.insert(it, suballoc);
-
13660 }
-
13661 
-
13663 // VmaBlockVectorDefragmentationContext
-
13664 
-
13665 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
-
13666  VmaAllocator hAllocator,
-
13667  VmaPool hCustomPool,
-
13668  VmaBlockVector* pBlockVector,
-
13669  uint32_t currFrameIndex) :
-
13670  res(VK_SUCCESS),
-
13671  mutexLocked(false),
-
13672  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
-
13673  m_hAllocator(hAllocator),
-
13674  m_hCustomPool(hCustomPool),
-
13675  m_pBlockVector(pBlockVector),
-
13676  m_CurrFrameIndex(currFrameIndex),
-
13677  m_pAlgorithm(VMA_NULL),
-
13678  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
-
13679  m_AllAllocations(false)
-
13680 {
-
13681 }
-
13682 
-
13683 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
-
13684 {
-
13685  vma_delete(m_hAllocator, m_pAlgorithm);
-
13686 }
-
13687 
-
13688 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
-
13689 {
-
13690  AllocInfo info = { hAlloc, pChanged };
-
13691  m_Allocations.push_back(info);
+
13644  VMA_SORT(
+
13645  pMetadata->m_FreeSuballocationsBySize.begin(),
+
13646  pMetadata->m_FreeSuballocationsBySize.end(),
+
13647  VmaSuballocationItemSizeLess());
+
13648  }
+
13649 
+
13650  VMA_HEAVY_ASSERT(pMetadata->Validate());
+
13651  }
+
13652 }
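PostprocessMetadata re-synthesizes a FREE suballocation for every gap between the packed allocations, updating m_FreeCount and m_SumFreeSize as it goes, then re-sorts the by-size free list. One asymmetry worth noting in the code above: a preceding gap is registered when freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER, but a trailing gap only when freeSize is strictly greater. The closing VMA_HEAVY_ASSERT(pMetadata->Validate()) checks, among other things, that the rebuilt entries tile the block exactly; a simplified sketch of that tiling invariant:

    #include <cstdint>
    #include <vector>

    struct Suballoc { uint64_t offset, size; };

    // The rebuilt list must cover [0, blockSize) contiguously:
    // strictly increasing offsets, no gaps, no overlaps.
    bool TilesBlock(const std::vector<Suballoc>& subs, uint64_t blockSize)
    {
        uint64_t expected = 0;
        for(const Suballoc& s : subs)
        {
            if(s.offset != expected || s.size == 0)
                return false;
            expected += s.size;
        }
        return expected == blockSize;
    }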
+
13653 
+
13654 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
+
13655 {
+
13656  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
+
13657  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
+
13658  while(it != pMetadata->m_Suballocations.end())
+
13659  {
+
13660  if(it->offset < suballoc.offset)
+
13661  {
+
13662  ++it;
+
13663  }
+
13664  }
+
13665  pMetadata->m_Suballocations.insert(it, suballoc);
+
13666 }
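Note that the linear search in InsertSuballoc only advances the iterator when it->offset < suballoc.offset and has no else branch, so if it ever lands on an entry with an equal or greater offset before reaching end(), the loop never terminates. In the paths the fast algorithm actually exercises, the inserted offset appears to always be the largest in the destination list, so the loop exits at end() and the problem stays latent. A hypothetical corrected sketch (not the shipped code) that folds the ordering test into the loop condition:

    #include <list>

    struct Suballoc { unsigned long long offset, size; };

    // Hypothetical fix: stop at the first entry at or beyond the new offset,
    // then insert before it, preserving ascending order of offsets.
    void InsertSorted(std::list<Suballoc>& subs, const Suballoc& suballoc)
    {
        std::list<Suballoc>::iterator it = subs.begin();
        while(it != subs.end() && it->offset < suballoc.offset)
        {
            ++it;
        }
        subs.insert(it, suballoc);
    }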
+
13667 
+
13669 // VmaBlockVectorDefragmentationContext
+
13670 
+
13671 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
+
13672  VmaAllocator hAllocator,
+
13673  VmaPool hCustomPool,
+
13674  VmaBlockVector* pBlockVector,
+
13675  uint32_t currFrameIndex) :
+
13676  res(VK_SUCCESS),
+
13677  mutexLocked(false),
+
13678  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
+
13679  m_hAllocator(hAllocator),
+
13680  m_hCustomPool(hCustomPool),
+
13681  m_pBlockVector(pBlockVector),
+
13682  m_CurrFrameIndex(currFrameIndex),
+
13683  m_pAlgorithm(VMA_NULL),
+
13684  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
+
13685  m_AllAllocations(false)
+
13686 {
+
13687 }
+
13688 
+
13689 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
+
13690 {
+
13691  vma_delete(m_hAllocator, m_pAlgorithm);
13692 }
13693 
-
13694 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
+
13694 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
13695 {
-
13696  const bool allAllocations = m_AllAllocations ||
-
13697  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
-
13698 
-
13699  /********************************
-
13700  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
-
13701  ********************************/
-
13702 
-
13703  /*
-
13704  The fast algorithm is supported only when certain criteria are met:
-
13705  - VMA_DEBUG_MARGIN is 0.
-
13706  - All allocations in this block vector are moveable.
-
13707  - There is no possibility of image/buffer granularity conflict.
-
13708  */
-
13709  if(VMA_DEBUG_MARGIN == 0 &&
-
13710  allAllocations &&
-
13711  !m_pBlockVector->IsBufferImageGranularityConflictPossible())
-
13712  {
-
13713  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
-
13714  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
-
13715  }
-
13716  else
-
13717  {
-
13718  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
-
13719  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
-
13720  }
-
13721 
-
13722  if(allAllocations)
+
13696  AllocInfo info = { hAlloc, pChanged };
+
13697  m_Allocations.push_back(info);
+
13698 }
+
13699 
+
13700 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
+
13701 {
+
13702  const bool allAllocations = m_AllAllocations ||
+
13703  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
+
13704 
+
13705  /********************************
+
13706  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
+
13707  ********************************/
+
13708 
+
13709  /*
+
13710  The fast algorithm is supported only when certain criteria are met:
+
13711  - VMA_DEBUG_MARGIN is 0.
+
13712  - All allocations in this block vector are moveable.
+
13713  - There is no possibility of image/buffer granularity conflict.
+
13714  */
+
13715  if(VMA_DEBUG_MARGIN == 0 &&
+
13716  allAllocations &&
+
13717  !m_pBlockVector->IsBufferImageGranularityConflictPossible())
+
13718  {
+
13719  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
+
13720  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
+
13721  }
+
13722  else
13723  {
-
13724  m_pAlgorithm->AddAll();
-
13725  }
-
13726  else
-
13727  {
-
13728  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
-
13729  {
-
13730  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
-
13731  }
-
13732  }
-
13733 }
-
13734 
-
13736 // VmaDefragmentationContext
-
13737 
-
13738 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
-
13739  VmaAllocator hAllocator,
-
13740  uint32_t currFrameIndex,
-
13741  uint32_t flags,
-
13742  VmaDefragmentationStats* pStats) :
-
13743  m_hAllocator(hAllocator),
-
13744  m_CurrFrameIndex(currFrameIndex),
-
13745  m_Flags(flags),
-
13746  m_pStats(pStats),
-
13747  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
-
13748 {
-
13749  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
-
13750 }
-
13751 
-
13752 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
-
13753 {
-
13754  for(size_t i = m_CustomPoolContexts.size(); i--; )
-
13755  {
-
13756  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
-
13757  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
-
13758  vma_delete(m_hAllocator, pBlockVectorCtx);
-
13759  }
-
13760  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
+
13724  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
+
13725  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
+
13726  }
+
13727 
+
13728  if(allAllocations)
+
13729  {
+
13730  m_pAlgorithm->AddAll();
+
13731  }
+
13732  else
+
13733  {
+
13734  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
+
13735  {
+
13736  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
+
13737  }
+
13738  }
+
13739 }
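Begin() selects between the two algorithms with a plain conjunction: the fast path requires a zero debug margin, that every allocation in the block vector participates, and that no buffer/image granularity conflict is possible; any failed criterion falls back to the generic algorithm. Condensed into a standalone predicate (names are illustrative):

    // Condensed form of the selection above; the three flags stand in for
    // VMA_DEBUG_MARGIN == 0, the all-allocations check, and
    // IsBufferImageGranularityConflictPossible().
    enum class DefragAlgo { Fast, Generic };

    DefragAlgo ChooseAlgo(bool zeroDebugMargin,
                          bool allAllocationsMovable,
                          bool granularityConflictPossible)
    {
        return (zeroDebugMargin && allAllocationsMovable && !granularityConflictPossible)
            ? DefragAlgo::Fast : DefragAlgo::Generic;
    }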
+
13740 
+
13742 // VmaDefragmentationContext
+
13743 
+
13744 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
+
13745  VmaAllocator hAllocator,
+
13746  uint32_t currFrameIndex,
+
13747  uint32_t flags,
+
13748  VmaDefragmentationStats* pStats) :
+
13749  m_hAllocator(hAllocator),
+
13750  m_CurrFrameIndex(currFrameIndex),
+
13751  m_Flags(flags),
+
13752  m_pStats(pStats),
+
13753  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
+
13754 {
+
13755  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
+
13756 }
+
13757 
+
13758 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
+
13759 {
+
13760  for(size_t i = m_CustomPoolContexts.size(); i--; )
13761  {
-
13762  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
-
13763  if(pBlockVectorCtx)
-
13764  {
-
13765  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
-
13766  vma_delete(m_hAllocator, pBlockVectorCtx);
-
13767  }
-
13768  }
-
13769 }
-
13770 
-
13771 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
-
13772 {
-
13773  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
-
13774  {
-
13775  VmaPool pool = pPools[poolIndex];
-
13776  VMA_ASSERT(pool);
-
13777  // Pools with an algorithm other than the default are not defragmented.
-
13778  if(pool->m_BlockVector.GetAlgorithm() == 0)
-
13779  {
-
13780  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
-
13781 
-
13782  for(size_t i = m_CustomPoolContexts.size(); i--; )
-
13783  {
-
13784  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
-
13785  {
-
13786  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
-
13787  break;
-
13788  }
-
13789  }
-
13790 
-
13791  if(!pBlockVectorDefragCtx)
-
13792  {
-
13793  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
-
13794  m_hAllocator,
-
13795  pool,
-
13796  &pool->m_BlockVector,
-
13797  m_CurrFrameIndex);
-
13798  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
-
13799  }
-
13800 
-
13801  pBlockVectorDefragCtx->AddAll();
-
13802  }
-
13803  }
-
13804 }
-
13805 
-
13806 void VmaDefragmentationContext_T::AddAllocations(
-
13807  uint32_t allocationCount,
-
13808  VmaAllocation* pAllocations,
-
13809  VkBool32* pAllocationsChanged)
-
13810 {
-
13811  // Dispatch pAllocations among the defragmentation contexts, creating them when necessary.
-
13812  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
-
13813  {
-
13814  const VmaAllocation hAlloc = pAllocations[allocIndex];
-
13815  VMA_ASSERT(hAlloc);
-
13816  // DedicatedAlloc cannot be defragmented.
-
13817  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
-
13818  // Lost allocation cannot be defragmented.
-
13819  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
-
13820  {
-
13821  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
-
13822 
-
13823  const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
-
13824  // This allocation belongs to a custom pool.
-
13825  if(hAllocPool != VK_NULL_HANDLE)
-
13826  {
-
13827  // Pools with an algorithm other than the default are not defragmented.
-
13828  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
-
13829  {
-
13830  for(size_t i = m_CustomPoolContexts.size(); i--; )
-
13831  {
-
13832  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
-
13833  {
-
13834  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
-
13835  break;
-
13836  }
-
13837  }
-
13838  if(!pBlockVectorDefragCtx)
-
13839  {
-
13840  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
-
13841  m_hAllocator,
-
13842  hAllocPool,
-
13843  &hAllocPool->m_BlockVector,
-
13844  m_CurrFrameIndex);
-
13845  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
-
13846  }
-
13847  }
-
13848  }
-
13849  // This allocation belongs to the default pool.
-
13850  else
-
13851  {
-
13852  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
-
13853  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
-
13854  if(!pBlockVectorDefragCtx)
-
13855  {
-
13856  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
-
13857  m_hAllocator,
-
13858  VMA_NULL, // hCustomPool
-
13859  m_hAllocator->m_pBlockVectors[memTypeIndex],
-
13860  m_CurrFrameIndex);
-
13861  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
-
13862  }
-
13863  }
-
13864 
-
13865  if(pBlockVectorDefragCtx)
-
13866  {
-
13867  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
-
13868  &pAllocationsChanged[allocIndex] : VMA_NULL;
-
13869  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
-
13870  }
-
13871  }
-
13872  }
-
13873 }
-
13874 
-
13875 VkResult VmaDefragmentationContext_T::Defragment(
-
13876  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
-
13877  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
-
13878  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
-
13879 {
-
13880  if(pStats)
-
13881  {
-
13882  memset(pStats, 0, sizeof(VmaDefragmentationStats));
-
13883  }
-
13884 
-
13885  if(commandBuffer == VK_NULL_HANDLE)
-
13886  {
-
13887  maxGpuBytesToMove = 0;
-
13888  maxGpuAllocationsToMove = 0;
+
13762  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
+
13763  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
+
13764  vma_delete(m_hAllocator, pBlockVectorCtx);
+
13765  }
+
13766  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
+
13767  {
+
13768  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
+
13769  if(pBlockVectorCtx)
+
13770  {
+
13771  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
+
13772  vma_delete(m_hAllocator, pBlockVectorCtx);
+
13773  }
+
13774  }
+
13775 }
+
13776 
+
13777 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
+
13778 {
+
13779  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
+
13780  {
+
13781  VmaPool pool = pPools[poolIndex];
+
13782  VMA_ASSERT(pool);
+
13783  // Pools with an algorithm other than the default are not defragmented.
+
13784  if(pool->m_BlockVector.GetAlgorithm() == 0)
+
13785  {
+
13786  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
+
13787 
+
13788  for(size_t i = m_CustomPoolContexts.size(); i--; )
+
13789  {
+
13790  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
+
13791  {
+
13792  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
+
13793  break;
+
13794  }
+
13795  }
+
13796 
+
13797  if(!pBlockVectorDefragCtx)
+
13798  {
+
13799  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
+
13800  m_hAllocator,
+
13801  pool,
+
13802  &pool->m_BlockVector,
+
13803  m_CurrFrameIndex);
+
13804  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
+
13805  }
+
13806 
+
13807  pBlockVectorDefragCtx->AddAll();
+
13808  }
+
13809  }
+
13810 }
+
13811 
+
13812 void VmaDefragmentationContext_T::AddAllocations(
+
13813  uint32_t allocationCount,
+
13814  VmaAllocation* pAllocations,
+
13815  VkBool32* pAllocationsChanged)
+
13816 {
+
13817  // Dispatch pAllocations among the defragmentation contexts, creating them when necessary.
+
13818  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
+
13819  {
+
13820  const VmaAllocation hAlloc = pAllocations[allocIndex];
+
13821  VMA_ASSERT(hAlloc);
+
13822  // DedicatedAlloc cannot be defragmented.
+
13823  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
+
13824  // Lost allocation cannot be defragmented.
+
13825  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
+
13826  {
+
13827  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
+
13828 
+
13829  const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
+
13830  // This allocation belongs to a custom pool.
+
13831  if(hAllocPool != VK_NULL_HANDLE)
+
13832  {
+
13833  // Pools with an algorithm other than the default are not defragmented.
+
13834  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
+
13835  {
+
13836  for(size_t i = m_CustomPoolContexts.size(); i--; )
+
13837  {
+
13838  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
+
13839  {
+
13840  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
+
13841  break;
+
13842  }
+
13843  }
+
13844  if(!pBlockVectorDefragCtx)
+
13845  {
+
13846  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
+
13847  m_hAllocator,
+
13848  hAllocPool,
+
13849  &hAllocPool->m_BlockVector,
+
13850  m_CurrFrameIndex);
+
13851  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
+
13852  }
+
13853  }
+
13854  }
+
13855  // This allocation belongs to the default pool.
+
13856  else
+
13857  {
+
13858  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
+
13859  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
+
13860  if(!pBlockVectorDefragCtx)
+
13861  {
+
13862  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
+
13863  m_hAllocator,
+
13864  VMA_NULL, // hCustomPool
+
13865  m_hAllocator->m_pBlockVectors[memTypeIndex],
+
13866  m_CurrFrameIndex);
+
13867  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
+
13868  }
+
13869  }
+
13870 
+
13871  if(pBlockVectorDefragCtx)
+
13872  {
+
13873  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
+
13874  &pAllocationsChanged[allocIndex] : VMA_NULL;
+
13875  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
+
13876  }
+
13877  }
+
13878  }
+
13879 }
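AddAllocations routes each allocation to the defragmentation context of its pool: custom-pool contexts are found by a newest-first linear search of m_CustomPoolContexts and created on demand, while default-pool contexts sit in a fixed per-memory-type array. The find-or-create pattern, simplified:

    #include <cstddef>
    #include <vector>

    struct Pool;
    struct Ctx { Pool* pool; };

    // Newest-first linear find-or-create, as in the custom-pool branch above
    // (simplified; the library allocates with vma_new, not operator new).
    Ctx* GetOrCreateCtx(std::vector<Ctx*>& ctxs, Pool* pool)
    {
        for(size_t i = ctxs.size(); i--; )
        {
            if(ctxs[i]->pool == pool)
                return ctxs[i];
        }
        Ctx* ctx = new Ctx{ pool };
        ctxs.push_back(ctx);
        return ctx;
    }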
+
13880 
+
13881 VkResult VmaDefragmentationContext_T::Defragment(
+
13882  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
+
13883  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
+
13884  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
+
13885 {
+
13886  if(pStats)
+
13887  {
+
13888  memset(pStats, 0, sizeof(VmaDefragmentationStats));
13889  }
13890 
-
13891  VkResult res = VK_SUCCESS;
-
13892 
-
13893  // Process default pools.
-
13894  for(uint32_t memTypeIndex = 0;
-
13895  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
-
13896  ++memTypeIndex)
-
13897  {
-
13898  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
-
13899  if(pBlockVectorCtx)
-
13900  {
-
13901  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
-
13902  pBlockVectorCtx->GetBlockVector()->Defragment(
-
13903  pBlockVectorCtx,
-
13904  pStats,
-
13905  maxCpuBytesToMove, maxCpuAllocationsToMove,
-
13906  maxGpuBytesToMove, maxGpuAllocationsToMove,
-
13907  commandBuffer);
-
13908  if(pBlockVectorCtx->res != VK_SUCCESS)
-
13909  {
-
13910  res = pBlockVectorCtx->res;
-
13911  }
-
13912  }
-
13913  }
-
13914 
-
13915  // Process custom pools.
-
13916  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
-
13917  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
-
13918  ++customCtxIndex)
-
13919  {
-
13920  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
-
13921  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
-
13922  pBlockVectorCtx->GetBlockVector()->Defragment(
-
13923  pBlockVectorCtx,
-
13924  pStats,
-
13925  maxCpuBytesToMove, maxCpuAllocationsToMove,
-
13926  maxGpuBytesToMove, maxGpuAllocationsToMove,
-
13927  commandBuffer);
-
13928  if(pBlockVectorCtx->res != VK_SUCCESS)
-
13929  {
-
13930  res = pBlockVectorCtx->res;
-
13931  }
-
13932  }
-
13933 
-
13934  return res;
-
13935 }
-
13936 
-
13938 // VmaRecorder
+
13891  if(commandBuffer == VK_NULL_HANDLE)
+
13892  {
+
13893  maxGpuBytesToMove = 0;
+
13894  maxGpuAllocationsToMove = 0;
+
13895  }
+
13896 
+
13897  VkResult res = VK_SUCCESS;
+
13898 
+
13899  // Process default pools.
+
13900  for(uint32_t memTypeIndex = 0;
+
13901  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
+
13902  ++memTypeIndex)
+
13903  {
+
13904  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
+
13905  if(pBlockVectorCtx)
+
13906  {
+
13907  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
+
13908  pBlockVectorCtx->GetBlockVector()->Defragment(
+
13909  pBlockVectorCtx,
+
13910  pStats,
+
13911  maxCpuBytesToMove, maxCpuAllocationsToMove,
+
13912  maxGpuBytesToMove, maxGpuAllocationsToMove,
+
13913  commandBuffer);
+
13914  if(pBlockVectorCtx->res != VK_SUCCESS)
+
13915  {
+
13916  res = pBlockVectorCtx->res;
+
13917  }
+
13918  }
+
13919  }
+
13920 
+
13921  // Process custom pools.
+
13922  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
+
13923  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
+
13924  ++customCtxIndex)
+
13925  {
+
13926  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
+
13927  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
+
13928  pBlockVectorCtx->GetBlockVector()->Defragment(
+
13929  pBlockVectorCtx,
+
13930  pStats,
+
13931  maxCpuBytesToMove, maxCpuAllocationsToMove,
+
13932  maxGpuBytesToMove, maxGpuAllocationsToMove,
+
13933  commandBuffer);
+
13934  if(pBlockVectorCtx->res != VK_SUCCESS)
+
13935  {
+
13936  res = pBlockVectorCtx->res;
+
13937  }
+
13938  }
13939 
-
13940 #if VMA_RECORDING_ENABLED
-
13941 
-
13942 VmaRecorder::VmaRecorder() :
-
13943  m_UseMutex(true),
-
13944  m_Flags(0),
-
13945  m_File(VMA_NULL),
-
13946  m_Freq(INT64_MAX),
-
13947  m_StartCounter(INT64_MAX)
-
13948 {
-
13949 }
-
13950 
-
13951 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
-
13952 {
-
13953  m_UseMutex = useMutex;
-
13954  m_Flags = settings.flags;
-
13955 
-
13956  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
-
13957  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
-
13958 
-
13959  // Open file for writing.
-
13960  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
-
13961  if(err != 0)
-
13962  {
-
13963  return VK_ERROR_INITIALIZATION_FAILED;
-
13964  }
-
13965 
-
13966  // Write header.
-
13967  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
-
13968  fprintf(m_File, "%s\n", "1,8");
-
13969 
-
13970  return VK_SUCCESS;
-
13971 }
-
13972 
-
13973 VmaRecorder::~VmaRecorder()
-
13974 {
-
13975  if(m_File != VMA_NULL)
-
13976  {
-
13977  fclose(m_File);
-
13978  }
-
13979 }
-
13980 
-
13981 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
-
13982 {
-
13983  CallParams callParams;
-
13984  GetBasicParams(callParams);
-
13985 
-
13986  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
13987  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
-
13988  Flush();
-
13989 }
-
13990 
-
13991 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
-
13992 {
-
13993  CallParams callParams;
-
13994  GetBasicParams(callParams);
-
13995 
-
13996  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
13997  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
-
13998  Flush();
-
13999 }
-
14000 
-
14001 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
-
14002 {
-
14003  CallParams callParams;
-
14004  GetBasicParams(callParams);
-
14005 
-
14006  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
14007  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
-
14008  createInfo.memoryTypeIndex,
-
14009  createInfo.flags,
-
14010  createInfo.blockSize,
-
14011  (uint64_t)createInfo.minBlockCount,
-
14012  (uint64_t)createInfo.maxBlockCount,
-
14013  createInfo.frameInUseCount,
-
14014  pool);
-
14015  Flush();
-
14016 }
-
14017 
-
14018 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
-
14019 {
-
14020  CallParams callParams;
-
14021  GetBasicParams(callParams);
-
14022 
-
14023  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
14024  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
-
14025  pool);
-
14026  Flush();
-
14027 }
+
13940  return res;
+
13941 }
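This Defragment() is what runs when the application calls the public vmaDefragmentationBegin()/vmaDefragmentationEnd() pair: the CPU limits bound host-side moves, and as the early-out above shows, the GPU limits only take effect when a command buffer is supplied. A hedged usage sketch against the v2.3.0 public API (allocator, allocs, and allocCount are assumed to already exist in the application; error handling elided):

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocCount;
    defragInfo.pAllocations = allocs;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
    // No command buffer: GPU-side limits are zeroed, as seen above.
    defragInfo.commandBuffer = VK_NULL_HANDLE;

    VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
    vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
    vmaDefragmentationEnd(allocator, defragCtx);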
+
13942 
+
13944 // VmaRecorder
+
13945 
+
13946 #if VMA_RECORDING_ENABLED
+
13947 
+
13948 VmaRecorder::VmaRecorder() :
+
13949  m_UseMutex(true),
+
13950  m_Flags(0),
+
13951  m_File(VMA_NULL),
+
13952  m_Freq(INT64_MAX),
+
13953  m_StartCounter(INT64_MAX)
+
13954 {
+
13955 }
+
13956 
+
13957 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
+
13958 {
+
13959  m_UseMutex = useMutex;
+
13960  m_Flags = settings.flags;
+
13961 
+
13962  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
+
13963  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
+
13964 
+
13965  // Open file for writing.
+
13966  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
+
13967  if(err != 0)
+
13968  {
+
13969  return VK_ERROR_INITIALIZATION_FAILED;
+
13970  }
+
13971 
+
13972  // Write header.
+
13973  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
+
13974  fprintf(m_File, "%s\n", "1,8");
+
13975 
+
13976  return VK_SUCCESS;
+
13977 }
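The recorder is switched on at allocator creation: when compiled with VMA_RECORDING_ENABLED, VmaAllocatorCreateInfo::pRecordSettings routes every entry point through the Record* methods below, producing a CSV file that starts with the two header lines written above (format name, then file-format version "1,8"). Note that the QueryPerformanceCounter/fopen_s calls make this recorder Windows-only in this version. A setup sketch (the file path is illustrative):

    VmaRecordSettings recordSettings = {};
    recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT; // flush after each call
    recordSettings.pFilePath = "vma_replay.csv";

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice; // assumed to exist
    allocatorInfo.device = device;                 // assumed to exist
    allocatorInfo.pRecordSettings = &recordSettings;

    VmaAllocator allocator;
    vmaCreateAllocator(&allocatorInfo, &allocator);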
+
13978 
+
13979 VmaRecorder::~VmaRecorder()
+
13980 {
+
13981  if(m_File != VMA_NULL)
+
13982  {
+
13983  fclose(m_File);
+
13984  }
+
13985 }
+
13986 
+
13987 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
+
13988 {
+
13989  CallParams callParams;
+
13990  GetBasicParams(callParams);
+
13991 
+
13992  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
13993  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
+
13994  Flush();
+
13995 }
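Every Record* method follows the same shape: capture the thread id and a timestamp via GetBasicParams, take the file mutex, then print one CSV line whose fixed prefix is threadId, time in seconds, frame index, and the entry-point name, followed by call-specific fields. The shared prefix, factored out as an illustrative helper (the library inlines it into each fprintf):

    #include <cstdio>

    // Illustrative only - writes the fixed CSV prefix shared by every record line.
    void WriteRecordPrefix(FILE* file, unsigned threadId, double timeSec,
                           unsigned frameIndex, const char* functionName)
    {
        std::fprintf(file, "%u,%.3f,%u,%s", threadId, timeSec, frameIndex, functionName);
    }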
+
13996 
+
13997 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
+
13998 {
+
13999  CallParams callParams;
+
14000  GetBasicParams(callParams);
+
14001 
+
14002  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
14003  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
+
14004  Flush();
+
14005 }
+
14006 
+
14007 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
+
14008 {
+
14009  CallParams callParams;
+
14010  GetBasicParams(callParams);
+
14011 
+
14012  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
14013  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
+
14014  createInfo.memoryTypeIndex,
+
14015  createInfo.flags,
+
14016  createInfo.blockSize,
+
14017  (uint64_t)createInfo.minBlockCount,
+
14018  (uint64_t)createInfo.maxBlockCount,
+
14019  createInfo.frameInUseCount,
+
14020  pool);
+
14021  Flush();
+
14022 }
+
14023 
+
14024 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
+
14025 {
+
14026  CallParams callParams;
+
14027  GetBasicParams(callParams);
14028 
-
14029 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
-
14030  const VkMemoryRequirements& vkMemReq,
-
14031  const VmaAllocationCreateInfo& createInfo,
-
14032  VmaAllocation allocation)
-
14033 {
-
14034  CallParams callParams;
-
14035  GetBasicParams(callParams);
-
14036 
-
14037  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
14038  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
-
14039  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
-
14040  vkMemReq.size,
-
14041  vkMemReq.alignment,
-
14042  vkMemReq.memoryTypeBits,
-
14043  createInfo.flags,
-
14044  createInfo.usage,
-
14045  createInfo.requiredFlags,
-
14046  createInfo.preferredFlags,
-
14047  createInfo.memoryTypeBits,
-
14048  createInfo.pool,
-
14049  allocation,
-
14050  userDataStr.GetString());
-
14051  Flush();
-
14052 }
-
14053 
-
14054 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
-
14055  const VkMemoryRequirements& vkMemReq,
-
14056  const VmaAllocationCreateInfo& createInfo,
-
14057  uint64_t allocationCount,
-
14058  const VmaAllocation* pAllocations)
-
14059 {
-
14060  CallParams callParams;
-
14061  GetBasicParams(callParams);
-
14062 
-
14063  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
14064  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
-
14065  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
-
14066  vkMemReq.size,
-
14067  vkMemReq.alignment,
-
14068  vkMemReq.memoryTypeBits,
-
14069  createInfo.flags,
-
14070  createInfo.usage,
-
14071  createInfo.requiredFlags,
-
14072  createInfo.preferredFlags,
-
14073  createInfo.memoryTypeBits,
-
14074  createInfo.pool);
-
14075  PrintPointerList(allocationCount, pAllocations);
-
14076  fprintf(m_File, ",%s\n", userDataStr.GetString());
-
14077  Flush();
-
14078 }
-
14079 
-
14080 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
-
14081  const VkMemoryRequirements& vkMemReq,
-
14082  bool requiresDedicatedAllocation,
-
14083  bool prefersDedicatedAllocation,
-
14084  const VmaAllocationCreateInfo& createInfo,
-
14085  VmaAllocation allocation)
-
14086 {
-
14087  CallParams callParams;
-
14088  GetBasicParams(callParams);
-
14089 
-
14090  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
14091  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
-
14092  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
-
14093  vkMemReq.size,
-
14094  vkMemReq.alignment,
-
14095  vkMemReq.memoryTypeBits,
-
14096  requiresDedicatedAllocation ? 1 : 0,
-
14097  prefersDedicatedAllocation ? 1 : 0,
-
14098  createInfo.flags,
-
14099  createInfo.usage,
-
14100  createInfo.requiredFlags,
-
14101  createInfo.preferredFlags,
-
14102  createInfo.memoryTypeBits,
-
14103  createInfo.pool,
-
14104  allocation,
-
14105  userDataStr.GetString());
-
14106  Flush();
-
14107 }
-
14108 
-
14109 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
-
14110  const VkMemoryRequirements& vkMemReq,
-
14111  bool requiresDedicatedAllocation,
-
14112  bool prefersDedicatedAllocation,
-
14113  const VmaAllocationCreateInfo& createInfo,
-
14114  VmaAllocation allocation)
-
14115 {
-
14116  CallParams callParams;
-
14117  GetBasicParams(callParams);
-
14118 
-
14119  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
14120  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
-
14121  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
-
14122  vkMemReq.size,
-
14123  vkMemReq.alignment,
-
14124  vkMemReq.memoryTypeBits,
-
14125  requiresDedicatedAllocation ? 1 : 0,
-
14126  prefersDedicatedAllocation ? 1 : 0,
-
14127  createInfo.flags,
-
14128  createInfo.usage,
-
14129  createInfo.requiredFlags,
-
14130  createInfo.preferredFlags,
-
14131  createInfo.memoryTypeBits,
-
14132  createInfo.pool,
-
14133  allocation,
-
14134  userDataStr.GetString());
-
14135  Flush();
-
14136 }
-
14137 
-
14138 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
-
14139  VmaAllocation allocation)
-
14140 {
-
14141  CallParams callParams;
-
14142  GetBasicParams(callParams);
+
14029  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
14030  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
+
14031  pool);
+
14032  Flush();
+
14033 }
+
14034 
+
14035 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
+
14036  const VkMemoryRequirements& vkMemReq,
+
14037  const VmaAllocationCreateInfo& createInfo,
+
14038  VmaAllocation allocation)
+
14039 {
+
14040  CallParams callParams;
+
14041  GetBasicParams(callParams);
+
14042 
+
14043  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
14044  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
+
14045  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
+
14046  vkMemReq.size,
+
14047  vkMemReq.alignment,
+
14048  vkMemReq.memoryTypeBits,
+
14049  createInfo.flags,
+
14050  createInfo.usage,
+
14051  createInfo.requiredFlags,
+
14052  createInfo.preferredFlags,
+
14053  createInfo.memoryTypeBits,
+
14054  createInfo.pool,
+
14055  allocation,
+
14056  userDataStr.GetString());
+
14057  Flush();
+
14058 }
+
14059 
+
14060 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
+
14061  const VkMemoryRequirements& vkMemReq,
+
14062  const VmaAllocationCreateInfo& createInfo,
+
14063  uint64_t allocationCount,
+
14064  const VmaAllocation* pAllocations)
+
14065 {
+
14066  CallParams callParams;
+
14067  GetBasicParams(callParams);
+
14068 
+
14069  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
14070  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
+
14071  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
+
14072  vkMemReq.size,
+
14073  vkMemReq.alignment,
+
14074  vkMemReq.memoryTypeBits,
+
14075  createInfo.flags,
+
14076  createInfo.usage,
+
14077  createInfo.requiredFlags,
+
14078  createInfo.preferredFlags,
+
14079  createInfo.memoryTypeBits,
+
14080  createInfo.pool);
+
14081  PrintPointerList(allocationCount, pAllocations);
+
14082  fprintf(m_File, ",%s\n", userDataStr.GetString());
+
14083  Flush();
+
14084 }
+
14085 
+
14086 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
+
14087  const VkMemoryRequirements& vkMemReq,
+
14088  bool requiresDedicatedAllocation,
+
14089  bool prefersDedicatedAllocation,
+
14090  const VmaAllocationCreateInfo& createInfo,
+
14091  VmaAllocation allocation)
+
14092 {
+
14093  CallParams callParams;
+
14094  GetBasicParams(callParams);
+
14095 
+
14096  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
14097  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
+
14098  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
+
14099  vkMemReq.size,
+
14100  vkMemReq.alignment,
+
14101  vkMemReq.memoryTypeBits,
+
14102  requiresDedicatedAllocation ? 1 : 0,
+
14103  prefersDedicatedAllocation ? 1 : 0,
+
14104  createInfo.flags,
+
14105  createInfo.usage,
+
14106  createInfo.requiredFlags,
+
14107  createInfo.preferredFlags,
+
14108  createInfo.memoryTypeBits,
+
14109  createInfo.pool,
+
14110  allocation,
+
14111  userDataStr.GetString());
+
14112  Flush();
+
14113 }
+
14114 
+
14115 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
+
14116  const VkMemoryRequirements& vkMemReq,
+
14117  bool requiresDedicatedAllocation,
+
14118  bool prefersDedicatedAllocation,
+
14119  const VmaAllocationCreateInfo& createInfo,
+
14120  VmaAllocation allocation)
+
14121 {
+
14122  CallParams callParams;
+
14123  GetBasicParams(callParams);
+
14124 
+
14125  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
14126  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
+
14127  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
+
14128  vkMemReq.size,
+
14129  vkMemReq.alignment,
+
14130  vkMemReq.memoryTypeBits,
+
14131  requiresDedicatedAllocation ? 1 : 0,
+
14132  prefersDedicatedAllocation ? 1 : 0,
+
14133  createInfo.flags,
+
14134  createInfo.usage,
+
14135  createInfo.requiredFlags,
+
14136  createInfo.preferredFlags,
+
14137  createInfo.memoryTypeBits,
+
14138  createInfo.pool,
+
14139  allocation,
+
14140  userDataStr.GetString());
+
14141  Flush();
+
14142 }
14143 
-
14144  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
14145  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
-
14146  allocation);
-
14147  Flush();
-
14148 }
+
14144 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
+
14145  VmaAllocation allocation)
+
14146 {
+
14147  CallParams callParams;
+
14148  GetBasicParams(callParams);
14149 
-
14150 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
-
14151  uint64_t allocationCount,
-
14152  const VmaAllocation* pAllocations)
-
14153 {
-
14154  CallParams callParams;
-
14155  GetBasicParams(callParams);
-
14156 
-
14157  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
14158  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
-
14159  PrintPointerList(allocationCount, pAllocations);
-
14160  fprintf(m_File, "\n");
-
14161  Flush();
-
14162 }
-
14163 
-
14164 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
-
14165  VmaAllocation allocation,
-
14166  const void* pUserData)
-
14167 {
-
14168  CallParams callParams;
-
14169  GetBasicParams(callParams);
-
14170 
-
14171  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
14172  UserDataString userDataStr(
-
14173  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
-
14174  pUserData);
-
14175  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
-
14176  allocation,
-
14177  userDataStr.GetString());
-
14178  Flush();
-
14179 }
-
14180 
-
14181 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
-
14182  VmaAllocation allocation)
-
14183 {
-
14184  CallParams callParams;
-
14185  GetBasicParams(callParams);
+
14150  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
14151  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
+
14152  allocation);
+
14153  Flush();
+
14154 }
+
14155 
+
14156 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
+
14157  uint64_t allocationCount,
+
14158  const VmaAllocation* pAllocations)
+
14159 {
+
14160  CallParams callParams;
+
14161  GetBasicParams(callParams);
+
14162 
+
14163  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
14164  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
+
14165  PrintPointerList(allocationCount, pAllocations);
+
14166  fprintf(m_File, "\n");
+
14167  Flush();
+
14168 }
+
14169 
+
14170 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
+
14171  VmaAllocation allocation,
+
14172  const void* pUserData)
+
14173 {
+
14174  CallParams callParams;
+
14175  GetBasicParams(callParams);
+
14176 
+
14177  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
14178  UserDataString userDataStr(
+
14179  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
+
14180  pUserData);
+
14181  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
+
14182  allocation,
+
14183  userDataStr.GetString());
+
14184  Flush();
+
14185 }
14186 
-
14187  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
14188  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
-
14189  allocation);
-
14190  Flush();
-
14191 }
+
14187 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
+
14188  VmaAllocation allocation)
+
14189 {
+
14190  CallParams callParams;
+
14191  GetBasicParams(callParams);
14192 
-
14193 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
-
14194  VmaAllocation allocation)
-
14195 {
-
14196  CallParams callParams;
-
14197  GetBasicParams(callParams);
+
14193  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
14194  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
+
14195  allocation);
+
14196  Flush();
+
14197 }
14198 
-
14199  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
14200  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
-
14201  allocation);
-
14202  Flush();
-
14203 }
+
14199 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
+
14200  VmaAllocation allocation)
+
14201 {
+
14202  CallParams callParams;
+
14203  GetBasicParams(callParams);
14204 
-
14205 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
-
14206  VmaAllocation allocation)
-
14207 {
-
14208  CallParams callParams;
-
14209  GetBasicParams(callParams);
+
14205  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
14206  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
+
14207  allocation);
+
14208  Flush();
+
14209 }
14210 
-
14211  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
14212  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
-
14213  allocation);
-
14214  Flush();
-
14215 }
+
14211 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
+
14212  VmaAllocation allocation)
+
14213 {
+
14214  CallParams callParams;
+
14215  GetBasicParams(callParams);
14216 
-
14217 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
-
14218  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
-
14219 {
-
14220  CallParams callParams;
-
14221  GetBasicParams(callParams);
+
14217  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
14218  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
+
14219  allocation);
+
14220  Flush();
+
14221 }
14222 
-
14223  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
14224  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
-
14225  allocation,
-
14226  offset,
-
14227  size);
-
14228  Flush();
-
14229 }
-
14230 
-
14231 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
-
14232  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
-
14233 {
-
14234  CallParams callParams;
-
14235  GetBasicParams(callParams);
+
14223 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
+
14224  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
+
14225 {
+
14226  CallParams callParams;
+
14227  GetBasicParams(callParams);
+
14228 
+
14229  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
14230  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
+
14231  allocation,
+
14232  offset,
+
14233  size);
+
14234  Flush();
+
14235 }
14236 
-
14237  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
14238  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
-
14239  allocation,
-
14240  offset,
-
14241  size);
-
14242  Flush();
-
14243 }
-
14244 
-
14245 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
-
14246  const VkBufferCreateInfo& bufCreateInfo,
-
14247  const VmaAllocationCreateInfo& allocCreateInfo,
-
14248  VmaAllocation allocation)
-
14249 {
-
14250  CallParams callParams;
-
14251  GetBasicParams(callParams);
-
14252 
-
14253  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
14254  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
-
14255  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
-
14256  bufCreateInfo.flags,
-
14257  bufCreateInfo.size,
-
14258  bufCreateInfo.usage,
-
14259  bufCreateInfo.sharingMode,
-
14260  allocCreateInfo.flags,
-
14261  allocCreateInfo.usage,
-
14262  allocCreateInfo.requiredFlags,
-
14263  allocCreateInfo.preferredFlags,
-
14264  allocCreateInfo.memoryTypeBits,
-
14265  allocCreateInfo.pool,
-
14266  allocation,
-
14267  userDataStr.GetString());
-
14268  Flush();
-
14269 }
-
14270 
-
14271 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
-
14272  const VkImageCreateInfo& imageCreateInfo,
-
14273  const VmaAllocationCreateInfo& allocCreateInfo,
-
14274  VmaAllocation allocation)
-
14275 {
-
14276  CallParams callParams;
-
14277  GetBasicParams(callParams);
-
14278 
-
14279  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
14280  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
-
14281  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
-
14282  imageCreateInfo.flags,
-
14283  imageCreateInfo.imageType,
-
14284  imageCreateInfo.format,
-
14285  imageCreateInfo.extent.width,
-
14286  imageCreateInfo.extent.height,
-
14287  imageCreateInfo.extent.depth,
-
14288  imageCreateInfo.mipLevels,
-
14289  imageCreateInfo.arrayLayers,
-
14290  imageCreateInfo.samples,
-
14291  imageCreateInfo.tiling,
-
14292  imageCreateInfo.usage,
-
14293  imageCreateInfo.sharingMode,
-
14294  imageCreateInfo.initialLayout,
-
14295  allocCreateInfo.flags,
-
14296  allocCreateInfo.usage,
-
14297  allocCreateInfo.requiredFlags,
-
14298  allocCreateInfo.preferredFlags,
-
14299  allocCreateInfo.memoryTypeBits,
-
14300  allocCreateInfo.pool,
-
14301  allocation,
-
14302  userDataStr.GetString());
-
14303  Flush();
-
14304 }
-
14305 
-
14306 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
-
14307  VmaAllocation allocation)
-
14308 {
-
14309  CallParams callParams;
-
14310  GetBasicParams(callParams);
+
14237 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
+
14238  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
+
14239 {
+
14240  CallParams callParams;
+
14241  GetBasicParams(callParams);
+
14242 
+
14243  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
14244  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
+
14245  allocation,
+
14246  offset,
+
14247  size);
+
14248  Flush();
+
14249 }
+
14250 
+
14251 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
+
14252  const VkBufferCreateInfo& bufCreateInfo,
+
14253  const VmaAllocationCreateInfo& allocCreateInfo,
+
14254  VmaAllocation allocation)
+
14255 {
+
14256  CallParams callParams;
+
14257  GetBasicParams(callParams);
+
14258 
+
14259  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
14260  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
+
14261  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
+
14262  bufCreateInfo.flags,
+
14263  bufCreateInfo.size,
+
14264  bufCreateInfo.usage,
+
14265  bufCreateInfo.sharingMode,
+
14266  allocCreateInfo.flags,
+
14267  allocCreateInfo.usage,
+
14268  allocCreateInfo.requiredFlags,
+
14269  allocCreateInfo.preferredFlags,
+
14270  allocCreateInfo.memoryTypeBits,
+
14271  allocCreateInfo.pool,
+
14272  allocation,
+
14273  userDataStr.GetString());
+
14274  Flush();
+
14275 }
+
14276 
+
14277 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
+
14278  const VkImageCreateInfo& imageCreateInfo,
+
14279  const VmaAllocationCreateInfo& allocCreateInfo,
+
14280  VmaAllocation allocation)
+
14281 {
+
14282  CallParams callParams;
+
14283  GetBasicParams(callParams);
+
14284 
+
14285  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
14286  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
+
14287  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
+
14288  imageCreateInfo.flags,
+
14289  imageCreateInfo.imageType,
+
14290  imageCreateInfo.format,
+
14291  imageCreateInfo.extent.width,
+
14292  imageCreateInfo.extent.height,
+
14293  imageCreateInfo.extent.depth,
+
14294  imageCreateInfo.mipLevels,
+
14295  imageCreateInfo.arrayLayers,
+
14296  imageCreateInfo.samples,
+
14297  imageCreateInfo.tiling,
+
14298  imageCreateInfo.usage,
+
14299  imageCreateInfo.sharingMode,
+
14300  imageCreateInfo.initialLayout,
+
14301  allocCreateInfo.flags,
+
14302  allocCreateInfo.usage,
+
14303  allocCreateInfo.requiredFlags,
+
14304  allocCreateInfo.preferredFlags,
+
14305  allocCreateInfo.memoryTypeBits,
+
14306  allocCreateInfo.pool,
+
14307  allocation,
+
14308  userDataStr.GetString());
+
14309  Flush();
+
14310 }
14311 
-
14312  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
14313  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
-
14314  allocation);
-
14315  Flush();
-
14316 }
+
14312 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
+
14313  VmaAllocation allocation)
+
14314 {
+
14315  CallParams callParams;
+
14316  GetBasicParams(callParams);
14317 
-
14318 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
-
14319  VmaAllocation allocation)
-
14320 {
-
14321  CallParams callParams;
-
14322  GetBasicParams(callParams);
+
14318  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
14319  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
+
14320  allocation);
+
14321  Flush();
+
14322 }
14323 
-
14324  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
14325  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
-
14326  allocation);
-
14327  Flush();
-
14328 }
+
14324 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
+
14325  VmaAllocation allocation)
+
14326 {
+
14327  CallParams callParams;
+
14328  GetBasicParams(callParams);
14329 
-
14330 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
-
14331  VmaAllocation allocation)
-
14332 {
-
14333  CallParams callParams;
-
14334  GetBasicParams(callParams);
+
14330  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
14331  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
+
14332  allocation);
+
14333  Flush();
+
14334 }
14335 
-
14336  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
14337  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
-
14338  allocation);
-
14339  Flush();
-
14340 }
+
14336 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
+
14337  VmaAllocation allocation)
+
14338 {
+
14339  CallParams callParams;
+
14340  GetBasicParams(callParams);
14341 
-
14342 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
-
14343  VmaAllocation allocation)
-
14344 {
-
14345  CallParams callParams;
-
14346  GetBasicParams(callParams);
+
14342  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
14343  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
+
14344  allocation);
+
14345  Flush();
+
14346 }
14347 
-
14348  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
14349  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
-
14350  allocation);
-
14351  Flush();
-
14352 }
+
14348 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
+
14349  VmaAllocation allocation)
+
14350 {
+
14351  CallParams callParams;
+
14352  GetBasicParams(callParams);
14353 
-
14354 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
-
14355  VmaPool pool)
-
14356 {
-
14357  CallParams callParams;
-
14358  GetBasicParams(callParams);
+
14354  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
14355  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
+
14356  allocation);
+
14357  Flush();
+
14358 }
14359 
-
14360  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
14361  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
-
14362  pool);
-
14363  Flush();
-
14364 }
+
14360 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
+
14361  VmaPool pool)
+
14362 {
+
14363  CallParams callParams;
+
14364  GetBasicParams(callParams);
14365 
-
14366 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
-
14367  const VmaDefragmentationInfo2& info,
-
14368  VmaDefragmentationContext ctx)
-
14369 {
-
14370  CallParams callParams;
-
14371  GetBasicParams(callParams);
-
14372 
-
14373  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
14374  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
-
14375  info.flags);
-
14376  PrintPointerList(info.allocationCount, info.pAllocations);
-
14377  fprintf(m_File, ",");
-
14378  PrintPointerList(info.poolCount, info.pPools);
-
14379  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
-
14380  info.maxCpuBytesToMove,
-
14381  info.maxCpuAllocationsToMove,
-
14382  info.maxGpuBytesToMove,
-
14383  info.maxGpuAllocationsToMove,
-
14384  info.commandBuffer,
-
14385  ctx);
-
14386  Flush();
-
14387 }
-
14388 
-
14389 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
-
14390  VmaDefragmentationContext ctx)
-
14391 {
-
14392  CallParams callParams;
-
14393  GetBasicParams(callParams);
+
14366  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
14367  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
+
14368  pool);
+
14369  Flush();
+
14370 }
+
14371 
+
14372 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
+
14373  const VmaDefragmentationInfo2& info,
+
14374  VmaDefragmentationContext ctx)
+
14375 {
+
14376  CallParams callParams;
+
14377  GetBasicParams(callParams);
+
14378 
+
14379  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
14380  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
+
14381  info.flags);
+
14382  PrintPointerList(info.allocationCount, info.pAllocations);
+
14383  fprintf(m_File, ",");
+
14384  PrintPointerList(info.poolCount, info.pPools);
+
14385  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
+
14386  info.maxCpuBytesToMove,
+
14387  info.maxCpuAllocationsToMove,
+
14388  info.maxGpuBytesToMove,
+
14389  info.maxGpuAllocationsToMove,
+
14390  info.commandBuffer,
+
14391  ctx);
+
14392  Flush();
+
14393 }
14394 
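The vmaDefragmentationBegin row above serializes VmaDefragmentationInfo2, embedding its two variable-length pointer lists between the fixed columns. For context, a hedged usage sketch of the call being recorded, assuming an existing allocator and allocation list:

```cpp
#include <cstdint>
#include <vector>
#include "vk_mem_alloc.h"

// Hedged sketch: CPU-side defragmentation of a set of allocations.
void DefragmentCpu(VmaAllocator allocator, std::vector<VmaAllocation>& allocs)
{
    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = static_cast<uint32_t>(allocs.size());
    defragInfo.pAllocations = allocs.data();
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;    // no byte limit
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX; // no count limit
    // commandBuffer stays VK_NULL_HANDLE, so no GPU-side moves are requested.

    VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
    if(vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx) >= VK_SUCCESS)
    {
        // Buffers/images bound to moved allocations must be recreated here.
        vmaDefragmentationEnd(allocator, defragCtx);
    }
}
```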
-
14395  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
14396  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
-
14397  ctx);
-
14398  Flush();
-
14399 }
+
14395 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
+
14396  VmaDefragmentationContext ctx)
+
14397 {
+
14398  CallParams callParams;
+
14399  GetBasicParams(callParams);
14400 
-
14401 void VmaRecorder::RecordSetPoolName(uint32_t frameIndex,
-
14402  VmaPool pool,
-
14403  const char* name)
-
14404 {
-
14405  CallParams callParams;
-
14406  GetBasicParams(callParams);
-
14407 
-
14408  VmaMutexLock lock(m_FileMutex, m_UseMutex);
-
14409  fprintf(m_File, "%u,%.3f,%u,vmaSetPoolName,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
-
14410  pool, name != VMA_NULL ? name : "");
-
14411  Flush();
-
14412 }
+
14401  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
14402  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
+
14403  ctx);
+
14404  Flush();
+
14405 }
+
14406 
+
14407 void VmaRecorder::RecordSetPoolName(uint32_t frameIndex,
+
14408  VmaPool pool,
+
14409  const char* name)
+
14410 {
+
14411  CallParams callParams;
+
14412  GetBasicParams(callParams);
14413 
-
14414 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
-
14415 {
-
14416  if(pUserData != VMA_NULL)
-
14417  {
-
14418  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
-
14419  {
-
14420  m_Str = (const char*)pUserData;
-
14421  }
-
14422  else
-
14423  {
-
14424  sprintf_s(m_PtrStr, "%p", pUserData);
-
14425  m_Str = m_PtrStr;
-
14426  }
-
14427  }
-
14428  else
-
14429  {
-
14430  m_Str = "";
-
14431  }
-
14432 }
-
14433 
-
14434 void VmaRecorder::WriteConfiguration(
-
14435  const VkPhysicalDeviceProperties& devProps,
-
14436  const VkPhysicalDeviceMemoryProperties& memProps,
-
14437  uint32_t vulkanApiVersion,
-
14438  bool dedicatedAllocationExtensionEnabled,
-
14439  bool bindMemory2ExtensionEnabled,
-
14440  bool memoryBudgetExtensionEnabled)
-
14441 {
-
14442  fprintf(m_File, "Config,Begin\n");
-
14443 
-
14444  fprintf(m_File, "VulkanApiVersion,%u,%u\n", VK_VERSION_MAJOR(vulkanApiVersion), VK_VERSION_MINOR(vulkanApiVersion));
-
14445 
-
14446  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
-
14447  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
-
14448  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
-
14449  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
-
14450  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
-
14451  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
-
14452 
-
14453  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
-
14454  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
-
14455  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
-
14456 
-
14457  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
-
14458  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
-
14459  {
-
14460  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
-
14461  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
-
14462  }
-
14463  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
-
14464  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
+
14414  VmaMutexLock lock(m_FileMutex, m_UseMutex);
+
14415  fprintf(m_File, "%u,%.3f,%u,vmaSetPoolName,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
+
14416  pool, name != VMA_NULL ? name : "");
+
14417  Flush();
+
14418 }
+
14419 
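RecordSetPoolName logs the pool-naming call added in this release. A short usage sketch, assuming `allocator` and `pool` were created earlier via vmaCreatePool():

```cpp
#include "vk_mem_alloc.h"

// Hedged sketch: naming a custom pool so it can be identified in
// recordings and JSON stats dumps.
void NamePool(VmaAllocator allocator, VmaPool pool)
{
    vmaSetPoolName(allocator, pool, "TexturePool");

    // The name can be read back; the returned pointer stays valid until
    // the name is changed or the pool is destroyed.
    const char* name = VMA_NULL;
    vmaGetPoolName(allocator, pool, &name);
}
```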
+
14420 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
+
14421 {
+
14422  if(pUserData != VMA_NULL)
+
14423  {
+
14424  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
+
14425  {
+
14426  m_Str = (const char*)pUserData;
+
14427  }
+
14428  else
+
14429  {
+
14430  sprintf_s(m_PtrStr, "%p", pUserData);
+
14431  m_Str = m_PtrStr;
+
14432  }
+
14433  }
+
14434  else
+
14435  {
+
14436  m_Str = "";
+
14437  }
+
14438 }
+
14439 
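UserDataString shows the two encodings of pUserData: the text itself when VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is set, otherwise the raw pointer value formatted with sprintf_s. A hedged sketch of the string variant from the application side:

```cpp
#include "vk_mem_alloc.h"

// Hedged sketch: passing a string as pUserData so the recorder logs the
// text instead of a pointer value. With the COPY_STRING bit set, VMA
// copies the string, so even a temporary buffer is safe to pass.
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT;
allocCreateInfo.pUserData = const_cast<char*>("HeightmapVertexBuffer");
```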
+
14440 void VmaRecorder::WriteConfiguration(
+
14441  const VkPhysicalDeviceProperties& devProps,
+
14442  const VkPhysicalDeviceMemoryProperties& memProps,
+
14443  uint32_t vulkanApiVersion,
+
14444  bool dedicatedAllocationExtensionEnabled,
+
14445  bool bindMemory2ExtensionEnabled,
+
14446  bool memoryBudgetExtensionEnabled)
+
14447 {
+
14448  fprintf(m_File, "Config,Begin\n");
+
14449 
+
14450  fprintf(m_File, "VulkanApiVersion,%u,%u\n", VK_VERSION_MAJOR(vulkanApiVersion), VK_VERSION_MINOR(vulkanApiVersion));
+
14451 
+
14452  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
+
14453  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
+
14454  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
+
14455  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
+
14456  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
+
14457  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
+
14458 
+
14459  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
+
14460  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
+
14461  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
+
14462 
+
14463  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
+
14464  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
14465  {
-
14466  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
-
14467  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
+
14466  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
+
14467  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
14468  }
-
14469 
-
14470  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
-
14471  fprintf(m_File, "Extension,VK_KHR_bind_memory2,%u\n", bindMemory2ExtensionEnabled ? 1 : 0);
-
14472  fprintf(m_File, "Extension,VK_EXT_memory_budget,%u\n", memoryBudgetExtensionEnabled ? 1 : 0);
-
14473 
-
14474  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
-
14475  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
-
14476  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
-
14477  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
-
14478  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
-
14479  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
-
14480  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
-
14481  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
-
14482  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
-
14483 
-
14484  fprintf(m_File, "Config,End\n");
-
14485 }
-
14486 
-
14487 void VmaRecorder::GetBasicParams(CallParams& outParams)
-
14488 {
-
14489  outParams.threadId = GetCurrentThreadId();
-
14490 
-
14491  LARGE_INTEGER counter;
-
14492  QueryPerformanceCounter(&counter);
-
14493  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
-
14494 }
-
14495 
-
14496 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
-
14497 {
-
14498  if(count)
-
14499  {
-
14500  fprintf(m_File, "%p", pItems[0]);
-
14501  for(uint64_t i = 1; i < count; ++i)
-
14502  {
-
14503  fprintf(m_File, " %p", pItems[i]);
-
14504  }
-
14505  }
-
14506 }
-
14507 
-
14508 void VmaRecorder::Flush()
-
14509 {
-
14510  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
-
14511  {
-
14512  fflush(m_File);
-
14513  }
-
14514 }
-
14515 
-
14516 #endif // #if VMA_RECORDING_ENABLED
-
14517 
-
14518 ////////////////////////////////////////////////////////////////////////////////
-
14519 // VmaAllocationObjectAllocator
-
14520 
-
14521 VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
-
14522  m_Allocator(pAllocationCallbacks, 1024)
-
14523 {
-
14524 }
-
14525 
-
14526 VmaAllocation VmaAllocationObjectAllocator::Allocate()
-
14527 {
-
14528  VmaMutexLock mutexLock(m_Mutex);
-
14529  return m_Allocator.Alloc();
+
14469  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
+
14470  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
+
14471  {
+
14472  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
+
14473  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
+
14474  }
+
14475 
+
14476  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
+
14477  fprintf(m_File, "Extension,VK_KHR_bind_memory2,%u\n", bindMemory2ExtensionEnabled ? 1 : 0);
+
14478  fprintf(m_File, "Extension,VK_EXT_memory_budget,%u\n", memoryBudgetExtensionEnabled ? 1 : 0);
+
14479 
+
14480  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
+
14481  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
+
14482  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
+
14483  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
+
14484  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
+
14485  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
+
14486  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
+
14487  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
+
14488  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
+
14489 
+
14490  fprintf(m_File, "Config,End\n");
+
14491 }
+
14492 
+
14493 void VmaRecorder::GetBasicParams(CallParams& outParams)
+
14494 {
+
14495  outParams.threadId = GetCurrentThreadId();
+
14496 
+
14497  LARGE_INTEGER counter;
+
14498  QueryPerformanceCounter(&counter);
+
14499  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
+
14500 }
+
14501 
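GetBasicParams relies on the Windows-only GetCurrentThreadId and QueryPerformanceCounter pair, which is why recording is effectively a Windows-only feature. A minimal sketch of a portable timestamp equivalent, assuming std::chrono is acceptable (`CallTimer` is an illustrative name, not part of VMA):

```cpp
#include <chrono>

// Hedged sketch: portable analogue of the recorder's "%.3f" time column.
// steady_clock gives the same monotonic guarantee as the QPC counter.
class CallTimer
{
public:
    CallTimer() : m_Start(std::chrono::steady_clock::now()) {}

    // Seconds elapsed since recording started.
    double Now() const
    {
        return std::chrono::duration<double>(
            std::chrono::steady_clock::now() - m_Start).count();
    }
private:
    std::chrono::steady_clock::time_point m_Start;
};
```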
+
14502 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
+
14503 {
+
14504  if(count)
+
14505  {
+
14506  fprintf(m_File, "%p", pItems[0]);
+
14507  for(uint64_t i = 1; i < count; ++i)
+
14508  {
+
14509  fprintf(m_File, " %p", pItems[i]);
+
14510  }
+
14511  }
+
14512 }
+
14513 
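PrintPointerList writes pointers space-separated inside a single comma-separated field of the row. A hedged sketch of splitting such a field back apart when post-processing a trace (`ParsePointerField` is an illustrative helper, not part of VmaReplay):

```cpp
#include <cstdint>
#include <sstream>
#include <string>
#include <vector>

// Hedged sketch: turn a space-separated "%p %p %p" field back into
// numeric handle values for trace analysis.
std::vector<uint64_t> ParsePointerField(const std::string& field)
{
    std::vector<uint64_t> result;
    std::istringstream iss(field);
    std::string token;
    while(iss >> token)
        result.push_back(std::stoull(token, nullptr, 16)); // "%p" prints hex
    return result;
}
```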
+
14514 void VmaRecorder::Flush()
+
14515 {
+
14516  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
+
14517  {
+
14518  fflush(m_File);
+
14519  }
+
14520 }
+
14521 
+
14522 #endif // #if VMA_RECORDING_ENABLED
+
14523 
+
14524 ////////////////////////////////////////////////////////////////////////////////
+
14525 // VmaAllocationObjectAllocator
+
14526 
+
14527 VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
+
14528  m_Allocator(pAllocationCallbacks, 1024)
+
14529 {
14530 }
14531 
-
14532 void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
+
14532 VmaAllocation VmaAllocationObjectAllocator::Allocate()
14533 {
14534  VmaMutexLock mutexLock(m_Mutex);
-
14535  m_Allocator.Free(hAlloc);
+
14535  return m_Allocator.Alloc();
14536 }
14537 
-
14538 ////////////////////////////////////////////////////////////////////////////////
-
14539 // VmaAllocator_T
-
14540 
-
14541 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
-
14542  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
-
14543  m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0),
-
14544  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
-
14545  m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0),
-
14546  m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0),
-
14547  m_hDevice(pCreateInfo->device),
-
14548  m_hInstance(pCreateInfo->instance),
-
14549  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
-
14550  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
-
14551  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
-
14552  m_AllocationObjectAllocator(&m_AllocationCallbacks),
-
14553  m_HeapSizeLimitMask(0),
-
14554  m_PreferredLargeHeapBlockSize(0),
-
14555  m_PhysicalDevice(pCreateInfo->physicalDevice),
-
14556  m_CurrentFrameIndex(0),
-
14557  m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
-
14558  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
-
14559  m_NextPoolId(0)
-
14560 #if VMA_RECORDING_ENABLED
-
14561  ,m_pRecorder(VMA_NULL)
-
14562 #endif
-
14563 {
-
14564  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-
14565  {
-
14566  m_UseKhrDedicatedAllocation = false;
-
14567  m_UseKhrBindMemory2 = false;
-
14568  }
-
14569 
-
14570  if(VMA_DEBUG_DETECT_CORRUPTION)
+
14538 void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
+
14539 {
+
14540  VmaMutexLock mutexLock(m_Mutex);
+
14541  m_Allocator.Free(hAlloc);
+
14542 }
+
14543 
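VmaAllocationObjectAllocator hides a pool allocator (1024 objects per block, as passed to m_Allocator above) behind a mutex, so VmaAllocation handles are recycled instead of individually heap-allocated. A minimal sketch of that pattern — `MyPool` is illustrative; the real VmaPoolAllocator carves objects out of larger blocks:

```cpp
#include <mutex>
#include <vector>

// Minimal sketch of a mutex-guarded object pool with a free list.
template<typename T>
class MyPool
{
public:
    T* Alloc()
    {
        std::lock_guard<std::mutex> lock(m_Mutex);
        if(m_Free.empty())
            return new T(); // sketch only; real impl allocates in blocks
        T* obj = m_Free.back();
        m_Free.pop_back();
        return obj;
    }
    void Free(T* obj)
    {
        std::lock_guard<std::mutex> lock(m_Mutex);
        m_Free.push_back(obj); // recycle instead of deleting
    }
private:
    std::mutex m_Mutex;
    std::vector<T*> m_Free;
};
```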
+
14544 ////////////////////////////////////////////////////////////////////////////////
+
14545 // VmaAllocator_T
+
14546 
+
14547 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
+
14548  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
+
14549  m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0),
+
14550  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
+
14551  m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0),
+
14552  m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0),
+
14553  m_hDevice(pCreateInfo->device),
+
14554  m_hInstance(pCreateInfo->instance),
+
14555  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
+
14556  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
+
14557  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
+
14558  m_AllocationObjectAllocator(&m_AllocationCallbacks),
+
14559  m_HeapSizeLimitMask(0),
+
14560  m_PreferredLargeHeapBlockSize(0),
+
14561  m_PhysicalDevice(pCreateInfo->physicalDevice),
+
14562  m_CurrentFrameIndex(0),
+
14563  m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
+
14564  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
+
14565  m_NextPoolId(0)
+
14566 #if VMA_RECORDING_ENABLED
+
14567  ,m_pRecorder(VMA_NULL)
+
14568 #endif
+
14569 {
+
14570  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
14571  {
-
14572  // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
-
14573  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
+
14572  m_UseKhrDedicatedAllocation = false;
+
14573  m_UseKhrBindMemory2 = false;
14574  }
14575 
-
14576  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
-
14577 
-
14578  if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
-
14579  {
-
14580 #if !(VMA_DEDICATED_ALLOCATION)
-
14581  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
-
14582  {
-
14583  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
-
14584  }
-
14585 #endif
-
14586 #if !(VMA_BIND_MEMORY2)
-
14587  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0)
+
14576  if(VMA_DEBUG_DETECT_CORRUPTION)
+
14577  {
+
14578  // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
+
14579  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
+
14580  }
+
14581 
+
14582  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
+
14583 
+
14584  if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
+
14585  {
+
14586 #if !(VMA_DEDICATED_ALLOCATION)
+
14587  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
+
14588  {
-
14589  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros.");
+
14589  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
14590  }
14591 #endif
-
14592  }
-
14593 #if !(VMA_MEMORY_BUDGET)
-
14594  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0)
-
14595  {
-
14596  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros.");
-
14597  }
-
14598 #endif
-
14599 #if VMA_VULKAN_VERSION < 1001000
-
14600  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
+
14592 #if !(VMA_BIND_MEMORY2)
+
14593  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0)
+
14594  {
+
14595  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros.");
+
14596  }
+
14597 #endif
+
14598  }
+
14599 #if !(VMA_MEMORY_BUDGET)
+
14600  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0)
14601  {
-
14602  VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros.");
+
14602  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros.");
14603  }
14604 #endif
-
14605 
-
14606  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
-
14607  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
-
14608  memset(&m_MemProps, 0, sizeof(m_MemProps));
-
14609 
-
14610  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
-
14611  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
-
14612  memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions));
-
14613 
-
14614  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
-
14615  {
-
14616  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
-
14617  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
-
14618  }
+
14605 #if VMA_VULKAN_VERSION < 1001000
+
14606  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
+
14607  {
+
14608  VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros.");
+
14609  }
+
14610 #endif
+
14611 
+
14612  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
+
14613  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
+
14614  memset(&m_MemProps, 0, sizeof(m_MemProps));
+
14615 
+
14616  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
+
14617  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
+
14618  memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions));
14619 
-
14620  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
-
14621 
-
14622  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
-
14623  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
-
14624 
-
14625  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
-
14626  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
-
14627  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
-
14628  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
-
14629 
-
14630  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
-
14631  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
-
14632 
-
14633  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
-
14634  {
-
14635  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
-
14636  {
-
14637  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
-
14638  if(limit != VK_WHOLE_SIZE)
-
14639  {
-
14640  m_HeapSizeLimitMask |= 1u << heapIndex;
-
14641  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
-
14642  {
-
14643  m_MemProps.memoryHeaps[heapIndex].size = limit;
-
14644  }
-
14645  }
-
14646  }
-
14647  }
-
14648 
-
14649  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
-
14650  {
-
14651  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
-
14652 
-
14653  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
-
14654  this,
-
14655  VK_NULL_HANDLE, // hParentPool
-
14656  memTypeIndex,
-
14657  preferredBlockSize,
-
14658  0,
-
14659  SIZE_MAX,
-
14660  GetBufferImageGranularity(),
-
14661  pCreateInfo->frameInUseCount,
-
14662  false, // explicitBlockSize
-
14663  false); // linearAlgorithm
-
14664  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
-
14665  // because minBlockCount is 0.
-
14666  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
-
14667 
-
14668  }
-
14669 }
-
14670 
-
14671 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
-
14672 {
-
14673  VkResult res = VK_SUCCESS;
-
14674 
-
14675  if(pCreateInfo->pRecordSettings != VMA_NULL &&
-
14676  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
-
14677  {
-
14678 #if VMA_RECORDING_ENABLED
-
14679  m_pRecorder = vma_new(this, VmaRecorder)();
-
14680  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
-
14681  if(res != VK_SUCCESS)
-
14682  {
-
14683  return res;
-
14684  }
-
14685  m_pRecorder->WriteConfiguration(
-
14686  m_PhysicalDeviceProperties,
-
14687  m_MemProps,
-
14688  m_VulkanApiVersion,
-
14689  m_UseKhrDedicatedAllocation,
-
14690  m_UseKhrBindMemory2,
-
14691  m_UseExtMemoryBudget);
-
14692  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
-
14693 #else
-
14694  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
-
14695  return VK_ERROR_FEATURE_NOT_PRESENT;
-
14696 #endif
-
14697  }
-
14698 
-
14699 #if VMA_MEMORY_BUDGET
-
14700  if(m_UseExtMemoryBudget)
-
14701  {
-
14702  UpdateVulkanBudget();
+
14620  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
+
14621  {
+
14622  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
+
14623  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
+
14624  }
+
14625 
+
14626  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
+
14627 
+
14628  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
+
14629  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
+
14630 
+
14631  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
+
14632  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
+
14633  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
+
14634  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
+
14635 
+
14636  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
+
14637  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
+
14638 
+
14639  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
+
14640  {
+
14641  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
+
14642  {
+
14643  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
+
14644  if(limit != VK_WHOLE_SIZE)
+
14645  {
+
14646  m_HeapSizeLimitMask |= 1u << heapIndex;
+
14647  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
+
14648  {
+
14649  m_MemProps.memoryHeaps[heapIndex].size = limit;
+
14650  }
+
14651  }
+
14652  }
+
14653  }
+
14654 
+
14655  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
+
14656  {
+
14657  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
+
14658 
+
14659  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
+
14660  this,
+
14661  VK_NULL_HANDLE, // hParentPool
+
14662  memTypeIndex,
+
14663  preferredBlockSize,
+
14664  0,
+
14665  SIZE_MAX,
+
14666  GetBufferImageGranularity(),
+
14667  pCreateInfo->frameInUseCount,
+
14668  false, // explicitBlockSize
+
14669  false); // linearAlgorithm
+
14670  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
+
14671  // because minBlockCount is 0.
+
14672  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
+
14673 
+
14674  }
+
14675 }
+
14676 
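The pHeapSizeLimit loop in the constructor above clamps the sizes in m_MemProps, with VK_WHOLE_SIZE meaning "no limit". A hedged usage sketch from the application side, assuming `physicalDevice`, `device` and `instance` already exist:

```cpp
#include "vk_mem_alloc.h"

// Hedged sketch: cap heap 0 at 1 GiB, leave all other heaps unlimited.
VkDeviceSize heapLimits[VK_MAX_MEMORY_HEAPS];
for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    heapLimits[i] = VK_WHOLE_SIZE; // VK_WHOLE_SIZE = no limit on this heap
heapLimits[0] = 1024ull * 1024 * 1024;

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;
allocatorInfo.instance = instance;
allocatorInfo.pHeapSizeLimit = heapLimits;

VmaAllocator allocator = VK_NULL_HANDLE;
vmaCreateAllocator(&allocatorInfo, &allocator);
```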
+
14677 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
+
14678 {
+
14679  VkResult res = VK_SUCCESS;
+
14680 
+
14681  if(pCreateInfo->pRecordSettings != VMA_NULL &&
+
14682  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
+
14683  {
+
14684 #if VMA_RECORDING_ENABLED
+
14685  m_pRecorder = vma_new(this, VmaRecorder)();
+
14686  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
+
14687  if(res != VK_SUCCESS)
+
14688  {
+
14689  return res;
+
14690  }
+
14691  m_pRecorder->WriteConfiguration(
+
14692  m_PhysicalDeviceProperties,
+
14693  m_MemProps,
+
14694  m_VulkanApiVersion,
+
14695  m_UseKhrDedicatedAllocation,
+
14696  m_UseKhrBindMemory2,
+
14697  m_UseExtMemoryBudget);
+
14698  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
+
14699 #else
+
14700  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
+
14701  return VK_ERROR_FEATURE_NOT_PRESENT;
+
14702 #endif
14703  }
-
14704 #endif // #if VMA_MEMORY_BUDGET
-
14705 
-
14706  return res;
-
14707 }
-
14708 
-
14709 VmaAllocator_T::~VmaAllocator_T()
-
14710 {
-
14711 #if VMA_RECORDING_ENABLED
-
14712  if(m_pRecorder != VMA_NULL)
-
14713  {
-
14714  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
-
14715  vma_delete(this, m_pRecorder);
-
14716  }
-
14717 #endif
-
14718 
-
14719  VMA_ASSERT(m_Pools.empty());
-
14720 
-
14721  for(size_t i = GetMemoryTypeCount(); i--; )
-
14722  {
-
14723  if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty())
-
14724  {
-
14725  VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
-
14726  }
-
14727 
-
14728  vma_delete(this, m_pDedicatedAllocations[i]);
-
14729  vma_delete(this, m_pBlockVectors[i]);
-
14730  }
-
14731 }
-
14732 
-
14733 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
-
14734 {
-
14735 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
-
14736  m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
-
14737  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
-
14738  m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
-
14739  m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
-
14740  m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
-
14741  m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
-
14742  m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
-
14743  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
-
14744  m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
-
14745  m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
-
14746  m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
-
14747  m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
-
14748  m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
-
14749  m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
-
14750  m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
-
14751  m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
-
14752  m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
-
14753 #if VMA_VULKAN_VERSION >= 1001000
-
14754  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-
14755  {
-
14756  VMA_ASSERT(m_hInstance != VK_NULL_HANDLE);
-
14757  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
-
14758  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2");
-
14759  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
-
14760  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2");
-
14761  m_VulkanFunctions.vkBindBufferMemory2KHR =
-
14762  (PFN_vkBindBufferMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindBufferMemory2");
-
14763  m_VulkanFunctions.vkBindImageMemory2KHR =
-
14764  (PFN_vkBindImageMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindImageMemory2");
-
14765  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR =
-
14766  (PFN_vkGetPhysicalDeviceMemoryProperties2KHR)vkGetInstanceProcAddr(m_hInstance, "vkGetPhysicalDeviceMemoryProperties2");
-
14767  }
-
14768 #endif
-
14769 #if VMA_DEDICATED_ALLOCATION
-
14770  if(m_UseKhrDedicatedAllocation)
-
14771  {
-
14772  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
-
14773  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
-
14774  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
-
14775  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
-
14776  }
-
14777 #endif
-
14778 #if VMA_BIND_MEMORY2
-
14779  if(m_UseKhrBindMemory2)
-
14780  {
-
14781  m_VulkanFunctions.vkBindBufferMemory2KHR =
-
14782  (PFN_vkBindBufferMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindBufferMemory2KHR");
-
14783  m_VulkanFunctions.vkBindImageMemory2KHR =
-
14784  (PFN_vkBindImageMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindImageMemory2KHR");
-
14785  }
-
14786 #endif // #if VMA_BIND_MEMORY2
-
14787 #if VMA_MEMORY_BUDGET
-
14788  if(m_UseExtMemoryBudget && m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
-
14789  {
-
14790  VMA_ASSERT(m_hInstance != VK_NULL_HANDLE);
-
14791  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR =
-
14792  (PFN_vkGetPhysicalDeviceMemoryProperties2KHR)vkGetInstanceProcAddr(m_hInstance, "vkGetPhysicalDeviceMemoryProperties2KHR");
-
14793  }
-
14794 #endif // #if VMA_MEMORY_BUDGET
-
14795 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
-
14796 
-
14797 #define VMA_COPY_IF_NOT_NULL(funcName) \
-
14798  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
-
14799 
-
14800  if(pVulkanFunctions != VMA_NULL)
-
14801  {
-
14802  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
-
14803  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
-
14804  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
-
14805  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
-
14806  VMA_COPY_IF_NOT_NULL(vkMapMemory);
-
14807  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
-
14808  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
-
14809  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
-
14810  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
-
14811  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
-
14812  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
-
14813  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
-
14814  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
-
14815  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
-
14816  VMA_COPY_IF_NOT_NULL(vkCreateImage);
-
14817  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
-
14818  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
-
14819 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
-
14820  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
-
14821  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
-
14822 #endif
-
14823 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
-
14824  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR);
-
14825  VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR);
-
14826 #endif
-
14827 #if VMA_MEMORY_BUDGET
-
14828  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR);
-
14829 #endif
-
14830  }
-
14831 
-
14832 #undef VMA_COPY_IF_NOT_NULL
-
14833 
-
14834  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
-
14835  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
-
14836  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
-
14837  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
-
14838  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
-
14839  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
-
14840  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
-
14841  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
-
14842  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
-
14843  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
-
14844  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
-
14845  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
-
14846  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
-
14847  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
-
14848  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
-
14849  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
-
14850  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
-
14851  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
-
14852  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
-
14853 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
-
14854  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation)
-
14855  {
-
14856  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
-
14857  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
-
14858  }
-
14859 #endif
-
14860 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
-
14861  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2)
-
14862  {
-
14863  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL);
-
14864  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL);
-
14865  }
-
14866 #endif
-
14867 #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
-
14868  if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-
14869  {
-
14870  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL);
+
14704 
+
14705 #if VMA_MEMORY_BUDGET
+
14706  if(m_UseExtMemoryBudget)
+
14707  {
+
14708  UpdateVulkanBudget();
+
14709  }
+
14710 #endif // #if VMA_MEMORY_BUDGET
+
14711 
+
14712  return res;
+
14713 }
+
14714 
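Init() turns the recorder on only when pRecordSettings carries a non-empty file path, and fails with VK_ERROR_FEATURE_NOT_PRESENT if the library was built without VMA_RECORDING_ENABLED defined to 1. A hedged sketch of enabling it:

```cpp
#include "vk_mem_alloc.h"

// Hedged sketch: record all allocator calls to a CSV trace file.
VmaRecordSettings recordSettings = {};
recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT; // flush each row
recordSettings.pFilePath = "vma_trace.csv";

VmaAllocatorCreateInfo allocatorInfo = {};
// ... physicalDevice, device, instance as usual ...
allocatorInfo.pRecordSettings = &recordSettings;
```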
+
14715 VmaAllocator_T::~VmaAllocator_T()
+
14716 {
+
14717 #if VMA_RECORDING_ENABLED
+
14718  if(m_pRecorder != VMA_NULL)
+
14719  {
+
14720  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
+
14721  vma_delete(this, m_pRecorder);
+
14722  }
+
14723 #endif
+
14724 
+
14725  VMA_ASSERT(m_Pools.empty());
+
14726 
+
14727  for(size_t i = GetMemoryTypeCount(); i--; )
+
14728  {
+
14729  if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty())
+
14730  {
+
14731  VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
+
14732  }
+
14733 
+
14734  vma_delete(this, m_pDedicatedAllocations[i]);
+
14735  vma_delete(this, m_pBlockVectors[i]);
+
14736  }
+
14737 }
+
14738 
+
14739 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
+
14740 {
+
14741 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
+
14742  m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
+
14743  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
+
14744  m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
+
14745  m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
+
14746  m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
+
14747  m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
+
14748  m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
+
14749  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
+
14750  m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
+
14751  m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
+
14752  m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
+
14753  m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
+
14754  m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
+
14755  m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
+
14756  m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
+
14757  m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
+
14758  m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
+
14759 #if VMA_VULKAN_VERSION >= 1001000
+
14760  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
+
14761  {
+
14762  VMA_ASSERT(m_hInstance != VK_NULL_HANDLE);
+
14763  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
+
14764  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2");
+
14765  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
+
14766  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2");
+
14767  m_VulkanFunctions.vkBindBufferMemory2KHR =
+
14768  (PFN_vkBindBufferMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindBufferMemory2");
+
14769  m_VulkanFunctions.vkBindImageMemory2KHR =
+
14770  (PFN_vkBindImageMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindImageMemory2");
+
14771  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR =
+
14772  (PFN_vkGetPhysicalDeviceMemoryProperties2KHR)vkGetInstanceProcAddr(m_hInstance, "vkGetPhysicalDeviceMemoryProperties2");
+
14773  }
+
14774 #endif
+
14775 #if VMA_DEDICATED_ALLOCATION
+
14776  if(m_UseKhrDedicatedAllocation)
+
14777  {
+
14778  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
+
14779  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
+
14780  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
+
14781  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
+
14782  }
+
14783 #endif
+
14784 #if VMA_BIND_MEMORY2
+
14785  if(m_UseKhrBindMemory2)
+
14786  {
+
14787  m_VulkanFunctions.vkBindBufferMemory2KHR =
+
14788  (PFN_vkBindBufferMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindBufferMemory2KHR");
+
14789  m_VulkanFunctions.vkBindImageMemory2KHR =
+
14790  (PFN_vkBindImageMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindImageMemory2KHR");
+
14791  }
+
14792 #endif // #if VMA_BIND_MEMORY2
+
14793 #if VMA_MEMORY_BUDGET
+
14794  if(m_UseExtMemoryBudget && m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
+
14795  {
+
14796  VMA_ASSERT(m_hInstance != VK_NULL_HANDLE);
+
14797  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR =
+
14798  (PFN_vkGetPhysicalDeviceMemoryProperties2KHR)vkGetInstanceProcAddr(m_hInstance, "vkGetPhysicalDeviceMemoryProperties2KHR");
+
14799  }
+
14800 #endif // #if VMA_MEMORY_BUDGET
+
14801 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
+
14802 
+
14803 #define VMA_COPY_IF_NOT_NULL(funcName) \
+
14804  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
+
14805 
+
14806  if(pVulkanFunctions != VMA_NULL)
+
14807  {
+
14808  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
+
14809  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
+
14810  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
+
14811  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
+
14812  VMA_COPY_IF_NOT_NULL(vkMapMemory);
+
14813  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
+
14814  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
+
14815  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
+
14816  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
+
14817  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
+
14818  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
+
14819  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
+
14820  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
+
14821  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
+
14822  VMA_COPY_IF_NOT_NULL(vkCreateImage);
+
14823  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
+
14824  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
+
14825 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
+
14826  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
+
14827  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
+
14828 #endif
+
14829 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
+
14830  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR);
+
14831  VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR);
+
14832 #endif
+
14833 #if VMA_MEMORY_BUDGET
+
14834  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR);
+
14835 #endif
+
14836  }
+
14837 
+
14838 #undef VMA_COPY_IF_NOT_NULL
+
14839 
+
14840  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
+
14841  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
+
14842  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
+
14843  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
+
14844  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
+
14845  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
+
14846  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
+
14847  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
+
14848  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
+
14849  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
+
14850  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
+
14851  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
+
14852  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
+
14853  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
+
14854  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
+
14855  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
+
14856  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
+
14857  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
+
14858  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
+
14859 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
+
14860  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation)
+
14861  {
+
14862  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
+
14863  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
+
14864  }
+
14865 #endif
+
14866 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
+
14867  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2)
+
14868  {
+
14869  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL);
+
14870  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL);
14871  }
14872 #endif
-
14873 }
-
14874 
-
14875 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
-
14876 {
-
14877  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
-
14878  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
-
14879  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
-
14880  return VmaAlignUp(isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32);
-
14881 }
-
14882 
-
14883 VkResult VmaAllocator_T::AllocateMemoryOfType(
-
14884  VkDeviceSize size,
-
14885  VkDeviceSize alignment,
-
14886  bool dedicatedAllocation,
-
14887  VkBuffer dedicatedBuffer,
-
14888  VkImage dedicatedImage,
-
14889  const VmaAllocationCreateInfo& createInfo,
-
14890  uint32_t memTypeIndex,
-
14891  VmaSuballocationType suballocType,
-
14892  size_t allocationCount,
-
14893  VmaAllocation* pAllocations)
-
14894 {
-
14895  VMA_ASSERT(pAllocations != VMA_NULL);
-
14896  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
-
14897 
-
14898  VmaAllocationCreateInfo finalCreateInfo = createInfo;
-
14899 
-
14900  // If memory type is not HOST_VISIBLE, disable MAPPED.
-
14901  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
-
14902  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
-
14903  {
-
14904  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
-
14905  }
-
14906  // If memory is lazily allocated, it should always be dedicated.
-
14907  if(finalCreateInfo.usage == VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED)
-
14908  {
-
14909  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
-
14910  }
-
14911 
-
14912  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
-
14913  VMA_ASSERT(blockVector);
-
14914 
-
14915  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
-
14916  bool preferDedicatedMemory =
-
14917  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
-
14918  dedicatedAllocation ||
-
14919  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
-
14920  size > preferredBlockSize / 2;
-
14921 
-
14922  if(preferDedicatedMemory &&
-
14923  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
-
14924  finalCreateInfo.pool == VK_NULL_HANDLE)
-
14925  {
-
14926  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
-
14927  }
-
14928 
-
14929  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
-
14930  {
-
14931  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
-
14932  {
-
14933  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-
14934  }
-
14935  else
-
14936  {
-
14937  return AllocateDedicatedMemory(
-
14938  size,
-
14939  suballocType,
-
14940  memTypeIndex,
-
14941  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
-
14942  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
-
14943  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
-
14944  finalCreateInfo.pUserData,
-
14945  dedicatedBuffer,
-
14946  dedicatedImage,
-
14947  allocationCount,
-
14948  pAllocations);
-
14949  }
-
14950  }
-
14951  else
-
14952  {
-
14953  VkResult res = blockVector->Allocate(
-
14954  m_CurrentFrameIndex.load(),
-
14955  size,
-
14956  alignment,
-
14957  finalCreateInfo,
-
14958  suballocType,
-
14959  allocationCount,
-
14960  pAllocations);
-
14961  if(res == VK_SUCCESS)
-
14962  {
-
14963  return res;
-
14964  }
-
14965 
-
14966  // Try dedicated memory.
-
14967  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
+
14873 #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
+
14874  if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
+
14875  {
+
14876  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL);
+
14877  }
+
14878 #endif
+
14879 }
+
14880 
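The asserts above spell out the contract: with VMA_STATIC_VULKAN_FUNCTIONS disabled, every required entry point must arrive through VmaAllocatorCreateInfo::pVulkanFunctions. A minimal sketch of that path (the physicalDevice and device handles are assumed to exist; only a few members are shown, the rest follow the same pattern):

VmaVulkanFunctions vulkanFunctions = {};
vulkanFunctions.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
vulkanFunctions.vkGetPhysicalDeviceMemoryProperties = vkGetPhysicalDeviceMemoryProperties;
vulkanFunctions.vkAllocateMemory = vkAllocateMemory;
vulkanFunctions.vkFreeMemory = vkFreeMemory;
// ... assign every remaining member checked by the asserts above ...

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;
allocatorInfo.pVulkanFunctions = &vulkanFunctions;

VmaAllocator allocator;
vmaCreateAllocator(&allocatorInfo, &allocator);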
+
14881 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
+
14882 {
+
14883  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
+
14884  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
+
14885  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
+
14886  return VmaAlignUp(isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32);
+
14887 }
+
14888 
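A worked example of the heuristic above, assuming the library's documented defaults (VMA_SMALL_HEAP_MAX_SIZE = 1 GiB; m_PreferredLargeHeapBlockSize = 256 MiB unless overridden at allocator creation):

// 256 MiB heap (<= 1 GiB, "small"): preferred block size = heapSize / 8 = 32 MiB.
// 8 GiB heap ("large"): preferred block size = m_PreferredLargeHeapBlockSize = 256 MiB.
// Either result is then aligned up to a multiple of 32 bytes (a no-op for these values).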
+
14889 VkResult VmaAllocator_T::AllocateMemoryOfType(
+
14890  VkDeviceSize size,
+
14891  VkDeviceSize alignment,
+
14892  bool dedicatedAllocation,
+
14893  VkBuffer dedicatedBuffer,
+
14894  VkImage dedicatedImage,
+
14895  const VmaAllocationCreateInfo& createInfo,
+
14896  uint32_t memTypeIndex,
+
14897  VmaSuballocationType suballocType,
+
14898  size_t allocationCount,
+
14899  VmaAllocation* pAllocations)
+
14900 {
+
14901  VMA_ASSERT(pAllocations != VMA_NULL);
+
14902  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
+
14903 
+
14904  VmaAllocationCreateInfo finalCreateInfo = createInfo;
+
14905 
+
14906  // If memory type is not HOST_VISIBLE, disable MAPPED.
+
14907  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
+
14908  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
+
14909  {
+
14910  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
+
14911  }
+
14912  // If memory is lazily allocated, it should always be dedicated.
+
14913  if(finalCreateInfo.usage == VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED)
+
14914  {
+
14915  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
+
14916  }
+
14917 
+
14918  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
+
14919  VMA_ASSERT(blockVector);
+
14920 
+
14921  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
+
14922  bool preferDedicatedMemory =
+
14923  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
+
14924  dedicatedAllocation ||
+
14925  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
+
14926  size > preferredBlockSize / 2;
+
14927 
+
14928  if(preferDedicatedMemory &&
+
14929  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
+
14930  finalCreateInfo.pool == VK_NULL_HANDLE)
+
14931  {
+
14932  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
+
14933  }
+
14934 
+
14935  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
+
14936  {
+
14937  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
+
14938  {
+
14939  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
14940  }
+
14941  else
+
14942  {
+
14943  return AllocateDedicatedMemory(
+
14944  size,
+
14945  suballocType,
+
14946  memTypeIndex,
+
14947  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
+
14948  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
+
14949  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
+
14950  finalCreateInfo.pUserData,
+
14951  dedicatedBuffer,
+
14952  dedicatedImage,
+
14953  allocationCount,
+
14954  pAllocations);
+
14955  }
+
14956  }
+
14957  else
+
14958  {
+
14959  VkResult res = blockVector->Allocate(
+
14960  m_CurrentFrameIndex.load(),
+
14961  size,
+
14962  alignment,
+
14963  finalCreateInfo,
+
14964  suballocType,
+
14965  allocationCount,
+
14966  pAllocations);
+
14967  if(res == VK_SUCCESS)
14968  {
-
14969  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
14969  return res;
14970  }
-
14971  else
-
14972  {
-
14973  res = AllocateDedicatedMemory(
-
14974  size,
-
14975  suballocType,
-
14976  memTypeIndex,
-
14977  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
-
14978  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
-
14979  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
-
14980  finalCreateInfo.pUserData,
-
14981  dedicatedBuffer,
-
14982  dedicatedImage,
-
14983  allocationCount,
-
14984  pAllocations);
-
14985  if(res == VK_SUCCESS)
-
14986  {
-
14987  // Succeeded: AllocateDedicatedMemory function already filled pMemory, nothing more to do here.
-
14988  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
-
14989  return VK_SUCCESS;
-
14990  }
-
14991  else
+
14971 
+
14972  // Try dedicated memory.
+
14973  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
+
14974  {
+
14975  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
14976  }
+
14977  else
+
14978  {
+
14979  res = AllocateDedicatedMemory(
+
14980  size,
+
14981  suballocType,
+
14982  memTypeIndex,
+
14983  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
+
14984  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
+
14985  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
+
14986  finalCreateInfo.pUserData,
+
14987  dedicatedBuffer,
+
14988  dedicatedImage,
+
14989  allocationCount,
+
14990  pAllocations);
+
14991  if(res == VK_SUCCESS)
14992  {
-
14993  // Everything failed: Return error code.
-
14994  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
-
14995  return res;
+
14993  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
+
14994  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
+
14995  return VK_SUCCESS;
14996  }
-
14997  }
-
14998  }
-
14999 }
-
15000 
-
15001 VkResult VmaAllocator_T::AllocateDedicatedMemory(
-
15002  VkDeviceSize size,
-
15003  VmaSuballocationType suballocType,
-
15004  uint32_t memTypeIndex,
-
15005  bool withinBudget,
-
15006  bool map,
-
15007  bool isUserDataString,
-
15008  void* pUserData,
-
15009  VkBuffer dedicatedBuffer,
-
15010  VkImage dedicatedImage,
-
15011  size_t allocationCount,
-
15012  VmaAllocation* pAllocations)
-
15013 {
-
15014  VMA_ASSERT(allocationCount > 0 && pAllocations);
-
15015 
-
15016  if(withinBudget)
-
15017  {
-
15018  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
-
15019  VmaBudget heapBudget = {};
-
15020  GetBudget(&heapBudget, heapIndex, 1);
-
15021  if(heapBudget.usage + size * allocationCount > heapBudget.budget)
-
15022  {
-
15023  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-
15024  }
-
15025  }
-
15026 
-
15027  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
-
15028  allocInfo.memoryTypeIndex = memTypeIndex;
-
15029  allocInfo.allocationSize = size;
-
15030 
-
15031 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
-
15032  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
-
15033  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-
15034  {
-
15035  if(dedicatedBuffer != VK_NULL_HANDLE)
-
15036  {
-
15037  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
-
15038  dedicatedAllocInfo.buffer = dedicatedBuffer;
-
15039  allocInfo.pNext = &dedicatedAllocInfo;
-
15040  }
-
15041  else if(dedicatedImage != VK_NULL_HANDLE)
+
14997  else
+
14998  {
+
14999  // Everything failed: Return error code.
+
15000  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
+
15001  return res;
+
15002  }
+
15003  }
+
15004  }
+
15005 }
+
15006 
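On the user side, the branches above are steered entirely by VmaAllocationCreateInfo::flags. A sketch forcing the dedicated path (bufCreateInfo and allocator are assumed to exist):

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; // skip the block vectors entirely

VkBuffer buf;
VmaAllocation alloc;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
// Combining DEDICATED_MEMORY_BIT with NEVER_ALLOCATE_BIT would hit the
// VK_ERROR_OUT_OF_DEVICE_MEMORY early-out above.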
+
15007 VkResult VmaAllocator_T::AllocateDedicatedMemory(
+
15008  VkDeviceSize size,
+
15009  VmaSuballocationType suballocType,
+
15010  uint32_t memTypeIndex,
+
15011  bool withinBudget,
+
15012  bool map,
+
15013  bool isUserDataString,
+
15014  void* pUserData,
+
15015  VkBuffer dedicatedBuffer,
+
15016  VkImage dedicatedImage,
+
15017  size_t allocationCount,
+
15018  VmaAllocation* pAllocations)
+
15019 {
+
15020  VMA_ASSERT(allocationCount > 0 && pAllocations);
+
15021 
+
15022  if(withinBudget)
+
15023  {
+
15024  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
+
15025  VmaBudget heapBudget = {};
+
15026  GetBudget(&heapBudget, heapIndex, 1);
+
15027  if(heapBudget.usage + size * allocationCount > heapBudget.budget)
+
15028  {
+
15029  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
15030  }
+
15031  }
+
15032 
+
15033  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
+
15034  allocInfo.memoryTypeIndex = memTypeIndex;
+
15035  allocInfo.allocationSize = size;
+
15036 
+
15037 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
+
15038  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
+
15039  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
+
15040  {
+
15041  if(dedicatedBuffer != VK_NULL_HANDLE)
15042  {
-
15043  dedicatedAllocInfo.image = dedicatedImage;
-
15044  allocInfo.pNext = &dedicatedAllocInfo;
-
15045  }
-
15046  }
-
15047 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
-
15048 
-
15049  size_t allocIndex;
-
15050  VkResult res = VK_SUCCESS;
-
15051  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
-
15052  {
-
15053  res = AllocateDedicatedMemoryPage(
-
15054  size,
-
15055  suballocType,
-
15056  memTypeIndex,
-
15057  allocInfo,
-
15058  map,
-
15059  isUserDataString,
-
15060  pUserData,
-
15061  pAllocations + allocIndex);
-
15062  if(res != VK_SUCCESS)
-
15063  {
-
15064  break;
-
15065  }
-
15066  }
-
15067 
-
15068  if(res == VK_SUCCESS)
-
15069  {
-
15070  // Register them in m_pDedicatedAllocations.
-
15071  {
-
15072  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
-
15073  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
-
15074  VMA_ASSERT(pDedicatedAllocations);
-
15075  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
-
15076  {
-
15077  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
-
15078  }
-
15079  }
-
15080 
-
15081  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
-
15082  }
-
15083  else
-
15084  {
-
15085  // Free all already created allocations.
-
15086  while(allocIndex--)
-
15087  {
-
15088  VmaAllocation currAlloc = pAllocations[allocIndex];
-
15089  VkDeviceMemory hMemory = currAlloc->GetMemory();
-
15090 
-
15091  /*
-
15092  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
-
15093  before vkFreeMemory.
-
15094 
-
15095  if(currAlloc->GetMappedData() != VMA_NULL)
-
15096  {
-
15097  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
-
15098  }
-
15099  */
-
15100 
-
15101  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
-
15102  m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize());
-
15103  currAlloc->SetUserData(this, VMA_NULL);
-
15104  currAlloc->Dtor();
-
15105  m_AllocationObjectAllocator.Free(currAlloc);
-
15106  }
-
15107 
-
15108  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
-
15109  }
-
15110 
-
15111  return res;
-
15112 }
+
15043  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
+
15044  dedicatedAllocInfo.buffer = dedicatedBuffer;
+
15045  allocInfo.pNext = &dedicatedAllocInfo;
+
15046  }
+
15047  else if(dedicatedImage != VK_NULL_HANDLE)
+
15048  {
+
15049  dedicatedAllocInfo.image = dedicatedImage;
+
15050  allocInfo.pNext = &dedicatedAllocInfo;
+
15051  }
+
15052  }
+
15053 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
+
15054 
+
15055  size_t allocIndex;
+
15056  VkResult res = VK_SUCCESS;
+
15057  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
+
15058  {
+
15059  res = AllocateDedicatedMemoryPage(
+
15060  size,
+
15061  suballocType,
+
15062  memTypeIndex,
+
15063  allocInfo,
+
15064  map,
+
15065  isUserDataString,
+
15066  pUserData,
+
15067  pAllocations + allocIndex);
+
15068  if(res != VK_SUCCESS)
+
15069  {
+
15070  break;
+
15071  }
+
15072  }
+
15073 
+
15074  if(res == VK_SUCCESS)
+
15075  {
+
15076  // Register them in m_pDedicatedAllocations.
+
15077  {
+
15078  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
+
15079  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
+
15080  VMA_ASSERT(pDedicatedAllocations);
+
15081  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
+
15082  {
+
15083  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
+
15084  }
+
15085  }
+
15086 
+
15087  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
+
15088  }
+
15089  else
+
15090  {
+
15091  // Free all already created allocations.
+
15092  while(allocIndex--)
+
15093  {
+
15094  VmaAllocation currAlloc = pAllocations[allocIndex];
+
15095  VkDeviceMemory hMemory = currAlloc->GetMemory();
+
15096 
+
15097  /*
+
15098  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
+
15099  before vkFreeMemory.
+
15100 
+
15101  if(currAlloc->GetMappedData() != VMA_NULL)
+
15102  {
+
15103  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
+
15104  }
+
15105  */
+
15106 
+
15107  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
+
15108  m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize());
+
15109  currAlloc->SetUserData(this, VMA_NULL);
+
15110  currAlloc->Dtor();
+
15111  m_AllocationObjectAllocator.Free(currAlloc);
+
15112  }
15113 
-
15114 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
-
15115  VkDeviceSize size,
-
15116  VmaSuballocationType suballocType,
-
15117  uint32_t memTypeIndex,
-
15118  const VkMemoryAllocateInfo& allocInfo,
-
15119  bool map,
-
15120  bool isUserDataString,
-
15121  void* pUserData,
-
15122  VmaAllocation* pAllocation)
-
15123 {
-
15124  VkDeviceMemory hMemory = VK_NULL_HANDLE;
-
15125  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
-
15126  if(res < 0)
-
15127  {
-
15128  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
-
15129  return res;
-
15130  }
-
15131 
-
15132  void* pMappedData = VMA_NULL;
-
15133  if(map)
-
15134  {
-
15135  res = (*m_VulkanFunctions.vkMapMemory)(
-
15136  m_hDevice,
-
15137  hMemory,
-
15138  0,
-
15139  VK_WHOLE_SIZE,
-
15140  0,
-
15141  &pMappedData);
-
15142  if(res < 0)
-
15143  {
-
15144  VMA_DEBUG_LOG(" vkMapMemory FAILED");
-
15145  FreeVulkanMemory(memTypeIndex, size, hMemory);
-
15146  return res;
-
15147  }
-
15148  }
-
15149 
-
15150  *pAllocation = m_AllocationObjectAllocator.Allocate();
-
15151  (*pAllocation)->Ctor(m_CurrentFrameIndex.load(), isUserDataString);
-
15152  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
-
15153  (*pAllocation)->SetUserData(this, pUserData);
-
15154  m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size);
-
15155  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
-
15156  {
-
15157  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
-
15158  }
-
15159 
-
15160  return VK_SUCCESS;
-
15161 }
-
15162 
-
15163 void VmaAllocator_T::GetBufferMemoryRequirements(
-
15164  VkBuffer hBuffer,
-
15165  VkMemoryRequirements& memReq,
-
15166  bool& requiresDedicatedAllocation,
-
15167  bool& prefersDedicatedAllocation) const
-
15168 {
-
15169 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
-
15170  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-
15171  {
-
15172  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
-
15173  memReqInfo.buffer = hBuffer;
-
15174 
-
15175  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
-
15176 
-
15177  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
-
15178  memReq2.pNext = &memDedicatedReq;
-
15179 
-
15180  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
-
15181 
-
15182  memReq = memReq2.memoryRequirements;
-
15183  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
-
15184  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
-
15185  }
-
15186  else
-
15187 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
-
15188  {
-
15189  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
-
15190  requiresDedicatedAllocation = false;
-
15191  prefersDedicatedAllocation = false;
-
15192  }
-
15193 }
-
15194 
-
15195 void VmaAllocator_T::GetImageMemoryRequirements(
-
15196  VkImage hImage,
-
15197  VkMemoryRequirements& memReq,
-
15198  bool& requiresDedicatedAllocation,
-
15199  bool& prefersDedicatedAllocation) const
-
15200 {
-
15201 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
-
15202  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-
15203  {
-
15204  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
-
15205  memReqInfo.image = hImage;
-
15206 
-
15207  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
-
15208 
-
15209  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
-
15210  memReq2.pNext = &memDedicatedReq;
-
15211 
-
15212  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
-
15213 
-
15214  memReq = memReq2.memoryRequirements;
-
15215  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
-
15216  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
-
15217  }
-
15218  else
-
15219 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
-
15220  {
-
15221  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
-
15222  requiresDedicatedAllocation = false;
-
15223  prefersDedicatedAllocation = false;
-
15224  }
-
15225 }
-
15226 
-
15227 VkResult VmaAllocator_T::AllocateMemory(
-
15228  const VkMemoryRequirements& vkMemReq,
-
15229  bool requiresDedicatedAllocation,
-
15230  bool prefersDedicatedAllocation,
-
15231  VkBuffer dedicatedBuffer,
-
15232  VkImage dedicatedImage,
-
15233  const VmaAllocationCreateInfo& createInfo,
-
15234  VmaSuballocationType suballocType,
-
15235  size_t allocationCount,
-
15236  VmaAllocation* pAllocations)
-
15237 {
-
15238  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
-
15239 
-
15240  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
-
15241 
-
15242  if(vkMemReq.size == 0)
-
15243  {
-
15244  return VK_ERROR_VALIDATION_FAILED_EXT;
-
15245  }
-
15246  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
-
15247  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
-
15248  {
-
15249  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
-
15250  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
15114  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
+
15115  }
+
15116 
+
15117  return res;
+
15118 }
+
15119 
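The withinBudget early-out above is what VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT buys: projected usage is compared against the heap budget before vkAllocateMemory is ever called. A sketch of opting in (imgCreateInfo is assumed to exist):

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT;

VkImage img;
VmaAllocation alloc;
// Fails with VK_ERROR_OUT_OF_DEVICE_MEMORY if usage + size would exceed the budget.
VkResult res = vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr);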
+
15120 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
+
15121  VkDeviceSize size,
+
15122  VmaSuballocationType suballocType,
+
15123  uint32_t memTypeIndex,
+
15124  const VkMemoryAllocateInfo& allocInfo,
+
15125  bool map,
+
15126  bool isUserDataString,
+
15127  void* pUserData,
+
15128  VmaAllocation* pAllocation)
+
15129 {
+
15130  VkDeviceMemory hMemory = VK_NULL_HANDLE;
+
15131  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
+
15132  if(res < 0)
+
15133  {
+
15134  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
+
15135  return res;
+
15136  }
+
15137 
+
15138  void* pMappedData = VMA_NULL;
+
15139  if(map)
+
15140  {
+
15141  res = (*m_VulkanFunctions.vkMapMemory)(
+
15142  m_hDevice,
+
15143  hMemory,
+
15144  0,
+
15145  VK_WHOLE_SIZE,
+
15146  0,
+
15147  &pMappedData);
+
15148  if(res < 0)
+
15149  {
+
15150  VMA_DEBUG_LOG(" vkMapMemory FAILED");
+
15151  FreeVulkanMemory(memTypeIndex, size, hMemory);
+
15152  return res;
+
15153  }
+
15154  }
+
15155 
+
15156  *pAllocation = m_AllocationObjectAllocator.Allocate();
+
15157  (*pAllocation)->Ctor(m_CurrentFrameIndex.load(), isUserDataString);
+
15158  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
+
15159  (*pAllocation)->SetUserData(this, pUserData);
+
15160  m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size);
+
15161  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
+
15162  {
+
15163  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
+
15164  }
+
15165 
+
15166  return VK_SUCCESS;
+
15167 }
+
15168 
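The map branch above is the basis of persistently mapped allocations. A sketch of requesting one and using the returned pointer (bufCreateInfo, srcData, and srcSize are assumed):

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;

VkBuffer buf;
VmaAllocation alloc;
VmaAllocationInfo allocInfo;
vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
// pMappedData stays valid for the allocation's lifetime; no map/unmap pair is needed.
memcpy(allocInfo.pMappedData, srcData, srcSize);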
+
15169 void VmaAllocator_T::GetBufferMemoryRequirements(
+
15170  VkBuffer hBuffer,
+
15171  VkMemoryRequirements& memReq,
+
15172  bool& requiresDedicatedAllocation,
+
15173  bool& prefersDedicatedAllocation) const
+
15174 {
+
15175 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
+
15176  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
+
15177  {
+
15178  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
+
15179  memReqInfo.buffer = hBuffer;
+
15180 
+
15181  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
+
15182 
+
15183  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
+
15184  memReq2.pNext = &memDedicatedReq;
+
15185 
+
15186  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
+
15187 
+
15188  memReq = memReq2.memoryRequirements;
+
15189  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
+
15190  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
+
15191  }
+
15192  else
+
15193 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
+
15194  {
+
15195  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
+
15196  requiresDedicatedAllocation = false;
+
15197  prefersDedicatedAllocation = false;
+
15198  }
+
15199 }
+
15200 
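For reference, the same query written directly against the Vulkan 1.1 core API that this helper wraps (device and buffer are assumed to exist):

VkBufferMemoryRequirementsInfo2 memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2 };
memReqInfo.buffer = buffer;

VkMemoryDedicatedRequirements dedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS };
VkMemoryRequirements2 memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2 };
memReq2.pNext = &dedicatedReq;

vkGetBufferMemoryRequirements2(device, &memReqInfo, &memReq2);

const bool requiresDedicated = dedicatedReq.requiresDedicatedAllocation != VK_FALSE;
const bool prefersDedicated  = dedicatedReq.prefersDedicatedAllocation != VK_FALSE;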
+
15201 void VmaAllocator_T::GetImageMemoryRequirements(
+
15202  VkImage hImage,
+
15203  VkMemoryRequirements& memReq,
+
15204  bool& requiresDedicatedAllocation,
+
15205  bool& prefersDedicatedAllocation) const
+
15206 {
+
15207 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
+
15208  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
+
15209  {
+
15210  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
+
15211  memReqInfo.image = hImage;
+
15212 
+
15213  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
+
15214 
+
15215  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
+
15216  memReq2.pNext = &memDedicatedReq;
+
15217 
+
15218  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
+
15219 
+
15220  memReq = memReq2.memoryRequirements;
+
15221  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
+
15222  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
+
15223  }
+
15224  else
+
15225 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
+
15226  {
+
15227  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
+
15228  requiresDedicatedAllocation = false;
+
15229  prefersDedicatedAllocation = false;
+
15230  }
+
15231 }
+
15232 
+
15233 VkResult VmaAllocator_T::AllocateMemory(
+
15234  const VkMemoryRequirements& vkMemReq,
+
15235  bool requiresDedicatedAllocation,
+
15236  bool prefersDedicatedAllocation,
+
15237  VkBuffer dedicatedBuffer,
+
15238  VkImage dedicatedImage,
+
15239  const VmaAllocationCreateInfo& createInfo,
+
15240  VmaSuballocationType suballocType,
+
15241  size_t allocationCount,
+
15242  VmaAllocation* pAllocations)
+
15243 {
+
15244  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
+
15245 
+
15246  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
+
15247 
+
15248  if(vkMemReq.size == 0)
+
15249  {
+
15250  return VK_ERROR_VALIDATION_FAILED_EXT;
15251  }
-
15252  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
-
15253  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
+
15252  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
+
15253  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
15254  {
-
15255  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
+
15255  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
15256  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15257  }
-
15258  if(requiresDedicatedAllocation)
-
15259  {
-
15260  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
-
15261  {
-
15262  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
-
15263  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-
15264  }
-
15265  if(createInfo.pool != VK_NULL_HANDLE)
-
15266  {
-
15267  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
-
15268  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-
15269  }
-
15270  }
-
15271  if((createInfo.pool != VK_NULL_HANDLE) &&
-
15272  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
-
15273  {
-
15274  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
-
15275  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
15258  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
+
15259  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
+
15260  {
+
15261  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
+
15262  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
15263  }
+
15264  if(requiresDedicatedAllocation)
+
15265  {
+
15266  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
+
15267  {
+
15268  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
+
15269  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
15270  }
+
15271  if(createInfo.pool != VK_NULL_HANDLE)
+
15272  {
+
15273  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
+
15274  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
15275  }
15276  }
-
15277 
-
15278  if(createInfo.pool != VK_NULL_HANDLE)
+
15277  if((createInfo.pool != VK_NULL_HANDLE) &&
+
15278  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
15279  {
-
15280  const VkDeviceSize alignmentForPool = VMA_MAX(
-
15281  vkMemReq.alignment,
-
15282  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
+
15280  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
+
15281  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
15282  }
15283 
-
15284  VmaAllocationCreateInfo createInfoForPool = createInfo;
-
15285  // If memory type is not HOST_VISIBLE, disable MAPPED.
-
15286  if((createInfoForPool.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
-
15287  (m_MemProps.memoryTypes[createInfo.pool->m_BlockVector.GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
-
15288  {
-
15289  createInfoForPool.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
-
15290  }
-
15291 
-
15292  return createInfo.pool->m_BlockVector.Allocate(
-
15293  m_CurrentFrameIndex.load(),
-
15294  vkMemReq.size,
-
15295  alignmentForPool,
-
15296  createInfoForPool,
-
15297  suballocType,
-
15298  allocationCount,
-
15299  pAllocations);
-
15300  }
-
15301  else
-
15302  {
-
15303  // Bit mask of Vulkan memory types acceptable for this allocation.
-
15304  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
-
15305  uint32_t memTypeIndex = UINT32_MAX;
-
15306  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
-
15307  if(res == VK_SUCCESS)
-
15308  {
-
15309  VkDeviceSize alignmentForMemType = VMA_MAX(
-
15310  vkMemReq.alignment,
-
15311  GetMemoryTypeMinAlignment(memTypeIndex));
-
15312 
-
15313  res = AllocateMemoryOfType(
-
15314  vkMemReq.size,
-
15315  alignmentForMemType,
-
15316  requiresDedicatedAllocation || prefersDedicatedAllocation,
-
15317  dedicatedBuffer,
-
15318  dedicatedImage,
-
15319  createInfo,
-
15320  memTypeIndex,
-
15321  suballocType,
-
15322  allocationCount,
-
15323  pAllocations);
-
15324  // Succeeded on first try.
-
15325  if(res == VK_SUCCESS)
-
15326  {
-
15327  return res;
-
15328  }
-
15329  // Allocation from this memory type failed. Try other compatible memory types.
-
15330  else
-
15331  {
-
15332  for(;;)
-
15333  {
-
15334  // Remove old memTypeIndex from list of possibilities.
-
15335  memoryTypeBits &= ~(1u << memTypeIndex);
-
15336  // Find alternative memTypeIndex.
-
15337  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
-
15338  if(res == VK_SUCCESS)
-
15339  {
-
15340  alignmentForMemType = VMA_MAX(
-
15341  vkMemReq.alignment,
-
15342  GetMemoryTypeMinAlignment(memTypeIndex));
-
15343 
-
15344  res = AllocateMemoryOfType(
-
15345  vkMemReq.size,
-
15346  alignmentForMemType,
-
15347  requiresDedicatedAllocation || prefersDedicatedAllocation,
-
15348  dedicatedBuffer,
-
15349  dedicatedImage,
-
15350  createInfo,
-
15351  memTypeIndex,
-
15352  suballocType,
-
15353  allocationCount,
-
15354  pAllocations);
-
15355  // Allocation from this alternative memory type succeeded.
-
15356  if(res == VK_SUCCESS)
-
15357  {
-
15358  return res;
-
15359  }
-
15360  // else: Allocation from this memory type failed. Try next one - next loop iteration.
-
15361  }
-
15362  // No other matching memory type index could be found.
-
15363  else
-
15364  {
-
15365  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
-
15366  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
15284  if(createInfo.pool != VK_NULL_HANDLE)
+
15285  {
+
15286  const VkDeviceSize alignmentForPool = VMA_MAX(
+
15287  vkMemReq.alignment,
+
15288  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
+
15289 
+
15290  VmaAllocationCreateInfo createInfoForPool = createInfo;
+
15291  // If memory type is not HOST_VISIBLE, disable MAPPED.
+
15292  if((createInfoForPool.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
+
15293  (m_MemProps.memoryTypes[createInfo.pool->m_BlockVector.GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
+
15294  {
+
15295  createInfoForPool.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
+
15296  }
+
15297 
+
15298  return createInfo.pool->m_BlockVector.Allocate(
+
15299  m_CurrentFrameIndex.load(),
+
15300  vkMemReq.size,
+
15301  alignmentForPool,
+
15302  createInfoForPool,
+
15303  suballocType,
+
15304  allocationCount,
+
15305  pAllocations);
+
15306  }
+
15307  else
+
15308  {
+
15309  // Bit mask of Vulkan memory types acceptable for this allocation.
+
15310  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
+
15311  uint32_t memTypeIndex = UINT32_MAX;
+
15312  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
+
15313  if(res == VK_SUCCESS)
+
15314  {
+
15315  VkDeviceSize alignmentForMemType = VMA_MAX(
+
15316  vkMemReq.alignment,
+
15317  GetMemoryTypeMinAlignment(memTypeIndex));
+
15318 
+
15319  res = AllocateMemoryOfType(
+
15320  vkMemReq.size,
+
15321  alignmentForMemType,
+
15322  requiresDedicatedAllocation || prefersDedicatedAllocation,
+
15323  dedicatedBuffer,
+
15324  dedicatedImage,
+
15325  createInfo,
+
15326  memTypeIndex,
+
15327  suballocType,
+
15328  allocationCount,
+
15329  pAllocations);
+
15330  // Succeeded on first try.
+
15331  if(res == VK_SUCCESS)
+
15332  {
+
15333  return res;
+
15334  }
+
15335  // Allocation from this memory type failed. Try other compatible memory types.
+
15336  else
+
15337  {
+
15338  for(;;)
+
15339  {
+
15340  // Remove old memTypeIndex from list of possibilities.
+
15341  memoryTypeBits &= ~(1u << memTypeIndex);
+
15342  // Find alternative memTypeIndex.
+
15343  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
+
15344  if(res == VK_SUCCESS)
+
15345  {
+
15346  alignmentForMemType = VMA_MAX(
+
15347  vkMemReq.alignment,
+
15348  GetMemoryTypeMinAlignment(memTypeIndex));
+
15349 
+
15350  res = AllocateMemoryOfType(
+
15351  vkMemReq.size,
+
15352  alignmentForMemType,
+
15353  requiresDedicatedAllocation || prefersDedicatedAllocation,
+
15354  dedicatedBuffer,
+
15355  dedicatedImage,
+
15356  createInfo,
+
15357  memTypeIndex,
+
15358  suballocType,
+
15359  allocationCount,
+
15360  pAllocations);
+
15361  // Allocation from this alternative memory type succeeded.
+
15362  if(res == VK_SUCCESS)
+
15363  {
+
15364  return res;
+
15365  }
+
15366  // else: Allocation from this memory type failed. Try next one - next loop iteration.
15367  }
-
15368  }
-
15369  }
-
15370  }
-
15371  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
-
15372  else
-
15373  return res;
-
15374  }
-
15375 }
-
15376 
-
15377 void VmaAllocator_T::FreeMemory(
-
15378  size_t allocationCount,
-
15379  const VmaAllocation* pAllocations)
-
15380 {
-
15381  VMA_ASSERT(pAllocations);
+
15368  // No other matching memory type index could be found.
+
15369  else
+
15370  {
+
15371  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
+
15372  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
15373  }
+
15374  }
+
15375  }
+
15376  }
+
15377  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
+
15378  else
+
15379  return res;
+
15380  }
+
15381 }
15382 
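The same fallback strategy can be reproduced through the public API; a rough sketch of the loop above (TryAllocateFromType is a hypothetical helper, memReq and allocCreateInfo are assumed):

uint32_t memoryTypeBits = memReq.memoryTypeBits;
uint32_t memTypeIndex = UINT32_MAX;
while(vmaFindMemoryTypeIndex(allocator, memoryTypeBits, &allocCreateInfo, &memTypeIndex) == VK_SUCCESS)
{
    if(TryAllocateFromType(memTypeIndex) == VK_SUCCESS)
        break; // allocation succeeded
    memoryTypeBits &= ~(1u << memTypeIndex); // exclude the failed type, retry with the next best
}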
-
15383  for(size_t allocIndex = allocationCount; allocIndex--; )
-
15384  {
-
15385  VmaAllocation allocation = pAllocations[allocIndex];
-
15386 
-
15387  if(allocation != VK_NULL_HANDLE)
-
15388  {
-
15389  if(TouchAllocation(allocation))
-
15390  {
-
15391  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
-
15392  {
-
15393  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
-
15394  }
-
15395 
-
15396  switch(allocation->GetType())
-
15397  {
-
15398  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
-
15399  {
-
15400  VmaBlockVector* pBlockVector = VMA_NULL;
-
15401  VmaPool hPool = allocation->GetBlock()->GetParentPool();
-
15402  if(hPool != VK_NULL_HANDLE)
-
15403  {
-
15404  pBlockVector = &hPool->m_BlockVector;
-
15405  }
-
15406  else
-
15407  {
-
15408  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
-
15409  pBlockVector = m_pBlockVectors[memTypeIndex];
-
15410  }
-
15411  pBlockVector->Free(allocation);
-
15412  }
-
15413  break;
-
15414  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
-
15415  FreeDedicatedMemory(allocation);
-
15416  break;
-
15417  default:
-
15418  VMA_ASSERT(0);
-
15419  }
-
15420  }
-
15421 
-
15422  // Do this regardless of whether the allocation is lost. Lost allocations still count toward Budget.AllocationBytes.
-
15423  m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize());
-
15424  allocation->SetUserData(this, VMA_NULL);
-
15425  allocation->Dtor();
-
15426  m_AllocationObjectAllocator.Free(allocation);
-
15427  }
-
15428  }
-
15429 }
-
15430 
-
15431 VkResult VmaAllocator_T::ResizeAllocation(
-
15432  const VmaAllocation alloc,
-
15433  VkDeviceSize newSize)
-
15434 {
-
15435  // This function is deprecated and so it does nothing. It's left for backward compatibility.
-
15436  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
-
15437  {
-
15438  return VK_ERROR_VALIDATION_FAILED_EXT;
-
15439  }
-
15440  if(newSize == alloc->GetSize())
-
15441  {
-
15442  return VK_SUCCESS;
-
15443  }
-
15444  return VK_ERROR_OUT_OF_POOL_MEMORY;
-
15445 }
-
15446 
-
15447 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
-
15448 {
-
15449  // Initialize.
-
15450  InitStatInfo(pStats->total);
-
15451  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
-
15452  InitStatInfo(pStats->memoryType[i]);
-
15453  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
-
15454  InitStatInfo(pStats->memoryHeap[i]);
-
15455 
-
15456  // Process default pools.
-
15457  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
-
15458  {
-
15459  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
-
15460  VMA_ASSERT(pBlockVector);
-
15461  pBlockVector->AddStats(pStats);
-
15462  }
-
15463 
-
15464  // Process custom pools.
-
15465  {
-
15466  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
-
15467  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
-
15468  {
-
15469  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
-
15470  }
-
15471  }
-
15472 
-
15473  // Process dedicated allocations.
-
15474  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
-
15475  {
-
15476  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
-
15477  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
-
15478  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
-
15479  VMA_ASSERT(pDedicatedAllocVector);
-
15480  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
-
15481  {
-
15482  VmaStatInfo allocationStatInfo;
-
15483  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
-
15484  VmaAddStatInfo(pStats->total, allocationStatInfo);
-
15485  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
-
15486  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
-
15487  }
-
15488  }
-
15489 
-
15490  // Postprocess.
-
15491  VmaPostprocessCalcStatInfo(pStats->total);
-
15492  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
-
15493  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
-
15494  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
-
15495  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
-
15496 }
-
15497 
-
15498 void VmaAllocator_T::GetBudget(VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount)
-
15499 {
-
15500 #if VMA_MEMORY_BUDGET
-
15501  if(m_UseExtMemoryBudget)
-
15502  {
-
15503  if(m_Budget.m_OperationsSinceBudgetFetch < 30)
-
15504  {
-
15505  VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex);
-
15506  for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
-
15507  {
-
15508  const uint32_t heapIndex = firstHeap + i;
-
15509 
-
15510  outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
-
15511  outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
-
15512 
-
15513  if(m_Budget.m_VulkanUsage[heapIndex] + outBudget->blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex])
-
15514  {
-
15515  outBudget->usage = m_Budget.m_VulkanUsage[heapIndex] +
-
15516  outBudget->blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
-
15517  }
-
15518  else
-
15519  {
-
15520  outBudget->usage = 0;
-
15521  }
-
15522 
-
15523  // Have to take MIN with heap size because explicit HeapSizeLimit is included in it.
-
15524  outBudget->budget = VMA_MIN(
-
15525  m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size);
-
15526  }
-
15527  }
-
15528  else
-
15529  {
-
15530  UpdateVulkanBudget(); // Outside of mutex lock
-
15531  GetBudget(outBudget, firstHeap, heapCount); // Recursion
-
15532  }
-
15533  }
-
15534  else
-
15535 #endif
-
15536  {
-
15537  for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
-
15538  {
-
15539  const uint32_t heapIndex = firstHeap + i;
-
15540 
-
15541  outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
-
15542  outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
-
15543 
-
15544  outBudget->usage = outBudget->blockBytes;
-
15545  outBudget->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristic.
-
15546  }
-
15547  }
-
15548 }
+
15383 void VmaAllocator_T::FreeMemory(
+
15384  size_t allocationCount,
+
15385  const VmaAllocation* pAllocations)
+
15386 {
+
15387  VMA_ASSERT(pAllocations);
+
15388 
+
15389  for(size_t allocIndex = allocationCount; allocIndex--; )
+
15390  {
+
15391  VmaAllocation allocation = pAllocations[allocIndex];
+
15392 
+
15393  if(allocation != VK_NULL_HANDLE)
+
15394  {
+
15395  if(TouchAllocation(allocation))
+
15396  {
+
15397  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
+
15398  {
+
15399  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
+
15400  }
+
15401 
+
15402  switch(allocation->GetType())
+
15403  {
+
15404  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
+
15405  {
+
15406  VmaBlockVector* pBlockVector = VMA_NULL;
+
15407  VmaPool hPool = allocation->GetBlock()->GetParentPool();
+
15408  if(hPool != VK_NULL_HANDLE)
+
15409  {
+
15410  pBlockVector = &hPool->m_BlockVector;
+
15411  }
+
15412  else
+
15413  {
+
15414  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
+
15415  pBlockVector = m_pBlockVectors[memTypeIndex];
+
15416  }
+
15417  pBlockVector->Free(allocation);
+
15418  }
+
15419  break;
+
15420  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+
15421  FreeDedicatedMemory(allocation);
+
15422  break;
+
15423  default:
+
15424  VMA_ASSERT(0);
+
15425  }
+
15426  }
+
15427 
+
15428  // Do this regardless of whether the allocation is lost. Lost allocations still count toward Budget.AllocationBytes.
+
15429  m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize());
+
15430  allocation->SetUserData(this, VMA_NULL);
+
15431  allocation->Dtor();
+
15432  m_AllocationObjectAllocator.Free(allocation);
+
15433  }
+
15434  }
+
15435 }
+
15436 
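This function backs both vmaFreeMemory and the batched vmaFreeMemoryPages; the reverse iteration releases a batch in the opposite order it was allocated. A sketch of the batched call (allocationCount and allocations are assumed to come from a prior vmaAllocateMemoryPages):

// Null handles in the array are tolerated and simply skipped, per the check above.
vmaFreeMemoryPages(allocator, allocationCount, allocations);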
+
15437 VkResult VmaAllocator_T::ResizeAllocation(
+
15438  const VmaAllocation alloc,
+
15439  VkDeviceSize newSize)
+
15440 {
+
15441  // This function is deprecated and so it does nothing. It's left for backward compatibility.
+
15442  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
+
15443  {
+
15444  return VK_ERROR_VALIDATION_FAILED_EXT;
+
15445  }
+
15446  if(newSize == alloc->GetSize())
+
15447  {
+
15448  return VK_SUCCESS;
+
15449  }
+
15450  return VK_ERROR_OUT_OF_POOL_MEMORY;
+
15451 }
+
15452 
+
15453 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
+
15454 {
+
15455  // Initialize.
+
15456  InitStatInfo(pStats->total);
+
15457  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
+
15458  InitStatInfo(pStats->memoryType[i]);
+
15459  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
+
15460  InitStatInfo(pStats->memoryHeap[i]);
+
15461 
+
15462  // Process default pools.
+
15463  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
+
15464  {
+
15465  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
+
15466  VMA_ASSERT(pBlockVector);
+
15467  pBlockVector->AddStats(pStats);
+
15468  }
+
15469 
+
15470  // Process custom pools.
+
15471  {
+
15472  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
+
15473  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
+
15474  {
+
15475  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
+
15476  }
+
15477  }
+
15478 
+
15479  // Process dedicated allocations.
+
15480  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
+
15481  {
+
15482  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
+
15483  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
+
15484  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
+
15485  VMA_ASSERT(pDedicatedAllocVector);
+
15486  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
+
15487  {
+
15488  VmaStatInfo allocationStatInfo;
+
15489  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
+
15490  VmaAddStatInfo(pStats->total, allocationStatInfo);
+
15491  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
+
15492  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
+
15493  }
+
15494  }
+
15495 
+
15496  // Postprocess.
+
15497  VmaPostprocessCalcStatInfo(pStats->total);
+
15498  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
+
15499  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
+
15500  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
+
15501  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
+
15502 }
+
15503 
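A sketch of consuming the aggregated numbers this produces (printf is used for brevity; field names follow VmaStatInfo):

VmaStats stats;
vmaCalculateStats(allocator, &stats);
printf("used: %llu B, unused: %llu B, allocations: %u\n",
    (unsigned long long)stats.total.usedBytes,
    (unsigned long long)stats.total.unusedBytes,
    stats.total.allocationCount);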
+
15504 void VmaAllocator_T::GetBudget(VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount)
+
15505 {
+
15506 #if VMA_MEMORY_BUDGET
+
15507  if(m_UseExtMemoryBudget)
+
15508  {
+
15509  if(m_Budget.m_OperationsSinceBudgetFetch < 30)
+
15510  {
+
15511  VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex);
+
15512  for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
+
15513  {
+
15514  const uint32_t heapIndex = firstHeap + i;
+
15515 
+
15516  outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
+
15517  outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
+
15518 
+
15519  if(m_Budget.m_VulkanUsage[heapIndex] + outBudget->blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex])
+
15520  {
+
15521  outBudget->usage = m_Budget.m_VulkanUsage[heapIndex] +
+
15522  outBudget->blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
+
15523  }
+
15524  else
+
15525  {
+
15526  outBudget->usage = 0;
+
15527  }
+
15528 
+
15529  // Have to take MIN with heap size because explicit HeapSizeLimit is included in it.
+
15530  outBudget->budget = VMA_MIN(
+
15531  m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size);
+
15532  }
+
15533  }
+
15534  else
+
15535  {
+
15536  UpdateVulkanBudget(); // Outside of mutex lock
+
15537  GetBudget(outBudget, firstHeap, heapCount); // Recursion
+
15538  }
+
15539  }
+
15540  else
+
15541 #endif
+
15542  {
+
15543  for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
+
15544  {
+
15545  const uint32_t heapIndex = firstHeap + i;
+
15546 
+
15547  outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
+
15548  outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
15549 
-
15550 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
-
15551 
-
15552 VkResult VmaAllocator_T::DefragmentationBegin(
-
15553  const VmaDefragmentationInfo2& info,
-
15554  VmaDefragmentationStats* pStats,
-
15555  VmaDefragmentationContext* pContext)
-
15556 {
-
15557  if(info.pAllocationsChanged != VMA_NULL)
-
15558  {
-
15559  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
-
15560  }
-
15561 
-
15562  *pContext = vma_new(this, VmaDefragmentationContext_T)(
-
15563  this, m_CurrentFrameIndex.load(), info.flags, pStats);
-
15564 
-
15565  (*pContext)->AddPools(info.poolCount, info.pPools);
-
15566  (*pContext)->AddAllocations(
-
15567  info.allocationCount, info.pAllocations, info.pAllocationsChanged);
-
15568 
-
15569  VkResult res = (*pContext)->Defragment(
-
15570  info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
-
15571  info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
-
15572  info.commandBuffer, pStats);
-
15573 
-
15574  if(res != VK_NOT_READY)
-
15575  {
-
15576  vma_delete(this, *pContext);
-
15577  *pContext = VMA_NULL;
-
15578  }
+
15550  outBudget->usage = outBudget->blockBytes;
+
15551  outBudget->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristic.
+
15552  }
+
15553  }
+
15554 }
+
15555 
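A sketch of querying this through the public API and gating a large allocation on it (requiredBytes and the heap index of interest are assumptions):

VmaBudget budgets[VK_MAX_MEMORY_HEAPS] = {};
vmaGetBudget(allocator, budgets);
if(budgets[heapIndex].usage + requiredBytes <= budgets[heapIndex].budget)
{
    // Likely to fit without oversubscribing the OS-reported budget.
}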
+
15556 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
+
15557 
+
15558 VkResult VmaAllocator_T::DefragmentationBegin(
+
15559  const VmaDefragmentationInfo2& info,
+
15560  VmaDefragmentationStats* pStats,
+
15561  VmaDefragmentationContext* pContext)
+
15562 {
+
15563  if(info.pAllocationsChanged != VMA_NULL)
+
15564  {
+
15565  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
+
15566  }
+
15567 
+
15568  *pContext = vma_new(this, VmaDefragmentationContext_T)(
+
15569  this, m_CurrentFrameIndex.load(), info.flags, pStats);
+
15570 
+
15571  (*pContext)->AddPools(info.poolCount, info.pPools);
+
15572  (*pContext)->AddAllocations(
+
15573  info.allocationCount, info.pAllocations, info.pAllocationsChanged);
+
15574 
+
15575  VkResult res = (*pContext)->Defragment(
+
15576  info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
+
15577  info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
+
15578  info.commandBuffer, pStats);
15579 
-
15580  return res;
-
15581 }
-
15582 
-
15583 VkResult VmaAllocator_T::DefragmentationEnd(
-
15584  VmaDefragmentationContext context)
-
15585 {
-
15586  vma_delete(this, context);
-
15587  return VK_SUCCESS;
-
15588 }
-
15589 
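A sketch of driving this pair from the public API, CPU-side moves only (allocCount, allocations, and changed are assumed inputs; GPU-side moves would additionally need commandBuffer set and submitted while the context is live):

VmaDefragmentationInfo2 defragInfo = {};
defragInfo.allocationCount = allocCount;
defragInfo.pAllocations = allocations;
defragInfo.pAllocationsChanged = changed; // optional VkBool32 array
defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

VmaDefragmentationContext defragCtx;
vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
vmaDefragmentationEnd(allocator, defragCtx);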
-
15590 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
+
15580  if(res != VK_NOT_READY)
+
15581  {
+
15582  vma_delete(this, *pContext);
+
15583  *pContext = VMA_NULL;
+
15584  }
+
15585 
+
15586  return res;
+
15587 }
+
15588 
+
15589 VkResult VmaAllocator_T::DefragmentationEnd(
+
15590  VmaDefragmentationContext context)
15591 {
-
15592  if(hAllocation->CanBecomeLost())
-
15593  {
-
15594  /*
-
15595  Warning: This is a carefully designed algorithm.
-
15596  Do not modify unless you really know what you're doing :)
-
15597  */
-
15598  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
-
15599  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
-
15600  for(;;)
-
15601  {
-
15602  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
-
15603  {
-
15604  pAllocationInfo->memoryType = UINT32_MAX;
-
15605  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
-
15606  pAllocationInfo->offset = 0;
-
15607  pAllocationInfo->size = hAllocation->GetSize();
-
15608  pAllocationInfo->pMappedData = VMA_NULL;
-
15609  pAllocationInfo->pUserData = hAllocation->GetUserData();
-
15610  return;
-
15611  }
-
15612  else if(localLastUseFrameIndex == localCurrFrameIndex)
-
15613  {
-
15614  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
-
15615  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
-
15616  pAllocationInfo->offset = hAllocation->GetOffset();
-
15617  pAllocationInfo->size = hAllocation->GetSize();
-
15618  pAllocationInfo->pMappedData = VMA_NULL;
-
15619  pAllocationInfo->pUserData = hAllocation->GetUserData();
-
15620  return;
-
15621  }
-
15622  else // Last use time earlier than current time.
-
15623  {
-
15624  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
-
15625  {
-
15626  localLastUseFrameIndex = localCurrFrameIndex;
-
15627  }
-
15628  }
-
15629  }
-
15630  }
-
15631  else
-
15632  {
-
15633 #if VMA_STATS_STRING_ENABLED
-
15634  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
-
15635  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
-
15636  for(;;)
-
15637  {
-
15638  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
-
15639  if(localLastUseFrameIndex == localCurrFrameIndex)
-
15640  {
-
15641  break;
-
15642  }
-
15643  else // Last use time earlier than current time.
-
15644  {
-
15645  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
-
15646  {
-
15647  localLastUseFrameIndex = localCurrFrameIndex;
-
15648  }
-
15649  }
-
15650  }
-
15651 #endif
-
15652 
-
15653  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
-
15654  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
-
15655  pAllocationInfo->offset = hAllocation->GetOffset();
-
15656  pAllocationInfo->size = hAllocation->GetSize();
-
15657  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
-
15658  pAllocationInfo->pUserData = hAllocation->GetUserData();
-
15659  }
-
15660 }
-
15661 
-
15662 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
-
15663 {
-
15664  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
-
15665  if(hAllocation->CanBecomeLost())
-
15666  {
-
15667  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
-
15668  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
-
15669  for(;;)
-
15670  {
-
15671  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
-
15672  {
-
15673  return false;
-
15674  }
-
15675  else if(localLastUseFrameIndex == localCurrFrameIndex)
-
15676  {
-
15677  return true;
-
15678  }
-
15679  else // Last use time earlier than current time.
-
15680  {
-
15681  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
-
15682  {
-
15683  localLastUseFrameIndex = localCurrFrameIndex;
-
15684  }
-
15685  }
-
15686  }
-
15687  }
-
15688  else
-
15689  {
-
15690 #if VMA_STATS_STRING_ENABLED
-
15691  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
-
15692  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
-
15693  for(;;)
-
15694  {
-
15695  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
-
15696  if(localLastUseFrameIndex == localCurrFrameIndex)
-
15697  {
-
15698  break;
-
15699  }
-
15700  else // Last use time earlier than current time.
-
15701  {
-
15702  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
-
15703  {
-
15704  localLastUseFrameIndex = localCurrFrameIndex;
-
15705  }
-
15706  }
-
15707  }
-
15708 #endif
-
15709 
-
15710  return true;
-
15711  }
-
15712 }
-
15713 
-
15714 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
-
15715 {
-
15716  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
-
15717 
-
15718  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
+
15592  vma_delete(this, context);
+
15593  return VK_SUCCESS;
+
15594 }
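
DefragmentationBegin deletes the context itself unless it returns VK_NOT_READY (meaning recorded GPU work is still pending), and the public vmaDefragmentationEnd tolerates a null context, so the begin/end pair can be called unconditionally. A hedged sketch of CPU-only defragmentation under those assumptions (DefragmentCpu is an illustrative helper):

```cpp
// Sketch: CPU-side defragmentation of a set of allocations (assumed v2.3.0 API).
#include "vk_mem_alloc.h"
#include <cstdint>
#include <vector>

VkResult DefragmentCpu(VmaAllocator allocator, std::vector<VmaAllocation>& allocations)
{
    std::vector<VkBool32> changed(allocations.size());

    VmaDefragmentationInfo2 info = {};
    info.allocationCount = (uint32_t)allocations.size();
    info.pAllocations = allocations.data();
    info.pAllocationsChanged = changed.data(); // set to VK_TRUE for moved allocations
    info.maxCpuBytesToMove = VK_WHOLE_SIZE;    // no byte limit
    info.maxCpuAllocationsToMove = UINT32_MAX; // no count limit
    // commandBuffer left null: GPU-side moves are disabled in this sketch.

    VmaDefragmentationContext ctx = VMA_NULL;
    VkResult res = vmaDefragmentationBegin(allocator, &info, VMA_NULL, &ctx);
    // With no command buffer, res should not be VK_NOT_READY; End is null-safe anyway.
    VkResult endRes = vmaDefragmentationEnd(allocator, ctx);
    return res < 0 ? res : endRes;
}
```
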
+
15595 
+
15596 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
+
15597 {
+
15598  if(hAllocation->CanBecomeLost())
+
15599  {
+
15600  /*
+
15601  Warning: This is a carefully designed algorithm.
+
15602  Do not modify unless you really know what you're doing :)
+
15603  */
+
15604  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
+
15605  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
+
15606  for(;;)
+
15607  {
+
15608  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
+
15609  {
+
15610  pAllocationInfo->memoryType = UINT32_MAX;
+
15611  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
+
15612  pAllocationInfo->offset = 0;
+
15613  pAllocationInfo->size = hAllocation->GetSize();
+
15614  pAllocationInfo->pMappedData = VMA_NULL;
+
15615  pAllocationInfo->pUserData = hAllocation->GetUserData();
+
15616  return;
+
15617  }
+
15618  else if(localLastUseFrameIndex == localCurrFrameIndex)
+
15619  {
+
15620  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
+
15621  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
+
15622  pAllocationInfo->offset = hAllocation->GetOffset();
+
15623  pAllocationInfo->size = hAllocation->GetSize();
+
15624  pAllocationInfo->pMappedData = VMA_NULL;
+
15625  pAllocationInfo->pUserData = hAllocation->GetUserData();
+
15626  return;
+
15627  }
+
15628  else // Last use time earlier than current time.
+
15629  {
+
15630  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
+
15631  {
+
15632  localLastUseFrameIndex = localCurrFrameIndex;
+
15633  }
+
15634  }
+
15635  }
+
15636  }
+
15637  else
+
15638  {
+
15639 #if VMA_STATS_STRING_ENABLED
+
15640  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
+
15641  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
+
15642  for(;;)
+
15643  {
+
15644  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
+
15645  if(localLastUseFrameIndex == localCurrFrameIndex)
+
15646  {
+
15647  break;
+
15648  }
+
15649  else // Last use time earlier than current time.
+
15650  {
+
15651  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
+
15652  {
+
15653  localLastUseFrameIndex = localCurrFrameIndex;
+
15654  }
+
15655  }
+
15656  }
+
15657 #endif
+
15658 
+
15659  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
+
15660  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
+
15661  pAllocationInfo->offset = hAllocation->GetOffset();
+
15662  pAllocationInfo->size = hAllocation->GetSize();
+
15663  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
+
15664  pAllocationInfo->pUserData = hAllocation->GetUserData();
+
15665  }
+
15666 }
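
For allocations created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT, the code above reports a lost allocation as memoryType == UINT32_MAX and deviceMemory == VK_NULL_HANDLE. A small caller-side sketch of that check (IsAllocationLost is an illustrative helper):

```cpp
// Sketch: detecting that a CAN_BECOME_LOST allocation has been lost.
#include "vk_mem_alloc.h"

bool IsAllocationLost(VmaAllocator allocator, VmaAllocation alloc)
{
    VmaAllocationInfo info = {};
    vmaGetAllocationInfo(allocator, alloc, &info);
    // Per GetAllocationInfo above, a lost allocation reports no backing memory.
    return info.deviceMemory == VK_NULL_HANDLE && info.memoryType == UINT32_MAX;
}
```
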
+
15667 
+
15668 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
+
15669 {
+
15670  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
+
15671  if(hAllocation->CanBecomeLost())
+
15672  {
+
15673  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
+
15674  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
+
15675  for(;;)
+
15676  {
+
15677  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
+
15678  {
+
15679  return false;
+
15680  }
+
15681  else if(localLastUseFrameIndex == localCurrFrameIndex)
+
15682  {
+
15683  return true;
+
15684  }
+
15685  else // Last use time earlier than current time.
+
15686  {
+
15687  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
+
15688  {
+
15689  localLastUseFrameIndex = localCurrFrameIndex;
+
15690  }
+
15691  }
+
15692  }
+
15693  }
+
15694  else
+
15695  {
+
15696 #if VMA_STATS_STRING_ENABLED
+
15697  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
+
15698  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
+
15699  for(;;)
+
15700  {
+
15701  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
+
15702  if(localLastUseFrameIndex == localCurrFrameIndex)
+
15703  {
+
15704  break;
+
15705  }
+
15706  else // Last use time earlier than current time.
+
15707  {
+
15708  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
+
15709  {
+
15710  localLastUseFrameIndex = localCurrFrameIndex;
+
15711  }
+
15712  }
+
15713  }
+
15714 #endif
+
15715 
+
15716  return true;
+
15717  }
+
15718 }
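
TouchAllocation is the cheap per-frame alternative: it bumps the last-use frame index and only reports whether the allocation is still valid. A sketch of the usual pattern through the public vmaTouchAllocation wrapper:

```cpp
// Sketch: per-frame touch to keep a CAN_BECOME_LOST allocation alive.
#include "vk_mem_alloc.h"

bool UseOrRecreate(VmaAllocator allocator, VmaAllocation alloc)
{
    // Returns VK_TRUE and refreshes the last-use frame index if not lost.
    if(vmaTouchAllocation(allocator, alloc) == VK_TRUE)
    {
        return true;  // safe to use this frame
    }
    return false;     // lost: the caller must recreate the resource
}
```
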
15719 
-
15720  if(newCreateInfo.maxBlockCount == 0)
-
15721  {
-
15722  newCreateInfo.maxBlockCount = SIZE_MAX;
-
15723  }
-
15724  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
-
15725  {
-
15726  return VK_ERROR_INITIALIZATION_FAILED;
-
15727  }
-
15728 
-
15729  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
-
15730 
-
15731  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
-
15732 
-
15733  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
-
15734  if(res != VK_SUCCESS)
-
15735  {
-
15736  vma_delete(this, *pPool);
-
15737  *pPool = VMA_NULL;
-
15738  return res;
-
15739  }
-
15740 
-
15741  // Add to m_Pools.
-
15742  {
-
15743  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
-
15744  (*pPool)->SetId(m_NextPoolId++);
-
15745  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
-
15746  }
-
15747 
-
15748  return VK_SUCCESS;
-
15749 }
-
15750 
-
15751 void VmaAllocator_T::DestroyPool(VmaPool pool)
-
15752 {
-
15753  // Remove from m_Pools.
-
15754  {
-
15755  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
-
15756  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
-
15757  VMA_ASSERT(success && "Pool not found in Allocator.");
-
15758  }
-
15759 
-
15760  vma_delete(this, pool);
-
15761 }
-
15762 
-
15763 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
-
15764 {
-
15765  pool->m_BlockVector.GetPoolStats(pPoolStats);
-
15766 }
-
15767 
-
15768 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
-
15769 {
-
15770  m_CurrentFrameIndex.store(frameIndex);
-
15771 
-
15772 #if VMA_MEMORY_BUDGET
-
15773  if(m_UseExtMemoryBudget)
-
15774  {
-
15775  UpdateVulkanBudget();
-
15776  }
-
15777 #endif // #if VMA_MEMORY_BUDGET
-
15778 }
-
15779 
-
15780 void VmaAllocator_T::MakePoolAllocationsLost(
-
15781  VmaPool hPool,
-
15782  size_t* pLostAllocationCount)
-
15783 {
-
15784  hPool->m_BlockVector.MakePoolAllocationsLost(
-
15785  m_CurrentFrameIndex.load(),
-
15786  pLostAllocationCount);
-
15787 }
-
15788 
-
15789 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
-
15790 {
-
15791  return hPool->m_BlockVector.CheckCorruption();
-
15792 }
-
15793 
-
15794 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
-
15795 {
-
15796  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
-
15797 
-
15798  // Process default pools.
-
15799  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
-
15800  {
-
15801  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
-
15802  {
-
15803  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
-
15804  VMA_ASSERT(pBlockVector);
-
15805  VkResult localRes = pBlockVector->CheckCorruption();
-
15806  switch(localRes)
-
15807  {
-
15808  case VK_ERROR_FEATURE_NOT_PRESENT:
-
15809  break;
-
15810  case VK_SUCCESS:
-
15811  finalRes = VK_SUCCESS;
-
15812  break;
-
15813  default:
-
15814  return localRes;
-
15815  }
-
15816  }
-
15817  }
-
15818 
-
15819  // Process custom pools.
-
15820  {
-
15821  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
-
15822  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
-
15823  {
-
15824  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
-
15825  {
-
15826  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
-
15827  switch(localRes)
-
15828  {
-
15829  case VK_ERROR_FEATURE_NOT_PRESENT:
-
15830  break;
-
15831  case VK_SUCCESS:
-
15832  finalRes = VK_SUCCESS;
-
15833  break;
-
15834  default:
-
15835  return localRes;
-
15836  }
-
15837  }
-
15838  }
-
15839  }
-
15840 
-
15841  return finalRes;
-
15842 }
-
15843 
-
15844 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
-
15845 {
-
15846  *pAllocation = m_AllocationObjectAllocator.Allocate();
-
15847  (*pAllocation)->Ctor(VMA_FRAME_INDEX_LOST, false);
-
15848  (*pAllocation)->InitLost();
-
15849 }
-
15850 
-
15851 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
-
15852 {
-
15853  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
-
15854 
-
15855  // HeapSizeLimit is in effect for this heap.
-
15856  if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0)
-
15857  {
-
15858  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
-
15859  VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex];
-
15860  for(;;)
-
15861  {
-
15862  const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize;
-
15863  if(blockBytesAfterAllocation > heapSize)
-
15864  {
-
15865  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-
15866  }
-
15867  if(m_Budget.m_BlockBytes->compare_exchange_strong(blockBytes, blockBytesAfterAllocation))
-
15868  {
-
15869  break;
-
15870  }
-
15871  }
-
15872  }
-
15873  else
-
15874  {
-
15875  m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize;
-
15876  }
-
15877 
-
15878  // VULKAN CALL vkAllocateMemory.
-
15879  VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
-
15880 
-
15881  if(res == VK_SUCCESS)
-
15882  {
-
15883 #if VMA_MEMORY_BUDGET
-
15884  ++m_Budget.m_OperationsSinceBudgetFetch;
-
15885 #endif
+
15720 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
+
15721 {
+
15722  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
+
15723 
+
15724  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
+
15725 
+
15726  if(newCreateInfo.maxBlockCount == 0)
+
15727  {
+
15728  newCreateInfo.maxBlockCount = SIZE_MAX;
+
15729  }
+
15730  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
+
15731  {
+
15732  return VK_ERROR_INITIALIZATION_FAILED;
+
15733  }
+
15734 
+
15735  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
+
15736 
+
15737  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
+
15738 
+
15739  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
+
15740  if(res != VK_SUCCESS)
+
15741  {
+
15742  vma_delete(this, *pPool);
+
15743  *pPool = VMA_NULL;
+
15744  return res;
+
15745  }
+
15746 
+
15747  // Add to m_Pools.
+
15748  {
+
15749  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
+
15750  (*pPool)->SetId(m_NextPoolId++);
+
15751  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
+
15752  }
+
15753 
+
15754  return VK_SUCCESS;
+
15755 }
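
Note that maxBlockCount == 0 is normalized to SIZE_MAX, i.e. unlimited. A minimal sketch of creating a custom pool through the public API (the block size and helper name are illustrative):

```cpp
// Sketch: creating a custom pool (v2.3.0 API).
#include "vk_mem_alloc.h"

VkResult CreateSmallBufferPool(VmaAllocator allocator, uint32_t memTypeIndex, VmaPool* outPool)
{
    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memTypeIndex;  // e.g. from vmaFindMemoryTypeIndex
    poolInfo.blockSize = 16ull * 1024 * 1024; // 16 MiB blocks; 0 = preferred size
    poolInfo.minBlockCount = 1;               // pre-allocate one block up front
    poolInfo.maxBlockCount = 0;               // 0 is translated to SIZE_MAX (unlimited)

    return vmaCreatePool(allocator, &poolInfo, outPool);
    // Destroy later with vmaDestroyPool(allocator, *outPool);
}
```
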
+
15756 
+
15757 void VmaAllocator_T::DestroyPool(VmaPool pool)
+
15758 {
+
15759  // Remove from m_Pools.
+
15760  {
+
15761  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
+
15762  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
+
15763  VMA_ASSERT(success && "Pool not found in Allocator.");
+
15764  }
+
15765 
+
15766  vma_delete(this, pool);
+
15767 }
+
15768 
+
15769 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
+
15770 {
+
15771  pool->m_BlockVector.GetPoolStats(pPoolStats);
+
15772 }
+
15773 
+
15774 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
+
15775 {
+
15776  m_CurrentFrameIndex.store(frameIndex);
+
15777 
+
15778 #if VMA_MEMORY_BUDGET
+
15779  if(m_UseExtMemoryBudget)
+
15780  {
+
15781  UpdateVulkanBudget();
+
15782  }
+
15783 #endif // #if VMA_MEMORY_BUDGET
+
15784 }
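
A sketch of the intended per-frame call; with VK_EXT_memory_budget enabled this is also what refreshes the cached budget:

```cpp
// Sketch: advancing the allocator's frame index once per rendered frame.
#include "vk_mem_alloc.h"

void BeginFrame(VmaAllocator allocator, uint32_t frameIndex)
{
    // frameIndex must never equal VMA_FRAME_INDEX_LOST (UINT32_MAX).
    vmaSetCurrentFrameIndex(allocator, frameIndex);
}
```
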
+
15785 
+
15786 void VmaAllocator_T::MakePoolAllocationsLost(
+
15787  VmaPool hPool,
+
15788  size_t* pLostAllocationCount)
+
15789 {
+
15790  hPool->m_BlockVector.MakePoolAllocationsLost(
+
15791  m_CurrentFrameIndex.load(),
+
15792  pLostAllocationCount);
+
15793 }
+
15794 
+
15795 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
+
15796 {
+
15797  return hPool->m_BlockVector.CheckCorruption();
+
15798 }
+
15799 
+
15800 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
+
15801 {
+
15802  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
+
15803 
+
15804  // Process default pools.
+
15805  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
+
15806  {
+
15807  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
+
15808  {
+
15809  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
+
15810  VMA_ASSERT(pBlockVector);
+
15811  VkResult localRes = pBlockVector->CheckCorruption();
+
15812  switch(localRes)
+
15813  {
+
15814  case VK_ERROR_FEATURE_NOT_PRESENT:
+
15815  break;
+
15816  case VK_SUCCESS:
+
15817  finalRes = VK_SUCCESS;
+
15818  break;
+
15819  default:
+
15820  return localRes;
+
15821  }
+
15822  }
+
15823  }
+
15824 
+
15825  // Process custom pools.
+
15826  {
+
15827  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
+
15828  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
+
15829  {
+
15830  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
+
15831  {
+
15832  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
+
15833  switch(localRes)
+
15834  {
+
15835  case VK_ERROR_FEATURE_NOT_PRESENT:
+
15836  break;
+
15837  case VK_SUCCESS:
+
15838  finalRes = VK_SUCCESS;
+
15839  break;
+
15840  default:
+
15841  return localRes;
+
15842  }
+
15843  }
+
15844  }
+
15845  }
+
15846 
+
15847  return finalRes;
+
15848 }
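
A sketch of invoking this from application code via vmaCheckCorruption; passing UINT32_MAX covers every memory type, and VK_ERROR_FEATURE_NOT_PRESENT simply means corruption detection was compiled out or is unavailable for these types:

```cpp
// Sketch: periodic corruption check over all memory types (debug builds).
// Requires VMA_DEBUG_DETECT_CORRUPTION and VMA_DEBUG_MARGIN at compile time.
#include "vk_mem_alloc.h"
#include <cassert>
#include <cstdint>

void CheckAllHeaps(VmaAllocator allocator)
{
    VkResult res = vmaCheckCorruption(allocator, UINT32_MAX); // all memory types
    assert(res == VK_SUCCESS || res == VK_ERROR_FEATURE_NOT_PRESENT);
}
```
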
+
15849 
+
15850 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
+
15851 {
+
15852  *pAllocation = m_AllocationObjectAllocator.Allocate();
+
15853  (*pAllocation)->Ctor(VMA_FRAME_INDEX_LOST, false);
+
15854  (*pAllocation)->InitLost();
+
15855 }
+
15856 
+
15857 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
+
15858 {
+
15859  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
+
15860 
+
15861  // HeapSizeLimit is in effect for this heap.
+
15862  if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0)
+
15863  {
+
15864  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
+
15865  VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex];
+
15866  for(;;)
+
15867  {
+
15868  const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize;
+
15869  if(blockBytesAfterAllocation > heapSize)
+
15870  {
+
15871  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
15872  }
+
15873  if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation))
+
15874  {
+
15875  break;
+
15876  }
+
15877  }
+
15878  }
+
15879  else
+
15880  {
+
15881  m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize;
+
15882  }
+
15883 
+
15884  // VULKAN CALL vkAllocateMemory.
+
15885  VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15886 
-
15887  // Informative callback.
-
15888  if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
-
15889  {
-
15890  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
-
15891  }
-
15892  }
-
15893  else
-
15894  {
-
15895  m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize;
-
15896  }
-
15897 
-
15898  return res;
-
15899 }
-
15900 
-
15901 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
-
15902 {
-
15903  // Informative callback.
-
15904  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
-
15905  {
-
15906  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
-
15907  }
-
15908 
-
15909  // VULKAN CALL vkFreeMemory.
-
15910  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
-
15911 
-
15912  m_Budget.m_BlockBytes[MemoryTypeIndexToHeapIndex(memoryType)] -= size;
-
15913 }
+
15887  if(res == VK_SUCCESS)
+
15888  {
+
15889 #if VMA_MEMORY_BUDGET
+
15890  ++m_Budget.m_OperationsSinceBudgetFetch;
+
15891 #endif
+
15892 
+
15893  // Informative callback.
+
15894  if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
+
15895  {
+
15896  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
+
15897  }
+
15898  }
+
15899  else
+
15900  {
+
15901  m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize;
+
15902  }
+
15903 
+
15904  return res;
+
15905 }
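
The heap-size-limit path above reserves the bytes with a compare-exchange loop before calling vkAllocateMemory, so concurrent threads can never overshoot the limit (the new side also indexes m_BlockBytes[heapIndex] instead of dereferencing the array as a pointer). The same pattern in isolation, as a self-contained sketch rather than VMA code:

```cpp
// Sketch: lock-free byte reservation against a fixed capacity,
// mirroring the compare_exchange_strong loop in AllocateVulkanMemory.
#include <atomic>
#include <cstdint>

bool TryReserve(std::atomic<uint64_t>& used, uint64_t capacity, uint64_t size)
{
    uint64_t current = used.load();
    for(;;)
    {
        const uint64_t after = current + size;
        if(after > capacity)
        {
            return false; // would exceed the limit; nothing was reserved
        }
        // On failure, `current` is reloaded with the latest value and we retry.
        if(used.compare_exchange_strong(current, after))
        {
            return true;
        }
    }
}
```
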
+
15906 
+
15907 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
+
15908 {
+
15909  // Informative callback.
+
15910  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
+
15911  {
+
15912  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
+
15913  }
15914 
-
15915 VkResult VmaAllocator_T::BindVulkanBuffer(
-
15916  VkDeviceMemory memory,
-
15917  VkDeviceSize memoryOffset,
-
15918  VkBuffer buffer,
-
15919  const void* pNext)
-
15920 {
-
15921  if(pNext != VMA_NULL)
-
15922  {
-
15923 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
-
15924  if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
-
15925  m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL)
-
15926  {
-
15927  VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR };
-
15928  bindBufferMemoryInfo.pNext = pNext;
-
15929  bindBufferMemoryInfo.buffer = buffer;
-
15930  bindBufferMemoryInfo.memory = memory;
-
15931  bindBufferMemoryInfo.memoryOffset = memoryOffset;
-
15932  return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
-
15933  }
-
15934  else
-
15935 #endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
-
15936  {
-
15937  return VK_ERROR_EXTENSION_NOT_PRESENT;
-
15938  }
-
15939  }
-
15940  else
-
15941  {
-
15942  return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset);
-
15943  }
-
15944 }
-
15945 
-
15946 VkResult VmaAllocator_T::BindVulkanImage(
-
15947  VkDeviceMemory memory,
-
15948  VkDeviceSize memoryOffset,
-
15949  VkImage image,
-
15950  const void* pNext)
-
15951 {
-
15952  if(pNext != VMA_NULL)
-
15953  {
-
15954 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
-
15955  if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
-
15956  m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL)
-
15957  {
-
15958  VkBindImageMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR };
-
15959  bindBufferMemoryInfo.pNext = pNext;
-
15960  bindBufferMemoryInfo.image = image;
-
15961  bindBufferMemoryInfo.memory = memory;
-
15962  bindBufferMemoryInfo.memoryOffset = memoryOffset;
-
15963  return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
-
15964  }
-
15965  else
-
15966 #endif // #if VMA_BIND_MEMORY2
-
15967  {
-
15968  return VK_ERROR_EXTENSION_NOT_PRESENT;
-
15969  }
-
15970  }
-
15971  else
-
15972  {
-
15973  return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset);
-
15974  }
-
15975 }
-
15976 
-
15977 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
-
15978 {
-
15979  if(hAllocation->CanBecomeLost())
-
15980  {
-
15981  return VK_ERROR_MEMORY_MAP_FAILED;
-
15982  }
-
15983 
-
15984  switch(hAllocation->GetType())
-
15985  {
-
15986  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
-
15987  {
-
15988  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
-
15989  char *pBytes = VMA_NULL;
-
15990  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
-
15991  if(res == VK_SUCCESS)
-
15992  {
-
15993  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
-
15994  hAllocation->BlockAllocMap();
-
15995  }
-
15996  return res;
-
15997  }
-
15998  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
-
15999  return hAllocation->DedicatedAllocMap(this, ppData);
-
16000  default:
-
16001  VMA_ASSERT(0);
-
16002  return VK_ERROR_MEMORY_MAP_FAILED;
-
16003  }
-
16004 }
-
16005 
-
16006 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
-
16007 {
-
16008  switch(hAllocation->GetType())
-
16009  {
-
16010  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
-
16011  {
-
16012  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
-
16013  hAllocation->BlockAllocUnmap();
-
16014  pBlock->Unmap(this, 1);
-
16015  }
-
16016  break;
-
16017  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
-
16018  hAllocation->DedicatedAllocUnmap(this);
-
16019  break;
-
16020  default:
-
16021  VMA_ASSERT(0);
-
16022  }
-
16023 }
-
16024 
-
16025 VkResult VmaAllocator_T::BindBufferMemory(
-
16026  VmaAllocation hAllocation,
-
16027  VkDeviceSize allocationLocalOffset,
-
16028  VkBuffer hBuffer,
-
16029  const void* pNext)
-
16030 {
-
16031  VkResult res = VK_SUCCESS;
-
16032  switch(hAllocation->GetType())
-
16033  {
-
16034  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
-
16035  res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext);
-
16036  break;
-
16037  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
-
16038  {
-
16039  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
-
16040  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
-
16041  res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext);
+
15915  // VULKAN CALL vkFreeMemory.
+
15916  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
+
15917 
+
15918  m_Budget.m_BlockBytes[MemoryTypeIndexToHeapIndex(memoryType)] -= size;
+
15919 }
+
15920 
+
15921 VkResult VmaAllocator_T::BindVulkanBuffer(
+
15922  VkDeviceMemory memory,
+
15923  VkDeviceSize memoryOffset,
+
15924  VkBuffer buffer,
+
15925  const void* pNext)
+
15926 {
+
15927  if(pNext != VMA_NULL)
+
15928  {
+
15929 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
+
15930  if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
+
15931  m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL)
+
15932  {
+
15933  VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR };
+
15934  bindBufferMemoryInfo.pNext = pNext;
+
15935  bindBufferMemoryInfo.buffer = buffer;
+
15936  bindBufferMemoryInfo.memory = memory;
+
15937  bindBufferMemoryInfo.memoryOffset = memoryOffset;
+
15938  return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
+
15939  }
+
15940  else
+
15941 #endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
+
15942  {
+
15943  return VK_ERROR_EXTENSION_NOT_PRESENT;
+
15944  }
+
15945  }
+
15946  else
+
15947  {
+
15948  return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset);
+
15949  }
+
15950 }
+
15951 
+
15952 VkResult VmaAllocator_T::BindVulkanImage(
+
15953  VkDeviceMemory memory,
+
15954  VkDeviceSize memoryOffset,
+
15955  VkImage image,
+
15956  const void* pNext)
+
15957 {
+
15958  if(pNext != VMA_NULL)
+
15959  {
+
15960 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
+
15961  if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
+
15962  m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL)
+
15963  {
+
15964  VkBindImageMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR };
+
15965  bindBufferMemoryInfo.pNext = pNext;
+
15966  bindBufferMemoryInfo.image = image;
+
15967  bindBufferMemoryInfo.memory = memory;
+
15968  bindBufferMemoryInfo.memoryOffset = memoryOffset;
+
15969  return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
+
15970  }
+
15971  else
+
15972 #endif // #if VMA_BIND_MEMORY2
+
15973  {
+
15974  return VK_ERROR_EXTENSION_NOT_PRESENT;
+
15975  }
+
15976  }
+
15977  else
+
15978  {
+
15979  return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset);
+
15980  }
+
15981 }
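
A hedged sketch of driving this path through the public API; vmaBindBufferMemory2 is assumed to be the v2.3.0 entry point, and the device-group struct is just one example of a pNext chain:

```cpp
// Sketch: binding with an extension struct chained through pNext.
#include "vk_mem_alloc.h"

VkResult BindWithDeviceGroup(VmaAllocator allocator, VmaAllocation alloc, VkBuffer buffer)
{
    VkBindBufferMemoryDeviceGroupInfo devGroupInfo =
        { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO };
    // deviceIndexCount / pDeviceIndices would be filled for a real device group.

    // A non-null pNext routes through vkBindBufferMemory2KHR above;
    // VK_ERROR_EXTENSION_NOT_PRESENT is returned if that path is unavailable.
    return vmaBindBufferMemory2(allocator, alloc, 0 /*allocationLocalOffset*/,
        buffer, &devGroupInfo);
}
```
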
+
15982 
+
15983 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
+
15984 {
+
15985  if(hAllocation->CanBecomeLost())
+
15986  {
+
15987  return VK_ERROR_MEMORY_MAP_FAILED;
+
15988  }
+
15989 
+
15990  switch(hAllocation->GetType())
+
15991  {
+
15992  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
+
15993  {
+
15994  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
+
15995  char *pBytes = VMA_NULL;
+
15996  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
+
15997  if(res == VK_SUCCESS)
+
15998  {
+
15999  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
+
16000  hAllocation->BlockAllocMap();
+
16001  }
+
16002  return res;
+
16003  }
+
16004  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+
16005  return hAllocation->DedicatedAllocMap(this, ppData);
+
16006  default:
+
16007  VMA_ASSERT(0);
+
16008  return VK_ERROR_MEMORY_MAP_FAILED;
+
16009  }
+
16010 }
+
16011 
+
16012 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
+
16013 {
+
16014  switch(hAllocation->GetType())
+
16015  {
+
16016  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
+
16017  {
+
16018  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
+
16019  hAllocation->BlockAllocUnmap();
+
16020  pBlock->Unmap(this, 1);
+
16021  }
+
16022  break;
+
16023  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+
16024  hAllocation->DedicatedAllocUnmap(this);
+
16025  break;
+
16026  default:
+
16027  VMA_ASSERT(0);
+
16028  }
+
16029 }
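
A sketch of the matching public-API usage; Map and Unmap are reference-counted per block, so nested mappings of allocations in the same block are safe (UploadData is an illustrative helper):

```cpp
// Sketch: writing to a HOST_VISIBLE allocation through the Map/Unmap pair above.
#include "vk_mem_alloc.h"
#include <cstring>

VkResult UploadData(VmaAllocator allocator, VmaAllocation alloc,
                    const void* src, size_t size)
{
    void* mapped = nullptr;
    VkResult res = vmaMapMemory(allocator, alloc, &mapped); // fails for lost allocations
    if(res == VK_SUCCESS)
    {
        memcpy(mapped, src, size);
        vmaUnmapMemory(allocator, alloc);
    }
    return res;
}
```
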
+
16030 
+
16031 VkResult VmaAllocator_T::BindBufferMemory(
+
16032  VmaAllocation hAllocation,
+
16033  VkDeviceSize allocationLocalOffset,
+
16034  VkBuffer hBuffer,
+
16035  const void* pNext)
+
16036 {
+
16037  VkResult res = VK_SUCCESS;
+
16038  switch(hAllocation->GetType())
+
16039  {
+
16040  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+
16041  res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext);
16042  break;
-
16043  }
-
16044  default:
-
16045  VMA_ASSERT(0);
-
16046  }
-
16047  return res;
-
16048 }
-
16049 
-
16050 VkResult VmaAllocator_T::BindImageMemory(
-
16051  VmaAllocation hAllocation,
-
16052  VkDeviceSize allocationLocalOffset,
-
16053  VkImage hImage,
-
16054  const void* pNext)
-
16055 {
-
16056  VkResult res = VK_SUCCESS;
-
16057  switch(hAllocation->GetType())
-
16058  {
-
16059  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
-
16060  res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext);
-
16061  break;
-
16062  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
-
16063  {
-
16064  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
-
16065  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
-
16066  res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext);
+
16043  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
+
16044  {
+
16045  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
+
16046  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
+
16047  res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext);
+
16048  break;
+
16049  }
+
16050  default:
+
16051  VMA_ASSERT(0);
+
16052  }
+
16053  return res;
+
16054 }
+
16055 
+
16056 VkResult VmaAllocator_T::BindImageMemory(
+
16057  VmaAllocation hAllocation,
+
16058  VkDeviceSize allocationLocalOffset,
+
16059  VkImage hImage,
+
16060  const void* pNext)
+
16061 {
+
16062  VkResult res = VK_SUCCESS;
+
16063  switch(hAllocation->GetType())
+
16064  {
+
16065  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+
16066  res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext);
16067  break;
-
16068  }
-
16069  default:
-
16070  VMA_ASSERT(0);
-
16071  }
-
16072  return res;
-
16073 }
-
16074 
-
16075 void VmaAllocator_T::FlushOrInvalidateAllocation(
-
16076  VmaAllocation hAllocation,
-
16077  VkDeviceSize offset, VkDeviceSize size,
-
16078  VMA_CACHE_OPERATION op)
-
16079 {
-
16080  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
-
16081  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
-
16082  {
-
16083  const VkDeviceSize allocationSize = hAllocation->GetSize();
-
16084  VMA_ASSERT(offset <= allocationSize);
-
16085 
-
16086  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
-
16087 
-
16088  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
-
16089  memRange.memory = hAllocation->GetMemory();
-
16090 
-
16091  switch(hAllocation->GetType())
-
16092  {
-
16093  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
-
16094  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
-
16095  if(size == VK_WHOLE_SIZE)
-
16096  {
-
16097  memRange.size = allocationSize - memRange.offset;
-
16098  }
-
16099  else
-
16100  {
-
16101  VMA_ASSERT(offset + size <= allocationSize);
-
16102  memRange.size = VMA_MIN(
-
16103  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
-
16104  allocationSize - memRange.offset);
-
16105  }
-
16106  break;
-
16107 
-
16108  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
-
16109  {
-
16110  // 1. Still within this allocation.
-
16111  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
-
16112  if(size == VK_WHOLE_SIZE)
-
16113  {
-
16114  size = allocationSize - offset;
-
16115  }
-
16116  else
-
16117  {
-
16118  VMA_ASSERT(offset + size <= allocationSize);
-
16119  }
-
16120  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
-
16121 
-
16122  // 2. Adjust to whole block.
-
16123  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
-
16124  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
-
16125  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
-
16126  memRange.offset += allocationOffset;
-
16127  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
-
16128 
-
16129  break;
-
16130  }
-
16131 
-
16132  default:
-
16133  VMA_ASSERT(0);
-
16134  }
-
16135 
-
16136  switch(op)
-
16137  {
-
16138  case VMA_CACHE_FLUSH:
-
16139  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
-
16140  break;
-
16141  case VMA_CACHE_INVALIDATE:
-
16142  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
-
16143  break;
-
16144  default:
-
16145  VMA_ASSERT(0);
-
16146  }
-
16147  }
-
16148  // else: Just ignore this call.
-
16149 }
-
16150 
-
16151 void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation)
-
16152 {
-
16153  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
-
16154 
-
16155  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
-
16156  {
-
16157  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
-
16158  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
-
16159  VMA_ASSERT(pDedicatedAllocations);
-
16160  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
-
16161  VMA_ASSERT(success);
-
16162  }
-
16163 
-
16164  VkDeviceMemory hMemory = allocation->GetMemory();
-
16165 
-
16166  /*
-
16167  There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
-
16168  before vkFreeMemory.
-
16169 
-
16170  if(allocation->GetMappedData() != VMA_NULL)
-
16171  {
-
16172  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
-
16173  }
-
16174  */
-
16175 
-
16176  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
-
16177 
-
16178  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
-
16179 }
-
16180 
-
16181 uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
-
16182 {
-
16183  VkBufferCreateInfo dummyBufCreateInfo;
-
16184  VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
-
16185 
-
16186  uint32_t memoryTypeBits = 0;
-
16187 
-
16188  // Create buffer.
-
16189  VkBuffer buf = VK_NULL_HANDLE;
-
16190  VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
-
16191  m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
-
16192  if(res == VK_SUCCESS)
-
16193  {
-
16194  // Query for supported memory types.
-
16195  VkMemoryRequirements memReq;
-
16196  (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
-
16197  memoryTypeBits = memReq.memoryTypeBits;
-
16198 
-
16199  // Destroy buffer.
-
16200  (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
-
16201  }
-
16202 
-
16203  return memoryTypeBits;
-
16204 }
-
16205 
-
16206 #if VMA_MEMORY_BUDGET
-
16207 
-
16208 void VmaAllocator_T::UpdateVulkanBudget()
-
16209 {
-
16210  VMA_ASSERT(m_UseExtMemoryBudget);
+
16068  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
+
16069  {
+
16070  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
+
16071  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
+
16072  res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext);
+
16073  break;
+
16074  }
+
16075  default:
+
16076  VMA_ASSERT(0);
+
16077  }
+
16078  return res;
+
16079 }
+
16080 
+
16081 void VmaAllocator_T::FlushOrInvalidateAllocation(
+
16082  VmaAllocation hAllocation,
+
16083  VkDeviceSize offset, VkDeviceSize size,
+
16084  VMA_CACHE_OPERATION op)
+
16085 {
+
16086  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
+
16087  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
+
16088  {
+
16089  const VkDeviceSize allocationSize = hAllocation->GetSize();
+
16090  VMA_ASSERT(offset <= allocationSize);
+
16091 
+
16092  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
+
16093 
+
16094  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
+
16095  memRange.memory = hAllocation->GetMemory();
+
16096 
+
16097  switch(hAllocation->GetType())
+
16098  {
+
16099  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+
16100  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
+
16101  if(size == VK_WHOLE_SIZE)
+
16102  {
+
16103  memRange.size = allocationSize - memRange.offset;
+
16104  }
+
16105  else
+
16106  {
+
16107  VMA_ASSERT(offset + size <= allocationSize);
+
16108  memRange.size = VMA_MIN(
+
16109  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
+
16110  allocationSize - memRange.offset);
+
16111  }
+
16112  break;
+
16113 
+
16114  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
+
16115  {
+
16116  // 1. Still within this allocation.
+
16117  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
+
16118  if(size == VK_WHOLE_SIZE)
+
16119  {
+
16120  size = allocationSize - offset;
+
16121  }
+
16122  else
+
16123  {
+
16124  VMA_ASSERT(offset + size <= allocationSize);
+
16125  }
+
16126  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
+
16127 
+
16128  // 2. Adjust to whole block.
+
16129  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
+
16130  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
+
16131  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
+
16132  memRange.offset += allocationOffset;
+
16133  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
+
16134 
+
16135  break;
+
16136  }
+
16137 
+
16138  default:
+
16139  VMA_ASSERT(0);
+
16140  }
+
16141 
+
16142  switch(op)
+
16143  {
+
16144  case VMA_CACHE_FLUSH:
+
16145  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
+
16146  break;
+
16147  case VMA_CACHE_INVALIDATE:
+
16148  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
+
16149  break;
+
16150  default:
+
16151  VMA_ASSERT(0);
+
16152  }
+
16153  }
+
16154  // else: Just ignore this call.
+
16155 }
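
Because the function is a no-op for coherent memory types, callers can flush unconditionally after writing through a mapped pointer. A sketch assuming the v2.3.0 signature (which returns void):

```cpp
// Sketch: flushing a write to a HOST_VISIBLE, non-HOST_COHERENT allocation.
#include "vk_mem_alloc.h"

void WriteAndFlush(VmaAllocator allocator, VmaAllocation alloc,
                   VkDeviceSize offset, VkDeviceSize size)
{
    // ... write `size` bytes at `offset` through a mapped pointer ...
    // Offsets/sizes are rounded to nonCoherentAtomSize internally.
    vmaFlushAllocation(allocator, alloc, offset, size);
}
```
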
+
16156 
+
16157 void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation)
+
16158 {
+
16159  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
+
16160 
+
16161  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
+
16162  {
+
16163  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
+
16164  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
+
16165  VMA_ASSERT(pDedicatedAllocations);
+
16166  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
+
16167  VMA_ASSERT(success);
+
16168  }
+
16169 
+
16170  VkDeviceMemory hMemory = allocation->GetMemory();
+
16171 
+
16172  /*
+
16173  There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
+
16174  before vkFreeMemory.
+
16175 
+
16176  if(allocation->GetMappedData() != VMA_NULL)
+
16177  {
+
16178  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
+
16179  }
+
16180  */
+
16181 
+
16182  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
+
16183 
+
16184  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
+
16185 }
+
16186 
+
16187 uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
+
16188 {
+
16189  VkBufferCreateInfo dummyBufCreateInfo;
+
16190  VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
+
16191 
+
16192  uint32_t memoryTypeBits = 0;
+
16193 
+
16194  // Create buffer.
+
16195  VkBuffer buf = VK_NULL_HANDLE;
+
16196  VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
+
16197  m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
+
16198  if(res == VK_SUCCESS)
+
16199  {
+
16200  // Query for supported memory types.
+
16201  VkMemoryRequirements memReq;
+
16202  (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
+
16203  memoryTypeBits = memReq.memoryTypeBits;
+
16204 
+
16205  // Destroy buffer.
+
16206  (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
+
16207  }
+
16208 
+
16209  return memoryTypeBits;
+
16210 }
16211 
-
16212  VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR };
+
16212 #if VMA_MEMORY_BUDGET
16213 
-
16214  VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT };
-
16215  memProps.pNext = &budgetProps;
-
16216 
-
16217  GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps);
-
16218 
-
16219  {
-
16220  VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex);
-
16221 
-
16222  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
-
16223  {
-
16224  m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex];
-
16225  m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex];
-
16226  m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load();
-
16227  }
-
16228  m_Budget.m_OperationsSinceBudgetFetch = 0;
-
16229  }
-
16230 }
-
16231 
-
16232 #endif // #if VMA_MEMORY_BUDGET
-
16233 
-
16234 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
-
16235 {
-
16236  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
-
16237  !hAllocation->CanBecomeLost() &&
-
16238  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
-
16239  {
-
16240  void* pData = VMA_NULL;
-
16241  VkResult res = Map(hAllocation, &pData);
-
16242  if(res == VK_SUCCESS)
-
16243  {
-
16244  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
-
16245  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
-
16246  Unmap(hAllocation);
-
16247  }
-
16248  else
+
16214 void VmaAllocator_T::UpdateVulkanBudget()
+
16215 {
+
16216  VMA_ASSERT(m_UseExtMemoryBudget);
+
16217 
+
16218  VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR };
+
16219 
+
16220  VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT };
+
16221  memProps.pNext = &budgetProps;
+
16222 
+
16223  GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps);
+
16224 
+
16225  {
+
16226  VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex);
+
16227 
+
16228  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
+
16229  {
+
16230  m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex];
+
16231  m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex];
+
16232  m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load();
+
16233  }
+
16234  m_Budget.m_OperationsSinceBudgetFetch = 0;
+
16235  }
+
16236 }
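
A sketch of opting in at allocator creation, under the assumption that the application has already enabled VK_EXT_memory_budget (and its dependency VK_KHR_get_physical_device_properties2) when creating the device and instance:

```cpp
// Sketch: enabling the budget extension on the allocator (assumed v2.3.0 API).
#include "vk_mem_alloc.h"

VkResult CreateAllocatorWithBudget(VkInstance instance, VkPhysicalDevice physDev,
                                   VkDevice device, VmaAllocator* outAllocator)
{
    VmaAllocatorCreateInfo createInfo = {};
    createInfo.instance = instance;
    createInfo.physicalDevice = physDev;
    createInfo.device = device;
    createInfo.flags = VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT;

    return vmaCreateAllocator(&createInfo, outAllocator);
}
```
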
+
16237 
+
16238 #endif // #if VMA_MEMORY_BUDGET
+
16239 
+
16240 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
+
16241 {
+
16242  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
+
16243  !hAllocation->CanBecomeLost() &&
+
16244  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
+
16245  {
+
16246  void* pData = VMA_NULL;
+
16247  VkResult res = Map(hAllocation, &pData);
+
16248  if(res == VK_SUCCESS)
16249  {
-
16250  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
-
16251  }
-
16252  }
-
16253 }
-
16254 
-
16255 uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
-
16256 {
-
16257  uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
-
16258  if(memoryTypeBits == UINT32_MAX)
-
16259  {
-
16260  memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
-
16261  m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
-
16262  }
-
16263  return memoryTypeBits;
-
16264 }
-
16265 
-
16266 #if VMA_STATS_STRING_ENABLED
-
16267 
-
16268 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
-
16269 {
-
16270  bool dedicatedAllocationsStarted = false;
-
16271  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
-
16272  {
-
16273  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
-
16274  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
-
16275  VMA_ASSERT(pDedicatedAllocVector);
-
16276  if(pDedicatedAllocVector->empty() == false)
-
16277  {
-
16278  if(dedicatedAllocationsStarted == false)
-
16279  {
-
16280  dedicatedAllocationsStarted = true;
-
16281  json.WriteString("DedicatedAllocations");
-
16282  json.BeginObject();
-
16283  }
-
16284 
-
16285  json.BeginString("Type ");
-
16286  json.ContinueString(memTypeIndex);
-
16287  json.EndString();
-
16288 
-
16289  json.BeginArray();
+
16250  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
+
16251  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
+
16252  Unmap(hAllocation);
+
16253  }
+
16254  else
+
16255  {
+
16256  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
+
16257  }
+
16258  }
+
16259 }
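
FillAllocation only runs when VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled at compile time. A sketch of that configuration, done before including the implementation:

```cpp
// Sketch: compile-time opt-in to allocation fill patterns (one translation unit).
#define VMA_DEBUG_INITIALIZE_ALLOCATIONS 1 // fill new/freed host-visible memory
#define VMA_IMPLEMENTATION
#include "vk_mem_alloc.h"
```
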
+
16260 
+
16261 uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
+
16262 {
+
16263  uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
+
16264  if(memoryTypeBits == UINT32_MAX)
+
16265  {
+
16266  memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
+
16267  m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
+
16268  }
+
16269  return memoryTypeBits;
+
16270 }
+
16271 
+
16272 #if VMA_STATS_STRING_ENABLED
+
16273 
+
16274 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
+
16275 {
+
16276  bool dedicatedAllocationsStarted = false;
+
16277  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
+
16278  {
+
16279  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
+
16280  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
+
16281  VMA_ASSERT(pDedicatedAllocVector);
+
16282  if(pDedicatedAllocVector->empty() == false)
+
16283  {
+
16284  if(dedicatedAllocationsStarted == false)
+
16285  {
+
16286  dedicatedAllocationsStarted = true;
+
16287  json.WriteString("DedicatedAllocations");
+
16288  json.BeginObject();
+
16289  }
16290 
-
16291  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
-
16292  {
-
16293  json.BeginObject(true);
-
16294  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
-
16295  hAlloc->PrintParameters(json);
-
16296  json.EndObject();
-
16297  }
-
16298 
-
16299  json.EndArray();
-
16300  }
-
16301  }
-
16302  if(dedicatedAllocationsStarted)
-
16303  {
-
16304  json.EndObject();
-
16305  }
-
16306 
-
16307  {
-
16308  bool allocationsStarted = false;
-
16309  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
-
16310  {
-
16311  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
-
16312  {
-
16313  if(allocationsStarted == false)
-
16314  {
-
16315  allocationsStarted = true;
-
16316  json.WriteString("DefaultPools");
-
16317  json.BeginObject();
-
16318  }
-
16319 
-
16320  json.BeginString("Type ");
-
16321  json.ContinueString(memTypeIndex);
-
16322  json.EndString();
-
16323 
-
16324  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
-
16325  }
-
16326  }
-
16327  if(allocationsStarted)
-
16328  {
-
16329  json.EndObject();
-
16330  }
-
16331  }
-
16332 
-
16333  // Custom pools
-
16334  {
-
16335  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
-
16336  const size_t poolCount = m_Pools.size();
-
16337  if(poolCount > 0)
-
16338  {
-
16339  json.WriteString("Pools");
-
16340  json.BeginObject();
-
16341  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
-
16342  {
-
16343  json.BeginString();
-
16344  json.ContinueString(m_Pools[poolIndex]->GetId());
-
16345  json.EndString();
-
16346 
-
16347  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
-
16348  }
-
16349  json.EndObject();
-
16350  }
-
16351  }
-
16352 }
-
16353 
-
16354 #endif // #if VMA_STATS_STRING_ENABLED
-
16355 
-
16356 ////////////////////////////////////////////////////////////////////////////////
-
16357 // Public interface
-
16358 
-
16359 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
-
16360  const VmaAllocatorCreateInfo* pCreateInfo,
-
16361  VmaAllocator* pAllocator)
-
16362 {
-
16363  VMA_ASSERT(pCreateInfo && pAllocator);
-
16364  VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 ||
-
16365  (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 1));
-
16366  VMA_DEBUG_LOG("vmaCreateAllocator");
-
16367  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
-
16368  return (*pAllocator)->Init(pCreateInfo);
-
16369 }
-
16370 
-
16371 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
-
16372  VmaAllocator allocator)
-
16373 {
-
16374  if(allocator != VK_NULL_HANDLE)
-
16375  {
-
16376  VMA_DEBUG_LOG("vmaDestroyAllocator");
-
16377  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
-
16378  vma_delete(&allocationCallbacks, allocator);
-
16379  }
-
16380 }
-
16381 
-
16382 VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
-
16383  VmaAllocator allocator,
-
16384  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
-
16385 {
-
16386  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
-
16387  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
-
16388 }
-
16389 
-
16390 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
-
16391  VmaAllocator allocator,
-
16392  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
-
16393 {
-
16394  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
-
16395  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
-
16396 }
-
16397 
-
16398 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
-
16399  VmaAllocator allocator,
-
16400  uint32_t memoryTypeIndex,
-
16401  VkMemoryPropertyFlags* pFlags)
-
16402 {
-
16403  VMA_ASSERT(allocator && pFlags);
-
16404  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
-
16405  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
-
16406 }
-
16407 
-
16408 VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
-
16409  VmaAllocator allocator,
-
16410  uint32_t frameIndex)
-
16411 {
-
16412  VMA_ASSERT(allocator);
-
16413  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
-
16414 
-
16415  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
16416 
-
16417  allocator->SetCurrentFrameIndex(frameIndex);
-
16418 }
-
16419 
-
16420 VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
-
16421  VmaAllocator allocator,
-
16422  VmaStats* pStats)
-
16423 {
-
16424  VMA_ASSERT(allocator && pStats);
-
16425  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
16426  allocator->CalculateStats(pStats);
-
16427 }
-
16428 
-
16429 VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
-
16430  VmaAllocator allocator,
-
16431  VmaBudget* pBudget)
-
16432 {
-
16433  VMA_ASSERT(allocator && pBudget);
-
16434  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
16435  allocator->GetBudget(pBudget, 0, allocator->GetMemoryHeapCount());
-
16436 }
-
16437 
-
16438 #if VMA_STATS_STRING_ENABLED
-
16439 
-
16440 VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
-
16441  VmaAllocator allocator,
-
16442  char** ppStatsString,
-
16443  VkBool32 detailedMap)
-
16444 {
-
16445  VMA_ASSERT(allocator && ppStatsString);
-
16446  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
16447 
-
16448  VmaStringBuilder sb(allocator);
-
16449  {
-
16450  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
-
16451  json.BeginObject();
-
16452 
-
16453  VmaBudget budget[VK_MAX_MEMORY_HEAPS];
-
16454  allocator->GetBudget(budget, 0, allocator->GetMemoryHeapCount());
-
16455 
-
16456  VmaStats stats;
-
16457  allocator->CalculateStats(&stats);
+
16291  json.BeginString("Type ");
+
16292  json.ContinueString(memTypeIndex);
+
16293  json.EndString();
+
16294 
+
16295  json.BeginArray();
+
16296 
+
16297  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
+
16298  {
+
16299  json.BeginObject(true);
+
16300  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
+
16301  hAlloc->PrintParameters(json);
+
16302  json.EndObject();
+
16303  }
+
16304 
+
16305  json.EndArray();
+
16306  }
+
16307  }
+
16308  if(dedicatedAllocationsStarted)
+
16309  {
+
16310  json.EndObject();
+
16311  }
+
16312 
+
16313  {
+
16314  bool allocationsStarted = false;
+
16315  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
+
16316  {
+
16317  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
+
16318  {
+
16319  if(allocationsStarted == false)
+
16320  {
+
16321  allocationsStarted = true;
+
16322  json.WriteString("DefaultPools");
+
16323  json.BeginObject();
+
16324  }
+
16325 
+
16326  json.BeginString("Type ");
+
16327  json.ContinueString(memTypeIndex);
+
16328  json.EndString();
+
16329 
+
16330  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
+
16331  }
+
16332  }
+
16333  if(allocationsStarted)
+
16334  {
+
16335  json.EndObject();
+
16336  }
+
16337  }
+
16338 
+
16339  // Custom pools
+
16340  {
+
16341  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
+
16342  const size_t poolCount = m_Pools.size();
+
16343  if(poolCount > 0)
+
16344  {
+
16345  json.WriteString("Pools");
+
16346  json.BeginObject();
+
16347  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
+
16348  {
+
16349  json.BeginString();
+
16350  json.ContinueString(m_Pools[poolIndex]->GetId());
+
16351  json.EndString();
+
16352 
+
16353  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
+
16354  }
+
16355  json.EndObject();
+
16356  }
+
16357  }
+
16358 }
+
16359 
+
16360 #endif // #if VMA_STATS_STRING_ENABLED
+
16361 
+
16362 ////////////////////////////////////////////////////////////////////////////////
+
16363 // Public interface
+
16364 
+
16365 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
+
16366  const VmaAllocatorCreateInfo* pCreateInfo,
+
16367  VmaAllocator* pAllocator)
+
16368 {
+
16369  VMA_ASSERT(pCreateInfo && pAllocator);
+
16370  VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 ||
+
16371  (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 1));
+
16372  VMA_DEBUG_LOG("vmaCreateAllocator");
+
16373  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
+
16374  return (*pAllocator)->Init(pCreateInfo);
+
16375 }
+
16376 
+
16377 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
+
16378  VmaAllocator allocator)
+
16379 {
+
16380  if(allocator != VK_NULL_HANDLE)
+
16381  {
+
16382  VMA_DEBUG_LOG("vmaDestroyAllocator");
+
16383  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
+
16384  vma_delete(&allocationCallbacks, allocator);
+
16385  }
+
16386 }
+
16387 
+
16388 VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
+
16389  VmaAllocator allocator,
+
16390  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
+
16391 {
+
16392  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
+
16393  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
+
16394 }
+
16395 
+
16396 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
+
16397  VmaAllocator allocator,
+
16398  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
+
16399 {
+
16400  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
+
16401  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
+
16402 }
+
16403 
+
16404 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
+
16405  VmaAllocator allocator,
+
16406  uint32_t memoryTypeIndex,
+
16407  VkMemoryPropertyFlags* pFlags)
+
16408 {
+
16409  VMA_ASSERT(allocator && pFlags);
+
16410  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
+
16411  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
+
16412 }
+
16413 
+
16414 VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
+
16415  VmaAllocator allocator,
+
16416  uint32_t frameIndex)
+
16417 {
+
16418  VMA_ASSERT(allocator);
+
16419  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
+
16420 
+
16421  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
16422 
+
16423  allocator->SetCurrentFrameIndex(frameIndex);
+
16424 }
+
16425 
+
16426 VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
+
16427  VmaAllocator allocator,
+
16428  VmaStats* pStats)
+
16429 {
+
16430  VMA_ASSERT(allocator && pStats);
+
16431  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
16432  allocator->CalculateStats(pStats);
+
16433 }
+
16434 
+
16435 VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
+
16436  VmaAllocator allocator,
+
16437  VmaBudget* pBudget)
+
16438 {
+
16439  VMA_ASSERT(allocator && pBudget);
+
16440  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
16441  allocator->GetBudget(pBudget, 0, allocator->GetMemoryHeapCount());
+
16442 }
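
// Illustrative usage (editor's sketch, not part of the header): querying the
// current budget of every memory heap before a large allocation. Assumes an
// initialized VmaAllocator g_Allocator created by the application.
VmaBudget budgets[VK_MAX_MEMORY_HEAPS] = {};
vmaGetBudget(g_Allocator, budgets);
const VkPhysicalDeviceMemoryProperties* memProps = VMA_NULL;
vmaGetMemoryProperties(g_Allocator, &memProps);
for(uint32_t heap = 0; heap < memProps->memoryHeapCount; ++heap)
{
    // usage can temporarily exceed budget when other processes consume memory.
    const bool overBudget = budgets[heap].usage > budgets[heap].budget;
    (void)overBudget; // e.g. postpone streaming uploads while over budget
}
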
+
16443 
+
16444 #if VMA_STATS_STRING_ENABLED
+
16445 
+
16446 VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
+
16447  VmaAllocator allocator,
+
16448  char** ppStatsString,
+
16449  VkBool32 detailedMap)
+
16450 {
+
16451  VMA_ASSERT(allocator && ppStatsString);
+
16452  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
16453 
+
16454  VmaStringBuilder sb(allocator);
+
16455  {
+
16456  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
+
16457  json.BeginObject();
16458 
-
16459  json.WriteString("Total");
-
16460  VmaPrintStatInfo(json, stats.total);
-
16461 
-
16462  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
-
16463  {
-
16464  json.BeginString("Heap ");
-
16465  json.ContinueString(heapIndex);
-
16466  json.EndString();
-
16467  json.BeginObject();
-
16468 
-
16469  json.WriteString("Size");
-
16470  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
-
16471 
-
16472  json.WriteString("Flags");
-
16473  json.BeginArray(true);
-
16474  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
-
16475  {
-
16476  json.WriteString("DEVICE_LOCAL");
-
16477  }
-
16478  json.EndArray();
-
16479 
-
16480  json.WriteString("Budget");
-
16481  json.BeginObject();
-
16482  {
-
16483  json.WriteString("BlockBytes");
-
16484  json.WriteNumber(budget[heapIndex].blockBytes);
-
16485  json.WriteString("AllocationBytes");
-
16486  json.WriteNumber(budget[heapIndex].allocationBytes);
-
16487  json.WriteString("Usage");
-
16488  json.WriteNumber(budget[heapIndex].usage);
-
16489  json.WriteString("Budget");
-
16490  json.WriteNumber(budget[heapIndex].budget);
-
16491  }
-
16492  json.EndObject();
-
16493 
-
16494  if(stats.memoryHeap[heapIndex].blockCount > 0)
-
16495  {
-
16496  json.WriteString("Stats");
-
16497  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
-
16498  }
+
16459  VmaBudget budget[VK_MAX_MEMORY_HEAPS];
+
16460  allocator->GetBudget(budget, 0, allocator->GetMemoryHeapCount());
+
16461 
+
16462  VmaStats stats;
+
16463  allocator->CalculateStats(&stats);
+
16464 
+
16465  json.WriteString("Total");
+
16466  VmaPrintStatInfo(json, stats.total);
+
16467 
+
16468  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
+
16469  {
+
16470  json.BeginString("Heap ");
+
16471  json.ContinueString(heapIndex);
+
16472  json.EndString();
+
16473  json.BeginObject();
+
16474 
+
16475  json.WriteString("Size");
+
16476  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
+
16477 
+
16478  json.WriteString("Flags");
+
16479  json.BeginArray(true);
+
16480  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
+
16481  {
+
16482  json.WriteString("DEVICE_LOCAL");
+
16483  }
+
16484  json.EndArray();
+
16485 
+
16486  json.WriteString("Budget");
+
16487  json.BeginObject();
+
16488  {
+
16489  json.WriteString("BlockBytes");
+
16490  json.WriteNumber(budget[heapIndex].blockBytes);
+
16491  json.WriteString("AllocationBytes");
+
16492  json.WriteNumber(budget[heapIndex].allocationBytes);
+
16493  json.WriteString("Usage");
+
16494  json.WriteNumber(budget[heapIndex].usage);
+
16495  json.WriteString("Budget");
+
16496  json.WriteNumber(budget[heapIndex].budget);
+
16497  }
+
16498  json.EndObject();
16499 
-
16500  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
+
16500  if(stats.memoryHeap[heapIndex].blockCount > 0)
16501  {
-
16502  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
-
16503  {
-
16504  json.BeginString("Type ");
-
16505  json.ContinueString(typeIndex);
-
16506  json.EndString();
-
16507 
-
16508  json.BeginObject();
-
16509 
-
16510  json.WriteString("Flags");
-
16511  json.BeginArray(true);
-
16512  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
-
16513  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
-
16514  {
-
16515  json.WriteString("DEVICE_LOCAL");
-
16516  }
-
16517  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
-
16518  {
-
16519  json.WriteString("HOST_VISIBLE");
-
16520  }
-
16521  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
-
16522  {
-
16523  json.WriteString("HOST_COHERENT");
-
16524  }
-
16525  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
-
16526  {
-
16527  json.WriteString("HOST_CACHED");
-
16528  }
-
16529  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
-
16530  {
-
16531  json.WriteString("LAZILY_ALLOCATED");
-
16532  }
-
16533  json.EndArray();
-
16534 
-
16535  if(stats.memoryType[typeIndex].blockCount > 0)
+
16502  json.WriteString("Stats");
+
16503  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
+
16504  }
+
16505 
+
16506  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
+
16507  {
+
16508  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
+
16509  {
+
16510  json.BeginString("Type ");
+
16511  json.ContinueString(typeIndex);
+
16512  json.EndString();
+
16513 
+
16514  json.BeginObject();
+
16515 
+
16516  json.WriteString("Flags");
+
16517  json.BeginArray(true);
+
16518  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
+
16519  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
+
16520  {
+
16521  json.WriteString("DEVICE_LOCAL");
+
16522  }
+
16523  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
+
16524  {
+
16525  json.WriteString("HOST_VISIBLE");
+
16526  }
+
16527  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
+
16528  {
+
16529  json.WriteString("HOST_COHERENT");
+
16530  }
+
16531  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
+
16532  {
+
16533  json.WriteString("HOST_CACHED");
+
16534  }
+
16535  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
16536  {
-
16537  json.WriteString("Stats");
-
16538  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
-
16539  }
+
16537  json.WriteString("LAZILY_ALLOCATED");
+
16538  }
+
16539  json.EndArray();
16540 
-
16541  json.EndObject();
-
16542  }
-
16543  }
-
16544 
-
16545  json.EndObject();
-
16546  }
-
16547  if(detailedMap == VK_TRUE)
-
16548  {
-
16549  allocator->PrintDetailedMap(json);
-
16550  }
-
16551 
-
16552  json.EndObject();
-
16553  }
-
16554 
-
16555  const size_t len = sb.GetLength();
-
16556  char* const pChars = vma_new_array(allocator, char, len + 1);
-
16557  if(len > 0)
-
16558  {
-
16559  memcpy(pChars, sb.GetData(), len);
-
16560  }
-
16561  pChars[len] = '\0';
-
16562  *ppStatsString = pChars;
-
16563 }
-
16564 
-
16565 VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
-
16566  VmaAllocator allocator,
-
16567  char* pStatsString)
-
16568 {
-
16569  if(pStatsString != VMA_NULL)
-
16570  {
-
16571  VMA_ASSERT(allocator);
-
16572  size_t len = strlen(pStatsString);
-
16573  vma_delete_array(allocator, pStatsString, len + 1);
-
16574  }
-
16575 }
-
16576 
-
16577 #endif // #if VMA_STATS_STRING_ENABLED
-
16578 
-
16579 /*
-
16580 This function is not protected by any mutex because it just reads immutable data.
-
16581 */
-
16582 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
-
16583  VmaAllocator allocator,
-
16584  uint32_t memoryTypeBits,
-
16585  const VmaAllocationCreateInfo* pAllocationCreateInfo,
-
16586  uint32_t* pMemoryTypeIndex)
-
16587 {
-
16588  VMA_ASSERT(allocator != VK_NULL_HANDLE);
-
16589  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
-
16590  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
-
16591 
-
16592  if(pAllocationCreateInfo->memoryTypeBits != 0)
-
16593  {
-
16594  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
-
16595  }
-
16596 
-
16597  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
-
16598  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
-
16599  uint32_t notPreferredFlags = 0;
-
16600 
-
16601  // Convert usage to requiredFlags and preferredFlags.
-
16602  switch(pAllocationCreateInfo->usage)
-
16603  {
-
16604  case VMA_MEMORY_USAGE_UNKNOWN:
-
16605  break;
-
16606  case VMA_MEMORY_USAGE_GPU_ONLY:
-
16607  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
-
16608  {
-
16609  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-
16610  }
+
16541  if(stats.memoryType[typeIndex].blockCount > 0)
+
16542  {
+
16543  json.WriteString("Stats");
+
16544  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
+
16545  }
+
16546 
+
16547  json.EndObject();
+
16548  }
+
16549  }
+
16550 
+
16551  json.EndObject();
+
16552  }
+
16553  if(detailedMap == VK_TRUE)
+
16554  {
+
16555  allocator->PrintDetailedMap(json);
+
16556  }
+
16557 
+
16558  json.EndObject();
+
16559  }
+
16560 
+
16561  const size_t len = sb.GetLength();
+
16562  char* const pChars = vma_new_array(allocator, char, len + 1);
+
16563  if(len > 0)
+
16564  {
+
16565  memcpy(pChars, sb.GetData(), len);
+
16566  }
+
16567  pChars[len] = '\0';
+
16568  *ppStatsString = pChars;
+
16569 }
+
16570 
+
16571 VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
+
16572  VmaAllocator allocator,
+
16573  char* pStatsString)
+
16574 {
+
16575  if(pStatsString != VMA_NULL)
+
16576  {
+
16577  VMA_ASSERT(allocator);
+
16578  size_t len = strlen(pStatsString);
+
16579  vma_delete_array(allocator, pStatsString, len + 1);
+
16580  }
+
16581 }
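
// Illustrative usage (editor's sketch, not part of the header): dumping the
// allocator state as JSON and releasing the string with the matching call.
// Assumes an initialized VmaAllocator g_Allocator.
char* statsString = VMA_NULL;
vmaBuildStatsString(g_Allocator, &statsString, VK_TRUE); // VK_TRUE: include detailed map
// ... write statsString to a log or file ...
vmaFreeStatsString(g_Allocator, statsString);
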
+
16582 
+
16583 #endif // #if VMA_STATS_STRING_ENABLED
+
16584 
+
16585 /*
+
16586 This function is not protected by any mutex because it just reads immutable data.
+
16587 */
+
16588 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
+
16589  VmaAllocator allocator,
+
16590  uint32_t memoryTypeBits,
+
16591  const VmaAllocationCreateInfo* pAllocationCreateInfo,
+
16592  uint32_t* pMemoryTypeIndex)
+
16593 {
+
16594  VMA_ASSERT(allocator != VK_NULL_HANDLE);
+
16595  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
+
16596  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
+
16597 
+
16598  if(pAllocationCreateInfo->memoryTypeBits != 0)
+
16599  {
+
16600  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
+
16601  }
+
16602 
+
16603  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
+
16604  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
+
16605  uint32_t notPreferredFlags = 0;
+
16606 
+
16607  // Convert usage to requiredFlags and preferredFlags.
+
16608  switch(pAllocationCreateInfo->usage)
+
16609  {
+
16610  case VMA_MEMORY_USAGE_UNKNOWN:
+
16611  break;
-
16612  case VMA_MEMORY_USAGE_CPU_ONLY:
-
16613  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
-
16614  break;
-
16615  case VMA_MEMORY_USAGE_CPU_TO_GPU:
-
16616  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
-
16617  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
-
16618  {
-
16619  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-
16620  }
-
16621  break;
-
16622  case VMA_MEMORY_USAGE_GPU_TO_CPU:
-
16623  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
-
16624  preferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
-
16625  break;
-
16626  case VMA_MEMORY_USAGE_CPU_COPY:
-
16627  notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-
16628  break;
-
16629  case VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED:
-
16630  requiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
+
16612  case VMA_MEMORY_USAGE_GPU_ONLY:
+
16613  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
+
16614  {
+
16615  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+
16616  }
+
16617  break;
+
16618  case VMA_MEMORY_USAGE_CPU_ONLY:
+
16619  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
+
16620  break;
+
16621  case VMA_MEMORY_USAGE_CPU_TO_GPU:
+
16622  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+
16623  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
+
16624  {
+
16625  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+
16626  }
+
16627  break;
+
16628  case VMA_MEMORY_USAGE_GPU_TO_CPU:
+
16629  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+
16630  preferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
16631  break;
-
16632  default:
-
16633  VMA_ASSERT(0);
+
16632  case VMA_MEMORY_USAGE_CPU_COPY:
+
16633  notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
16634  break;
-
16635  }
-
16636 
-
16637  *pMemoryTypeIndex = UINT32_MAX;
-
16638  uint32_t minCost = UINT32_MAX;
-
16639  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
-
16640  memTypeIndex < allocator->GetMemoryTypeCount();
-
16641  ++memTypeIndex, memTypeBit <<= 1)
-
16642  {
-
16643  // This memory type is acceptable according to memoryTypeBits bitmask.
-
16644  if((memTypeBit & memoryTypeBits) != 0)
-
16645  {
-
16646  const VkMemoryPropertyFlags currFlags =
-
16647  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
-
16648  // This memory type contains requiredFlags.
-
16649  if((requiredFlags & ~currFlags) == 0)
-
16650  {
-
16651  // Calculate cost as number of bits from preferredFlags not present in this memory type.
-
16652  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags) +
-
16653  VmaCountBitsSet(currFlags & notPreferredFlags);
-
16654  // Remember memory type with lowest cost.
-
16655  if(currCost < minCost)
-
16656  {
-
16657  *pMemoryTypeIndex = memTypeIndex;
-
16658  if(currCost == 0)
-
16659  {
-
16660  return VK_SUCCESS;
-
16661  }
-
16662  minCost = currCost;
-
16663  }
-
16664  }
-
16665  }
-
16666  }
-
16667  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
-
16668 }
-
16669 
-
16670 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
-
16671  VmaAllocator allocator,
-
16672  const VkBufferCreateInfo* pBufferCreateInfo,
-
16673  const VmaAllocationCreateInfo* pAllocationCreateInfo,
-
16674  uint32_t* pMemoryTypeIndex)
-
16675 {
-
16676  VMA_ASSERT(allocator != VK_NULL_HANDLE);
-
16677  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
-
16678  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
-
16679  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
-
16680 
-
16681  const VkDevice hDev = allocator->m_hDevice;
-
16682  VkBuffer hBuffer = VK_NULL_HANDLE;
-
16683  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
-
16684  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
-
16685  if(res == VK_SUCCESS)
-
16686  {
-
16687  VkMemoryRequirements memReq = {};
-
16688  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
-
16689  hDev, hBuffer, &memReq);
-
16690 
-
16691  res = vmaFindMemoryTypeIndex(
-
16692  allocator,
-
16693  memReq.memoryTypeBits,
-
16694  pAllocationCreateInfo,
-
16695  pMemoryTypeIndex);
+
16635  case VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED:
+
16636  requiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
+
16637  break;
+
16638  default:
+
16639  VMA_ASSERT(0);
+
16640  break;
+
16641  }
+
16642 
+
16643  *pMemoryTypeIndex = UINT32_MAX;
+
16644  uint32_t minCost = UINT32_MAX;
+
16645  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
+
16646  memTypeIndex < allocator->GetMemoryTypeCount();
+
16647  ++memTypeIndex, memTypeBit <<= 1)
+
16648  {
+
16649  // This memory type is acceptable according to memoryTypeBits bitmask.
+
16650  if((memTypeBit & memoryTypeBits) != 0)
+
16651  {
+
16652  const VkMemoryPropertyFlags currFlags =
+
16653  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
+
16654  // This memory type contains requiredFlags.
+
16655  if((requiredFlags & ~currFlags) == 0)
+
16656  {
+
16657  // Calculate cost as number of bits from preferredFlags not present in this memory type.
+
16658  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags) +
+
16659  VmaCountBitsSet(currFlags & notPreferredFlags);
+
16660  // Remember memory type with lowest cost.
+
16661  if(currCost < minCost)
+
16662  {
+
16663  *pMemoryTypeIndex = memTypeIndex;
+
16664  if(currCost == 0)
+
16665  {
+
16666  return VK_SUCCESS;
+
16667  }
+
16668  minCost = currCost;
+
16669  }
+
16670  }
+
16671  }
+
16672  }
+
16673  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
+
16674 }
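
// Illustrative usage (editor's sketch, not part of the header): picking a
// HOST_VISIBLE memory type for a staging buffer. UINT32_MAX accepts every
// memoryTypeBits; assumes an initialized VmaAllocator g_Allocator.
VmaAllocationCreateInfo stagingCreateInfo = {};
stagingCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
uint32_t memTypeIndex = UINT32_MAX;
VkResult typeRes = vmaFindMemoryTypeIndex(g_Allocator, UINT32_MAX, &stagingCreateInfo, &memTypeIndex);
// typeRes == VK_ERROR_FEATURE_NOT_PRESENT when no type satisfies requiredFlags.
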
+
16675 
+
16676 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
+
16677  VmaAllocator allocator,
+
16678  const VkBufferCreateInfo* pBufferCreateInfo,
+
16679  const VmaAllocationCreateInfo* pAllocationCreateInfo,
+
16680  uint32_t* pMemoryTypeIndex)
+
16681 {
+
16682  VMA_ASSERT(allocator != VK_NULL_HANDLE);
+
16683  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
+
16684  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
+
16685  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
+
16686 
+
16687  const VkDevice hDev = allocator->m_hDevice;
+
16688  VkBuffer hBuffer = VK_NULL_HANDLE;
+
16689  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
+
16690  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
+
16691  if(res == VK_SUCCESS)
+
16692  {
+
16693  VkMemoryRequirements memReq = {};
+
16694  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
+
16695  hDev, hBuffer, &memReq);
16696 
-
16697  allocator->GetVulkanFunctions().vkDestroyBuffer(
-
16698  hDev, hBuffer, allocator->GetAllocationCallbacks());
-
16699  }
-
16700  return res;
-
16701 }
+
16697  res = vmaFindMemoryTypeIndex(
+
16698  allocator,
+
16699  memReq.memoryTypeBits,
+
16700  pAllocationCreateInfo,
+
16701  pMemoryTypeIndex);
16702 
-
16703 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
-
16704  VmaAllocator allocator,
-
16705  const VkImageCreateInfo* pImageCreateInfo,
-
16706  const VmaAllocationCreateInfo* pAllocationCreateInfo,
-
16707  uint32_t* pMemoryTypeIndex)
-
16708 {
-
16709  VMA_ASSERT(allocator != VK_NULL_HANDLE);
-
16710  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
-
16711  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
-
16712  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
-
16713 
-
16714  const VkDevice hDev = allocator->m_hDevice;
-
16715  VkImage hImage = VK_NULL_HANDLE;
-
16716  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
-
16717  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
-
16718  if(res == VK_SUCCESS)
-
16719  {
-
16720  VkMemoryRequirements memReq = {};
-
16721  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
-
16722  hDev, hImage, &memReq);
-
16723 
-
16724  res = vmaFindMemoryTypeIndex(
-
16725  allocator,
-
16726  memReq.memoryTypeBits,
-
16727  pAllocationCreateInfo,
-
16728  pMemoryTypeIndex);
+
16703  allocator->GetVulkanFunctions().vkDestroyBuffer(
+
16704  hDev, hBuffer, allocator->GetAllocationCallbacks());
+
16705  }
+
16706  return res;
+
16707 }
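
// Illustrative usage (editor's sketch, not part of the header): querying the
// right memory type for a uniform buffer without creating the final buffer
// yourself; the temporary VkBuffer above is created and destroyed internally.
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
uint32_t uboTypeIndex = UINT32_MAX;
vmaFindMemoryTypeIndexForBufferInfo(g_Allocator, &bufCreateInfo, &allocCreateInfo, &uboTypeIndex);
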
+
16708 
+
16709 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
+
16710  VmaAllocator allocator,
+
16711  const VkImageCreateInfo* pImageCreateInfo,
+
16712  const VmaAllocationCreateInfo* pAllocationCreateInfo,
+
16713  uint32_t* pMemoryTypeIndex)
+
16714 {
+
16715  VMA_ASSERT(allocator != VK_NULL_HANDLE);
+
16716  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
+
16717  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
+
16718  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
+
16719 
+
16720  const VkDevice hDev = allocator->m_hDevice;
+
16721  VkImage hImage = VK_NULL_HANDLE;
+
16722  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
+
16723  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
+
16724  if(res == VK_SUCCESS)
+
16725  {
+
16726  VkMemoryRequirements memReq = {};
+
16727  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
+
16728  hDev, hImage, &memReq);
16729 
-
16730  allocator->GetVulkanFunctions().vkDestroyImage(
-
16731  hDev, hImage, allocator->GetAllocationCallbacks());
-
16732  }
-
16733  return res;
-
16734 }
+
16730  res = vmaFindMemoryTypeIndex(
+
16731  allocator,
+
16732  memReq.memoryTypeBits,
+
16733  pAllocationCreateInfo,
+
16734  pMemoryTypeIndex);
16735 
-
16736 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
-
16737  VmaAllocator allocator,
-
16738  const VmaPoolCreateInfo* pCreateInfo,
-
16739  VmaPool* pPool)
-
16740 {
-
16741  VMA_ASSERT(allocator && pCreateInfo && pPool);
-
16742 
-
16743  VMA_DEBUG_LOG("vmaCreatePool");
-
16744 
-
16745  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
16746 
-
16747  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
+
16736  allocator->GetVulkanFunctions().vkDestroyImage(
+
16737  hDev, hImage, allocator->GetAllocationCallbacks());
+
16738  }
+
16739  return res;
+
16740 }
+
16741 
+
16742 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
+
16743  VmaAllocator allocator,
+
16744  const VmaPoolCreateInfo* pCreateInfo,
+
16745  VmaPool* pPool)
+
16746 {
+
16747  VMA_ASSERT(allocator && pCreateInfo && pPool);
16748 
-
16749 #if VMA_RECORDING_ENABLED
-
16750  if(allocator->GetRecorder() != VMA_NULL)
-
16751  {
-
16752  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
-
16753  }
-
16754 #endif
-
16755 
-
16756  return res;
-
16757 }
-
16758 
-
16759 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
-
16760  VmaAllocator allocator,
-
16761  VmaPool pool)
-
16762 {
-
16763  VMA_ASSERT(allocator);
-
16764 
-
16765  if(pool == VK_NULL_HANDLE)
-
16766  {
-
16767  return;
-
16768  }
-
16769 
-
16770  VMA_DEBUG_LOG("vmaDestroyPool");
-
16771 
-
16772  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
16773 
-
16774 #if VMA_RECORDING_ENABLED
-
16775  if(allocator->GetRecorder() != VMA_NULL)
-
16776  {
-
16777  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
-
16778  }
-
16779 #endif
-
16780 
-
16781  allocator->DestroyPool(pool);
-
16782 }
-
16783 
-
16784 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
-
16785  VmaAllocator allocator,
-
16786  VmaPool pool,
-
16787  VmaPoolStats* pPoolStats)
-
16788 {
-
16789  VMA_ASSERT(allocator && pool && pPoolStats);
-
16790 
-
16791  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
16792 
-
16793  allocator->GetPoolStats(pool, pPoolStats);
-
16794 }
-
16795 
-
16796 VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
-
16797  VmaAllocator allocator,
-
16798  VmaPool pool,
-
16799  size_t* pLostAllocationCount)
-
16800 {
-
16801  VMA_ASSERT(allocator && pool);
-
16802 
-
16803  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
16804 
-
16805 #if VMA_RECORDING_ENABLED
-
16806  if(allocator->GetRecorder() != VMA_NULL)
-
16807  {
-
16808  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
-
16809  }
-
16810 #endif
-
16811 
-
16812  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
-
16813 }
-
16814 
-
16815 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
-
16816 {
-
16817  VMA_ASSERT(allocator && pool);
-
16818 
-
16819  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
16749  VMA_DEBUG_LOG("vmaCreatePool");
+
16750 
+
16751  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
16752 
+
16753  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
+
16754 
+
16755 #if VMA_RECORDING_ENABLED
+
16756  if(allocator->GetRecorder() != VMA_NULL)
+
16757  {
+
16758  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
+
16759  }
+
16760 #endif
+
16761 
+
16762  return res;
+
16763 }
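
// Illustrative usage (editor's sketch, not part of the header): creating and
// destroying a custom pool. Assumes g_Allocator and a memTypeIndex found with
// one of the vmaFindMemoryTypeIndex* functions above.
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.blockSize = 16ull * 1024 * 1024; // 16 MiB per VkDeviceMemory block
poolCreateInfo.minBlockCount = 1;
VmaPool pool = VK_NULL_HANDLE;
if(vmaCreatePool(g_Allocator, &poolCreateInfo, &pool) == VK_SUCCESS)
{
    // Allocations opt in via VmaAllocationCreateInfo::pool = pool;
    vmaDestroyPool(g_Allocator, pool);
}
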
+
16764 
+
16765 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
+
16766  VmaAllocator allocator,
+
16767  VmaPool pool)
+
16768 {
+
16769  VMA_ASSERT(allocator);
+
16770 
+
16771  if(pool == VK_NULL_HANDLE)
+
16772  {
+
16773  return;
+
16774  }
+
16775 
+
16776  VMA_DEBUG_LOG("vmaDestroyPool");
+
16777 
+
16778  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
16779 
+
16780 #if VMA_RECORDING_ENABLED
+
16781  if(allocator->GetRecorder() != VMA_NULL)
+
16782  {
+
16783  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
+
16784  }
+
16785 #endif
+
16786 
+
16787  allocator->DestroyPool(pool);
+
16788 }
+
16789 
+
16790 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
+
16791  VmaAllocator allocator,
+
16792  VmaPool pool,
+
16793  VmaPoolStats* pPoolStats)
+
16794 {
+
16795  VMA_ASSERT(allocator && pool && pPoolStats);
+
16796 
+
16797  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
16798 
+
16799  allocator->GetPoolStats(pool, pPoolStats);
+
16800 }
+
16801 
+
16802 VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
+
16803  VmaAllocator allocator,
+
16804  VmaPool pool,
+
16805  size_t* pLostAllocationCount)
+
16806 {
+
16807  VMA_ASSERT(allocator && pool);
+
16808 
+
16809  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
16810 
+
16811 #if VMA_RECORDING_ENABLED
+
16812  if(allocator->GetRecorder() != VMA_NULL)
+
16813  {
+
16814  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
+
16815  }
+
16816 #endif
+
16817 
+
16818  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
+
16819 }
16820 
-
16821  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
-
16822 
-
16823  return allocator->CheckPoolCorruption(pool);
-
16824 }
-
16825 
-
16826 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
-
16827  VmaAllocator allocator,
-
16828  VmaPool pool,
-
16829  const char** ppName)
-
16830 {
-
16831  VMA_ASSERT(allocator && pool);
-
16832 
-
16833  VMA_DEBUG_LOG("vmaGetPoolName");
-
16834 
-
16835  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
16836 
-
16837  *ppName = pool->GetName();
-
16838 }
-
16839 
-
16840 VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
-
16841  VmaAllocator allocator,
-
16842  VmaPool pool,
-
16843  const char* pName)
-
16844 {
-
16845  VMA_ASSERT(allocator && pool);
-
16846 
-
16847  VMA_DEBUG_LOG("vmaSetPoolName");
-
16848 
-
16849  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
16850 
-
16851  pool->SetName(pName);
+
16821 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
+
16822 {
+
16823  VMA_ASSERT(allocator && pool);
+
16824 
+
16825  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
16826 
+
16827  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
+
16828 
+
16829  return allocator->CheckPoolCorruption(pool);
+
16830 }
+
16831 
+
16832 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
+
16833  VmaAllocator allocator,
+
16834  VmaPool pool,
+
16835  const char** ppName)
+
16836 {
+
16837  VMA_ASSERT(allocator && pool);
+
16838 
+
16839  VMA_DEBUG_LOG("vmaGetPoolName");
+
16840 
+
16841  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
16842 
+
16843  *ppName = pool->GetName();
+
16844 }
+
16845 
+
16846 VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
+
16847  VmaAllocator allocator,
+
16848  VmaPool pool,
+
16849  const char* pName)
+
16850 {
+
16851  VMA_ASSERT(allocator && pool);
16852 
-
16853 #if VMA_RECORDING_ENABLED
-
16854  if(allocator->GetRecorder() != VMA_NULL)
-
16855  {
-
16856  allocator->GetRecorder()->RecordSetPoolName(allocator->GetCurrentFrameIndex(), pool, pName);
-
16857  }
-
16858 #endif
-
16859 }
-
16860 
-
16861 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
-
16862  VmaAllocator allocator,
-
16863  const VkMemoryRequirements* pVkMemoryRequirements,
-
16864  const VmaAllocationCreateInfo* pCreateInfo,
-
16865  VmaAllocation* pAllocation,
-
16866  VmaAllocationInfo* pAllocationInfo)
-
16867 {
-
16868  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
-
16869 
-
16870  VMA_DEBUG_LOG("vmaAllocateMemory");
-
16871 
-
16872  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
16873 
-
16874  VkResult result = allocator->AllocateMemory(
-
16875  *pVkMemoryRequirements,
-
16876  false, // requiresDedicatedAllocation
-
16877  false, // prefersDedicatedAllocation
-
16878  VK_NULL_HANDLE, // dedicatedBuffer
-
16879  VK_NULL_HANDLE, // dedicatedImage
-
16880  *pCreateInfo,
-
16881  VMA_SUBALLOCATION_TYPE_UNKNOWN,
-
16882  1, // allocationCount
-
16883  pAllocation);
-
16884 
-
16885 #if VMA_RECORDING_ENABLED
-
16886  if(allocator->GetRecorder() != VMA_NULL)
-
16887  {
-
16888  allocator->GetRecorder()->RecordAllocateMemory(
-
16889  allocator->GetCurrentFrameIndex(),
-
16890  *pVkMemoryRequirements,
-
16891  *pCreateInfo,
-
16892  *pAllocation);
-
16893  }
-
16894 #endif
-
16895 
-
16896  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
-
16897  {
-
16898  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
+
16853  VMA_DEBUG_LOG("vmaSetPoolName");
+
16854 
+
16855  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
16856 
+
16857  pool->SetName(pName);
+
16858 
+
16859 #if VMA_RECORDING_ENABLED
+
16860  if(allocator->GetRecorder() != VMA_NULL)
+
16861  {
+
16862  allocator->GetRecorder()->RecordSetPoolName(allocator->GetCurrentFrameIndex(), pool, pName);
+
16863  }
+
16864 #endif
+
16865 }
+
16866 
+
16867 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
+
16868  VmaAllocator allocator,
+
16869  const VkMemoryRequirements* pVkMemoryRequirements,
+
16870  const VmaAllocationCreateInfo* pCreateInfo,
+
16871  VmaAllocation* pAllocation,
+
16872  VmaAllocationInfo* pAllocationInfo)
+
16873 {
+
16874  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
+
16875 
+
16876  VMA_DEBUG_LOG("vmaAllocateMemory");
+
16877 
+
16878  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
16879 
+
16880  VkResult result = allocator->AllocateMemory(
+
16881  *pVkMemoryRequirements,
+
16882  false, // requiresDedicatedAllocation
+
16883  false, // prefersDedicatedAllocation
+
16884  VK_NULL_HANDLE, // dedicatedBuffer
+
16885  VK_NULL_HANDLE, // dedicatedImage
+
16886  *pCreateInfo,
+
16887  VMA_SUBALLOCATION_TYPE_UNKNOWN,
+
16888  1, // allocationCount
+
16889  pAllocation);
+
16890 
+
16891 #if VMA_RECORDING_ENABLED
+
16892  if(allocator->GetRecorder() != VMA_NULL)
+
16893  {
+
16894  allocator->GetRecorder()->RecordAllocateMemory(
+
16895  allocator->GetCurrentFrameIndex(),
+
16896  *pVkMemoryRequirements,
+
16897  *pCreateInfo,
+
16898  *pAllocation);
16899  }
-
16900 
-
16901  return result;
-
16902 }
-
16903 
-
16904 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
-
16905  VmaAllocator allocator,
-
16906  const VkMemoryRequirements* pVkMemoryRequirements,
-
16907  const VmaAllocationCreateInfo* pCreateInfo,
-
16908  size_t allocationCount,
-
16909  VmaAllocation* pAllocations,
-
16910  VmaAllocationInfo* pAllocationInfo)
-
16911 {
-
16912  if(allocationCount == 0)
-
16913  {
-
16914  return VK_SUCCESS;
-
16915  }
-
16916 
-
16917  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
-
16918 
-
16919  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
-
16920 
-
16921  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
16900 #endif
+
16901 
+
16902  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
+
16903  {
+
16904  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
+
16905  }
+
16906 
+
16907  return result;
+
16908 }
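
// Illustrative usage (editor's sketch, not part of the header): allocating raw
// device memory from explicit requirements. Assumes g_Allocator and a
// VkMemoryRequirements memReq obtained by the caller.
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
VmaAllocation allocation = VK_NULL_HANDLE;
VmaAllocationInfo allocInfo = {};
if(vmaAllocateMemory(g_Allocator, &memReq, &allocCreateInfo, &allocation, &allocInfo) == VK_SUCCESS)
{
    // allocInfo.deviceMemory and allocInfo.offset identify the returned range.
    vmaFreeMemory(g_Allocator, allocation);
}
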
+
16909 
+
16910 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
+
16911  VmaAllocator allocator,
+
16912  const VkMemoryRequirements* pVkMemoryRequirements,
+
16913  const VmaAllocationCreateInfo* pCreateInfo,
+
16914  size_t allocationCount,
+
16915  VmaAllocation* pAllocations,
+
16916  VmaAllocationInfo* pAllocationInfo)
+
16917 {
+
16918  if(allocationCount == 0)
+
16919  {
+
16920  return VK_SUCCESS;
+
16921  }
16922 
-
16923  VkResult result = allocator->AllocateMemory(
-
16924  *pVkMemoryRequirements,
-
16925  false, // requiresDedicatedAllocation
-
16926  false, // prefersDedicatedAllocation
-
16927  VK_NULL_HANDLE, // dedicatedBuffer
-
16928  VK_NULL_HANDLE, // dedicatedImage
-
16929  *pCreateInfo,
-
16930  VMA_SUBALLOCATION_TYPE_UNKNOWN,
-
16931  allocationCount,
-
16932  pAllocations);
-
16933 
-
16934 #if VMA_RECORDING_ENABLED
-
16935  if(allocator->GetRecorder() != VMA_NULL)
-
16936  {
-
16937  allocator->GetRecorder()->RecordAllocateMemoryPages(
-
16938  allocator->GetCurrentFrameIndex(),
-
16939  *pVkMemoryRequirements,
-
16940  *pCreateInfo,
-
16941  (uint64_t)allocationCount,
-
16942  pAllocations);
-
16943  }
-
16944 #endif
-
16945 
-
16946  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
-
16947  {
-
16948  for(size_t i = 0; i < allocationCount; ++i)
-
16949  {
-
16950  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
-
16951  }
-
16952  }
-
16953 
-
16954  return result;
-
16955 }
-
16956 
-
16957 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
-
16958  VmaAllocator allocator,
-
16959  VkBuffer buffer,
-
16960  const VmaAllocationCreateInfo* pCreateInfo,
-
16961  VmaAllocation* pAllocation,
-
16962  VmaAllocationInfo* pAllocationInfo)
-
16963 {
-
16964  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
-
16965 
-
16966  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
-
16967 
-
16968  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
16969 
-
16970  VkMemoryRequirements vkMemReq = {};
-
16971  bool requiresDedicatedAllocation = false;
-
16972  bool prefersDedicatedAllocation = false;
-
16973  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
-
16974  requiresDedicatedAllocation,
-
16975  prefersDedicatedAllocation);
-
16976 
-
16977  VkResult result = allocator->AllocateMemory(
-
16978  vkMemReq,
-
16979  requiresDedicatedAllocation,
-
16980  prefersDedicatedAllocation,
-
16981  buffer, // dedicatedBuffer
-
16982  VK_NULL_HANDLE, // dedicatedImage
-
16983  *pCreateInfo,
-
16984  VMA_SUBALLOCATION_TYPE_BUFFER,
-
16985  1, // allocationCount
-
16986  pAllocation);
-
16987 
-
16988 #if VMA_RECORDING_ENABLED
-
16989  if(allocator->GetRecorder() != VMA_NULL)
-
16990  {
-
16991  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
-
16992  allocator->GetCurrentFrameIndex(),
-
16993  vkMemReq,
-
16994  requiresDedicatedAllocation,
-
16995  prefersDedicatedAllocation,
-
16996  *pCreateInfo,
-
16997  *pAllocation);
-
16998  }
-
16999 #endif
-
17000 
-
17001  if(pAllocationInfo && result == VK_SUCCESS)
-
17002  {
-
17003  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
+
16923  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
+
16924 
+
16925  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
+
16926 
+
16927  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
16928 
+
16929  VkResult result = allocator->AllocateMemory(
+
16930  *pVkMemoryRequirements,
+
16931  false, // requiresDedicatedAllocation
+
16932  false, // prefersDedicatedAllocation
+
16933  VK_NULL_HANDLE, // dedicatedBuffer
+
16934  VK_NULL_HANDLE, // dedicatedImage
+
16935  *pCreateInfo,
+
16936  VMA_SUBALLOCATION_TYPE_UNKNOWN,
+
16937  allocationCount,
+
16938  pAllocations);
+
16939 
+
16940 #if VMA_RECORDING_ENABLED
+
16941  if(allocator->GetRecorder() != VMA_NULL)
+
16942  {
+
16943  allocator->GetRecorder()->RecordAllocateMemoryPages(
+
16944  allocator->GetCurrentFrameIndex(),
+
16945  *pVkMemoryRequirements,
+
16946  *pCreateInfo,
+
16947  (uint64_t)allocationCount,
+
16948  pAllocations);
+
16949  }
+
16950 #endif
+
16951 
+
16952  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
+
16953  {
+
16954  for(size_t i = 0; i < allocationCount; ++i)
+
16955  {
+
16956  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
+
16957  }
+
16958  }
+
16959 
+
16960  return result;
+
16961 }
+
16962 
+
16963 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
+
16964  VmaAllocator allocator,
+
16965  VkBuffer buffer,
+
16966  const VmaAllocationCreateInfo* pCreateInfo,
+
16967  VmaAllocation* pAllocation,
+
16968  VmaAllocationInfo* pAllocationInfo)
+
16969 {
+
16970  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
+
16971 
+
16972  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
+
16973 
+
16974  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
16975 
+
16976  VkMemoryRequirements vkMemReq = {};
+
16977  bool requiresDedicatedAllocation = false;
+
16978  bool prefersDedicatedAllocation = false;
+
16979  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
+
16980  requiresDedicatedAllocation,
+
16981  prefersDedicatedAllocation);
+
16982 
+
16983  VkResult result = allocator->AllocateMemory(
+
16984  vkMemReq,
+
16985  requiresDedicatedAllocation,
+
16986  prefersDedicatedAllocation,
+
16987  buffer, // dedicatedBuffer
+
16988  VK_NULL_HANDLE, // dedicatedImage
+
16989  *pCreateInfo,
+
16990  VMA_SUBALLOCATION_TYPE_BUFFER,
+
16991  1, // allocationCount
+
16992  pAllocation);
+
16993 
+
16994 #if VMA_RECORDING_ENABLED
+
16995  if(allocator->GetRecorder() != VMA_NULL)
+
16996  {
+
16997  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
+
16998  allocator->GetCurrentFrameIndex(),
+
16999  vkMemReq,
+
17000  requiresDedicatedAllocation,
+
17001  prefersDedicatedAllocation,
+
17002  *pCreateInfo,
+
17003  *pAllocation);
17004  }
-
17005 
-
17006  return result;
-
17007 }
-
17008 
-
17009 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
-
17010  VmaAllocator allocator,
-
17011  VkImage image,
-
17012  const VmaAllocationCreateInfo* pCreateInfo,
-
17013  VmaAllocation* pAllocation,
-
17014  VmaAllocationInfo* pAllocationInfo)
-
17015 {
-
17016  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
-
17017 
-
17018  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
-
17019 
-
17020  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
17021 
-
17022  VkMemoryRequirements vkMemReq = {};
-
17023  bool requiresDedicatedAllocation = false;
-
17024  bool prefersDedicatedAllocation = false;
-
17025  allocator->GetImageMemoryRequirements(image, vkMemReq,
-
17026  requiresDedicatedAllocation, prefersDedicatedAllocation);
+
17005 #endif
+
17006 
+
17007  if(pAllocationInfo && result == VK_SUCCESS)
+
17008  {
+
17009  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
+
17010  }
+
17011 
+
17012  return result;
+
17013 }
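
// Illustrative usage (editor's sketch, not part of the header): allocating
// memory suited to a caller-created VkBuffer, then binding it explicitly with
// vmaBindBufferMemory(). Assumes g_Allocator and a valid VkBuffer buffer.
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
VmaAllocation allocation = VK_NULL_HANDLE;
if(vmaAllocateMemoryForBuffer(g_Allocator, buffer, &allocCreateInfo, &allocation, VMA_NULL) == VK_SUCCESS)
{
    vmaBindBufferMemory(g_Allocator, allocation, buffer);
}
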
+
17014 
+
17015 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
+
17016  VmaAllocator allocator,
+
17017  VkImage image,
+
17018  const VmaAllocationCreateInfo* pCreateInfo,
+
17019  VmaAllocation* pAllocation,
+
17020  VmaAllocationInfo* pAllocationInfo)
+
17021 {
+
17022  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
+
17023 
+
17024  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
+
17025 
+
17026  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17027 
-
17028  VkResult result = allocator->AllocateMemory(
-
17029  vkMemReq,
-
17030  requiresDedicatedAllocation,
-
17031  prefersDedicatedAllocation,
-
17032  VK_NULL_HANDLE, // dedicatedBuffer
-
17033  image, // dedicatedImage
-
17034  *pCreateInfo,
-
17035  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
-
17036  1, // allocationCount
-
17037  pAllocation);
-
17038 
-
17039 #if VMA_RECORDING_ENABLED
-
17040  if(allocator->GetRecorder() != VMA_NULL)
-
17041  {
-
17042  allocator->GetRecorder()->RecordAllocateMemoryForImage(
-
17043  allocator->GetCurrentFrameIndex(),
-
17044  vkMemReq,
-
17045  requiresDedicatedAllocation,
-
17046  prefersDedicatedAllocation,
-
17047  *pCreateInfo,
-
17048  *pAllocation);
-
17049  }
-
17050 #endif
-
17051 
-
17052  if(pAllocationInfo && result == VK_SUCCESS)
-
17053  {
-
17054  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
+
17028  VkMemoryRequirements vkMemReq = {};
+
17029  bool requiresDedicatedAllocation = false;
+
17030  bool prefersDedicatedAllocation = false;
+
17031  allocator->GetImageMemoryRequirements(image, vkMemReq,
+
17032  requiresDedicatedAllocation, prefersDedicatedAllocation);
+
17033 
+
17034  VkResult result = allocator->AllocateMemory(
+
17035  vkMemReq,
+
17036  requiresDedicatedAllocation,
+
17037  prefersDedicatedAllocation,
+
17038  VK_NULL_HANDLE, // dedicatedBuffer
+
17039  image, // dedicatedImage
+
17040  *pCreateInfo,
+
17041  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
+
17042  1, // allocationCount
+
17043  pAllocation);
+
17044 
+
17045 #if VMA_RECORDING_ENABLED
+
17046  if(allocator->GetRecorder() != VMA_NULL)
+
17047  {
+
17048  allocator->GetRecorder()->RecordAllocateMemoryForImage(
+
17049  allocator->GetCurrentFrameIndex(),
+
17050  vkMemReq,
+
17051  requiresDedicatedAllocation,
+
17052  prefersDedicatedAllocation,
+
17053  *pCreateInfo,
+
17054  *pAllocation);
17055  }
-
17056 
-
17057  return result;
-
17058 }
-
17059 
-
17060 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
-
17061  VmaAllocator allocator,
-
17062  VmaAllocation allocation)
-
17063 {
-
17064  VMA_ASSERT(allocator);
-
17065 
-
17066  if(allocation == VK_NULL_HANDLE)
-
17067  {
-
17068  return;
-
17069  }
-
17070 
-
17071  VMA_DEBUG_LOG("vmaFreeMemory");
-
17072 
-
17073  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
17074 
-
17075 #if VMA_RECORDING_ENABLED
-
17076  if(allocator->GetRecorder() != VMA_NULL)
-
17077  {
-
17078  allocator->GetRecorder()->RecordFreeMemory(
-
17079  allocator->GetCurrentFrameIndex(),
-
17080  allocation);
-
17081  }
-
17082 #endif
-
17083 
-
17084  allocator->FreeMemory(
-
17085  1, // allocationCount
-
17086  &allocation);
-
17087 }
-
17088 
-
17089 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
-
17090  VmaAllocator allocator,
-
17091  size_t allocationCount,
-
17092  VmaAllocation* pAllocations)
-
17093 {
-
17094  if(allocationCount == 0)
-
17095  {
-
17096  return;
-
17097  }
-
17098 
-
17099  VMA_ASSERT(allocator);
-
17100 
-
17101  VMA_DEBUG_LOG("vmaFreeMemoryPages");
-
17102 
-
17103  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
17056 #endif
+
17057 
+
17058  if(pAllocationInfo && result == VK_SUCCESS)
+
17059  {
+
17060  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
+
17061  }
+
17062 
+
17063  return result;
+
17064 }
+
17065 
+
17066 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
+
17067  VmaAllocator allocator,
+
17068  VmaAllocation allocation)
+
17069 {
+
17070  VMA_ASSERT(allocator);
+
17071 
+
17072  if(allocation == VK_NULL_HANDLE)
+
17073  {
+
17074  return;
+
17075  }
+
17076 
+
17077  VMA_DEBUG_LOG("vmaFreeMemory");
+
17078 
+
17079  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
17080 
+
17081 #if VMA_RECORDING_ENABLED
+
17082  if(allocator->GetRecorder() != VMA_NULL)
+
17083  {
+
17084  allocator->GetRecorder()->RecordFreeMemory(
+
17085  allocator->GetCurrentFrameIndex(),
+
17086  allocation);
+
17087  }
+
17088 #endif
+
17089 
+
17090  allocator->FreeMemory(
+
17091  1, // allocationCount
+
17092  &allocation);
+
17093 }
+
17094 
+
17095 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
+
17096  VmaAllocator allocator,
+
17097  size_t allocationCount,
+
17098  VmaAllocation* pAllocations)
+
17099 {
+
17100  if(allocationCount == 0)
+
17101  {
+
17102  return;
+
17103  }
17104 
-
17105 #if VMA_RECORDING_ENABLED
-
17106  if(allocator->GetRecorder() != VMA_NULL)
-
17107  {
-
17108  allocator->GetRecorder()->RecordFreeMemoryPages(
-
17109  allocator->GetCurrentFrameIndex(),
-
17110  (uint64_t)allocationCount,
-
17111  pAllocations);
-
17112  }
-
17113 #endif
-
17114 
-
17115  allocator->FreeMemory(allocationCount, pAllocations);
-
17116 }
-
17117 
-
17118 VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation(
-
17119  VmaAllocator allocator,
-
17120  VmaAllocation allocation,
-
17121  VkDeviceSize newSize)
-
17122 {
-
17123  VMA_ASSERT(allocator && allocation);
-
17124 
-
17125  VMA_DEBUG_LOG("vmaResizeAllocation");
-
17126 
-
17127  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
17128 
-
17129  return allocator->ResizeAllocation(allocation, newSize);
-
17130 }
-
17131 
-
17132 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
-
17133  VmaAllocator allocator,
-
17134  VmaAllocation allocation,
-
17135  VmaAllocationInfo* pAllocationInfo)
-
17136 {
-
17137  VMA_ASSERT(allocator && allocation && pAllocationInfo);
-
17138 
-
17139  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
17140 
-
17141 #if VMA_RECORDING_ENABLED
-
17142  if(allocator->GetRecorder() != VMA_NULL)
-
17143  {
-
17144  allocator->GetRecorder()->RecordGetAllocationInfo(
-
17145  allocator->GetCurrentFrameIndex(),
-
17146  allocation);
-
17147  }
-
17148 #endif
-
17149 
-
17150  allocator->GetAllocationInfo(allocation, pAllocationInfo);
-
17151 }
-
17152 
-
17153 VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
-
17154  VmaAllocator allocator,
-
17155  VmaAllocation allocation)
-
17156 {
-
17157  VMA_ASSERT(allocator && allocation);
+
17105  VMA_ASSERT(allocator);
+
17106 
+
17107  VMA_DEBUG_LOG("vmaFreeMemoryPages");
+
17108 
+
17109  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
17110 
+
17111 #if VMA_RECORDING_ENABLED
+
17112  if(allocator->GetRecorder() != VMA_NULL)
+
17113  {
+
17114  allocator->GetRecorder()->RecordFreeMemoryPages(
+
17115  allocator->GetCurrentFrameIndex(),
+
17116  (uint64_t)allocationCount,
+
17117  pAllocations);
+
17118  }
+
17119 #endif
+
17120 
+
17121  allocator->FreeMemory(allocationCount, pAllocations);
+
17122 }
+
17123 
+
17124 VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation(
+
17125  VmaAllocator allocator,
+
17126  VmaAllocation allocation,
+
17127  VkDeviceSize newSize)
+
17128 {
+
17129  VMA_ASSERT(allocator && allocation);
+
17130 
+
17131  VMA_DEBUG_LOG("vmaResizeAllocation");
+
17132 
+
17133  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
17134 
+
17135  return allocator->ResizeAllocation(allocation, newSize);
+
17136 }
+
17137 
+
17138 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
+
17139  VmaAllocator allocator,
+
17140  VmaAllocation allocation,
+
17141  VmaAllocationInfo* pAllocationInfo)
+
17142 {
+
17143  VMA_ASSERT(allocator && allocation && pAllocationInfo);
+
17144 
+
17145  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
17146 
+
17147 #if VMA_RECORDING_ENABLED
+
17148  if(allocator->GetRecorder() != VMA_NULL)
+
17149  {
+
17150  allocator->GetRecorder()->RecordGetAllocationInfo(
+
17151  allocator->GetCurrentFrameIndex(),
+
17152  allocation);
+
17153  }
+
17154 #endif
+
17155 
+
17156  allocator->GetAllocationInfo(allocation, pAllocationInfo);
+
17157 }
17158 
-
17159  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
17160 
-
17161 #if VMA_RECORDING_ENABLED
-
17162  if(allocator->GetRecorder() != VMA_NULL)
-
17163  {
-
17164  allocator->GetRecorder()->RecordTouchAllocation(
-
17165  allocator->GetCurrentFrameIndex(),
-
17166  allocation);
-
17167  }
-
17168 #endif
-
17169 
-
17170  return allocator->TouchAllocation(allocation);
-
17171 }
-
17172 
-
17173 VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
-
17174  VmaAllocator allocator,
-
17175  VmaAllocation allocation,
-
17176  void* pUserData)
-
17177 {
-
17178  VMA_ASSERT(allocator && allocation);
-
17179 
-
17180  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
17181 
-
17182  allocation->SetUserData(allocator, pUserData);
-
17183 
-
17184 #if VMA_RECORDING_ENABLED
-
17185  if(allocator->GetRecorder() != VMA_NULL)
-
17186  {
-
17187  allocator->GetRecorder()->RecordSetAllocationUserData(
-
17188  allocator->GetCurrentFrameIndex(),
-
17189  allocation,
-
17190  pUserData);
-
17191  }
-
17192 #endif
-
17193 }
-
17194 
-
17195 VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
-
17196  VmaAllocator allocator,
-
17197  VmaAllocation* pAllocation)
-
17198 {
-
17199  VMA_ASSERT(allocator && pAllocation);
+
17159 VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
+
17160  VmaAllocator allocator,
+
17161  VmaAllocation allocation)
+
17162 {
+
17163  VMA_ASSERT(allocator && allocation);
+
17164 
+
17165  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
17166 
+
17167 #if VMA_RECORDING_ENABLED
+
17168  if(allocator->GetRecorder() != VMA_NULL)
+
17169  {
+
17170  allocator->GetRecorder()->RecordTouchAllocation(
+
17171  allocator->GetCurrentFrameIndex(),
+
17172  allocation);
+
17173  }
+
17174 #endif
+
17175 
+
17176  return allocator->TouchAllocation(allocation);
+
17177 }
+
17178 
+
17179 VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
+
17180  VmaAllocator allocator,
+
17181  VmaAllocation allocation,
+
17182  void* pUserData)
+
17183 {
+
17184  VMA_ASSERT(allocator && allocation);
+
17185 
+
17186  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
17187 
+
17188  allocation->SetUserData(allocator, pUserData);
+
17189 
+
17190 #if VMA_RECORDING_ENABLED
+
17191  if(allocator->GetRecorder() != VMA_NULL)
+
17192  {
+
17193  allocator->GetRecorder()->RecordSetAllocationUserData(
+
17194  allocator->GetCurrentFrameIndex(),
+
17195  allocation,
+
17196  pUserData);
+
17197  }
+
17198 #endif
+
17199 }
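
// Illustrative usage (editor's sketch, not part of the header): tagging an
// allocation with caller-owned context that can be read back later. Assumes
// g_Allocator, a VmaAllocation allocation, and a caller struct MyTexture* tex.
vmaSetAllocationUserData(g_Allocator, allocation, tex);
VmaAllocationInfo info = {};
vmaGetAllocationInfo(g_Allocator, allocation, &info);
// info.pUserData now equals tex.
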
17200 
-
17201  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
-
17202 
-
17203  allocator->CreateLostAllocation(pAllocation);
-
17204 
-
17205 #if VMA_RECORDING_ENABLED
-
17206  if(allocator->GetRecorder() != VMA_NULL)
-
17207  {
-
17208  allocator->GetRecorder()->RecordCreateLostAllocation(
-
17209  allocator->GetCurrentFrameIndex(),
-
17210  *pAllocation);
-
17211  }
-
17212 #endif
-
17213 }
-
17214 
-
17215 VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
-
17216  VmaAllocator allocator,
-
17217  VmaAllocation allocation,
-
17218  void** ppData)
-
17219 {
-
17220  VMA_ASSERT(allocator && allocation && ppData);
-
17221 
-
17222  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
17223 
-
17224  VkResult res = allocator->Map(allocation, ppData);
-
17225 
-
17226 #if VMA_RECORDING_ENABLED
-
17227  if(allocator->GetRecorder() != VMA_NULL)
-
17228  {
-
17229  allocator->GetRecorder()->RecordMapMemory(
-
17230  allocator->GetCurrentFrameIndex(),
-
17231  allocation);
-
17232  }
-
17233 #endif
-
17234 
-
17235  return res;
-
17236 }
-
17237 
-
17238 VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
-
17239  VmaAllocator allocator,
-
17240  VmaAllocation allocation)
-
17241 {
-
17242  VMA_ASSERT(allocator && allocation);
+
17201 VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
+
17202  VmaAllocator allocator,
+
17203  VmaAllocation* pAllocation)
+
17204 {
+
17205  VMA_ASSERT(allocator && pAllocation);
+
17206 
+
17207  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
+
17208 
+
17209  allocator->CreateLostAllocation(pAllocation);
+
17210 
+
17211 #if VMA_RECORDING_ENABLED
+
17212  if(allocator->GetRecorder() != VMA_NULL)
+
17213  {
+
17214  allocator->GetRecorder()->RecordCreateLostAllocation(
+
17215  allocator->GetCurrentFrameIndex(),
+
17216  *pAllocation);
+
17217  }
+
17218 #endif
+
17219 }
+
17220 
+
17221 VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
+
17222  VmaAllocator allocator,
+
17223  VmaAllocation allocation,
+
17224  void** ppData)
+
17225 {
+
17226  VMA_ASSERT(allocator && allocation && ppData);
+
17227 
+
17228  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
17229 
+
17230  VkResult res = allocator->Map(allocation, ppData);
+
17231 
+
17232 #if VMA_RECORDING_ENABLED
+
17233  if(allocator->GetRecorder() != VMA_NULL)
+
17234  {
+
17235  allocator->GetRecorder()->RecordMapMemory(
+
17236  allocator->GetCurrentFrameIndex(),
+
17237  allocation);
+
17238  }
+
17239 #endif
+
17240 
+
17241  return res;
+
17242 }
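
// Illustrative usage (editor's sketch, not part of the header): writing data
// through a temporary mapping; mapping is reference-counted per allocation.
// Assumes g_Allocator, a mappable VmaAllocation allocation, and srcData/srcSize.
void* mapped = VMA_NULL;
if(vmaMapMemory(g_Allocator, allocation, &mapped) == VK_SUCCESS)
{
    memcpy(mapped, srcData, srcSize);
    vmaUnmapMemory(g_Allocator, allocation); // declared just below
    // Non-HOST_COHERENT memory additionally needs vmaFlushAllocation().
}
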
17243 
-
17244  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
17245 
-
17246 #if VMA_RECORDING_ENABLED
-
17247  if(allocator->GetRecorder() != VMA_NULL)
-
17248  {
-
17249  allocator->GetRecorder()->RecordUnmapMemory(
-
17250  allocator->GetCurrentFrameIndex(),
-
17251  allocation);
-
17252  }
-
17253 #endif
-
17254 
-
17255  allocator->Unmap(allocation);
-
17256 }
-
17257 
-
17258 VMA_CALL_PRE void VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
-
17259 {
-
17260  VMA_ASSERT(allocator && allocation);
-
17261 
-
17262  VMA_DEBUG_LOG("vmaFlushAllocation");
+
17244 VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
+
17245  VmaAllocator allocator,
+
17246  VmaAllocation allocation)
+
17247 {
+
17248  VMA_ASSERT(allocator && allocation);
+
17249 
+
17250  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
17251 
+
17252 #if VMA_RECORDING_ENABLED
+
17253  if(allocator->GetRecorder() != VMA_NULL)
+
17254  {
+
17255  allocator->GetRecorder()->RecordUnmapMemory(
+
17256  allocator->GetCurrentFrameIndex(),
+
17257  allocation);
+
17258  }
+
17259 #endif
+
17260 
+
17261  allocator->Unmap(allocation);
+
17262 }
17263 
-
17264  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
17265 
-
17266  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
+
17264 VMA_CALL_PRE void VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
+
17265 {
+
17266  VMA_ASSERT(allocator && allocation);
17267 
-
17268 #if VMA_RECORDING_ENABLED
-
17269  if(allocator->GetRecorder() != VMA_NULL)
-
17270  {
-
17271  allocator->GetRecorder()->RecordFlushAllocation(
-
17272  allocator->GetCurrentFrameIndex(),
-
17273  allocation, offset, size);
-
17274  }
-
17275 #endif
-
17276 }
-
17277 
-
17278 VMA_CALL_PRE void VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
-
17279 {
-
17280  VMA_ASSERT(allocator && allocation);
-
17281 
-
17282  VMA_DEBUG_LOG("vmaInvalidateAllocation");
+
17268  VMA_DEBUG_LOG("vmaFlushAllocation");
+
17269 
+
17270  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
17271 
+
17272  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
+
17273 
+
17274 #if VMA_RECORDING_ENABLED
+
17275  if(allocator->GetRecorder() != VMA_NULL)
+
17276  {
+
17277  allocator->GetRecorder()->RecordFlushAllocation(
+
17278  allocator->GetCurrentFrameIndex(),
+
17279  allocation, offset, size);
+
17280  }
+
17281 #endif
+
17282 }
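
// Illustrative usage (editor's sketch, not part of the header): flushing a
// written range of a non-HOST_COHERENT allocation so the device observes it;
// offset 0 with VK_WHOLE_SIZE covers the entire allocation.
vmaFlushAllocation(g_Allocator, allocation, 0, VK_WHOLE_SIZE);
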
17283 
-
17284  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
17285 
-
17286  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
+
17284 VMA_CALL_PRE void VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
+
17285 {
+
17286  VMA_ASSERT(allocator && allocation);
17287 
-
17288 #if VMA_RECORDING_ENABLED
-
17289  if(allocator->GetRecorder() != VMA_NULL)
-
17290  {
-
17291  allocator->GetRecorder()->RecordInvalidateAllocation(
-
17292  allocator->GetCurrentFrameIndex(),
-
17293  allocation, offset, size);
-
17294  }
-
17295 #endif
-
17296 }
-
17297 
-
17298 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
-
17299 {
-
17300  VMA_ASSERT(allocator);
-
17301 
-
17302  VMA_DEBUG_LOG("vmaCheckCorruption");
+
17288  VMA_DEBUG_LOG("vmaInvalidateAllocation");
+
17289 
+
17290  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
17291 
+
17292  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
+
17293 
+
17294 #if VMA_RECORDING_ENABLED
+
17295  if(allocator->GetRecorder() != VMA_NULL)
+
17296  {
+
17297  allocator->GetRecorder()->RecordInvalidateAllocation(
+
17298  allocator->GetCurrentFrameIndex(),
+
17299  allocation, offset, size);
+
17300  }
+
17301 #endif
+
17302 }
17303 
-
17304  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
17305 
-
17306  return allocator->CheckCorruption(memoryTypeBits);
-
17307 }
-
17308 
-
17309 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
-
17310  VmaAllocator allocator,
-
17311  VmaAllocation* pAllocations,
-
17312  size_t allocationCount,
-
17313  VkBool32* pAllocationsChanged,
-
17314  const VmaDefragmentationInfo *pDefragmentationInfo,
-
17315  VmaDefragmentationStats* pDefragmentationStats)
-
17316 {
-
17317  // Deprecated interface, reimplemented using new one.
-
17318 
-
17319  VmaDefragmentationInfo2 info2 = {};
-
17320  info2.allocationCount = (uint32_t)allocationCount;
-
17321  info2.pAllocations = pAllocations;
-
17322  info2.pAllocationsChanged = pAllocationsChanged;
-
17323  if(pDefragmentationInfo != VMA_NULL)
-
17324  {
-
17325  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
-
17326  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
-
17327  }
-
17328  else
-
17329  {
-
17330  info2.maxCpuAllocationsToMove = UINT32_MAX;
-
17331  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
-
17332  }
-
17333  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
-
17334 
-
17335  VmaDefragmentationContext ctx;
-
17336  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
-
17337  if(res == VK_NOT_READY)
-
17338  {
-
17339  res = vmaDefragmentationEnd( allocator, ctx);
-
17340  }
-
17341  return res;
-
17342 }
-
17343 
-
17344 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
-
17345  VmaAllocator allocator,
-
17346  const VmaDefragmentationInfo2* pInfo,
-
17347  VmaDefragmentationStats* pStats,
-
17348  VmaDefragmentationContext *pContext)
-
17349 {
-
17350  VMA_ASSERT(allocator && pInfo && pContext);
-
17351 
-
17352  // Degenerate case: Nothing to defragment.
-
17353  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
-
17354  {
-
17355  return VK_SUCCESS;
-
17356  }
+
17304 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
+
17305 {
+
17306  VMA_ASSERT(allocator);
+
17307 
+
17308  VMA_DEBUG_LOG("vmaCheckCorruption");
+
17309 
+
17310  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
17311 
+
17312  return allocator->CheckCorruption(memoryTypeBits);
+
17313 }
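
vmaCheckCorruption only has something to verify when the implementation is compiled with allocation margins and magic-value checks; a sketch of that configuration and a whole-allocator scan:

    // In the translation unit that defines VMA_IMPLEMENTATION:
    //   #define VMA_DEBUG_MARGIN 16
    //   #define VMA_DEBUG_DETECT_CORRUPTION 1
    //   #include "vk_mem_alloc.h"
    VkResult res = vmaCheckCorruption(allocator, UINT32_MAX); // UINT32_MAX = all memory types
    // VK_SUCCESS: checked and clean. VK_ERROR_VALIDATION_FAILED_EXT: corruption found.
    // VK_ERROR_FEATURE_NOT_PRESENT: detection unavailable for the requested memory types.
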
+
17314 
+
17315 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
+
17316  VmaAllocator allocator,
+
17317  VmaAllocation* pAllocations,
+
17318  size_t allocationCount,
+
17319  VkBool32* pAllocationsChanged,
+
17320  const VmaDefragmentationInfo *pDefragmentationInfo,
+
17321  VmaDefragmentationStats* pDefragmentationStats)
+
17322 {
+
17323  // Deprecated interface, reimplemented using new one.
+
17324 
+
17325  VmaDefragmentationInfo2 info2 = {};
+
17326  info2.allocationCount = (uint32_t)allocationCount;
+
17327  info2.pAllocations = pAllocations;
+
17328  info2.pAllocationsChanged = pAllocationsChanged;
+
17329  if(pDefragmentationInfo != VMA_NULL)
+
17330  {
+
17331  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
+
17332  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
+
17333  }
+
17334  else
+
17335  {
+
17336  info2.maxCpuAllocationsToMove = UINT32_MAX;
+
17337  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
+
17338  }
+
17339  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
+
17340 
+
17341  VmaDefragmentationContext ctx;
+
17342  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
+
17343  if(res == VK_NOT_READY)
+
17344  {
+
17345  res = vmaDefragmentationEnd( allocator, ctx);
+
17346  }
+
17347  return res;
+
17348 }
+
17349 
+
17350 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
+
17351  VmaAllocator allocator,
+
17352  const VmaDefragmentationInfo2* pInfo,
+
17353  VmaDefragmentationStats* pStats,
+
17354  VmaDefragmentationContext *pContext)
+
17355 {
+
17356  VMA_ASSERT(allocator && pInfo && pContext);
17357 
-
17358  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
-
17359  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
-
17360  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
-
17361  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
-
17362 
-
17363  VMA_DEBUG_LOG("vmaDefragmentationBegin");
-
17364 
-
17365  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
17366 
-
17367  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
+
17358  // Degenerate case: Nothing to defragment.
+
17359  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
+
17360  {
+
17361  return VK_SUCCESS;
+
17362  }
+
17363 
+
17364  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
+
17365  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
+
17366  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
+
17367  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
17368 
-
17369 #if VMA_RECORDING_ENABLED
-
17370  if(allocator->GetRecorder() != VMA_NULL)
-
17371  {
-
17372  allocator->GetRecorder()->RecordDefragmentationBegin(
-
17373  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
-
17374  }
-
17375 #endif
-
17376 
-
17377  return res;
-
17378 }
-
17379 
-
17380 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
-
17381  VmaAllocator allocator,
-
17382  VmaDefragmentationContext context)
-
17383 {
-
17384  VMA_ASSERT(allocator);
+
17369  VMA_DEBUG_LOG("vmaDefragmentationBegin");
+
17370 
+
17371  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
17372 
+
17373  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
+
17374 
+
17375 #if VMA_RECORDING_ENABLED
+
17376  if(allocator->GetRecorder() != VMA_NULL)
+
17377  {
+
17378  allocator->GetRecorder()->RecordDefragmentationBegin(
+
17379  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
+
17380  }
+
17381 #endif
+
17382 
+
17383  return res;
+
17384 }
17385 
-
17386  VMA_DEBUG_LOG("vmaDefragmentationEnd");
-
17387 
-
17388  if(context != VK_NULL_HANDLE)
-
17389  {
-
17390  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
17386 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
+
17387  VmaAllocator allocator,
+
17388  VmaDefragmentationContext context)
+
17389 {
+
17390  VMA_ASSERT(allocator);
17391 
-
17392 #if VMA_RECORDING_ENABLED
-
17393  if(allocator->GetRecorder() != VMA_NULL)
-
17394  {
-
17395  allocator->GetRecorder()->RecordDefragmentationEnd(
-
17396  allocator->GetCurrentFrameIndex(), context);
-
17397  }
-
17398 #endif
-
17399 
-
17400  return allocator->DefragmentationEnd(context);
-
17401  }
-
17402  else
-
17403  {
-
17404  return VK_SUCCESS;
-
17405  }
-
17406 }
-
17407 
-
17408 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
-
17409  VmaAllocator allocator,
-
17410  VmaAllocation allocation,
-
17411  VkBuffer buffer)
-
17412 {
-
17413  VMA_ASSERT(allocator && allocation && buffer);
-
17414 
-
17415  VMA_DEBUG_LOG("vmaBindBufferMemory");
-
17416 
-
17417  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
17418 
-
17419  return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL);
-
17420 }
-
17421 
-
17422 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
-
17423  VmaAllocator allocator,
-
17424  VmaAllocation allocation,
-
17425  VkDeviceSize allocationLocalOffset,
-
17426  VkBuffer buffer,
-
17427  const void* pNext)
-
17428 {
-
17429  VMA_ASSERT(allocator && allocation && buffer);
-
17430 
-
17431  VMA_DEBUG_LOG("vmaBindBufferMemory2");
-
17432 
-
17433  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
17434 
-
17435  return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext);
-
17436 }
-
17437 
-
17438 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
-
17439  VmaAllocator allocator,
-
17440  VmaAllocation allocation,
-
17441  VkImage image)
-
17442 {
-
17443  VMA_ASSERT(allocator && allocation && image);
-
17444 
-
17445  VMA_DEBUG_LOG("vmaBindImageMemory");
-
17446 
-
17447  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
17448 
-
17449  return allocator->BindImageMemory(allocation, 0, image, VMA_NULL);
-
17450 }
-
17451 
-
17452 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
-
17453  VmaAllocator allocator,
-
17454  VmaAllocation allocation,
-
17455  VkDeviceSize allocationLocalOffset,
-
17456  VkImage image,
-
17457  const void* pNext)
-
17458 {
-
17459  VMA_ASSERT(allocator && allocation && image);
-
17460 
-
17461  VMA_DEBUG_LOG("vmaBindImageMemory2");
-
17462 
-
17463  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
17464 
-
17465  return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext);
-
17466 }
-
17467 
-
17468 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
-
17469  VmaAllocator allocator,
-
17470  const VkBufferCreateInfo* pBufferCreateInfo,
-
17471  const VmaAllocationCreateInfo* pAllocationCreateInfo,
-
17472  VkBuffer* pBuffer,
-
17473  VmaAllocation* pAllocation,
-
17474  VmaAllocationInfo* pAllocationInfo)
-
17475 {
-
17476  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
-
17477 
-
17478  if(pBufferCreateInfo->size == 0)
-
17479  {
-
17480  return VK_ERROR_VALIDATION_FAILED_EXT;
-
17481  }
-
17482 
-
17483  VMA_DEBUG_LOG("vmaCreateBuffer");
-
17484 
-
17485  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
17486 
-
17487  *pBuffer = VK_NULL_HANDLE;
-
17488  *pAllocation = VK_NULL_HANDLE;
-
17489 
-
17490  // 1. Create VkBuffer.
-
17491  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
-
17492  allocator->m_hDevice,
-
17493  pBufferCreateInfo,
-
17494  allocator->GetAllocationCallbacks(),
-
17495  pBuffer);
-
17496  if(res >= 0)
-
17497  {
-
17498  // 2. vkGetBufferMemoryRequirements.
-
17499  VkMemoryRequirements vkMemReq = {};
-
17500  bool requiresDedicatedAllocation = false;
-
17501  bool prefersDedicatedAllocation = false;
-
17502  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
-
17503  requiresDedicatedAllocation, prefersDedicatedAllocation);
-
17504 
-
17505  // Make sure alignment requirements for specific buffer usages reported
-
17506  // in Physical Device Properties are included in alignment reported by memory requirements.
-
17507  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
-
17508  {
-
17509  VMA_ASSERT(vkMemReq.alignment %
-
17510  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
-
17511  }
-
17512  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
-
17513  {
-
17514  VMA_ASSERT(vkMemReq.alignment %
-
17515  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
-
17516  }
-
17517  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
-
17518  {
-
17519  VMA_ASSERT(vkMemReq.alignment %
-
17520  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
-
17521  }
-
17522 
-
17523  // 3. Allocate memory using allocator.
-
17524  res = allocator->AllocateMemory(
-
17525  vkMemReq,
-
17526  requiresDedicatedAllocation,
-
17527  prefersDedicatedAllocation,
-
17528  *pBuffer, // dedicatedBuffer
-
17529  VK_NULL_HANDLE, // dedicatedImage
-
17530  *pAllocationCreateInfo,
-
17531  VMA_SUBALLOCATION_TYPE_BUFFER,
-
17532  1, // allocationCount
-
17533  pAllocation);
-
17534 
-
17535 #if VMA_RECORDING_ENABLED
-
17536  if(allocator->GetRecorder() != VMA_NULL)
-
17537  {
-
17538  allocator->GetRecorder()->RecordCreateBuffer(
-
17539  allocator->GetCurrentFrameIndex(),
-
17540  *pBufferCreateInfo,
-
17541  *pAllocationCreateInfo,
-
17542  *pAllocation);
-
17543  }
-
17544 #endif
-
17545 
-
17546  if(res >= 0)
-
17547  {
-
17548  // 3. Bind buffer with memory.
-
17549  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
-
17550  {
-
17551  res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
-
17552  }
-
17553  if(res >= 0)
-
17554  {
-
17555  // All steps succeeded.
-
17556  #if VMA_STATS_STRING_ENABLED
-
17557  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
-
17558  #endif
-
17559  if(pAllocationInfo != VMA_NULL)
-
17560  {
-
17561  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
-
17562  }
-
17563 
-
17564  return VK_SUCCESS;
-
17565  }
-
17566  allocator->FreeMemory(
-
17567  1, // allocationCount
-
17568  pAllocation);
-
17569  *pAllocation = VK_NULL_HANDLE;
-
17570  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
-
17571  *pBuffer = VK_NULL_HANDLE;
-
17572  return res;
-
17573  }
-
17574  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
-
17575  *pBuffer = VK_NULL_HANDLE;
-
17576  return res;
-
17577  }
-
17578  return res;
-
17579 }
-
17580 
-
17581 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
-
17582  VmaAllocator allocator,
-
17583  VkBuffer buffer,
-
17584  VmaAllocation allocation)
-
17585 {
-
17586  VMA_ASSERT(allocator);
-
17587 
-
17588  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
-
17589  {
-
17590  return;
-
17591  }
-
17592 
-
17593  VMA_DEBUG_LOG("vmaDestroyBuffer");
-
17594 
-
17595  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
17596 
-
17597 #if VMA_RECORDING_ENABLED
-
17598  if(allocator->GetRecorder() != VMA_NULL)
-
17599  {
-
17600  allocator->GetRecorder()->RecordDestroyBuffer(
-
17601  allocator->GetCurrentFrameIndex(),
-
17602  allocation);
-
17603  }
-
17604 #endif
-
17605 
-
17606  if(buffer != VK_NULL_HANDLE)
-
17607  {
-
17608  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
+
17392  VMA_DEBUG_LOG("vmaDefragmentationEnd");
+
17393 
+
17394  if(context != VK_NULL_HANDLE)
+
17395  {
+
17396  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
17397 
+
17398 #if VMA_RECORDING_ENABLED
+
17399  if(allocator->GetRecorder() != VMA_NULL)
+
17400  {
+
17401  allocator->GetRecorder()->RecordDefragmentationEnd(
+
17402  allocator->GetCurrentFrameIndex(), context);
+
17403  }
+
17404 #endif
+
17405 
+
17406  return allocator->DefragmentationEnd(context);
+
17407  }
+
17408  else
+
17409  {
+
17410  return VK_SUCCESS;
+
17411  }
+
17412 }
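
A minimal CPU-only sketch of the Begin/End pair, assuming allocs points to allocationCount live allocations gathered by the application; since no command buffer is supplied, only host-side moves are performed:

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocationCount;
    defragInfo.pAllocations = allocs;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationStats stats = {};
    VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
    VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, &stats, &defragCtx);
    if(res == VK_NOT_READY)
    {
        // Nothing is pending on the GPU, so End can complete immediately,
        // mirroring the deprecated vmaDefragment() wrapper above.
        res = vmaDefragmentationEnd(allocator, defragCtx);
    }

Buffers or images bound to allocations that were moved must be recreated and rebound afterwards, per the library's defragmentation documentation.
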
+
17413 
+
17414 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
+
17415  VmaAllocator allocator,
+
17416  VmaAllocation allocation,
+
17417  VkBuffer buffer)
+
17418 {
+
17419  VMA_ASSERT(allocator && allocation && buffer);
+
17420 
+
17421  VMA_DEBUG_LOG("vmaBindBufferMemory");
+
17422 
+
17423  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
17424 
+
17425  return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL);
+
17426 }
+
17427 
+
17428 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
+
17429  VmaAllocator allocator,
+
17430  VmaAllocation allocation,
+
17431  VkDeviceSize allocationLocalOffset,
+
17432  VkBuffer buffer,
+
17433  const void* pNext)
+
17434 {
+
17435  VMA_ASSERT(allocator && allocation && buffer);
+
17436 
+
17437  VMA_DEBUG_LOG("vmaBindBufferMemory2");
+
17438 
+
17439  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
17440 
+
17441  return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext);
+
17442 }
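
A sketch of where the *2 variant and explicit binding are useful, assuming bufCreateInfo and allocCreateInfo are filled in elsewhere and allocCreateInfo.flags includes VMA_ALLOCATION_CREATE_DONT_BIND_BIT so creation skips the automatic bind:

    VkBuffer buf = VK_NULL_HANDLE;
    VmaAllocation alloc = VK_NULL_HANDLE;
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, VMA_NULL);
    if(res == VK_SUCCESS)
    {
        // allocationLocalOffset 0 and pNext NULL reproduce plain vmaBindBufferMemory;
        // pNext exists to chain extension structs into the bind info used internally.
        res = vmaBindBufferMemory2(allocator, alloc, 0, buf, VMA_NULL);
    }
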
+
17443 
+
17444 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
+
17445  VmaAllocator allocator,
+
17446  VmaAllocation allocation,
+
17447  VkImage image)
+
17448 {
+
17449  VMA_ASSERT(allocator && allocation && image);
+
17450 
+
17451  VMA_DEBUG_LOG("vmaBindImageMemory");
+
17452 
+
17453  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
17454 
+
17455  return allocator->BindImageMemory(allocation, 0, image, VMA_NULL);
+
17456 }
+
17457 
+
17458 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
+
17459  VmaAllocator allocator,
+
17460  VmaAllocation allocation,
+
17461  VkDeviceSize allocationLocalOffset,
+
17462  VkImage image,
+
17463  const void* pNext)
+
17464 {
+
17465  VMA_ASSERT(allocator && allocation && image);
+
17466 
+
17467  VMA_DEBUG_LOG("vmaBindImageMemory2");
+
17468 
+
17469  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
17470 
+
17471  return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext);
+
17472 }
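
The VMA_RECORDING_ENABLED blocks recurring in these wrappers all feed one recorder configured at allocator creation; a sketch, with vma_capture.csv as an illustrative path and the device members assumed to be set elsewhere:

    VmaRecordSettings recordSettings = {};
    recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT; // flush after each call, useful if the app may crash
    recordSettings.pFilePath = "vma_capture.csv";

    VmaAllocatorCreateInfo allocatorInfo = {};
    // ... physicalDevice, device, etc. set elsewhere ...
    allocatorInfo.pRecordSettings = &recordSettings; // requires compiling with VMA_RECORDING_ENABLED
    VmaAllocator allocator = VK_NULL_HANDLE;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
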
+
17473 
+
17474 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
+
17475  VmaAllocator allocator,
+
17476  const VkBufferCreateInfo* pBufferCreateInfo,
+
17477  const VmaAllocationCreateInfo* pAllocationCreateInfo,
+
17478  VkBuffer* pBuffer,
+
17479  VmaAllocation* pAllocation,
+
17480  VmaAllocationInfo* pAllocationInfo)
+
17481 {
+
17482  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
+
17483 
+
17484  if(pBufferCreateInfo->size == 0)
+
17485  {
+
17486  return VK_ERROR_VALIDATION_FAILED_EXT;
+
17487  }
+
17488 
+
17489  VMA_DEBUG_LOG("vmaCreateBuffer");
+
17490 
+
17491  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
17492 
+
17493  *pBuffer = VK_NULL_HANDLE;
+
17494  *pAllocation = VK_NULL_HANDLE;
+
17495 
+
17496  // 1. Create VkBuffer.
+
17497  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
+
17498  allocator->m_hDevice,
+
17499  pBufferCreateInfo,
+
17500  allocator->GetAllocationCallbacks(),
+
17501  pBuffer);
+
17502  if(res >= 0)
+
17503  {
+
17504  // 2. vkGetBufferMemoryRequirements.
+
17505  VkMemoryRequirements vkMemReq = {};
+
17506  bool requiresDedicatedAllocation = false;
+
17507  bool prefersDedicatedAllocation = false;
+
17508  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
+
17509  requiresDedicatedAllocation, prefersDedicatedAllocation);
+
17510 
+
17511  // Make sure alignment requirements for specific buffer usages reported
+
17512  // in Physical Device Properties are included in alignment reported by memory requirements.
+
17513  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
+
17514  {
+
17515  VMA_ASSERT(vkMemReq.alignment %
+
17516  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
+
17517  }
+
17518  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
+
17519  {
+
17520  VMA_ASSERT(vkMemReq.alignment %
+
17521  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
+
17522  }
+
17523  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
+
17524  {
+
17525  VMA_ASSERT(vkMemReq.alignment %
+
17526  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
+
17527  }
+
17528 
+
17529  // 3. Allocate memory using allocator.
+
17530  res = allocator->AllocateMemory(
+
17531  vkMemReq,
+
17532  requiresDedicatedAllocation,
+
17533  prefersDedicatedAllocation,
+
17534  *pBuffer, // dedicatedBuffer
+
17535  VK_NULL_HANDLE, // dedicatedImage
+
17536  *pAllocationCreateInfo,
+
17537  VMA_SUBALLOCATION_TYPE_BUFFER,
+
17538  1, // allocationCount
+
17539  pAllocation);
+
17540 
+
17541 #if VMA_RECORDING_ENABLED
+
17542  if(allocator->GetRecorder() != VMA_NULL)
+
17543  {
+
17544  allocator->GetRecorder()->RecordCreateBuffer(
+
17545  allocator->GetCurrentFrameIndex(),
+
17546  *pBufferCreateInfo,
+
17547  *pAllocationCreateInfo,
+
17548  *pAllocation);
+
17549  }
+
17550 #endif
+
17551 
+
17552  if(res >= 0)
+
17553  {
+
17554  // 3. Bind buffer with memory.
+
17555  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
+
17556  {
+
17557  res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
+
17558  }
+
17559  if(res >= 0)
+
17560  {
+
17561  // All steps succeeded.
+
17562  #if VMA_STATS_STRING_ENABLED
+
17563  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
+
17564  #endif
+
17565  if(pAllocationInfo != VMA_NULL)
+
17566  {
+
17567  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
+
17568  }
+
17569 
+
17570  return VK_SUCCESS;
+
17571  }
+
17572  allocator->FreeMemory(
+
17573  1, // allocationCount
+
17574  pAllocation);
+
17575  *pAllocation = VK_NULL_HANDLE;
+
17576  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
+
17577  *pBuffer = VK_NULL_HANDLE;
+
17578  return res;
+
17579  }
+
17580  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
+
17581  *pBuffer = VK_NULL_HANDLE;
+
17582  return res;
+
17583  }
+
17584  return res;
+
17585 }
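
The body above implements the create / query-requirements / allocate / bind sequence; a minimal end-to-end sketch with illustrative values:

    VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufInfo.size = 65536;
    bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer = VK_NULL_HANDLE;
    VmaAllocation allocation = VK_NULL_HANDLE;
    VkResult res = vmaCreateBuffer(allocator, &bufInfo, &allocCreateInfo, &buffer, &allocation, VMA_NULL);
    // ... use the buffer ...
    vmaDestroyBuffer(allocator, buffer, allocation); // destroys the VkBuffer and frees its memory together
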
+
17586 
+
17587 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
+
17588  VmaAllocator allocator,
+
17589  VkBuffer buffer,
+
17590  VmaAllocation allocation)
+
17591 {
+
17592  VMA_ASSERT(allocator);
+
17593 
+
17594  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
+
17595  {
+
17596  return;
+
17597  }
+
17598 
+
17599  VMA_DEBUG_LOG("vmaDestroyBuffer");
+
17600 
+
17601  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
17602 
+
17603 #if VMA_RECORDING_ENABLED
+
17604  if(allocator->GetRecorder() != VMA_NULL)
+
17605  {
+
17606  allocator->GetRecorder()->RecordDestroyBuffer(
+
17607  allocator->GetCurrentFrameIndex(),
+
17608  allocation);
17609  }
-
17610 
-
17611  if(allocation != VK_NULL_HANDLE)
-
17612  {
-
17613  allocator->FreeMemory(
-
17614  1, // allocationCount
-
17615  &allocation);
-
17616  }
-
17617 }
-
17618 
-
17619 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
-
17620  VmaAllocator allocator,
-
17621  const VkImageCreateInfo* pImageCreateInfo,
-
17622  const VmaAllocationCreateInfo* pAllocationCreateInfo,
-
17623  VkImage* pImage,
-
17624  VmaAllocation* pAllocation,
-
17625  VmaAllocationInfo* pAllocationInfo)
-
17626 {
-
17627  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
-
17628 
-
17629  if(pImageCreateInfo->extent.width == 0 ||
-
17630  pImageCreateInfo->extent.height == 0 ||
-
17631  pImageCreateInfo->extent.depth == 0 ||
-
17632  pImageCreateInfo->mipLevels == 0 ||
-
17633  pImageCreateInfo->arrayLayers == 0)
-
17634  {
-
17635  return VK_ERROR_VALIDATION_FAILED_EXT;
-
17636  }
-
17637 
-
17638  VMA_DEBUG_LOG("vmaCreateImage");
-
17639 
-
17640  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
17641 
-
17642  *pImage = VK_NULL_HANDLE;
-
17643  *pAllocation = VK_NULL_HANDLE;
-
17644 
-
17645  // 1. Create VkImage.
-
17646  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
-
17647  allocator->m_hDevice,
-
17648  pImageCreateInfo,
-
17649  allocator->GetAllocationCallbacks(),
-
17650  pImage);
-
17651  if(res >= 0)
-
17652  {
-
17653  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
-
17654  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
-
17655  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
-
17656 
-
17657  // 2. Allocate memory using allocator.
-
17658  VkMemoryRequirements vkMemReq = {};
-
17659  bool requiresDedicatedAllocation = false;
-
17660  bool prefersDedicatedAllocation = false;
-
17661  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
-
17662  requiresDedicatedAllocation, prefersDedicatedAllocation);
-
17663 
-
17664  res = allocator->AllocateMemory(
-
17665  vkMemReq,
-
17666  requiresDedicatedAllocation,
-
17667  prefersDedicatedAllocation,
-
17668  VK_NULL_HANDLE, // dedicatedBuffer
-
17669  *pImage, // dedicatedImage
-
17670  *pAllocationCreateInfo,
-
17671  suballocType,
-
17672  1, // allocationCount
-
17673  pAllocation);
-
17674 
-
17675 #if VMA_RECORDING_ENABLED
-
17676  if(allocator->GetRecorder() != VMA_NULL)
-
17677  {
-
17678  allocator->GetRecorder()->RecordCreateImage(
-
17679  allocator->GetCurrentFrameIndex(),
-
17680  *pImageCreateInfo,
-
17681  *pAllocationCreateInfo,
-
17682  *pAllocation);
-
17683  }
-
17684 #endif
-
17685 
-
17686  if(res >= 0)
-
17687  {
-
17688  // 3. Bind image with memory.
-
17689  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
-
17690  {
-
17691  res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL);
-
17692  }
-
17693  if(res >= 0)
-
17694  {
-
17695  // All steps succeeded.
-
17696  #if VMA_STATS_STRING_ENABLED
-
17697  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
-
17698  #endif
-
17699  if(pAllocationInfo != VMA_NULL)
-
17700  {
-
17701  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
-
17702  }
-
17703 
-
17704  return VK_SUCCESS;
-
17705  }
-
17706  allocator->FreeMemory(
-
17707  1, // allocationCount
-
17708  pAllocation);
-
17709  *pAllocation = VK_NULL_HANDLE;
-
17710  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
-
17711  *pImage = VK_NULL_HANDLE;
-
17712  return res;
-
17713  }
-
17714  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
-
17715  *pImage = VK_NULL_HANDLE;
-
17716  return res;
-
17717  }
-
17718  return res;
-
17719 }
-
17720 
-
17721 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
-
17722  VmaAllocator allocator,
-
17723  VkImage image,
-
17724  VmaAllocation allocation)
-
17725 {
-
17726  VMA_ASSERT(allocator);
-
17727 
-
17728  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
-
17729  {
-
17730  return;
-
17731  }
-
17732 
-
17733  VMA_DEBUG_LOG("vmaDestroyImage");
-
17734 
-
17735  VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
17736 
-
17737 #if VMA_RECORDING_ENABLED
-
17738  if(allocator->GetRecorder() != VMA_NULL)
-
17739  {
-
17740  allocator->GetRecorder()->RecordDestroyImage(
-
17741  allocator->GetCurrentFrameIndex(),
-
17742  allocation);
-
17743  }
-
17744 #endif
-
17745 
-
17746  if(image != VK_NULL_HANDLE)
-
17747  {
-
17748  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
+
17610 #endif
+
17611 
+
17612  if(buffer != VK_NULL_HANDLE)
+
17613  {
+
17614  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
+
17615  }
+
17616 
+
17617  if(allocation != VK_NULL_HANDLE)
+
17618  {
+
17619  allocator->FreeMemory(
+
17620  1, // allocationCount
+
17621  &allocation);
+
17622  }
+
17623 }
+
17624 
+
17625 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
+
17626  VmaAllocator allocator,
+
17627  const VkImageCreateInfo* pImageCreateInfo,
+
17628  const VmaAllocationCreateInfo* pAllocationCreateInfo,
+
17629  VkImage* pImage,
+
17630  VmaAllocation* pAllocation,
+
17631  VmaAllocationInfo* pAllocationInfo)
+
17632 {
+
17633  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
+
17634 
+
17635  if(pImageCreateInfo->extent.width == 0 ||
+
17636  pImageCreateInfo->extent.height == 0 ||
+
17637  pImageCreateInfo->extent.depth == 0 ||
+
17638  pImageCreateInfo->mipLevels == 0 ||
+
17639  pImageCreateInfo->arrayLayers == 0)
+
17640  {
+
17641  return VK_ERROR_VALIDATION_FAILED_EXT;
+
17642  }
+
17643 
+
17644  VMA_DEBUG_LOG("vmaCreateImage");
+
17645 
+
17646  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
17647 
+
17648  *pImage = VK_NULL_HANDLE;
+
17649  *pAllocation = VK_NULL_HANDLE;
+
17650 
+
17651  // 1. Create VkImage.
+
17652  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
+
17653  allocator->m_hDevice,
+
17654  pImageCreateInfo,
+
17655  allocator->GetAllocationCallbacks(),
+
17656  pImage);
+
17657  if(res >= 0)
+
17658  {
+
17659  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
+
17660  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
+
17661  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
+
17662 
+
17663  // 2. Allocate memory using allocator.
+
17664  VkMemoryRequirements vkMemReq = {};
+
17665  bool requiresDedicatedAllocation = false;
+
17666  bool prefersDedicatedAllocation = false;
+
17667  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
+
17668  requiresDedicatedAllocation, prefersDedicatedAllocation);
+
17669 
+
17670  res = allocator->AllocateMemory(
+
17671  vkMemReq,
+
17672  requiresDedicatedAllocation,
+
17673  prefersDedicatedAllocation,
+
17674  VK_NULL_HANDLE, // dedicatedBuffer
+
17675  *pImage, // dedicatedImage
+
17676  *pAllocationCreateInfo,
+
17677  suballocType,
+
17678  1, // allocationCount
+
17679  pAllocation);
+
17680 
+
17681 #if VMA_RECORDING_ENABLED
+
17682  if(allocator->GetRecorder() != VMA_NULL)
+
17683  {
+
17684  allocator->GetRecorder()->RecordCreateImage(
+
17685  allocator->GetCurrentFrameIndex(),
+
17686  *pImageCreateInfo,
+
17687  *pAllocationCreateInfo,
+
17688  *pAllocation);
+
17689  }
+
17690 #endif
+
17691 
+
17692  if(res >= 0)
+
17693  {
+
17694  // 3. Bind image with memory.
+
17695  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
+
17696  {
+
17697  res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL);
+
17698  }
+
17699  if(res >= 0)
+
17700  {
+
17701  // All steps succeeded.
+
17702  #if VMA_STATS_STRING_ENABLED
+
17703  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
+
17704  #endif
+
17705  if(pAllocationInfo != VMA_NULL)
+
17706  {
+
17707  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
+
17708  }
+
17709 
+
17710  return VK_SUCCESS;
+
17711  }
+
17712  allocator->FreeMemory(
+
17713  1, // allocationCount
+
17714  pAllocation);
+
17715  *pAllocation = VK_NULL_HANDLE;
+
17716  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
+
17717  *pImage = VK_NULL_HANDLE;
+
17718  return res;
+
17719  }
+
17720  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
+
17721  *pImage = VK_NULL_HANDLE;
+
17722  return res;
+
17723  }
+
17724  return res;
+
17725 }
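
The image path mirrors the buffer path, with the tiling choice selecting the suballocation type; a minimal sketch with illustrative values:

    VkImageCreateInfo imgInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imgInfo.imageType = VK_IMAGE_TYPE_2D;
    imgInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imgInfo.extent = { 1024, 1024, 1 }; // zero extent, mipLevels or arrayLayers is rejected above
    imgInfo.mipLevels = 1;
    imgInfo.arrayLayers = 1;
    imgInfo.samples = VK_SAMPLE_COUNT_1_BIT;
    imgInfo.tiling = VK_IMAGE_TILING_OPTIMAL; // selects VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL
    imgInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkImage image = VK_NULL_HANDLE;
    VmaAllocation allocation = VK_NULL_HANDLE;
    VkResult res = vmaCreateImage(allocator, &imgInfo, &allocCreateInfo, &image, &allocation, VMA_NULL);
    // ... use the image ...
    vmaDestroyImage(allocator, image, allocation);
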
+
17726 
+
17727 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
+
17728  VmaAllocator allocator,
+
17729  VkImage image,
+
17730  VmaAllocation allocation)
+
17731 {
+
17732  VMA_ASSERT(allocator);
+
17733 
+
17734  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
+
17735  {
+
17736  return;
+
17737  }
+
17738 
+
17739  VMA_DEBUG_LOG("vmaDestroyImage");
+
17740 
+
17741  VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
17742 
+
17743 #if VMA_RECORDING_ENABLED
+
17744  if(allocator->GetRecorder() != VMA_NULL)
+
17745  {
+
17746  allocator->GetRecorder()->RecordDestroyImage(
+
17747  allocator->GetCurrentFrameIndex(),
+
17748  allocation);
17749  }
-
17750  if(allocation != VK_NULL_HANDLE)
-
17751  {
-
17752  allocator->FreeMemory(
-
17753  1, // allocationCount
-
17754  &allocation);
+
17750 #endif
+
17751 
+
17752  if(image != VK_NULL_HANDLE)
+
17753  {
+
17754  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
17755  }
-
17756 }
-
17757 
-
17758 #endif // #ifdef VMA_IMPLEMENTATION
+
17756  if(allocation != VK_NULL_HANDLE)
+
17757  {
+
17758  allocator->FreeMemory(
+
17759  1, // allocationCount
+
17760  &allocation);
+
17761  }
+
17762 }
+
17763 
+
17764 #endif // #ifdef VMA_IMPLEMENTATION
diff --git a/src/VulkanSample.cpp b/src/VulkanSample.cpp
index ee3f8c8..e9872d5 100644
--- a/src/VulkanSample.cpp
+++ b/src/VulkanSample.cpp
@@ -32,8 +32,8 @@ static const char* const SHADER_PATH1 = "./";
 static const char* const SHADER_PATH2 = "../bin/";
 static const wchar_t* const WINDOW_CLASS_NAME = L"VULKAN_MEMORY_ALLOCATOR_SAMPLE";
 static const char* const VALIDATION_LAYER_NAME = "VK_LAYER_LUNARG_standard_validation";
-static const char* const APP_TITLE_A = "Vulkan Memory Allocator Sample 2.3.0-alpha.1";
-static const wchar_t* const APP_TITLE_W = L"Vulkan Memory Allocator Sample 2.3.0-alpha.1";
+static const char* const APP_TITLE_A = "Vulkan Memory Allocator Sample 2.3.0";
+static const wchar_t* const APP_TITLE_W = L"Vulkan Memory Allocator Sample 2.3.0";
 static const bool VSYNC = true;
 static const uint32_t COMMAND_BUFFER_COUNT = 2;
diff --git a/src/vk_mem_alloc.h b/src/vk_mem_alloc.h
index 0a1bd23..32258b4 100644
--- a/src/vk_mem_alloc.h
+++ b/src/vk_mem_alloc.h
@@ -29,7 +29,7 @@ extern "C" {
 /** \mainpage Vulkan Memory Allocator
-Version 2.3.0-alpha.1 (2019-11-25)
+Version 2.3.0 (2019-12-04)
 Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved. \n
 License: MIT