Add VulkanMemoryAllocator
The Vulkan backend of QRhi relies on vk_mem_alloc.h from AMD in order to get a stable, performant, and tested GPU memory allocator. It is not unthinkable that we will move away from this in the future, especially considering that a potential future D3D12 backend may need a similar solution, but until then this will do.

Change-Id: I198a898f216d0795b4bf339ccea80b0cd2efbabc
Reviewed-by: Lars Knoll <lars.knoll@qt.io>
Commit: 7f7eaf1cad
Parent: 53599592e0
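For context (not part of this change): vk_mem_alloc.h is a single-header library, and a consumer such as QRhi's Vulkan backend would typically create one VmaAllocator per device and route buffer/image memory through it. A minimal sketch, assuming physicalDevice and device are already-created Vulkan handles and error handling is omitted:

    #include "vk_mem_alloc.h"

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    // When VMA_STATIC_VULKAN_FUNCTIONS is 0, allocatorInfo.pVulkanFunctions
    // must also be filled with function pointers resolved by the application.

    VmaAllocator allocator = nullptr;
    vmaCreateAllocator(&allocatorInfo, &allocator);

    VkBufferCreateInfo bufInfo = {};
    bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    bufInfo.size = 64 * 1024;
    bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY; // device-local memory

    VkBuffer buffer = VK_NULL_HANDLE;
    VmaAllocation allocation = nullptr;
    vmaCreateBuffer(allocator, &bufInfo, &allocCreateInfo, &buffer, &allocation, nullptr);

    // ... use the buffer ...

    vmaDestroyBuffer(allocator, buffer, allocation);
    vmaDestroyAllocator(allocator);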
src/3rdparty/VulkanMemoryAllocator.pri (vendored, new file, 1 line)
@@ -0,0 +1 @@
INCLUDEPATH += $$PWD/VulkanMemoryAllocator
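The .pri only puts the header on the include path; vk_mem_alloc.h follows the usual single-header pattern, so exactly one translation unit in the consumer has to compile the implementation. A sketch of that one file (file name hypothetical, not part of this commit):

    // qrhivulkan_vma.cpp (hypothetical) -- the single TU that provides the implementation
    #define VMA_IMPLEMENTATION
    #include "vk_mem_alloc.h"

    // Every other file just includes the header without the define:
    // #include "vk_mem_alloc.h"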
src/3rdparty/VulkanMemoryAllocator/LICENSE.txt (vendored, new file, 19 lines)
@@ -0,0 +1,19 @@
Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
src/3rdparty/VulkanMemoryAllocator/patches/0001-Avoid-compiler-warnings.patch (vendored, new file, 402 lines)
@@ -0,0 +1,402 @@
diff --git a/src/3rdparty/VulkanMemoryAllocator/vk_mem_alloc.h b/src/3rdparty/VulkanMemoryAllocator/vk_mem_alloc.h
index a2f7a1b..fbe6f9e 100644
--- a/src/3rdparty/VulkanMemoryAllocator/vk_mem_alloc.h
+++ b/src/3rdparty/VulkanMemoryAllocator/vk_mem_alloc.h
@@ -3661,7 +3661,7 @@ static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
{
uint32_t* pDst = (uint32_t*)((char*)pData + offset);
const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
- for(size_t i = 0; i < numberCount; ++i, ++pDst)
+ for(size_t i = 0; i != numberCount; ++i, ++pDst)
{
*pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
}
@@ -3671,7 +3671,7 @@ static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
{
const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
- for(size_t i = 0; i < numberCount; ++i, ++pSrc)
+ for(size_t i = 0; i != numberCount; ++i, ++pSrc)
{
if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
{
@@ -3866,7 +3866,7 @@ public:
template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
- void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
+ void deallocate(T* p, size_t /*n*/) { VmaFree(m_pCallbacks, p); }

template<typename U>
bool operator==(const VmaStlAllocator<U>& rhs) const
@@ -5214,7 +5214,7 @@ public:
virtual void FreeAtOffset(VkDeviceSize offset) = 0;

// Tries to resize (grow or shrink) space for given allocation, in place.
- virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }
+ virtual bool ResizeAllocation(const VmaAllocation /*alloc*/, VkDeviceSize /*newSize*/) { return false; }

protected:
const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
@@ -5574,7 +5574,7 @@ public:

virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

- virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
+ virtual VkResult CheckCorruption(const void* /*pBlockData*/) { return VK_ERROR_FEATURE_NOT_PRESENT; }

virtual void Alloc(
const VmaAllocationRequest& request,
@@ -6133,7 +6133,7 @@ public:
bool overlappingMoveSupported);
virtual ~VmaDefragmentationAlgorithm_Fast();

- virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
+ virtual void AddAllocation(VmaAllocation /*hAlloc*/, VkBool32* /*pChanged*/) { ++m_AllocationCount; }
virtual void AddAll() { m_AllAllocations = true; }

virtual VkResult Defragment(
@@ -6318,7 +6318,7 @@ private:
// Redundant, for convenience not to fetch from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
VmaBlockVector* const m_pBlockVector;
const uint32_t m_CurrFrameIndex;
- const uint32_t m_AlgorithmFlags;
+ /*const uint32_t m_AlgorithmFlags;*/
// Owner of this object.
VmaDefragmentationAlgorithm* m_pAlgorithm;

@@ -7073,6 +7073,7 @@ void VmaJsonWriter::BeginValue(bool isString)
if(currItem.type == COLLECTION_TYPE_OBJECT &&
currItem.valueCount % 2 == 0)
{
+ (void) isString;
VMA_ASSERT(isString);
}

@@ -7660,7 +7661,9 @@ bool VmaBlockMetadata_Generic::Validate() const
}

// Margin required between allocations - every free space must be at least that large.
+#if VMA_DEBUG_MARGIN
VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
+#endif
}
else
{
@@ -7806,6 +7809,7 @@ bool VmaBlockMetadata_Generic::CreateAllocationRequest(
{
VMA_ASSERT(allocSize > 0);
VMA_ASSERT(!upperAddress);
+ (void) upperAddress;
VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
VMA_ASSERT(pAllocationRequest != VMA_NULL);
VMA_HEAVY_ASSERT(Validate());
@@ -8033,6 +8037,7 @@ void VmaBlockMetadata_Generic::Alloc(
VmaAllocation hAllocation)
{
VMA_ASSERT(!upperAddress);
+ (void) upperAddress;
VMA_ASSERT(request.item != m_Suballocations.end());
VmaSuballocation& suballoc = *request.item;
// Given suballocation is a free block.
@@ -9609,7 +9614,7 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest(
bool upperAddress,
VmaSuballocationType allocType,
bool canMakeOtherLost,
- uint32_t strategy,
+ uint32_t /*strategy*/,
VmaAllocationRequest* pAllocationRequest)
{
VMA_ASSERT(allocSize > 0);
@@ -9651,10 +9656,12 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest(
// Apply VMA_DEBUG_MARGIN at the end.
if(VMA_DEBUG_MARGIN > 0)
{
+#if VMA_DEBUG_MARGIN
if(resultOffset < VMA_DEBUG_MARGIN)
{
return false;
}
+#endif
resultOffset -= VMA_DEBUG_MARGIN;
}

@@ -10542,18 +10549,19 @@ void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
#endif // #if VMA_STATS_STRING_ENABLED

bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
- uint32_t currentFrameIndex,
- uint32_t frameInUseCount,
+ uint32_t /*currentFrameIndex*/,
+ uint32_t /*frameInUseCount*/,
VkDeviceSize bufferImageGranularity,
VkDeviceSize allocSize,
VkDeviceSize allocAlignment,
bool upperAddress,
VmaSuballocationType allocType,
- bool canMakeOtherLost,
- uint32_t strategy,
+ bool /*canMakeOtherLost*/,
+ uint32_t /*strategy*/,
VmaAllocationRequest* pAllocationRequest)
{
VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
+ (void) upperAddress;

// Simple way to respect bufferImageGranularity. May be optimized some day.
// Whenever it might be an OPTIMAL image...
@@ -10593,8 +10601,8 @@ bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
}

bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
- uint32_t currentFrameIndex,
- uint32_t frameInUseCount,
+ uint32_t /*currentFrameIndex*/,
+ uint32_t /*frameInUseCount*/,
VmaAllocationRequest* pAllocationRequest)
{
/*
@@ -10604,7 +10612,7 @@ bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
return pAllocationRequest->itemsToMakeLostCount == 0;
}

-uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
+uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t /*currentFrameIndex*/, uint32_t /*frameInUseCount*/)
{
/*
Lost allocations are not supported in buddy allocator at the moment.
@@ -10615,9 +10623,9 @@ uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex,

void VmaBlockMetadata_Buddy::Alloc(
const VmaAllocationRequest& request,
- VmaSuballocationType type,
+ VmaSuballocationType /*type*/,
VkDeviceSize allocSize,
- bool upperAddress,
+ bool /*upperAddress*/,
VmaAllocation hAllocation)
{
const uint32_t targetLevel = AllocSizeToLevel(allocSize);
@@ -10941,7 +10949,7 @@ void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, con
////////////////////////////////////////////////////////////////////////////////
// class VmaDeviceMemoryBlock

-VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
+VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator /*hAllocator*/) :
m_pMetadata(VMA_NULL),
m_MemoryTypeIndex(UINT32_MAX),
m_Id(0),
@@ -11691,6 +11699,7 @@ VkResult VmaBlockVector::AllocatePage(
if(IsCorruptionDetectionEnabled())
{
VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
+ (void) res;
VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
}
return VK_SUCCESS;
@@ -11729,6 +11738,7 @@ void VmaBlockVector::Free(
if(IsCorruptionDetectionEnabled())
{
VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
+ (void) res;
VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
}

@@ -11894,6 +11904,7 @@ VkResult VmaBlockVector::AllocateFromBlock(
if(IsCorruptionDetectionEnabled())
{
VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
+ (void) res;
VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
}
return VK_SUCCESS;
@@ -11903,7 +11914,8 @@ VkResult VmaBlockVector::AllocateFromBlock(

VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
{
- VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
+ VkMemoryAllocateInfo allocInfo = {};
+ allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
allocInfo.allocationSize = blockSize;
VkDeviceMemory mem = VK_NULL_HANDLE;
@@ -11991,7 +12003,8 @@ void VmaBlockVector::ApplyDefragmentationMovesCpu(
if(pDefragCtx->res == VK_SUCCESS)
{
const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
- VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
+ VkMappedMemoryRange memRange = {};
+ memRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;

for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
{
@@ -12076,7 +12089,8 @@ void VmaBlockVector::ApplyDefragmentationMovesGpu(

// Go over all blocks. Create and bind buffer for whole block if necessary.
{
- VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+ VkBufferCreateInfo bufCreateInfo = {};
+ bufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
VK_BUFFER_USAGE_TRANSFER_DST_BIT;

@@ -12101,8 +12115,9 @@ void VmaBlockVector::ApplyDefragmentationMovesGpu(
// Go over all moves. Post data transfer commands to command buffer.
if(pDefragCtx->res == VK_SUCCESS)
{
- const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
- VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
+ /*const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
+ VkMappedMemoryRange memRange = {};
+ memRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;*/

for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
{
@@ -12435,10 +12450,10 @@ VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
VmaAllocator hAllocator,
VmaBlockVector* pBlockVector,
uint32_t currentFrameIndex,
- bool overlappingMoveSupported) :
+ bool /*overlappingMoveSupported*/) :
VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
- m_AllAllocations(false),
m_AllocationCount(0),
+ m_AllAllocations(false),
m_BytesMoved(0),
m_AllocationsMoved(0),
m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
@@ -12813,7 +12828,7 @@ VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
- VkDeviceSize freeSpaceBlockSize = pFreeSpaceMetadata->GetSize();
+ /*VkDeviceSize freeSpaceBlockSize = pFreeSpaceMetadata->GetSize();*/

// Same block
if(freeSpaceInfoIndex == srcBlockInfoIndex)
@@ -13098,7 +13113,7 @@ VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
VmaPool hCustomPool,
VmaBlockVector* pBlockVector,
uint32_t currFrameIndex,
- uint32_t algorithmFlags) :
+ uint32_t /*algorithmFlags*/) :
res(VK_SUCCESS),
mutexLocked(false),
blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
@@ -13106,7 +13121,7 @@ VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
m_hCustomPool(hCustomPool),
m_pBlockVector(pBlockVector),
m_CurrFrameIndex(currFrameIndex),
- m_AlgorithmFlags(algorithmFlags),
+ /*m_AlgorithmFlags(algorithmFlags),*/
m_pAlgorithm(VMA_NULL),
m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
m_AllAllocations(false)
@@ -14311,19 +14326,21 @@ VkResult VmaAllocator_T::AllocateDedicatedMemory(
bool map,
bool isUserDataString,
void* pUserData,
- VkBuffer dedicatedBuffer,
- VkImage dedicatedImage,
+ VkBuffer /*dedicatedBuffer*/,
+ VkImage /*dedicatedImage*/,
size_t allocationCount,
VmaAllocation* pAllocations)
{
VMA_ASSERT(allocationCount > 0 && pAllocations);

- VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
+ VkMemoryAllocateInfo allocInfo = {};
+ allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
allocInfo.memoryTypeIndex = memTypeIndex;
allocInfo.allocationSize = size;

#if VMA_DEDICATED_ALLOCATION
- VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
+ VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = {};
+ dedicatedAllocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR;
if(m_UseKhrDedicatedAllocation)
{
if(dedicatedBuffer != VK_NULL_HANDLE)
@@ -14341,7 +14358,7 @@ VkResult VmaAllocator_T::AllocateDedicatedMemory(
#endif // #if VMA_DEDICATED_ALLOCATION

size_t allocIndex;
- VkResult res;
+ VkResult res = VK_SUCCESS;
for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
{
res = AllocateDedicatedMemoryPage(
@@ -14460,12 +14477,15 @@ void VmaAllocator_T::GetBufferMemoryRequirements(
#if VMA_DEDICATED_ALLOCATION
if(m_UseKhrDedicatedAllocation)
{
- VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
+ VkBufferMemoryRequirementsInfo2KHR memReqInfo = {};
+ memReqInfo.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR;
memReqInfo.buffer = hBuffer;

- VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
+ VkMemoryDedicatedRequirementsKHR memDedicatedReq = {};
+ memDedicatedReq.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR;

- VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
+ VkMemoryRequirements2KHR memReq2 = {};
+ memReq2.sType = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR;
memReq2.pNext = &memDedicatedReq;

(*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
@@ -14492,12 +14512,15 @@ void VmaAllocator_T::GetImageMemoryRequirements(
#if VMA_DEDICATED_ALLOCATION
if(m_UseKhrDedicatedAllocation)
{
- VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
+ VkImageMemoryRequirementsInfo2KHR memReqInfo = {};
+ memReqInfo.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR;
memReqInfo.image = hImage;

- VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
+ VkMemoryDedicatedRequirementsKHR memDedicatedReq = {};
+ memDedicatedReq.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR;

- VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
+ VkMemoryRequirements2KHR memReq2 = {};
+ memReq2.sType = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR;
memReq2.pNext = &memDedicatedReq;

(*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
@@ -14734,7 +14757,7 @@ VkResult VmaAllocator_T::ResizeAllocation(
}
else
{
- return VK_ERROR_OUT_OF_POOL_MEMORY;
+ return VkResult(-1000069000); // VK_ERROR_OUT_OF_POOL_MEMORY
}
default:
VMA_ASSERT(0);
@@ -15000,6 +15023,7 @@ void VmaAllocator_T::DestroyPool(VmaPool pool)
{
VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
+ (void) success;
VMA_ASSERT(success && "Pool not found in Allocator.");
}

@@ -15248,7 +15272,8 @@ void VmaAllocator_T::FlushOrInvalidateAllocation(

const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;

- VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
+ VkMappedMemoryRange memRange = {};
+ memRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
memRange.memory = hAllocation->GetMemory();

switch(hAllocation->GetType())
@@ -15321,6 +15346,7 @@ void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
VMA_ASSERT(pDedicatedAllocations);
bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
+ (void) success;
VMA_ASSERT(success);
}
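The patch applies two idioms throughout rather than changing behaviour: parameter names that are unused (or referenced only in asserts) are commented out, and results that feed only VMA_ASSERT get a (void) cast so release builds, where the assert compiles away, do not produce unused-variable warnings. A generic illustration of both, not taken from vk_mem_alloc.h:

    #include <cassert>

    // Unused parameter: keep the type, comment out the name.
    void removeEntry(int key, bool /*verbose*/)
    {
        bool success = (key >= 0);
        (void) success;   // referenced only by the assert below; the cast keeps -Wunused-variable quiet under NDEBUG
        assert(success && "key must be non-negative");
    }

The remaining hunks replace brace initializers such as { VK_STRUCTURE_TYPE_... } with zero-initialization plus an explicit sType assignment, presumably to avoid missing-field-initializer warnings on stricter compilers.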
src/3rdparty/VulkanMemoryAllocator/patches/0002-Fix-gcc8-warning.patch (vendored, new file, 14 lines)
@@ -0,0 +1,14 @@
diff --git a/src/3rdparty/VulkanMemoryAllocator/vk_mem_alloc.h b/src/3rdparty/VulkanMemoryAllocator/vk_mem_alloc.h
index fbe6f9e3e8..f043bdc289 100644
--- a/src/3rdparty/VulkanMemoryAllocator/vk_mem_alloc.h
+++ b/src/3rdparty/VulkanMemoryAllocator/vk_mem_alloc.h
@@ -12074,7 +12074,8 @@ void VmaBlockVector::ApplyDefragmentationMovesGpu(
const size_t blockCount = m_Blocks.size();

pDefragCtx->blockContexts.resize(blockCount);
- memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
+ for (size_t i = 0; i < blockCount; ++i)
+ pDefragCtx->blockContexts[i] = VmaBlockDefragmentationContext();

// Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
const size_t moveCount = moves.size();
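gcc 8's -Wclass-memaccess warns when memset is used to clear objects of a type with a non-trivial default constructor, which is presumably what the memset over the VmaBlockDefragmentationContext array ran into; the patch swaps it for element-wise assignment. A self-contained illustration of the same fix, using a hypothetical Ctx type rather than the real VMA struct:

    #include <cstddef>
    #include <cstring>
    #include <vector>

    struct Ctx { void* ptr = nullptr; unsigned flags = 0; }; // default member initializers => non-trivial default ctor

    void resetAll(std::vector<Ctx>& contexts)
    {
        // std::memset(contexts.data(), 0, contexts.size() * sizeof(Ctx)); // gcc 8: -Wclass-memaccess
        for (std::size_t i = 0; i < contexts.size(); ++i)
            contexts[i] = Ctx(); // value-initialize each element instead
    }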
src/3rdparty/VulkanMemoryAllocator/qt_attribution.json (vendored, new file, 16 lines)
@@ -0,0 +1,16 @@
[
    {
        "Id": "VulkanMemoryAllocator",
        "Name": "Vulkan Memory Allocator",
        "QDocModule": "qtrhi",
        "Description": "Vulkan Memory Allocator",
        "QtUsage": "Memory management for the Vulkan backend of QRhi.",

        "Homepage": "https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator",
        "Version": "2.2.0",
        "License": "MIT License",
        "LicenseId": "MIT",
        "LicenseFile": "LICENSE.txt",
        "Copyright": "Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved."
    }
]
src/3rdparty/VulkanMemoryAllocator/vk_mem_alloc.h (vendored, new file, 16790 lines)
Diff not shown here because the file is too large.