diff --git a/docs/html/allocation_annotation.html b/docs/html/allocation_annotation.html index 237c388..6ba3888 100644 --- a/docs/html/allocation_annotation.html +++ b/docs/html/allocation_annotation.html @@ -78,30 +78,30 @@ Allocation user data
MyBufferMetadata* pMetadata = CreateBufferMetadata();
VmaAllocationCreateInfo allocCreateInfo = {};
-
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
allocCreateInfo.pUserData = pMetadata;
VkBuffer buffer;
VmaAllocation allocation;
-
vmaCreateBuffer(allocator, &bufferInfo, &allocCreateInfo, &buffer, &allocation, nullptr);
-
VmaAllocationCreateInfo
Definition: vk_mem_alloc.h:1014
-
VmaAllocationCreateInfo::pUserData
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:1053
-
VmaAllocationCreateInfo::usage
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:1022
+
vmaCreateBuffer(allocator, &bufferInfo, &allocCreateInfo, &buffer, &allocation, nullptr);
+
vmaCreateBuffer
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
+
VMA_MEMORY_USAGE_GPU_ONLY
@ VMA_MEMORY_USAGE_GPU_ONLY
Definition: vk_mem_alloc.h:466
+
VmaAllocationCreateInfo
Definition: vk_mem_alloc.h:1134
+
VmaAllocationCreateInfo::pUserData
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:1173
+
VmaAllocationCreateInfo::usage
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:1142
VmaAllocation
Represents single memory allocation.
-
VMA_MEMORY_USAGE_GPU_ONLY
@ VMA_MEMORY_USAGE_GPU_ONLY
Definition: vk_mem_alloc.h:434
-
vmaCreateBuffer
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)

The pointer may be later retrieved as VmaAllocationInfo::pUserData:

VmaAllocationInfo allocInfo;
-
vmaGetAllocationInfo(allocator, allocation, &allocInfo);
+
vmaGetAllocationInfo(allocator, allocation, &allocInfo);
MyBufferMetadata* pMetadata = (MyBufferMetadata*)allocInfo.pUserData;
-
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:1142
-
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:1189
-
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation.
-

It can also be changed using function vmaSetAllocationUserData().

-

Values of (non-zero) allocations' pUserData are printed in JSON report created by vmaBuildStatsString(), in hexadecimal form.

+
vmaGetAllocationInfo
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation.
+
VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:1276
+
VmaAllocationInfo::pUserData
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:1323
+

It can also be changed using function vmaSetAllocationUserData().

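For example, a minimal sketch of replacing the stored pointer after the allocation has been created, reusing allocator, allocation, and the hypothetical CreateBufferMetadata() helper from the example above:

MyBufferMetadata* pNewMetadata = CreateBufferMetadata(); // hypothetical helper, as in the example above
// Overwrite the pointer stored inside the allocation.
vmaSetAllocationUserData(allocator, allocation, pNewMetadata);
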
+

Values of (non-zero) allocations' pUserData are printed in JSON report created by vmaBuildStatsString(), in hexadecimal form.

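As an illustration, a minimal sketch of generating and releasing such a report; passing VK_TRUE as the last argument requests the detailed map that also lists individual allocations:

char* statsString = nullptr;
vmaBuildStatsString(allocator, &statsString, VK_TRUE);
// Inspect or save the JSON text, then release it.
vmaFreeStatsString(allocator, statsString);
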
Allocation names

-

There is an alternative mode available in which the pUserData pointer is used to point to a null-terminated string, giving a name to the allocation. To use this mode, set the VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT flag in VmaAllocationCreateInfo::flags. Then pUserData passed as VmaAllocationCreateInfo::pUserData or as an argument to vmaSetAllocationUserData() must be either null or a pointer to a null-terminated string. The library creates an internal copy of the string, so the pointer you pass doesn't need to remain valid for the whole lifetime of the allocation. You can free it right after the call.

+

There is an alternative mode available in which the pUserData pointer is used to point to a null-terminated string, giving a name to the allocation. To use this mode, set the VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT flag in VmaAllocationCreateInfo::flags. Then pUserData passed as VmaAllocationCreateInfo::pUserData or as an argument to vmaSetAllocationUserData() must be either null or a pointer to a null-terminated string. The library creates an internal copy of the string, so the pointer you pass doesn't need to remain valid for the whole lifetime of the allocation. You can free it right after the call.

VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
// Fill imageInfo...
@@ -109,22 +109,22 @@ Allocation names
imageName += fileName;
VmaAllocationCreateInfo allocCreateInfo = {};
-
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
-
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT;
+
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT;
allocCreateInfo.pUserData = imageName.c_str();
VkImage image;
VmaAllocation allocation;
-
vmaCreateImage(allocator, &imageInfo, &allocCreateInfo, &image, &allocation, nullptr);
-
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:1016
-
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
-
@ VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT
Definition: vk_mem_alloc.h:518
+
vmaCreateImage(allocator, &imageInfo, &allocCreateInfo, &image, &allocation, nullptr);
+
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
+
@ VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT
Definition: vk_mem_alloc.h:550
+
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:1136

The value of the allocation's pUserData pointer will be different from the one you passed when setting the allocation's name: it points to an internally managed buffer that holds a copy of the string.

VmaAllocationInfo allocInfo;
-
vmaGetAllocationInfo(allocator, allocation, &allocInfo);
+
vmaGetAllocationInfo(allocator, allocation, &allocInfo);
const char* imageName = (const char*)allocInfo.pUserData;
printf("Image name: %s\n", imageName);
-

That string is also printed in JSON report created by vmaBuildStatsString().

+

That string is also printed in JSON report created by vmaBuildStatsString().

Note
Passing a string name to a VMA allocation doesn't automatically set it on the Vulkan buffer or image created with it. You must do that manually using an extension like VK_EXT_debug_utils, which is independent of this library.
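For reference, a minimal sketch of naming the Vulkan image itself through VK_EXT_debug_utils, assuming the extension is enabled, vkSetDebugUtilsObjectNameEXT has been loaded (e.g. via vkGetDeviceProcAddr), and device, image, and imageName come from the examples above:

VkDebugUtilsObjectNameInfoEXT nameInfo = { VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT };
nameInfo.objectType = VK_OBJECT_TYPE_IMAGE;
nameInfo.objectHandle = (uint64_t)image; // the VkImage created above
nameInfo.pObjectName = imageName.c_str(); // same string as passed to VMA
vkSetDebugUtilsObjectNameEXT(device, &nameInfo);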
diff --git a/docs/html/annotated.html b/docs/html/annotated.html index a60a1a2..ca0cb64 100644 --- a/docs/html/annotated.html +++ b/docs/html/annotated.html @@ -69,28 +69,29 @@ $(function() { - + - + - + - - - - - + + + + + +
 CVmaAllocationRepresents single memory allocation
 CVmaAllocationCreateInfo
 CVmaAllocationInfoParameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo()
 CVmaAllocationInfoParameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo()
 CVmaAllocatorRepresents main object of this library initialized
 CVmaAllocatorCreateInfoDescription of an Allocator to be created
 CVmaAllocatorInfoInformation about existing VmaAllocator object
 CVmaBudgetStatistics of current memory usage and available budget, in bytes, for specific memory heap
 CVmaDefragmentationContextOpaque object that represents a started defragmentation process
 CVmaDefragmentationInfoDeprecated. Optional configuration parameters to be passed to function vmaDefragment()
 CVmaDefragmentationInfoDeprecated. Optional configuration parameters to be passed to function vmaDefragment()
 CVmaDefragmentationInfo2Parameters for defragmentation
 CVmaDefragmentationPassInfoParameters for incremental defragmentation steps
 CVmaDefragmentationPassMoveInfo
 CVmaDefragmentationStatsStatistics returned by function vmaDefragment()
 CVmaDefragmentationStatsStatistics returned by function vmaDefragment()
 CVmaDeviceMemoryCallbacksSet of callbacks that the library will call for vkAllocateMemory and vkFreeMemory
 CVmaPoolRepresents custom memory pool
 CVmaPoolCreateInfoDescribes parameter of created VmaPool
 CVmaPoolStatsDescribes parameter of existing VmaPool
 CVmaStatInfoCalculated statistics of memory usage in entire allocator
 CVmaStatsGeneral statistics from current state of Allocator
 CVmaVirtualAllocationCreateInfoParameters of created virtual allocation to be passed to vmaVirtualAllocate()
 CVmaVirtualAllocationInfoParameters of an existing virtual allocation, returned by vmaGetVirtualAllocationInfo()
 CVmaVirtualBlockHandle to a virtual block object that allows to use core allocation algorithm without allocating any real GPU memory
 CVmaVirtualBlockCreateInfoParameters of created VmaVirtualBlock object to be passed to vmaCreateVirtualBlock()
 CVmaVulkanFunctionsPointers to some Vulkan functions - a subset used by the library
 CVmaVirtualAllocationRepresents single memory allocation done inside VmaVirtualBlock
 CVmaVirtualAllocationCreateInfoParameters of created virtual allocation to be passed to vmaVirtualAllocate()
 CVmaVirtualAllocationInfoParameters of an existing virtual allocation, returned by vmaGetVirtualAllocationInfo()
 CVmaVirtualBlockHandle to a virtual block object that allows to use core allocation algorithm without allocating any real GPU memory
 CVmaVirtualBlockCreateInfoParameters of created VmaVirtualBlock object to be passed to vmaCreateVirtualBlock()
 CVmaVulkanFunctionsPointers to some Vulkan functions - a subset used by the library
diff --git a/docs/html/choosing_memory_type.html b/docs/html/choosing_memory_type.html index a63e187..6dfcd86 100644 --- a/docs/html/choosing_memory_type.html +++ b/docs/html/choosing_memory_type.html @@ -71,48 +71,48 @@ $(function() {

Physical devices in Vulkan support various combinations of memory heaps and types. Helping you choose the correct and optimal memory type for your specific resource is one of the key features of this library. You can use it by filling the appropriate members of the VmaAllocationCreateInfo structure, as described below. You can also combine multiple methods.

    -
  1. If you just want to find memory type index that meets your requirements, you can use function: vmaFindMemoryTypeIndex(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo().
  2. -
  3. If you want to allocate a region of device memory without association with any specific image or buffer, you can use function vmaAllocateMemory(). Usage of this function is not recommended and usually not needed. vmaAllocateMemoryPages() function is also provided for creating multiple allocations at once, which may be useful for sparse binding.
  4. -
  5. If you already have a buffer or an image created, you want to allocate memory for it and then you will bind it yourself, you can use function vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage(). For binding you should use functions: vmaBindBufferMemory(), vmaBindImageMemory() or their extended versions: vmaBindBufferMemory2(), vmaBindImageMemory2().
  6. -
  7. If you want to create a buffer or an image, allocate memory for it and bind them together, all in one call, you can use function vmaCreateBuffer(), vmaCreateImage(). This is the easiest and recommended way to use this library.
  8. +
  9. If you just want to find memory type index that meets your requirements, you can use function: vmaFindMemoryTypeIndex(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo().
  10. +
  11. If you want to allocate a region of device memory without association with any specific image or buffer, you can use function vmaAllocateMemory(). Usage of this function is not recommended and usually not needed. vmaAllocateMemoryPages() function is also provided for creating multiple allocations at once, which may be useful for sparse binding.
  12. +
  13. If you already have a buffer or an image created, you want to allocate memory for it and then you will bind it yourself, you can use function vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage(). For binding you should use functions: vmaBindBufferMemory(), vmaBindImageMemory() or their extended versions: vmaBindBufferMemory2(), vmaBindImageMemory2().
  14. +
  15. If you want to create a buffer or an image, allocate memory for it and bind them together, all in one call, you can use function vmaCreateBuffer(), vmaCreateImage(). This is the easiest and recommended way to use this library.

When using method 3 or 4, the library internally queries Vulkan for the memory types supported for that buffer or image (function vkGetBufferMemoryRequirements()) and uses only one of these types.

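As an illustration of method 3 above, a minimal sketch, assuming a VkBuffer buf has already been created with vkCreateBuffer():

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
VmaAllocation alloc;
// Allocate memory suitable for the existing buffer...
vmaAllocateMemoryForBuffer(allocator, buf, &allocCreateInfo, &alloc, nullptr);
// ...then bind it to the buffer yourself.
vmaBindBufferMemory(allocator, alloc, buf);
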
If no memory type can be found that meets all the requirements, these functions return VK_ERROR_FEATURE_NOT_PRESENT.

You can leave VmaAllocationCreateInfo structure completely filled with zeros. It means no requirements are specified for memory type. It is valid, although not very useful.

Usage

-

The easiest way to specify memory requirements is to fill member VmaAllocationCreateInfo::usage using one of the values of enum VmaMemoryUsage. It defines high level, common usage types. For more details, see description of this enum.

+

The easiest way to specify memory requirements is to fill member VmaAllocationCreateInfo::usage using one of the values of enum VmaMemoryUsage. It defines high level, common usage types. For more details, see description of this enum.

For example, if you want to create a uniform buffer that will be filled using transfer only once or infrequently and used for rendering every frame, you can do it using following code:

VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufferInfo.size = 65536;
bufferInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
VmaAllocationCreateInfo allocInfo = {};
- +
VkBuffer buffer;
VmaAllocation allocation;
-
vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
-
Definition: vk_mem_alloc.h:1014
-
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:1022
+
vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
+
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
+
@ VMA_MEMORY_USAGE_GPU_ONLY
Definition: vk_mem_alloc.h:466
+
Definition: vk_mem_alloc.h:1134
+
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:1142
Represents single memory allocation.
-
@ VMA_MEMORY_USAGE_GPU_ONLY
Definition: vk_mem_alloc.h:434
-
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)

Required and preferred flags

You can specify more detailed requirements by filling members VmaAllocationCreateInfo::requiredFlags and VmaAllocationCreateInfo::preferredFlags with a combination of bits from enum VkMemoryPropertyFlags. For example, if you want to create a buffer that will be persistently mapped on host (so it must be HOST_VISIBLE) and preferably will also be HOST_COHERENT and HOST_CACHED, use following code:

VmaAllocationCreateInfo allocInfo = {};
allocInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
allocInfo.preferredFlags = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
- +
VkBuffer buffer;
VmaAllocation allocation;
-
vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
-
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:1032
-
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:1027
-
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:1016
-
@ VMA_ALLOCATION_CREATE_MAPPED_BIT
Set this flag to use a memory that will be persistently mapped and retrieve pointer to it.
Definition: vk_mem_alloc.h:508
+
vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
+
@ VMA_ALLOCATION_CREATE_MAPPED_BIT
Set this flag to use a memory that will be persistently mapped and retrieve pointer to it.
Definition: vk_mem_alloc.h:540
+
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:1152
+
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:1147
+
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:1136

A memory type is chosen that has all the required flags and as many preferred flags set as possible.

If you use VmaAllocationCreateInfo::usage, it is just internally converted to a set of required and preferred flags.

@@ -126,14 +126,14 @@ Explicit memory types

VkBuffer buffer;
VmaAllocation allocation;
-
vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
-
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:1040
+
vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
+
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:1160

Custom memory pools

If you allocate from a custom memory pool, none of the ways of specifying memory requirements described above apply, and the aforementioned members of the VmaAllocationCreateInfo structure are ignored. The memory type is selected explicitly when creating the pool and is then used for all allocations made from that pool. For further details, see Custom memory pools.

Dedicated allocations

-

Memory for allocations is reserved out of a larger block of VkDeviceMemory allocated from Vulkan internally. That is the main feature of this whole library. You can still request a separate memory block to be created for an allocation, just as you would in a trivial solution that doesn't use any allocator. In that case, a buffer or image is always bound to that memory at offset 0. This is called a "dedicated allocation". You can explicitly request it by using the flag VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. The library can also internally decide to use a dedicated allocation in some cases, e.g.:

+

Memory for allocations is reserved out of a larger block of VkDeviceMemory allocated from Vulkan internally. That is the main feature of this whole library. You can still request a separate memory block to be created for an allocation, just as you would in a trivial solution that doesn't use any allocator. In that case, a buffer or image is always bound to that memory at offset 0. This is called a "dedicated allocation". You can explicitly request it by using the flag VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. The library can also internally decide to use a dedicated allocation in some cases, e.g.:

diff --git a/docs/html/custom_memory_pools.html b/docs/html/custom_memory_pools.html index 7e31861..54c3ce1 100644 --- a/docs/html/custom_memory_pools.html +++ b/docs/html/custom_memory_pools.html @@ -81,7 +81,7 @@ $(function() {

To use custom memory pools:

  1. Fill VmaPoolCreateInfo structure.
  2. -
  3. Call vmaCreatePool() to obtain VmaPool handle.
  4. +
  5. Call vmaCreatePool() to obtain VmaPool handle.
  6. When making an allocation, set VmaAllocationCreateInfo::pool to this handle. You don't need to specify any other parameters of this structure, like usage.

Example:

@@ -92,7 +92,7 @@ $(function() {
poolCreateInfo.maxBlockCount = 2;
VmaPool pool;
-
vmaCreatePool(allocator, &poolCreateInfo, &pool);
+
vmaCreatePool(allocator, &poolCreateInfo, &pool);
// Allocate a buffer out of it.
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
@@ -105,42 +105,42 @@ $(function() {
VkBuffer buf;
VmaAllocation alloc;
VmaAllocationInfo allocInfo;
-
vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
-
VmaAllocationCreateInfo
Definition: vk_mem_alloc.h:1014
-
VmaAllocationCreateInfo::pool
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:1046
+
vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
+
vmaCreatePool
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
+
vmaCreateBuffer
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
+
VmaAllocationCreateInfo
Definition: vk_mem_alloc.h:1134
+
VmaAllocationCreateInfo::pool
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:1166
VmaAllocation
Represents single memory allocation.
-
VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:1142
-
VmaPoolCreateInfo
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:1065
-
VmaPoolCreateInfo::memoryTypeIndex
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:1068
-
VmaPoolCreateInfo::maxBlockCount
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:1094
+
VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:1276
+
VmaPoolCreateInfo
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:1185
+
VmaPoolCreateInfo::memoryTypeIndex
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:1188
+
VmaPoolCreateInfo::maxBlockCount
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:1214
VmaPool
Represents custom memory pool.
-
vmaCreatePool
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
-
vmaCreateBuffer
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)

You have to free all allocations made from this pool before destroying it.

-
vmaDestroyBuffer(allocator, buf, alloc);
-
vmaDestroyPool(allocator, pool);
-
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
-
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
-

New versions of this library support creating dedicated allocations in custom pools. It is supported only when VmaPoolCreateInfo::blockSize = 0. To use this feature, set VmaAllocationCreateInfo::pool to the pointer to your custom pool and VmaAllocationCreateInfo::flags to VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.

+
vmaDestroyBuffer(allocator, buf, alloc);
+
vmaDestroyPool(allocator, pool);
+
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
+
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
+

New versions of this library support creating dedicated allocations in custom pools. It is supported only when VmaPoolCreateInfo::blockSize = 0. To use this feature, set VmaAllocationCreateInfo::pool to the pointer to your custom pool and VmaAllocationCreateInfo::flags to VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.

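A minimal sketch of such a request, assuming pool was created with VmaPoolCreateInfo::blockSize left at 0 and bufCreateInfo is filled as in the example above:

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.pool = pool;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; // request a separate VkDeviceMemory block
VkBuffer buf;
VmaAllocation alloc;
vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
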
Choosing memory type index

-

When creating a pool, you must explicitly specify memory type index. To find the one suitable for your buffers or images, you can use helper functions vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo(). You need to provide structures with example parameters of buffers or images that you are going to create in that pool.

+

When creating a pool, you must explicitly specify memory type index. To find the one suitable for your buffers or images, you can use helper functions vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo(). You need to provide structures with example parameters of buffers or images that you are going to create in that pool.

VkBufferCreateInfo exampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
exampleBufCreateInfo.size = 1024; // Whatever.
exampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; // Change if needed.
VmaAllocationCreateInfo allocCreateInfo = {};
-
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY; // Change if needed.
+
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY; // Change if needed.
uint32_t memTypeIndex;
-
vmaFindMemoryTypeIndexForBufferInfo(allocator, &exampleBufCreateInfo, &allocCreateInfo, &memTypeIndex);
+
vmaFindMemoryTypeIndexForBufferInfo(allocator, &exampleBufCreateInfo, &allocCreateInfo, &memTypeIndex);
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
// ...
-
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:1022
-
@ VMA_MEMORY_USAGE_GPU_ONLY
Definition: vk_mem_alloc.h:434
-
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
+
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
+
@ VMA_MEMORY_USAGE_GPU_ONLY
Definition: vk_mem_alloc.h:466
+
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:1142

When creating buffers/images allocated in that pool, provide following parameters:

-

To make an allocation from the upper stack, add the flag VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT to VmaAllocationCreateInfo::flags.

+

To make an allocation from the upper stack, add the flag VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT to VmaAllocationCreateInfo::flags.

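For example, a minimal sketch of requesting an upper-stack allocation, assuming pool was created with VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT and bufCreateInfo is filled as needed:

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.pool = pool; // linear-algorithm pool
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT; // allocate from the upper stack
VkBuffer buf;
VmaAllocation alloc;
vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
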
Double stack

Double stack is available only in pools with one memory block - VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise behavior is undefined.

When the two stacks' ends meet so there is not enough space between them for a new allocation, such allocation fails with usual VK_ERROR_OUT_OF_DEVICE_MEMORY error.

@@ -184,7 +184,7 @@ Buddy allocation algorithm

There is another allocation algorithm that can be used with custom pools, called "buddy". Its internal data structure is based on a binary tree of blocks, each having a size that is a power of two and half of its parent's size. When you want to allocate memory of a certain size, a free node in the tree is located. If it is too large, it is recursively split into two halves (called "buddies"). However, if the requested allocation size is not a power of two, the size of the allocation is aligned up to the nearest power of two and the remaining space is wasted. When two buddy nodes become free, they are merged back into one larger node.

Buddy allocator

The advantage of the buddy allocation algorithm over the default algorithm is faster allocation and deallocation, as well as less external fragmentation. The disadvantage is more wasted space (internal fragmentation). For more information, search the Internet for "buddy memory allocation"; many sources describe this concept in general.

-

To use the buddy allocation algorithm with a custom pool, add the flag VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT to VmaPoolCreateInfo::flags when creating the VmaPool object.

+

To use the buddy allocation algorithm with a custom pool, add the flag VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT to VmaPoolCreateInfo::flags when creating the VmaPool object.

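For example, a minimal sketch of creating such a pool, assuming memTypeIndex was found as shown earlier:

VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.flags = VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT; // use the buddy allocation algorithm
VmaPool pool;
vmaCreatePool(allocator, &poolCreateInfo, &pool);
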
Several limitations apply to pools that use buddy algorithm:

If you enabled these extensions:

-

2. Use the VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag when creating your VmaAllocator to inform the library that you enabled the required extensions and you want the library to use them.

-
+

2. Use the VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag when creating your VmaAllocator to inform the library that you enabled the required extensions and you want the library to use them.

+
-
vmaCreateAllocator(&allocatorInfo, &allocator);
-
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
-
@ VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:326
-

That is all. The extension will be automatically used whenever you create a buffer using vmaCreateBuffer() or image using vmaCreateImage().

+
vmaCreateAllocator(&allocatorInfo, &allocator);
+
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates VmaAllocator object.
+
@ VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:350
+

That is all. The extension will be automatically used whenever you create a buffer using vmaCreateBuffer() or image using vmaCreateImage().

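For context, a minimal sketch of step 2 above, assuming allocatorInfo is a VmaAllocatorCreateInfo already filled with the instance, physical device, and device handles:

allocatorInfo.flags |= VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT; // the required extensions are enabled by the application
VmaAllocator allocator;
vmaCreateAllocator(&allocatorInfo, &allocator);
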
When using the extension together with Vulkan Validation Layer, you will receive warnings like this:

vkBindBufferMemory(): Binding memory to buffer 0x33 but vkGetBufferMemoryRequirements() has not been called on that buffer.
 

This warning is harmless and can be ignored. It appears because the library uses vkGetBufferMemoryRequirements2KHR() instead of the standard vkGetBufferMemoryRequirements(), which the validation layer seems to be unaware of.

To learn more about this extension, see:

diff --git a/include/vk_mem_alloc.h b/include/vk_mem_alloc.h index ac1701a..3d6e42d 100644 --- a/include/vk_mem_alloc.h +++ b/include/vk_mem_alloc.h @@ -30,7 +30,7 @@ Copyright (c) 2017-2021 Advanced Micro Devices, Inc. All rights reserved. \n License: MIT -Documentation of all members: vk_mem_alloc.h +API documentation divided into groups: [Modules](modules.html) \section main_table_of_contents Table of contents @@ -101,8 +101,27 @@ Documentation of all members: vk_mem_alloc.h - [Product page on GPUOpen](https://gpuopen.com/gaming-product/vulkan-memory-allocator/) - [Source repository on GitHub](https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator) + +\defgroup group_init Library initialization + +\brief API elements related to the initialization and management of the entire library, especially #VmaAllocator object. + +\defgroup group_alloc Memory allocation + +\brief API elements related to the allocation, deallocation, and management of Vulkan memory, buffers, images. +Most basic ones being: vmaCreateBuffer(), vmaCreateImage(). + +\defgroup group_virtual Virtual allocator + +\brief API elements related to the mechanism of \ref virtual_allocator - using the core allocation algorithm +for user-defined purpose without allocating any real GPU memory. + +\defgroup group_stats Statistics + +\brief API elements that query current status of the allocator, from memory usage, budget, to full dump of the internal state in JSON format. */ + #ifdef __cplusplus extern "C" { #endif @@ -291,6 +310,11 @@ extern "C" { // Sections for managing code placement in file, only for development purposes e.g. for convenient folding inside an IDE. #ifndef _VMA_ENUM_DECLARATIONS +/** +\addtogroup group_init +@{ +*/ + /// Flags for created #VmaAllocator. typedef enum VmaAllocatorCreateFlagBits { @@ -409,6 +433,14 @@ typedef enum VmaAllocatorCreateFlagBits } VmaAllocatorCreateFlagBits; typedef VkFlags VmaAllocatorCreateFlags; +/** @} */ + +/** +\addtogroup group_alloc +@{ +*/ + +/// \brief Intended usage of the allocated memory. typedef enum VmaMemoryUsage { /** No intended memory usage specified. @@ -622,11 +654,24 @@ typedef enum VmaPoolCreateFlagBits */ VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT = 0x00000008, + /** \brief Enables alternative, Two-Level Segregated Fit (TLSF) allocation algorithm in this pool. + + This algorithm is based on 2-level lists dividing address space into smaller + chunks. The first level is aligned to power of two which serves as buckets for requested + memory to fall into, and the second level is lineary subdivided into lists of free memory. + This algorithm aims to achieve bounded response time even in the worst case scenario. + Allocation time can be sometimes slightly longer than compared to other algorithms + but in return the application can avoid stalls in case of fragmentation, giving + predictable results, suitable for real-time use cases. + */ + VMA_POOL_CREATE_TLSF_ALGORITHM_BIT = 0x00000010, + /** Bit mask to extract only `ALGORITHM` bits from entire set of flags. */ VMA_POOL_CREATE_ALGORITHM_MASK = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT | - VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT, + VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT | + VMA_POOL_CREATE_TLSF_ALGORITHM_BIT, VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VmaPoolCreateFlagBits; @@ -641,6 +686,13 @@ typedef enum VmaDefragmentationFlagBits } VmaDefragmentationFlagBits; typedef VkFlags VmaDefragmentationFlags; +/** @} */ + +/** +\addtogroup group_virtual +@{ +*/ + /// Flags to be passed as VmaVirtualBlockCreateInfo::flags. 
typedef enum VmaVirtualBlockCreateFlagBits { @@ -667,11 +719,24 @@ typedef enum VmaVirtualBlockCreateFlagBits */ VMA_VIRTUAL_BLOCK_CREATE_BUDDY_ALGORITHM_BIT = 0x00000002, + /** \brief Enables alternative, TLSF allocation algorithm in virtual block. + + This algorithm is based on 2-level lists dividing address space into smaller + chunks. The first level is aligned to power of two which serves as buckets for requested + memory to fall into, and the second level is lineary subdivided into lists of free memory. + This algorithm aims to achieve bounded response time even in the worst case scenario. + Allocation time can be sometimes slightly longer than compared to other algorithms + but in return the application can avoid stalls in case of fragmentation, giving + predictable results, suitable for real-time use cases. + */ + VMA_VIRTUAL_BLOCK_CREATE_TLSF_ALGORITHM_BIT = 0x00000004, + /** \brief Bit mask to extract only `ALGORITHM` bits from entire set of flags. */ VMA_VIRTUAL_BLOCK_CREATE_ALGORITHM_MASK = VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT | - VMA_VIRTUAL_BLOCK_CREATE_BUDDY_ALGORITHM_BIT, + VMA_VIRTUAL_BLOCK_CREATE_BUDDY_ALGORITHM_BIT | + VMA_VIRTUAL_BLOCK_CREATE_TLSF_ALGORITHM_BIT, VMA_VIRTUAL_BLOCK_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VmaVirtualBlockCreateFlagBits; @@ -706,10 +771,16 @@ typedef enum VmaVirtualAllocationCreateFlagBits /// Flags to be passed as VmaVirtualAllocationCreateInfo::flags. See #VmaVirtualAllocationCreateFlagBits. typedef VkFlags VmaVirtualAllocationCreateFlags; +/** @} */ + #endif // _VMA_ENUM_DECLARATIONS #ifndef _VMA_DATA_TYPES_DECLARATIONS +/** +\addtogroup group_init +@{ */ + /** \struct VmaAllocator \brief Represents main object of this library initialized. @@ -721,6 +792,13 @@ right after Vulkan is initialized and keep it alive until before Vulkan device i */ VK_DEFINE_HANDLE(VmaAllocator) +/** @} */ + +/** +\addtogroup group_alloc +@{ +*/ + /** \struct VmaPool \brief Represents custom memory pool @@ -762,6 +840,27 @@ Call function vmaDefragmentationEnd() to destroy it. */ VK_DEFINE_HANDLE(VmaDefragmentationContext) +/** @} */ + +/** +\addtogroup group_virtual +@{ +*/ + +/** \struct VmaVirtualAllocation +\brief Represents single memory allocation done inside VmaVirtualBlock. + +Use it as a unique identifier to virtual allocation within the single block. +*/ +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VmaVirtualAllocation); + +/** @} */ + +/** +\addtogroup group_virtual +@{ +*/ + /** \struct VmaVirtualBlock \brief Handle to a virtual block object that allows to use core allocation algorithm without allocating any real GPU memory. @@ -772,6 +871,13 @@ This object is not thread-safe - should not be used from multiple threads simult */ VK_DEFINE_HANDLE(VmaVirtualBlock) +/** @} */ + +/** +\addtogroup group_init +@{ +*/ + /// Callback function called after successful vkAllocateMemory. typedef void (VKAPI_PTR* PFN_vmaAllocateDeviceMemoryFunction)( VmaAllocator VMA_NOT_NULL allocator, @@ -948,6 +1054,13 @@ typedef struct VmaAllocatorInfo VkDevice VMA_NOT_NULL device; } VmaAllocatorInfo; +/** @} */ + +/** +\addtogroup group_stats +@{ +*/ + /// Calculated statistics of memory usage in entire allocator. typedef struct VmaStatInfo { @@ -1010,6 +1123,13 @@ typedef struct VmaBudget VkDeviceSize budget; } VmaBudget; +/** @} */ + +/** +\addtogroup group_alloc +@{ +*/ + typedef struct VmaAllocationCreateInfo { /// Use #VmaAllocationCreateFlagBits enum. 
@@ -1117,6 +1237,13 @@ typedef struct VmaPoolCreateInfo void* VMA_NULLABLE pMemoryAllocateNext; } VmaPoolCreateInfo; +/** @} */ + +/** +\addtogroup group_stats +@{ +*/ + /// Describes parameter of existing #VmaPool. typedef struct VmaPoolStats { @@ -1137,6 +1264,13 @@ typedef struct VmaPoolStats size_t blockCount; } VmaPoolStats; +/** @} */ + +/** +\addtogroup group_alloc +@{ +*/ + /// Parameters of #VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo(). typedef struct VmaAllocationInfo { @@ -1313,6 +1447,13 @@ typedef struct VmaDefragmentationStats uint32_t deviceMemoryBlocksFreed; } VmaDefragmentationStats; +/** @} */ + +/** +\addtogroup group_virtual +@{ +*/ + /// Parameters of created #VmaVirtualBlock object to be passed to vmaCreateVirtualBlock(). typedef struct VmaVirtualBlockCreateInfo { @@ -1360,6 +1501,11 @@ typedef struct VmaVirtualAllocationCreateInfo /// Parameters of an existing virtual allocation, returned by vmaGetVirtualAllocationInfo(). typedef struct VmaVirtualAllocationInfo { + /** \brief Offset of the allocation. + + Offset at which the allocation was made. + */ + VkDeviceSize offset; /** \brief Size of the allocation. Same value as passed in VmaVirtualAllocationCreateInfo::size. @@ -1372,11 +1518,18 @@ typedef struct VmaVirtualAllocationInfo void* VMA_NULLABLE pUserData; } VmaVirtualAllocationInfo; +/** @} */ + #endif // _VMA_DATA_TYPES_DECLARATIONS #ifndef _VMA_FUNCTION_HEADERS -/// Creates Allocator object. +/** +\addtogroup group_init +@{ +*/ + +/// Creates #VmaAllocator object. VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator( const VmaAllocatorCreateInfo* VMA_NOT_NULL pCreateInfo, VmaAllocator VMA_NULLABLE* VMA_NOT_NULL pAllocator); @@ -1427,6 +1580,13 @@ VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex( VmaAllocator VMA_NOT_NULL allocator, uint32_t frameIndex); +/** @} */ + +/** +\addtogroup group_stats +@{ +*/ + /** \brief Retrieves statistics from current state of the Allocator. This function is called "calculate" not "get" because it has to traverse all @@ -1455,6 +1615,13 @@ VMA_CALL_PRE void VMA_CALL_POST vmaGetHeapBudgets( VmaAllocator VMA_NOT_NULL allocator, VmaBudget* VMA_NOT_NULL pBudgets); +/** @} */ + +/** +\addtogroup group_alloc +@{ +*/ + /** \brief Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo. @@ -1530,6 +1697,13 @@ VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool( VmaAllocator VMA_NOT_NULL allocator, VmaPool VMA_NULLABLE pool); +/** @} */ + +/** +\addtogroup group_stats +@{ +*/ + /** \brief Retrieves statistics of existing #VmaPool object. \param allocator Allocator object. @@ -1541,6 +1715,13 @@ VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats( VmaPool VMA_NOT_NULL pool, VmaPoolStats* VMA_NOT_NULL pPoolStats); +/** @} */ + +/** +\addtogroup group_alloc +@{ +*/ + /** \brief Checks magic number in margins around all allocations in given memory pool in search for corruptions. Corruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero, @@ -2162,6 +2343,13 @@ VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage( VkImage VMA_NULLABLE_NON_DISPATCHABLE image, VmaAllocation VMA_NULLABLE allocation); +/** @} */ + +/** +\addtogroup group_virtual +@{ +*/ + /** \brief Creates new #VmaVirtualBlock object. \param pCreateInfo Parameters for creation. 
@@ -2192,7 +2380,7 @@ VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaIsVirtualBlockEmpty( */ VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo( VmaVirtualBlock VMA_NOT_NULL virtualBlock, - VkDeviceSize offset, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo); + VmaVirtualAllocation VMA_NOT_NULL allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo); /** \brief Allocates new virtual allocation inside given #VmaVirtualBlock. @@ -2201,17 +2389,23 @@ Virtual allocations within a specific virtual block are uniquely identified by t If the allocation fails due to not enough free space available, `VK_ERROR_OUT_OF_DEVICE_MEMORY` is returned (despite the function doesn't ever allocate actual GPU memory). + +\param virtualBlock Virtual block +\param pCreateInfo Parameters for the allocation +\param[out] pAllocation Returned handle of the new allocation +\param[out] pOffset Returned offset of the new allocation. Optional, can be null. */ VMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate( VmaVirtualBlock VMA_NOT_NULL virtualBlock, const VmaVirtualAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, - VkDeviceSize* VMA_NOT_NULL pOffset); + VmaVirtualAllocation* VMA_NOT_NULL pAllocation, + VkDeviceSize* VMA_NULLABLE pOffset); /** \brief Frees virtual allocation inside given #VmaVirtualBlock. */ VMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree( VmaVirtualBlock VMA_NOT_NULL virtualBlock, - VkDeviceSize offset); + VmaVirtualAllocation VMA_NULLABLE allocation); /** \brief Frees all virtual allocations inside given #VmaVirtualBlock. @@ -2228,7 +2422,7 @@ VMA_CALL_PRE void VMA_CALL_POST vmaClearVirtualBlock( */ VMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData( VmaVirtualBlock VMA_NOT_NULL virtualBlock, - VkDeviceSize offset, + VmaVirtualAllocation VMA_NOT_NULL allocation, void* VMA_NULLABLE pUserData); /** \brief Calculates and returns statistics about virtual allocations and memory usage in given #VmaVirtualBlock. @@ -2237,6 +2431,13 @@ VMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStats( VmaVirtualBlock VMA_NOT_NULL virtualBlock, VmaStatInfo* VMA_NOT_NULL pStatInfo); +/** @} */ + +/** +\addtogroup group_stats +@{ +*/ + /** \brief Builds and returns a null-terminated string in JSON format with information about given #VmaVirtualBlock. \param virtualBlock Virtual block. \param[out] ppStatsString Returned string. @@ -2270,6 +2471,8 @@ VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString( char* VMA_NULLABLE pStatsString); #endif // VMA_STATS_STRING_ENABLED +/** @} */ + #endif // _VMA_FUNCTION_HEADERS #ifdef __cplusplus @@ -2469,6 +2672,16 @@ static void vma_aligned_free(void* VMA_NULLABLE ptr) #endif #endif +#ifndef VMA_BITSCAN_LSB + // Scans integer for index of first nonzero value from the Least Significant Bit (LSB). If mask is 0 then returns UINT8_MAX + #define VMA_BITSCAN_LSB(mask) VmaBitScanLSB(mask) +#endif + +#ifndef VMA_BITSCAN_MSB + // Scans integer for index of first nonzero value from the Most Significant Bit (MSB). If mask is 0 then returns UINT8_MAX + #define VMA_BITSCAN_MSB(mask) VmaBitScanMSB(mask) +#endif + #ifndef VMA_MIN #define VMA_MIN(v1, v2) ((std::min)((v1), (v2))) #endif @@ -2740,6 +2953,7 @@ enum VMA_CACHE_OPERATION enum class VmaAllocationRequestType { Normal, + TLSF, // Used by "Linear" algorithm. 
UpperAddress, EndOf1st, @@ -2749,6 +2963,9 @@ enum class VmaAllocationRequestType #endif // _VMA_ENUM_DECLARATIONS #ifndef _VMA_FORWARD_DECLARATIONS +// Opaque handle used by allocation algorithms to identify single allocation in any conforming way. +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VmaAllocHandle); + struct VmaMutexLock; struct VmaMutexLockRead; struct VmaMutexLockWrite; @@ -2814,6 +3031,7 @@ class VmaBlockMetadata; class VmaBlockMetadata_Generic; class VmaBlockMetadata_Linear; class VmaBlockMetadata_Buddy; +class VmaBlockMetadata_TLSF; class VmaBlockVector; @@ -2846,6 +3064,74 @@ static inline uint32_t VmaCountBitsSet(uint32_t v) return c; } +static inline uint8_t VmaBitScanLSB(uint64_t mask) +{ +#ifdef _MSC_VER + DWORD pos; + if (_BitScanForward64(&pos, mask)) + return static_cast(pos); +#else + uint8_t pos = 0; + do + { + if (mask & (1ULL << pos)) + return pos; + } while (pos++ < 63); +#endif + return UINT8_MAX; +} + +static inline uint8_t VmaBitScanLSB(uint32_t mask) +{ +#ifdef _MSC_VER + DWORD pos; + if (_BitScanForward(&pos, mask)) + return static_cast(pos); +#else + uint8_t pos = 0; + do + { + if (mask & (1UL << pos)) + return pos; + } while (pos++ < 31); +#endif + return UINT8_MAX; +} + +static inline uint8_t VmaBitScanMSB(uint64_t mask) +{ +#ifdef _MSC_VER + DWORD pos; + if (_BitScanReverse64(&pos, mask)) + return static_cast(pos); +#else + uint8_t pos = 63; + do + { + if (mask & (1ULL << pos)) + return pos; + } while (pos-- > 0); +#endif + return UINT8_MAX; +} + +static inline uint8_t VmaBitScanMSB(uint32_t mask) +{ +#ifdef _MSC_VER + DWORD pos; + if (_BitScanReverse(&pos, mask)) + return static_cast(pos); +#else + uint8_t pos = 31; + do + { + if (mask & (1UL << pos)) + return pos; + } while (pos-- > 0); +#endif + return UINT8_MAX; +} + /* Returns true if given number is a power of two. T must be unsigned integer number or signed integer but always nonnegative. @@ -2882,6 +3168,13 @@ static inline T VmaRoundDiv(T x, T y) return (x + (y / (T)2)) / y; } +// Divide by 'y' and round up to nearest integer. +template +static inline T VmaDivideRoundingUp(T x, T y) +{ + return (x + y - (T)1) / y; +} + // Returns smallest power of 2 greater or equal to v. static inline uint32_t VmaNextPow2(uint32_t v) { @@ -2946,6 +3239,8 @@ static const char* VmaAlgorithmToStr(uint32_t algorithm) return "Linear"; case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT: return "Buddy"; + case VMA_POOL_CREATE_TLSF_ALGORITHM_BIT: + return "TLSF"; case 0: return "Default"; default: @@ -5249,7 +5544,8 @@ public: VkDeviceMemory newMemory, VkDeviceSize newSize, uint32_t id, - uint32_t algorithm); + uint32_t algorithm, + VkDeviceSize bufferImageGranularity); // Always call before destruction. 
void Destroy(VmaAllocator allocator); @@ -5323,7 +5619,7 @@ public: void InitBlockAllocation( VmaDeviceMemoryBlock* block, - VkDeviceSize offset, + VmaAllocHandle allocHandle, VkDeviceSize alignment, VkDeviceSize size, uint32_t memoryTypeIndex, @@ -5350,8 +5646,9 @@ public: bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; } void SetUserData(VmaAllocator hAllocator, void* pUserData); - void ChangeBlockAllocation(VmaAllocator hAllocator, VmaDeviceMemoryBlock* block, VkDeviceSize offset); - void ChangeOffset(VkDeviceSize newOffset); + void ChangeBlockAllocation(VmaAllocator hAllocator, VmaDeviceMemoryBlock* block, VmaAllocHandle allocHandle); + void ChangeAllocHandle(VmaAllocHandle newAllocHandle); + VmaAllocHandle GetAllocHandle() const; VkDeviceSize GetOffset() const; VmaPool GetParentPool() const; VkDeviceMemory GetMemory() const; @@ -5376,7 +5673,7 @@ private: struct BlockAllocation { VmaDeviceMemoryBlock* m_Block; - VkDeviceSize m_Offset; + VmaAllocHandle m_AllocHandle; }; // Allocation for an object that has its own private VkDeviceMemory. struct DedicatedAllocation @@ -5618,10 +5915,11 @@ item points to a FREE suballocation. */ struct VmaAllocationRequest { - VkDeviceSize offset; + VmaAllocHandle allocHandle; VkDeviceSize size; VmaSuballocationList::iterator item; void* customData; + uint64_t algorithmData; VmaAllocationRequestType type; }; #endif // _VMA_ALLOCATION_REQUEST @@ -5635,7 +5933,8 @@ class VmaBlockMetadata { public: // pAllocationCallbacks, if not null, must be owned externally - alive and unchanged for the whole lifetime of this object. - VmaBlockMetadata(const VkAllocationCallbacks* pAllocationCallbacks, bool isVirtual); + VmaBlockMetadata(const VkAllocationCallbacks* pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual); virtual ~VmaBlockMetadata() = default; virtual void Init(VkDeviceSize size) { m_Size = size; } @@ -5648,7 +5947,8 @@ public: virtual VkDeviceSize GetSumFreeSize() const = 0; // Returns true if this block is empty - contains only single free suballocation. virtual bool IsEmpty() const = 0; - virtual void GetAllocationInfo(VkDeviceSize offset, VmaVirtualAllocationInfo& outInfo) = 0; + virtual void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) = 0; + virtual VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const = 0; // Must set blockCount to 1. virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0; @@ -5663,7 +5963,6 @@ public: // If succeeded, fills pAllocationRequest and returns true. // If failed, returns false. virtual bool CreateAllocationRequest( - VkDeviceSize bufferImageGranularity, VkDeviceSize allocSize, VkDeviceSize allocAlignment, bool upperAddress, @@ -5681,16 +5980,17 @@ public: void* userData) = 0; // Frees suballocation assigned to given memory region. - virtual void FreeAtOffset(VkDeviceSize offset) = 0; + virtual void Free(VmaAllocHandle allocHandle) = 0; // Frees all allocations. // Careful! Don't call it if there are VmaAllocation objects owned by userData of cleared allocations! virtual void Clear() = 0; - virtual void SetAllocationUserData(VkDeviceSize offset, void* userData) = 0; + virtual void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) = 0; protected: const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; } + VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; } VkDeviceSize GetDebugMargin() const { return IsVirtual() ? 
0 : VMA_DEBUG_MARGIN; } #if VMA_STATS_STRING_ENABLED @@ -5709,13 +6009,16 @@ protected: private: VkDeviceSize m_Size; const VkAllocationCallbacks* m_pAllocationCallbacks; + const VkDeviceSize m_BufferImageGranularity; const bool m_IsVirtual; }; #ifndef _VMA_BLOCK_METADATA_FUNCTIONS -VmaBlockMetadata::VmaBlockMetadata(const VkAllocationCallbacks* pAllocationCallbacks, bool isVirtual) +VmaBlockMetadata::VmaBlockMetadata(const VkAllocationCallbacks* pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual) : m_Size(0), m_pAllocationCallbacks(pAllocationCallbacks), + m_BufferImageGranularity(bufferImageGranularity), m_IsVirtual(isVirtual) {} #if VMA_STATS_STRING_ENABLED @@ -5798,6 +6101,241 @@ void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const #endif // _VMA_BLOCK_METADATA_FUNCTIONS #endif // _VMA_BLOCK_METADATA +#ifndef _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY +// Before deleting object of this class remember to call 'Destroy()' +class VmaBlockBufferImageGranularity final +{ +public: + struct ValidationContext + { + const VkAllocationCallbacks* allocCallbacks; + uint16_t* pageAllocs; + }; + + VmaBlockBufferImageGranularity(VkDeviceSize bufferImageGranularity); + ~VmaBlockBufferImageGranularity(); + + bool IsEnabled() const { return m_BufferImageGranularity > MAX_LOW_IMAGE_BUFFER_GRANULARITY; } + + void Init(const VkAllocationCallbacks* pAllocationCallbacks, VkDeviceSize size); + // Before destroying object you must call free it's memory + void Destroy(const VkAllocationCallbacks* pAllocationCallbacks); + + void RoundupAllocRequest(VmaSuballocationType allocType, + VkDeviceSize& inOutAllocSize, + VkDeviceSize& inOutAllocAlignment) const; + + bool IsConflict(VkDeviceSize allocSize, + VkDeviceSize allocOffset, + VkDeviceSize blockSize, + VkDeviceSize blockOffset, + VmaSuballocationType allocType) const; + + void AllocPages(uint8_t allocType, VkDeviceSize offset, VkDeviceSize size); + void FreePages(VkDeviceSize offset, VkDeviceSize size); + void Clear(); + + ValidationContext StartValidation(const VkAllocationCallbacks* pAllocationCallbacks, + bool isVirutal) const; + bool Validate(ValidationContext& ctx, VkDeviceSize offset, VkDeviceSize size) const; + bool FinishValidation(ValidationContext& ctx) const; + +private: + static const uint16_t MAX_LOW_IMAGE_BUFFER_GRANULARITY = 256; + + struct RegionInfo + { + uint8_t allocType; + uint16_t allocCount; + }; + + VkDeviceSize m_BufferImageGranularity; + uint32_t m_RegionCount; + RegionInfo* m_RegionInfo; + + uint32_t GetStartPage(VkDeviceSize offset) const { return PageToIndex(offset & ~(m_BufferImageGranularity - 1)); } + uint32_t GetEndPage(VkDeviceSize offset, VkDeviceSize size) const { return PageToIndex((offset + size - 1) & ~(m_BufferImageGranularity - 1)); } + + uint32_t PageToIndex(VkDeviceSize offset) const; + void AllocPage(RegionInfo& page, uint8_t allocType); +}; + +#ifndef _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY_FUNCTIONS +VmaBlockBufferImageGranularity::VmaBlockBufferImageGranularity(VkDeviceSize bufferImageGranularity) + : m_BufferImageGranularity(bufferImageGranularity), + m_RegionCount(0), + m_RegionInfo(VMA_NULL) {} + +VmaBlockBufferImageGranularity::~VmaBlockBufferImageGranularity() +{ + VMA_ASSERT(m_RegionInfo == VMA_NULL && "Free not called before destroying object!"); +} + +void VmaBlockBufferImageGranularity::Init(const VkAllocationCallbacks* pAllocationCallbacks, VkDeviceSize size) +{ + if (IsEnabled()) + { + m_RegionCount = static_cast(VmaDivideRoundingUp(size, m_BufferImageGranularity)); + 
m_RegionInfo = vma_new_array(pAllocationCallbacks, RegionInfo, m_RegionCount); + memset(m_RegionInfo, 0, m_RegionCount * sizeof(RegionInfo)); + } +} + +void VmaBlockBufferImageGranularity::Destroy(const VkAllocationCallbacks* pAllocationCallbacks) +{ + if (m_RegionInfo) + { + vma_delete_array(pAllocationCallbacks, m_RegionInfo, m_RegionCount); + m_RegionInfo = VMA_NULL; + } +} + +void VmaBlockBufferImageGranularity::RoundupAllocRequest(VmaSuballocationType allocType, + VkDeviceSize& inOutAllocSize, + VkDeviceSize& inOutAllocAlignment) const +{ + if (m_BufferImageGranularity > 1 && + m_BufferImageGranularity <= MAX_LOW_IMAGE_BUFFER_GRANULARITY) + { + if (allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN || + allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN || + allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL) + { + inOutAllocAlignment = VMA_MAX(inOutAllocAlignment, m_BufferImageGranularity); + inOutAllocSize = VmaAlignUp(inOutAllocSize, m_BufferImageGranularity); + } + } +} + +bool VmaBlockBufferImageGranularity::IsConflict(VkDeviceSize allocSize, + VkDeviceSize allocOffset, + VkDeviceSize blockSize, + VkDeviceSize blockOffset, + VmaSuballocationType allocType) const +{ + if (IsEnabled()) + { + uint32_t startPage = GetStartPage(allocOffset); + if (m_RegionInfo[startPage].allocCount > 0 && + VmaIsBufferImageGranularityConflict(static_cast(m_RegionInfo[startPage].allocType), allocType)) + { + allocOffset = VmaAlignUp(allocOffset, m_BufferImageGranularity); + if (blockSize < allocSize + allocOffset - blockOffset) + return true; + ++startPage; + } + uint32_t endPage = GetEndPage(allocOffset, allocSize); + if (endPage != startPage && + m_RegionInfo[endPage].allocCount > 0 && + VmaIsBufferImageGranularityConflict(static_cast(m_RegionInfo[endPage].allocType), allocType)) + { + return true; + } + } + return false; +} + +void VmaBlockBufferImageGranularity::AllocPages(uint8_t allocType, VkDeviceSize offset, VkDeviceSize size) +{ + if (IsEnabled()) + { + uint32_t startPage = GetStartPage(offset); + AllocPage(m_RegionInfo[startPage], allocType); + + uint32_t endPage = GetEndPage(offset, size); + if (startPage != endPage) + AllocPage(m_RegionInfo[endPage], allocType); + } +} + +void VmaBlockBufferImageGranularity::FreePages(VkDeviceSize offset, VkDeviceSize size) +{ + if (IsEnabled()) + { + uint32_t startPage = GetStartPage(offset); + --m_RegionInfo[startPage].allocCount; + if (m_RegionInfo[startPage].allocCount == 0) + m_RegionInfo[startPage].allocType = VMA_SUBALLOCATION_TYPE_FREE; + uint32_t endPage = GetEndPage(offset, size); + if (startPage != endPage) + { + --m_RegionInfo[endPage].allocCount; + if (m_RegionInfo[endPage].allocCount == 0) + m_RegionInfo[endPage].allocType = VMA_SUBALLOCATION_TYPE_FREE; + } + } +} + +void VmaBlockBufferImageGranularity::Clear() +{ + if (m_RegionInfo) + memset(m_RegionInfo, 0, m_RegionCount * sizeof(RegionInfo)); +} + +VmaBlockBufferImageGranularity::ValidationContext VmaBlockBufferImageGranularity::StartValidation( + const VkAllocationCallbacks* pAllocationCallbacks, bool isVirutal) const +{ + ValidationContext ctx{ pAllocationCallbacks, VMA_NULL }; + if (!isVirutal && IsEnabled()) + { + ctx.pageAllocs = vma_new_array(pAllocationCallbacks, uint16_t, m_RegionCount); + memset(ctx.pageAllocs, 0, m_RegionCount * sizeof(uint16_t)); + } + return ctx; +} + +bool VmaBlockBufferImageGranularity::Validate(ValidationContext& ctx, + VkDeviceSize offset, VkDeviceSize size) const +{ + if (IsEnabled()) + { + uint32_t start = GetStartPage(offset); + ++ctx.pageAllocs[start]; + 
VMA_VALIDATE(m_RegionInfo[start].allocCount > 0); + + uint32_t end = GetEndPage(offset, size); + if (start != end) + { + ++ctx.pageAllocs[end]; + VMA_VALIDATE(m_RegionInfo[end].allocCount > 0); + } + } + return true; +} + +bool VmaBlockBufferImageGranularity::FinishValidation(ValidationContext& ctx) const +{ + // Check proper page structure + if (IsEnabled()) + { + VMA_ASSERT(ctx.pageAllocs != VMA_NULL && "Validation context not initialized!"); + + for (uint32_t page = 0; page < m_RegionCount; ++page) + { + VMA_VALIDATE(ctx.pageAllocs[page] == m_RegionInfo[page].allocCount); + } + vma_delete_array(ctx.allocCallbacks, ctx.pageAllocs, m_RegionCount); + ctx.pageAllocs = VMA_NULL; + } + return true; +} + +uint32_t VmaBlockBufferImageGranularity::PageToIndex(VkDeviceSize offset) const +{ + return static_cast(offset >> VMA_BITSCAN_MSB(m_BufferImageGranularity)); +} + +void VmaBlockBufferImageGranularity::AllocPage(RegionInfo& page, uint8_t allocType) +{ + // When current alloc type is free then it can be overriden by new type + if (page.allocCount == 0 || page.allocCount > 0 && page.allocType == VMA_SUBALLOCATION_TYPE_FREE) + page.allocType = allocType; + + ++page.allocCount; +} +#endif // _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY_FUNCTIONS +#endif // _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY + #ifndef _VMA_BLOCK_METADATA_GENERIC class VmaBlockMetadata_Generic : public VmaBlockMetadata { @@ -5805,43 +6343,44 @@ class VmaBlockMetadata_Generic : public VmaBlockMetadata friend class VmaDefragmentationAlgorithm_Fast; VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic) public: - VmaBlockMetadata_Generic(const VkAllocationCallbacks* pAllocationCallbacks, bool isVirtual); + VmaBlockMetadata_Generic(const VkAllocationCallbacks* pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual); virtual ~VmaBlockMetadata_Generic() = default; - virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; } - virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; } - virtual bool IsEmpty() const { return (m_Suballocations.size() == 1) && (m_FreeCount == 1); } - virtual void FreeAtOffset(VkDeviceSize offset) { FreeSuballocation(FindAtOffset(offset)); } + size_t GetAllocationCount() const override { return m_Suballocations.size() - m_FreeCount; } + VkDeviceSize GetSumFreeSize() const override { return m_SumFreeSize; } + bool IsEmpty() const override { return (m_Suballocations.size() == 1) && (m_FreeCount == 1); } + void Free(VmaAllocHandle allocHandle) override { FreeSuballocation(FindAtOffset((VkDeviceSize)allocHandle)); } + VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return (VkDeviceSize)allocHandle; }; - virtual void Init(VkDeviceSize size); - virtual bool Validate() const; + void Init(VkDeviceSize size) override; + bool Validate() const override; - virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const; - virtual void AddPoolStats(VmaPoolStats& inoutStats) const; + void CalcAllocationStatInfo(VmaStatInfo& outInfo) const override; + void AddPoolStats(VmaPoolStats& inoutStats) const override; #if VMA_STATS_STRING_ENABLED - virtual void PrintDetailedMap(class VmaJsonWriter& json) const; + void PrintDetailedMap(class VmaJsonWriter& json) const override; #endif - virtual bool CreateAllocationRequest( - VkDeviceSize bufferImageGranularity, + bool CreateAllocationRequest( VkDeviceSize allocSize, VkDeviceSize allocAlignment, bool upperAddress, VmaSuballocationType allocType, uint32_t strategy, - VmaAllocationRequest* pAllocationRequest); 
+ VmaAllocationRequest* pAllocationRequest) override; - virtual VkResult CheckCorruption(const void* pBlockData); + VkResult CheckCorruption(const void* pBlockData) override; - virtual void Alloc( + void Alloc( const VmaAllocationRequest& request, VmaSuballocationType type, - void* userData); + void* userData) override; - virtual void GetAllocationInfo(VkDeviceSize offset, VmaVirtualAllocationInfo& outInfo); - virtual void Clear(); - virtual void SetAllocationUserData(VkDeviceSize offset, void* userData); + void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override; + void Clear() override; + void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override; // For defragmentation bool IsBufferImageGranularityConflictPossible( @@ -5863,12 +6402,11 @@ private: // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem. // If yes, fills pOffset and returns true. If no, returns false. bool CheckAllocation( - VkDeviceSize bufferImageGranularity, VkDeviceSize allocSize, VkDeviceSize allocAlignment, VmaSuballocationType allocType, VmaSuballocationList::const_iterator suballocItem, - VkDeviceSize* pOffset) const; + VmaAllocHandle* pAllocHandle) const; // Given free suballocation, it merges it with following one, which must also be free. void MergeFreeWithNext(VmaSuballocationList::iterator item); @@ -5885,8 +6423,9 @@ private: }; #ifndef _VMA_BLOCK_METADATA_GENERIC_FUNCTIONS -VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(const VkAllocationCallbacks* pAllocationCallbacks, bool isVirtual) - : VmaBlockMetadata(pAllocationCallbacks, isVirtual), +VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(const VkAllocationCallbacks* pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual) + : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual), m_FreeCount(0), m_SumFreeSize(0), m_Suballocations(VmaStlAllocator(pAllocationCallbacks)), @@ -5954,7 +6493,7 @@ bool VmaBlockMetadata_Generic::Validate() const { if (!IsVirtual()) { - VMA_VALIDATE(alloc->GetOffset() == subAlloc.offset); + VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == subAlloc.offset); VMA_VALIDATE(alloc->GetSize() == subAlloc.size); } @@ -6046,7 +6585,6 @@ void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const #endif // VMA_STATS_STRING_ENABLED bool VmaBlockMetadata_Generic::CreateAllocationRequest( - VkDeviceSize bufferImageGranularity, VkDeviceSize allocSize, VkDeviceSize allocAlignment, bool upperAddress, @@ -6089,12 +6627,11 @@ bool VmaBlockMetadata_Generic::CreateAllocationRequest( for (; index < freeSuballocCount; ++index) { if (CheckAllocation( - bufferImageGranularity, allocSize, allocAlignment, allocType, m_FreeSuballocationsBySize[index], - &pAllocationRequest->offset)) + &pAllocationRequest->allocHandle)) { pAllocationRequest->item = m_FreeSuballocationsBySize[index]; return true; @@ -6108,12 +6645,11 @@ bool VmaBlockMetadata_Generic::CreateAllocationRequest( ++it) { if (it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation( - bufferImageGranularity, allocSize, allocAlignment, allocType, it, - &pAllocationRequest->offset)) + &pAllocationRequest->allocHandle)) { pAllocationRequest->item = it; return true; @@ -6126,12 +6662,11 @@ bool VmaBlockMetadata_Generic::CreateAllocationRequest( for (size_t index = freeSuballocCount; index--; ) { if (CheckAllocation( - bufferImageGranularity, allocSize, allocAlignment, allocType, m_FreeSuballocationsBySize[index], - 
&pAllocationRequest->offset)) + &pAllocationRequest->allocHandle)) { pAllocationRequest->item = m_FreeSuballocationsBySize[index]; return true; @@ -6175,9 +6710,10 @@ void VmaBlockMetadata_Generic::Alloc( VmaSuballocation& suballoc = *request.item; // Given suballocation is a free block. VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); + // Given offset is inside this suballocation. - VMA_ASSERT(request.offset >= suballoc.offset); - const VkDeviceSize paddingBegin = request.offset - suballoc.offset; + VMA_ASSERT((VkDeviceSize)request.allocHandle >= suballoc.offset); + const VkDeviceSize paddingBegin = (VkDeviceSize)request.allocHandle - suballoc.offset; VMA_ASSERT(suballoc.size >= paddingBegin + request.size); const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - request.size; @@ -6185,7 +6721,7 @@ void VmaBlockMetadata_Generic::Alloc( // it to become used. UnregisterFreeSuballocation(request.item); - suballoc.offset = request.offset; + suballoc.offset = (VkDeviceSize)request.allocHandle; suballoc.size = request.size; suballoc.type = type; suballoc.userData = userData; @@ -6194,7 +6730,7 @@ void VmaBlockMetadata_Generic::Alloc( if (paddingEnd) { VmaSuballocation paddingSuballoc = {}; - paddingSuballoc.offset = request.offset + request.size; + paddingSuballoc.offset = suballoc.offset + suballoc.size; paddingSuballoc.size = paddingEnd; paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE; VmaSuballocationList::iterator next = request.item; @@ -6208,7 +6744,7 @@ void VmaBlockMetadata_Generic::Alloc( if (paddingBegin) { VmaSuballocation paddingSuballoc = {}; - paddingSuballoc.offset = request.offset - paddingBegin; + paddingSuballoc.offset = suballoc.offset - paddingBegin; paddingSuballoc.size = paddingBegin; paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE; const VmaSuballocationList::iterator paddingBeginItem = @@ -6229,9 +6765,10 @@ void VmaBlockMetadata_Generic::Alloc( m_SumFreeSize -= request.size; } -void VmaBlockMetadata_Generic::GetAllocationInfo(VkDeviceSize offset, VmaVirtualAllocationInfo& outInfo) +void VmaBlockMetadata_Generic::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) { - const VmaSuballocation& suballoc = *FindAtOffset(offset); + const VmaSuballocation& suballoc = *FindAtOffset((VkDeviceSize)allocHandle); + outInfo.offset = (VkDeviceSize)allocHandle; outInfo.size = suballoc.size; outInfo.pUserData = suballoc.userData; } @@ -6255,9 +6792,9 @@ void VmaBlockMetadata_Generic::Clear() m_FreeSuballocationsBySize.push_back(m_Suballocations.begin()); } -void VmaBlockMetadata_Generic::SetAllocationUserData(VkDeviceSize offset, void* userData) +void VmaBlockMetadata_Generic::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) { - VmaSuballocation& suballoc = *FindAtOffset(offset); + VmaSuballocation& suballoc = *FindAtOffset((VkDeviceSize)allocHandle); suballoc.userData = userData; } @@ -6309,19 +6846,19 @@ bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const } bool VmaBlockMetadata_Generic::CheckAllocation( - VkDeviceSize bufferImageGranularity, VkDeviceSize allocSize, VkDeviceSize allocAlignment, VmaSuballocationType allocType, VmaSuballocationList::const_iterator suballocItem, - VkDeviceSize* pOffset) const + VmaAllocHandle* pAllocHandle) const { VMA_ASSERT(allocSize > 0); VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE); VMA_ASSERT(suballocItem != m_Suballocations.cend()); - VMA_ASSERT(pOffset != VMA_NULL); + VMA_ASSERT(pAllocHandle != VMA_NULL); const VkDeviceSize debugMargin = 
GetDebugMargin(); + const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity(); const VmaSuballocation& suballoc = *suballocItem; VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); @@ -6333,16 +6870,16 @@ bool VmaBlockMetadata_Generic::CheckAllocation( } // Start from offset equal to beginning of this suballocation. - *pOffset = suballoc.offset; + VkDeviceSize offset = suballoc.offset; // Apply debugMargin at the beginning. if (debugMargin > 0) { - *pOffset += debugMargin; + offset += debugMargin; } // Apply alignment. - *pOffset = VmaAlignUp(*pOffset, allocAlignment); + offset = VmaAlignUp(offset, allocAlignment); // Check previous suballocations for BufferImageGranularity conflicts. // Make bigger alignment if necessary. @@ -6354,7 +6891,7 @@ bool VmaBlockMetadata_Generic::CheckAllocation( { --prevSuballocItem; const VmaSuballocation& prevSuballoc = *prevSuballocItem; - if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity)) + if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, offset, bufferImageGranularity)) { if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) { @@ -6368,12 +6905,12 @@ bool VmaBlockMetadata_Generic::CheckAllocation( } if (bufferImageGranularityConflict) { - *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity); + offset = VmaAlignUp(offset, bufferImageGranularity); } } // Calculate padding at the beginning based on current offset. - const VkDeviceSize paddingBegin = *pOffset - suballoc.offset; + const VkDeviceSize paddingBegin = offset - suballoc.offset; // Calculate required margin at the end. const VkDeviceSize requiredEndMargin = debugMargin; @@ -6386,14 +6923,14 @@ bool VmaBlockMetadata_Generic::CheckAllocation( // Check next suballocations for BufferImageGranularity conflicts. // If conflict exists, allocation cannot be made here. - if (allocSize % bufferImageGranularity || *pOffset % bufferImageGranularity) + if (allocSize % bufferImageGranularity || offset % bufferImageGranularity) { VmaSuballocationList::const_iterator nextSuballocItem = suballocItem; ++nextSuballocItem; while (nextSuballocItem != m_Suballocations.cend()) { const VmaSuballocation& nextSuballoc = *nextSuballocItem; - if (VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) + if (VmaBlocksOnSamePage(offset, allocSize, nextSuballoc.offset, bufferImageGranularity)) { if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type)) { @@ -6409,7 +6946,8 @@ bool VmaBlockMetadata_Generic::CheckAllocation( } } - // All tests passed: Success. pOffset is already filled. + *pAllocHandle = (VmaAllocHandle)offset; + // All tests passed: Success. pAllocHandle is already filled. 
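// For this metadata type the returned VmaAllocHandle is simply the chosen byte offset
// cast to the opaque handle type, which is why Free() and GetAllocationInfo() can cast
// the handle straight back to an offset.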
return true; } @@ -6645,43 +7183,44 @@ class VmaBlockMetadata_Linear : public VmaBlockMetadata { VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear) public: - VmaBlockMetadata_Linear(const VkAllocationCallbacks* pAllocationCallbacks, bool isVirtual); + VmaBlockMetadata_Linear(const VkAllocationCallbacks* pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual); virtual ~VmaBlockMetadata_Linear() = default; - virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; } - virtual bool IsEmpty() const { return GetAllocationCount() == 0; } + VkDeviceSize GetSumFreeSize() const override { return m_SumFreeSize; } + bool IsEmpty() const override { return GetAllocationCount() == 0; } + VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return (VkDeviceSize)allocHandle; }; - virtual void Init(VkDeviceSize size); - virtual bool Validate() const; - virtual size_t GetAllocationCount() const; + void Init(VkDeviceSize size) override; + bool Validate() const override; + size_t GetAllocationCount() const override; - virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const; - virtual void AddPoolStats(VmaPoolStats& inoutStats) const; + void CalcAllocationStatInfo(VmaStatInfo& outInfo) const override; + void AddPoolStats(VmaPoolStats& inoutStats) const override; #if VMA_STATS_STRING_ENABLED - virtual void PrintDetailedMap(class VmaJsonWriter& json) const; + void PrintDetailedMap(class VmaJsonWriter& json) const override; #endif - virtual bool CreateAllocationRequest( - VkDeviceSize bufferImageGranularity, + bool CreateAllocationRequest( VkDeviceSize allocSize, VkDeviceSize allocAlignment, bool upperAddress, VmaSuballocationType allocType, uint32_t strategy, - VmaAllocationRequest* pAllocationRequest); + VmaAllocationRequest* pAllocationRequest) override; - virtual VkResult CheckCorruption(const void* pBlockData); + VkResult CheckCorruption(const void* pBlockData) override; - virtual void Alloc( + void Alloc( const VmaAllocationRequest& request, VmaSuballocationType type, - void* userData); + void* userData) override; - virtual void FreeAtOffset(VkDeviceSize offset); - virtual void GetAllocationInfo(VkDeviceSize offset, VmaVirtualAllocationInfo& outInfo); - virtual void Clear(); - virtual void SetAllocationUserData(VkDeviceSize offset, void* userData); + void Free(VmaAllocHandle allocHandle) override; + void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override; + void Clear() override; + void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override; private: /* @@ -6730,14 +7269,12 @@ private: void CleanupAfterFree(); bool CreateAllocationRequest_LowerAddress( - VkDeviceSize bufferImageGranularity, VkDeviceSize allocSize, VkDeviceSize allocAlignment, VmaSuballocationType allocType, uint32_t strategy, VmaAllocationRequest* pAllocationRequest); bool CreateAllocationRequest_UpperAddress( - VkDeviceSize bufferImageGranularity, VkDeviceSize allocSize, VkDeviceSize allocAlignment, VmaSuballocationType allocType, @@ -6746,8 +7283,9 @@ private: }; #ifndef _VMA_BLOCK_METADATA_LINEAR_FUNCTIONS -VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(const VkAllocationCallbacks* pAllocationCallbacks, bool isVirtual) - : VmaBlockMetadata(pAllocationCallbacks, isVirtual), +VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(const VkAllocationCallbacks* pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual) + : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual), m_SumFreeSize(0), 
m_Suballocations0(VmaStlAllocator(pAllocationCallbacks)), m_Suballocations1(VmaStlAllocator(pAllocationCallbacks)), @@ -6814,7 +7352,7 @@ bool VmaBlockMetadata_Linear::Validate() const { if (!IsVirtual()) { - VMA_VALIDATE(alloc->GetOffset() == suballoc.offset); + VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset); VMA_VALIDATE(alloc->GetSize() == suballoc.size); } sumUsedSize += suballoc.size; @@ -6856,7 +7394,7 @@ bool VmaBlockMetadata_Linear::Validate() const { if (!IsVirtual()) { - VMA_VALIDATE(alloc->GetOffset() == suballoc.offset); + VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset); VMA_VALIDATE(alloc->GetSize() == suballoc.size); } sumUsedSize += suballoc.size; @@ -6890,7 +7428,7 @@ bool VmaBlockMetadata_Linear::Validate() const { if (!IsVirtual()) { - VMA_VALIDATE(alloc->GetOffset() == suballoc.offset); + VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset); VMA_VALIDATE(alloc->GetSize() == suballoc.size); } sumUsedSize += suballoc.size; @@ -7565,7 +8103,6 @@ void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const #endif // VMA_STATS_STRING_ENABLED bool VmaBlockMetadata_Linear::CreateAllocationRequest( - VkDeviceSize bufferImageGranularity, VkDeviceSize allocSize, VkDeviceSize allocAlignment, bool upperAddress, @@ -7580,10 +8117,8 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest( pAllocationRequest->size = allocSize; return upperAddress ? CreateAllocationRequest_UpperAddress( - bufferImageGranularity, allocSize, allocAlignment, allocType, strategy, pAllocationRequest) : CreateAllocationRequest_LowerAddress( - bufferImageGranularity, allocSize, allocAlignment, allocType, strategy, pAllocationRequest); } @@ -7636,7 +8171,7 @@ void VmaBlockMetadata_Linear::Alloc( VmaSuballocationType type, void* userData) { - const VmaSuballocation newSuballoc = { request.offset, request.size, userData, type }; + const VmaSuballocation newSuballoc = { (VkDeviceSize)request.allocHandle, request.size, userData, type }; switch (request.type) { @@ -7654,9 +8189,9 @@ void VmaBlockMetadata_Linear::Alloc( SuballocationVectorType& suballocations1st = AccessSuballocations1st(); VMA_ASSERT(suballocations1st.empty() || - request.offset >= suballocations1st.back().offset + suballocations1st.back().size); + (VkDeviceSize)request.allocHandle >= suballocations1st.back().offset + suballocations1st.back().size); // Check if it fits before the end of the block. - VMA_ASSERT(request.offset + request.size <= GetSize()); + VMA_ASSERT((VkDeviceSize)request.allocHandle + request.size <= GetSize()); suballocations1st.push_back(newSuballoc); } @@ -7666,7 +8201,7 @@ void VmaBlockMetadata_Linear::Alloc( SuballocationVectorType& suballocations1st = AccessSuballocations1st(); // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector. 
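// In the linear algorithm the handle likewise encodes the raw byte offset, so the asserts
// below compare (VkDeviceSize)request.allocHandle directly against neighbouring
// suballocation offsets.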
VMA_ASSERT(!suballocations1st.empty() && - request.offset + request.size <= suballocations1st[m_1stNullItemsBeginCount].offset); + (VkDeviceSize)request.allocHandle + request.size <= suballocations1st[m_1stNullItemsBeginCount].offset); SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); switch (m_2ndVectorMode) @@ -7697,10 +8232,11 @@ void VmaBlockMetadata_Linear::Alloc( m_SumFreeSize -= newSuballoc.size; } -void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset) +void VmaBlockMetadata_Linear::Free(VmaAllocHandle allocHandle) { SuballocationVectorType& suballocations1st = AccessSuballocations1st(); SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + VkDeviceSize offset = (VkDeviceSize)allocHandle; if (!suballocations1st.empty()) { @@ -7785,9 +8321,10 @@ void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset) VMA_ASSERT(0 && "Allocation to free not found in linear allocator!"); } -void VmaBlockMetadata_Linear::GetAllocationInfo(VkDeviceSize offset, VmaVirtualAllocationInfo& outInfo) +void VmaBlockMetadata_Linear::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) { - VmaSuballocation& suballoc = FindSuballocation(offset); + VmaSuballocation& suballoc = FindSuballocation((VkDeviceSize)allocHandle); + outInfo.offset = (VkDeviceSize)allocHandle; outInfo.size = suballoc.size; outInfo.pUserData = suballoc.userData; } @@ -7804,9 +8341,9 @@ void VmaBlockMetadata_Linear::Clear() m_2ndNullItemsCount = 0; } -void VmaBlockMetadata_Linear::SetAllocationUserData(VkDeviceSize offset, void* userData) +void VmaBlockMetadata_Linear::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) { - VmaSuballocation& suballoc = FindSuballocation(offset); + VmaSuballocation& suballoc = FindSuballocation((VkDeviceSize)allocHandle); suballoc.userData = userData; } @@ -7961,7 +8498,6 @@ void VmaBlockMetadata_Linear::CleanupAfterFree() } bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress( - VkDeviceSize bufferImageGranularity, VkDeviceSize allocSize, VkDeviceSize allocAlignment, VmaSuballocationType allocType, @@ -7970,6 +8506,7 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress( { const VkDeviceSize blockSize = GetSize(); const VkDeviceSize debugMargin = GetDebugMargin(); + const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity(); SuballocationVectorType& suballocations1st = AccessSuballocations1st(); SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); @@ -8051,7 +8588,7 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress( } // All tests passed: Success. - pAllocationRequest->offset = resultOffset; + pAllocationRequest->allocHandle = (VmaAllocHandle)resultOffset; // pAllocationRequest->item, customData unused. pAllocationRequest->type = VmaAllocationRequestType::EndOf1st; return true; @@ -8140,7 +8677,7 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress( } // All tests passed: Success. - pAllocationRequest->offset = resultOffset; + pAllocationRequest->allocHandle = (VmaAllocHandle)resultOffset; pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd; // pAllocationRequest->item, customData unused. 
return true; @@ -8151,7 +8688,6 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress( } bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress( - VkDeviceSize bufferImageGranularity, VkDeviceSize allocSize, VkDeviceSize allocAlignment, VmaSuballocationType allocType, @@ -8159,6 +8695,7 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress( VmaAllocationRequest* pAllocationRequest) { const VkDeviceSize blockSize = GetSize(); + const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity(); SuballocationVectorType& suballocations1st = AccessSuballocations1st(); SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); @@ -8257,7 +8794,7 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress( } // All tests passed: Success. - pAllocationRequest->offset = resultOffset; + pAllocationRequest->allocHandle = (VmaAllocHandle)resultOffset; // pAllocationRequest->item unused. pAllocationRequest->type = VmaAllocationRequestType::UpperAddress; return true; @@ -8284,42 +8821,43 @@ class VmaBlockMetadata_Buddy : public VmaBlockMetadata { VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy) public: - VmaBlockMetadata_Buddy(const VkAllocationCallbacks* pAllocationCallbacks, bool isVirtual); + VmaBlockMetadata_Buddy(const VkAllocationCallbacks* pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual); virtual ~VmaBlockMetadata_Buddy(); - virtual size_t GetAllocationCount() const { return m_AllocationCount; } - virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); } - virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; } - virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; } + size_t GetAllocationCount() const override { return m_AllocationCount; } + VkDeviceSize GetSumFreeSize() const override { return m_SumFreeSize + GetUnusableSize(); } + bool IsEmpty() const override { return m_Root->type == Node::TYPE_FREE; } + VkResult CheckCorruption(const void* pBlockData) override { return VK_ERROR_FEATURE_NOT_PRESENT; } + VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return (VkDeviceSize)allocHandle; }; - virtual void Init(VkDeviceSize size); - virtual bool Validate() const; + void Init(VkDeviceSize size) override; + bool Validate() const override; - virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const; - virtual void AddPoolStats(VmaPoolStats& inoutStats) const; + void CalcAllocationStatInfo(VmaStatInfo& outInfo) const override; + void AddPoolStats(VmaPoolStats& inoutStats) const override; #if VMA_STATS_STRING_ENABLED - virtual void PrintDetailedMap(class VmaJsonWriter& json) const; + void PrintDetailedMap(class VmaJsonWriter& json) const override; #endif - virtual bool CreateAllocationRequest( - VkDeviceSize bufferImageGranularity, + bool CreateAllocationRequest( VkDeviceSize allocSize, VkDeviceSize allocAlignment, bool upperAddress, VmaSuballocationType allocType, uint32_t strategy, - VmaAllocationRequest* pAllocationRequest); + VmaAllocationRequest* pAllocationRequest) override; - virtual void Alloc( + void Alloc( const VmaAllocationRequest& request, VmaSuballocationType type, - void* userData); + void* userData) override; - virtual void FreeAtOffset(VkDeviceSize offset); - virtual void GetAllocationInfo(VkDeviceSize offset, VmaVirtualAllocationInfo& outInfo); - virtual void Clear(); - virtual void SetAllocationUserData(VkDeviceSize offset, void* userData); + void 
Free(VmaAllocHandle allocHandle) override; + void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override; + void Clear() override; + void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override; private: static const size_t MAX_LEVELS = 48; @@ -8411,8 +8949,9 @@ private: }; #ifndef _VMA_BLOCK_METADATA_BUDDY_FUNCTIONS -VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(const VkAllocationCallbacks* pAllocationCallbacks, bool isVirtual) - : VmaBlockMetadata(pAllocationCallbacks, isVirtual), +VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(const VkAllocationCallbacks* pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual) + : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual), m_NodeAllocator(pAllocationCallbacks, 32), // firstBlockCapacity m_Root(VMA_NULL), m_AllocationCount(0), @@ -8527,7 +9066,6 @@ void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const } #if VMA_STATS_STRING_ENABLED - void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const { VmaStatInfo stat; @@ -8554,7 +9092,6 @@ void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const #endif // VMA_STATS_STRING_ENABLED bool VmaBlockMetadata_Buddy::CreateAllocationRequest( - VkDeviceSize bufferImageGranularity, VkDeviceSize allocSize, VkDeviceSize allocAlignment, bool upperAddress, @@ -8572,8 +9109,8 @@ bool VmaBlockMetadata_Buddy::CreateAllocationRequest( allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN || allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL) { - allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity); - allocSize = VMA_MAX(allocSize, bufferImageGranularity); + allocAlignment = VMA_MAX(allocAlignment, GetBufferImageGranularity()); + allocSize = VmaAlignUp(allocSize, GetBufferImageGranularity()); } if (allocSize > m_UsableSize) @@ -8591,7 +9128,7 @@ bool VmaBlockMetadata_Buddy::CreateAllocationRequest( if (freeNode->offset % allocAlignment == 0) { pAllocationRequest->type = VmaAllocationRequestType::Normal; - pAllocationRequest->offset = freeNode->offset; + pAllocationRequest->allocHandle = (VmaAllocHandle)freeNode->offset; pAllocationRequest->size = allocSize; pAllocationRequest->customData = (void*)(uintptr_t)level; return true; @@ -8614,7 +9151,7 @@ void VmaBlockMetadata_Buddy::Alloc( Node* currNode = m_FreeList[currLevel].front; VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE); - while (currNode->offset != request.offset) + while (currNode->offset != (VkDeviceSize)request.allocHandle) { currNode = currNode->free.next; VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE); @@ -8676,10 +9213,11 @@ void VmaBlockMetadata_Buddy::Alloc( m_SumFreeSize -= request.size; } -void VmaBlockMetadata_Buddy::GetAllocationInfo(VkDeviceSize offset, VmaVirtualAllocationInfo& outInfo) +void VmaBlockMetadata_Buddy::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) { uint32_t level = 0; - const Node* const node = FindAllocationNode(offset, level); + const Node* const node = FindAllocationNode((VkDeviceSize)allocHandle, level); + outInfo.offset = (VkDeviceSize)allocHandle; outInfo.size = LevelToNodeSize(level); outInfo.pUserData = node->allocation.userData; } @@ -8705,10 +9243,10 @@ void VmaBlockMetadata_Buddy::Clear() m_SumFreeSize = m_UsableSize; } -void VmaBlockMetadata_Buddy::SetAllocationUserData(VkDeviceSize offset, void* userData) +void VmaBlockMetadata_Buddy::SetAllocationUserData(VmaAllocHandle 
allocHandle, void* userData) { uint32_t level = 0; - Node* const node = FindAllocationNode(offset, level); + Node* const node = FindAllocationNode((VkDeviceSize)allocHandle, level); node->allocation.userData = userData; } @@ -8799,10 +9337,10 @@ uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const return level; } -void VmaBlockMetadata_Buddy::FreeAtOffset(VkDeviceSize offset) +void VmaBlockMetadata_Buddy::Free(VmaAllocHandle allocHandle) { uint32_t level = 0; - Node* node = FindAllocationNode(offset, level); + Node* node = FindAllocationNode((VkDeviceSize)allocHandle, level); ++m_FreeCount; --m_AllocationCount; @@ -8933,6 +9471,764 @@ void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, con #endif // _VMA_BLOCK_METADATA_BUDDY_FUNCTIONS #endif // _VMA_BLOCK_METADATA_BUDDY +#ifndef _VMA_BLOCK_METADATA_TLSF +// To not search current larger region if first allocation won't succeed and skip to smaller range +// use with VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT as strategy in CreateAllocationRequest() +class VmaBlockMetadata_TLSF : public VmaBlockMetadata +{ + VMA_CLASS_NO_COPY(VmaBlockMetadata_TLSF) +public: + VmaBlockMetadata_TLSF(const VkAllocationCallbacks* pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual); + virtual ~VmaBlockMetadata_TLSF(); + + size_t GetAllocationCount() const override { return m_AllocCount; } + VkDeviceSize GetSumFreeSize() const override { return m_BlocksFreeSize + m_NullBlock->size; } + bool IsEmpty() const override { return m_NullBlock->offset == 0; } + VkResult CheckCorruption(const void* pBlockData) override { return VK_ERROR_FEATURE_NOT_PRESENT; } + VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return ((Block*)allocHandle)->offset; }; + + void Init(VkDeviceSize size) override; + bool Validate() const override; + + void CalcAllocationStatInfo(VmaStatInfo& outInfo) const override; + void AddPoolStats(VmaPoolStats& inoutStats) const override; + +#if VMA_STATS_STRING_ENABLED + void PrintDetailedMap(class VmaJsonWriter& json) const override; +#endif + + bool CreateAllocationRequest( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) override; + + void Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + void* userData) override; + + void Free(VmaAllocHandle allocHandle) override; + void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override; + void Clear() override; + void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override; + +private: + // According to original paper it should be preferable 4 or 5: + // M. Masmano, I. Ripoll, A. Crespo, and J. 
Real "TLSF: a New Dynamic Memory Allocator for Real-Time Systems" + // http://www.gii.upv.es/tlsf/files/ecrts04_tlsf.pdf + static const uint8_t SECOND_LEVEL_INDEX = 5; + static const uint16_t SMALL_BUFFER_SIZE = 256; + static const uint32_t INITIAL_BLOCK_ALLOC_COUNT = 16; + static const uint8_t MEMORY_CLASS_SHIFT = 7; + + struct RegionInfo + { + uint8_t allocType; + uint16_t allocCount; + }; + class Block + { + public: + VkDeviceSize offset; + VkDeviceSize size; + Block* prevPhysical; + Block* nextPhysical; + + void MarkFree() { prevFree = VMA_NULL; } + void MarkTaken() { prevFree = this; } + bool IsFree() const { return prevFree != this; } + void*& UserData() { VMA_HEAVY_ASSERT(!IsFree()); return userData; } + Block*& PrevFree() { return prevFree; } + Block*& NextFree() { VMA_HEAVY_ASSERT(IsFree()); return nextFree; } + + private: + Block* prevFree; // Address of the same block here indicates that block is taken + union + { + Block* nextFree; + void* userData; + }; + }; + + size_t m_AllocCount; + // Total number of free blocks besides null block + size_t m_BlocksFreeCount; + // Total size of free blocks excluding null block + VkDeviceSize m_BlocksFreeSize; + uint32_t m_IsFree; + uint8_t m_MemoryClasses; + uint16_t* m_InnerIsFree; + uint32_t m_ListsCount; + /* + * 0: 0-3 lists for small buffers + * 1+: 0-(2^SLI-1) lists for normal buffers + */ + Block** m_FreeList; + VmaPoolAllocator m_BlockAllocator; + Block* m_NullBlock; + VmaBlockBufferImageGranularity m_GranularityHandler; + + uint8_t SizeToMemoryClass(VkDeviceSize size) const; + uint16_t SizeToSecondIndex(VkDeviceSize size, uint8_t memoryClass) const; + uint32_t GetListIndex(uint8_t memoryClass, uint16_t secondIndex) const; + uint32_t GetListIndex(VkDeviceSize size) const; + + void RemoveFreeBlock(Block* block); + void InsertFreeBlock(Block* block); + void MergeBlock(Block* block, Block* prev); + + Block* FindFreeBlock(VkDeviceSize size, uint32_t& listIndex) const; + bool CheckBlock( + Block& block, + uint32_t listIndex, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + VmaAllocationRequest* pAllocationRequest); +}; + +#ifndef _VMA_BLOCK_METADATA_TLSF_FUNCTIONS +VmaBlockMetadata_TLSF::VmaBlockMetadata_TLSF(const VkAllocationCallbacks* pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual) + : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual), + m_AllocCount(0), + m_BlocksFreeCount(0), + m_BlocksFreeSize(0), + m_IsFree(0), + m_MemoryClasses(0), + m_InnerIsFree(VMA_NULL), + m_ListsCount(0), + m_FreeList(VMA_NULL), + m_BlockAllocator(pAllocationCallbacks, INITIAL_BLOCK_ALLOC_COUNT * sizeof(Block)), + m_NullBlock(VMA_NULL), + m_GranularityHandler(bufferImageGranularity) {} + +VmaBlockMetadata_TLSF::~VmaBlockMetadata_TLSF() +{ + if (m_InnerIsFree) + vma_delete_array(GetAllocationCallbacks(), m_InnerIsFree, m_MemoryClasses); + if (m_FreeList) + vma_delete_array(GetAllocationCallbacks(), m_FreeList, m_ListsCount); + m_GranularityHandler.Destroy(GetAllocationCallbacks()); +} + +void VmaBlockMetadata_TLSF::Init(VkDeviceSize size) +{ + VmaBlockMetadata::Init(size); + + if (!IsVirtual()) + m_GranularityHandler.Init(GetAllocationCallbacks(), size); + + m_NullBlock = m_BlockAllocator.Alloc(); + m_NullBlock->size = size; + m_NullBlock->offset = 0; + m_NullBlock->prevPhysical = VMA_NULL; + m_NullBlock->nextPhysical = VMA_NULL; + m_NullBlock->MarkFree(); + m_NullBlock->NextFree() = VMA_NULL; + m_NullBlock->PrevFree() = VMA_NULL; + uint8_t memoryClass = 
SizeToMemoryClass(size); + uint16_t sli = SizeToSecondIndex(size, memoryClass); + m_ListsCount = (memoryClass == 0 ? 0 : (memoryClass - 1) * (1UL << SECOND_LEVEL_INDEX) + sli) + 5; + + m_MemoryClasses = memoryClass + 2; + m_InnerIsFree = vma_new_array(GetAllocationCallbacks(), uint16_t, m_MemoryClasses); + memset(m_InnerIsFree, 0, m_MemoryClasses * sizeof(uint16_t)); + + m_FreeList = vma_new_array(GetAllocationCallbacks(), Block*, m_ListsCount); + memset(m_FreeList, 0, m_ListsCount * sizeof(Block*)); +} + +bool VmaBlockMetadata_TLSF::Validate() const +{ + VMA_VALIDATE(GetSumFreeSize() <= GetSize()); + + VkDeviceSize calculatedSize = m_NullBlock->size; + VkDeviceSize calculatedFreeSize = m_NullBlock->size; + size_t allocCount = 0; + size_t freeCount = 0; + + // Check integrity of free lists + for (uint32_t list = 0; list < m_ListsCount; ++list) + { + Block* block = m_FreeList[list]; + if (block != VMA_NULL) + { + VMA_VALIDATE(block->IsFree()); + VMA_VALIDATE(block->PrevFree() == VMA_NULL); + while (block->NextFree()) + { + VMA_VALIDATE(block->NextFree()->IsFree()); + VMA_VALIDATE(block->NextFree()->PrevFree() == block); + block = block->NextFree(); + } + } + } + + VkDeviceSize nextOffset = m_NullBlock->offset; + auto validateCtx = m_GranularityHandler.StartValidation(GetAllocationCallbacks(), IsVirtual()); + + VMA_VALIDATE(m_NullBlock->nextPhysical == VMA_NULL); + if (m_NullBlock->prevPhysical) + { + VMA_VALIDATE(m_NullBlock->prevPhysical->nextPhysical == m_NullBlock); + } + // Check all blocks + for (Block* prev = m_NullBlock->prevPhysical; prev != VMA_NULL; prev = prev->prevPhysical) + { + VMA_VALIDATE(prev->offset + prev->size == nextOffset); + nextOffset = prev->offset; + calculatedSize += prev->size; + + uint32_t listIndex = GetListIndex(prev->size); + if (prev->IsFree()) + { + ++freeCount; + // Check if free block belongs to free list + Block* freeBlock = m_FreeList[listIndex]; + VMA_VALIDATE(freeBlock != VMA_NULL); + + bool found = false; + do + { + if (freeBlock == prev) + found = true; + + freeBlock = freeBlock->NextFree(); + } while (!found && freeBlock != VMA_NULL); + + VMA_VALIDATE(found); + calculatedFreeSize += prev->size; + } + else + { + ++allocCount; + // Check if taken block is not on a free list + Block* freeBlock = m_FreeList[listIndex]; + while (freeBlock) + { + VMA_VALIDATE(freeBlock != prev); + freeBlock = freeBlock->NextFree(); + } + + if (!IsVirtual()) + { + VMA_VALIDATE(m_GranularityHandler.Validate(validateCtx, prev->offset, prev->size)); + } + } + + if (prev->prevPhysical) + { + VMA_VALIDATE(prev->prevPhysical->nextPhysical == prev); + } + } + + if (!IsVirtual()) + { + VMA_VALIDATE(m_GranularityHandler.FinishValidation(validateCtx)); + } + + VMA_VALIDATE(nextOffset == 0); + VMA_VALIDATE(calculatedSize == GetSize()); + VMA_VALIDATE(calculatedFreeSize == GetSumFreeSize()); + VMA_VALIDATE(allocCount == m_AllocCount); + VMA_VALIDATE(freeCount == m_BlocksFreeCount); + + return true; +} + +void VmaBlockMetadata_TLSF::CalcAllocationStatInfo(VmaStatInfo& outInfo) const +{ + VmaInitStatInfo(outInfo); + outInfo.blockCount = 1; + if (m_NullBlock->size > 0) + VmaAddStatInfoUnusedRange(outInfo, m_NullBlock->size); + + for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical) + { + if (block->IsFree()) + VmaAddStatInfoUnusedRange(outInfo, block->size); + else + VmaAddStatInfoAllocation(outInfo, block->size); + } +} + +void VmaBlockMetadata_TLSF::AddPoolStats(VmaPoolStats& inoutStats) const +{ + inoutStats.size += GetSize(); + 
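// m_NullBlock represents the free tail past the last physical block; when it is
// non-empty it is reported as one additional unused range below.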
inoutStats.unusedSize += GetSumFreeSize(); + inoutStats.allocationCount += m_AllocCount; + inoutStats.unusedRangeCount += m_BlocksFreeCount; + if(m_NullBlock->size > 0) + ++inoutStats.unusedRangeCount; +} + +#if VMA_STATS_STRING_ENABLED +void VmaBlockMetadata_TLSF::PrintDetailedMap(class VmaJsonWriter& json) const +{ + size_t blockCount = m_AllocCount + m_BlocksFreeCount; + VmaStlAllocator allocator(GetAllocationCallbacks()); + VmaVector> blockList(blockCount, allocator); + + size_t i = blockCount; + for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical) + { + blockList[--i] = block; + } + VMA_ASSERT(i == 0); + + VmaStatInfo stat; + CalcAllocationStatInfo(stat); + + PrintDetailedMap_Begin(json, + stat.unusedBytes, + stat.allocationCount, + stat.unusedRangeCount); + + for (; i < blockCount; ++i) + { + Block* block = blockList[i]; + if (block->IsFree()) + PrintDetailedMap_UnusedRange(json, block->offset, block->size); + else + PrintDetailedMap_Allocation(json, block->offset, block->size, block->PrevFree()); + } + if (m_NullBlock->size > 0) + PrintDetailedMap_UnusedRange(json, m_NullBlock->offset, m_NullBlock->size); + + PrintDetailedMap_End(json); +} +#endif + +bool VmaBlockMetadata_TLSF::CreateAllocationRequest( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) +{ + VMA_ASSERT(allocSize > 0 && "Cannot allocate empty block!"); + VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm."); + + // For small granularity round up + if (!IsVirtual()) + m_GranularityHandler.RoundupAllocRequest(allocType, allocSize, allocAlignment); + + // Quick check for too small pool + if (allocSize > GetSumFreeSize()) + return false; + + // If no free blocks in pool then check only null block + if (m_BlocksFreeCount == 0) + return CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest); + + VkDeviceSize roundedSize = allocSize; + if (allocSize >= (1 << SECOND_LEVEL_INDEX)) + { + // Round up to the next block + roundedSize += (1ULL << (VMA_BITSCAN_MSB(allocSize) - SECOND_LEVEL_INDEX)) - 1; + } + + uint32_t listIndex = 0; + Block* block = FindFreeBlock(roundedSize, listIndex); + while (block) + { + if (CheckBlock(*block, listIndex, allocSize, allocAlignment, allocType, pAllocationRequest)) + return true; + + block = block->NextFree(); + // Region does not meet requirements + if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT) + break; + } + + if ((strategy & VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT) == 0) + { + // No region in previous bucket, check null block + if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest)) + return true; + } + + // No other region found, check previous bucket + uint32_t prevListIndex = 0; + Block* prevListBlock = FindFreeBlock(allocSize, prevListIndex); + while (prevListBlock) + { + if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest)) + return true; + + prevListBlock = prevListBlock->NextFree(); + } + + if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT) + { + // No region in previous bucket, check null block + if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest)) + return true; + } + + // If all searches failed and first bucket still have some free regions then check 
it fully + while (block) + { + if (CheckBlock(*block, listIndex, allocSize, allocAlignment, allocType, pAllocationRequest)) + return true; + + block = block->NextFree(); + } + + // Worst case, if bufferImageGranularity is causing free blocks to become not suitable then full search has to be done + if (!IsVirtual() && m_GranularityHandler.IsEnabled()) + { + while (++listIndex < m_ListsCount) + { + block = m_FreeList[listIndex]; + while (block) + { + if (CheckBlock(*block, listIndex, allocSize, allocAlignment, allocType, pAllocationRequest)) + return true; + + block = block->NextFree(); + } + } + } + + // No more memory sadly + return false; +} + +void VmaBlockMetadata_TLSF::Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + void* userData) +{ + VMA_ASSERT(request.type == VmaAllocationRequestType::TLSF); + + // Get block and pop it from the free list + Block* currentBlock = (Block*)request.allocHandle; + VMA_ASSERT(currentBlock != VMA_NULL); + + if (currentBlock != m_NullBlock) + RemoveFreeBlock(currentBlock); + + // Append missing alignment to prev block or create new one + VMA_ASSERT(currentBlock->offset <= request.algorithmData); + VkDeviceSize misssingAlignment = request.algorithmData - currentBlock->offset; + if (misssingAlignment) + { + Block* prevBlock = currentBlock->prevPhysical; + VMA_ASSERT(prevBlock != VMA_NULL && "There should be no missing alignment at offset 0!"); + + if (prevBlock->IsFree()) + { + uint32_t oldList = GetListIndex(prevBlock->size); + prevBlock->size += misssingAlignment; + // Check if new size crosses list bucket + if (oldList != GetListIndex(prevBlock->size)) + { + prevBlock->size -= misssingAlignment; + RemoveFreeBlock(prevBlock); + prevBlock->size += misssingAlignment; + InsertFreeBlock(prevBlock); + } + else + m_BlocksFreeSize += misssingAlignment; + } + else + { + Block* newBlock = m_BlockAllocator.Alloc(); + currentBlock->prevPhysical = newBlock; + prevBlock->nextPhysical = newBlock; + newBlock->prevPhysical = prevBlock; + newBlock->nextPhysical = currentBlock; + newBlock->size = misssingAlignment; + newBlock->offset = currentBlock->offset; + newBlock->MarkTaken(); + + InsertFreeBlock(newBlock); + } + + currentBlock->size -= misssingAlignment; + currentBlock->offset += misssingAlignment; + } + + if (currentBlock->size == request.size) + { + if (currentBlock == m_NullBlock) + { + // Setup new null block + m_NullBlock = m_BlockAllocator.Alloc(); + m_NullBlock->size = 0; + m_NullBlock->offset = currentBlock->offset + request.size; + m_NullBlock->prevPhysical = currentBlock; + m_NullBlock->nextPhysical = VMA_NULL; + m_NullBlock->MarkFree(); + m_NullBlock->PrevFree() = VMA_NULL; + m_NullBlock->NextFree() = VMA_NULL; + currentBlock->nextPhysical = m_NullBlock; + currentBlock->MarkTaken(); + } + } + else + { + VMA_ASSERT(currentBlock->size > request.size && "Proper block already found, shouldn't find smaller one!"); + + // Create new free block + Block* newBlock = m_BlockAllocator.Alloc(); + newBlock->size = currentBlock->size - request.size; + newBlock->offset = currentBlock->offset + request.size; + newBlock->prevPhysical = currentBlock; + newBlock->nextPhysical = currentBlock->nextPhysical; + currentBlock->nextPhysical = newBlock; + currentBlock->size = request.size; + + if (currentBlock == m_NullBlock) + { + m_NullBlock = newBlock; + m_NullBlock->MarkFree(); + m_NullBlock->NextFree() = VMA_NULL; + m_NullBlock->PrevFree() = VMA_NULL; + currentBlock->MarkTaken(); + } + else + { + newBlock->nextPhysical->prevPhysical = newBlock; + 
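// Splitting path: the unused tail of the chosen block becomes a separate free block.
// It was linked into the physical chain above and is re-registered in the
// size-segregated free lists below; MarkTaken() is called first because
// InsertFreeBlock() expects a block that is not already marked free.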
newBlock->MarkTaken(); + InsertFreeBlock(newBlock); + } + } + currentBlock->UserData() = userData; + + if (!IsVirtual()) + m_GranularityHandler.AllocPages((uint8_t)(uintptr_t)request.customData, + currentBlock->offset, currentBlock->size); + ++m_AllocCount; +} + +void VmaBlockMetadata_TLSF::Free(VmaAllocHandle allocHandle) +{ + Block* block = (Block*)allocHandle; + Block* next = block->nextPhysical; + VMA_ASSERT(!block->IsFree() && "Block is already free!"); + + if (!IsVirtual()) + m_GranularityHandler.FreePages(block->offset, block->size); + --m_AllocCount; + + // Try merging + Block* prev = block->prevPhysical; + if (prev != VMA_NULL && prev->IsFree()) + { + RemoveFreeBlock(prev); + MergeBlock(block, prev); + } + + if (!next->IsFree()) + InsertFreeBlock(block); + else if (next == m_NullBlock) + MergeBlock(m_NullBlock, block); + else + { + RemoveFreeBlock(next); + MergeBlock(next, block); + InsertFreeBlock(next); + } +} + +void VmaBlockMetadata_TLSF::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) +{ + Block* block = (Block*)allocHandle; + VMA_ASSERT(!block->IsFree() && "Cannot get allocation info for free block!"); + outInfo.offset = block->offset; + outInfo.size = block->size; + outInfo.pUserData = block->UserData(); +} + +void VmaBlockMetadata_TLSF::Clear() +{ + m_AllocCount = 0; + m_BlocksFreeCount = 0; + m_BlocksFreeSize = 0; + m_IsFree = 0; + m_NullBlock->offset = 0; + m_NullBlock->size = GetSize(); + Block* block = m_NullBlock->prevPhysical; + m_NullBlock->prevPhysical = VMA_NULL; + while (block) + { + Block* prev = block->prevPhysical; + m_BlockAllocator.Free(block); + block = prev; + } + memset(m_FreeList, 0, m_ListsCount * sizeof(Block*)); + memset(m_InnerIsFree, 0, m_MemoryClasses * sizeof(uint16_t)); + m_GranularityHandler.Clear(); +} + +void VmaBlockMetadata_TLSF::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) +{ + Block* block = (Block*)allocHandle; + VMA_ASSERT(!block->IsFree() && "Trying to set user data for not allocated block!"); + block->UserData() = userData; +} + +uint8_t VmaBlockMetadata_TLSF::SizeToMemoryClass(VkDeviceSize size) const +{ + if (size > SMALL_BUFFER_SIZE) + return VMA_BITSCAN_MSB(size) - MEMORY_CLASS_SHIFT; + return 0; +} + +uint16_t VmaBlockMetadata_TLSF::SizeToSecondIndex(VkDeviceSize size, uint8_t memoryClass) const +{ + if (memoryClass == 0) + return static_cast((size - 1) / 64); + return static_cast((size >> (memoryClass + MEMORY_CLASS_SHIFT - SECOND_LEVEL_INDEX)) ^ (1U << SECOND_LEVEL_INDEX)); +} + +uint32_t VmaBlockMetadata_TLSF::GetListIndex(uint8_t memoryClass, uint16_t secondIndex) const +{ + if (memoryClass == 0) + return secondIndex; + return static_cast(memoryClass - 1) * (1 << SECOND_LEVEL_INDEX) + secondIndex + 4; +} + +uint32_t VmaBlockMetadata_TLSF::GetListIndex(VkDeviceSize size) const +{ + uint8_t memoryClass = SizeToMemoryClass(size); + return GetListIndex(memoryClass, SizeToSecondIndex(size, memoryClass)); +} + +void VmaBlockMetadata_TLSF::RemoveFreeBlock(Block* block) +{ + VMA_ASSERT(block != m_NullBlock); + VMA_ASSERT(block->IsFree()); + + if (block->NextFree() != VMA_NULL) + block->NextFree()->PrevFree() = block->PrevFree(); + if (block->PrevFree() != VMA_NULL) + block->PrevFree()->NextFree() = block->NextFree(); + else + { + uint8_t memClass = SizeToMemoryClass(block->size); + uint16_t secondIndex = SizeToSecondIndex(block->size, memClass); + uint32_t index = GetListIndex(memClass, secondIndex); + m_FreeList[index] = block->NextFree(); + if (block->NextFree() == VMA_NULL) + { 
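// Keep the two-level TLSF bitmaps in sync: clear the second-level bit for this
// (memoryClass, secondIndex) bucket, and the first-level bit once the whole class has
// no free buckets left, so FindFreeBlock() stays a pair of O(1) bit scans.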
+ m_InnerIsFree[memClass] &= ~(1U << secondIndex); + if (m_InnerIsFree[memClass] == 0) + m_IsFree &= ~(1UL << memClass); + } + } + block->MarkTaken(); + block->UserData() = VMA_NULL; + --m_BlocksFreeCount; + m_BlocksFreeSize -= block->size; +} + +void VmaBlockMetadata_TLSF::InsertFreeBlock(Block* block) +{ + VMA_ASSERT(block != m_NullBlock); + VMA_ASSERT(!block->IsFree() && "Cannot insert block twice!"); + + uint8_t memClass = SizeToMemoryClass(block->size); + uint16_t secondIndex = SizeToSecondIndex(block->size, memClass); + uint32_t index = GetListIndex(memClass, secondIndex); + block->PrevFree() = VMA_NULL; + block->NextFree() = m_FreeList[index]; + m_FreeList[index] = block; + if (block->NextFree() != VMA_NULL) + block->NextFree()->PrevFree() = block; + else + { + m_InnerIsFree[memClass] |= 1U << secondIndex; + m_IsFree |= 1UL << memClass; + } + ++m_BlocksFreeCount; + m_BlocksFreeSize += block->size; +} + +void VmaBlockMetadata_TLSF::MergeBlock(Block* block, Block* prev) +{ + VMA_ASSERT(block->prevPhysical == prev && "Cannot merge seperate physical regions!"); + VMA_ASSERT(!prev->IsFree() && "Cannot merge block that belongs to free list!"); + + block->offset = prev->offset; + block->size += prev->size; + block->prevPhysical = prev->prevPhysical; + if (block->prevPhysical) + block->prevPhysical->nextPhysical = block; + m_BlockAllocator.Free(prev); +} + +VmaBlockMetadata_TLSF::Block* VmaBlockMetadata_TLSF::FindFreeBlock(VkDeviceSize size, uint32_t& listIndex) const +{ + uint8_t memoryClass = SizeToMemoryClass(size); + uint16_t innerFreeMap = m_InnerIsFree[memoryClass] & (~0U << SizeToSecondIndex(size, memoryClass)); + if (!innerFreeMap) + { + // Check higher levels for avaiable blocks + uint32_t freeMap = m_IsFree & (~0UL << (memoryClass + 1)); + if (!freeMap) + return VMA_NULL; // No more memory avaible + + // Find lowest free region + innerFreeMap = m_InnerIsFree[VMA_BITSCAN_LSB(freeMap)]; + } + // Find lowest free subregion + listIndex = GetListIndex(memoryClass, VMA_BITSCAN_LSB(static_cast(innerFreeMap))); + return m_FreeList[listIndex]; +} + +bool VmaBlockMetadata_TLSF::CheckBlock( + Block& block, + uint32_t listIndex, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + VmaAllocationRequest* pAllocationRequest) +{ + VMA_ASSERT(block.IsFree() && "Block is already taken!"); + + VkDeviceSize alignedOffset = VmaAlignUp(block.offset, allocAlignment); + if (block.size < allocSize + alignedOffset - block.offset) + return false; + + // Check for granularity conflicts + if (!IsVirtual() && + m_GranularityHandler.IsConflict(allocSize, alignedOffset, block.size, block.offset, allocType)) + return false; + + // Alloc successful + pAllocationRequest->type = VmaAllocationRequestType::TLSF; + pAllocationRequest->allocHandle = (VmaAllocHandle)█ + pAllocationRequest->size = allocSize; + pAllocationRequest->customData = (void*)allocType; + pAllocationRequest->algorithmData = alignedOffset; + + // Place block at the start of list if it's normal block + if (listIndex != m_ListsCount && block.PrevFree()) + { + block.PrevFree()->NextFree() = block.NextFree(); + if (block.NextFree()) + block.NextFree()->PrevFree() = block.PrevFree(); + block.PrevFree() = VMA_NULL; + block.NextFree() = m_FreeList[listIndex]; + m_FreeList[listIndex] = █ + if (block.NextFree()) + block.NextFree()->PrevFree() = █ + } + + return true; +} +#endif // _VMA_BLOCK_METADATA_TLSF_FUNCTIONS +#endif // _VMA_BLOCK_METADATA_TLSF + #ifndef _VMA_BLOCK_VECTOR /* Sequence of VmaDeviceMemoryBlock. 
Represents memory blocks allocated for a specific @@ -9095,6 +10391,7 @@ struct VmaDefragmentationMove size_t dstBlockIndex; VkDeviceSize srcOffset; VkDeviceSize dstOffset; + VmaAllocHandle dstHandle; VkDeviceSize size; VmaAllocation hAllocation; VmaDeviceMemoryBlock* pSrcBlock; @@ -9264,7 +10561,7 @@ private: struct FreeSpace { - size_t blockInfoIndex; // SIZE_MAX means this structure is invalid. + size_t blockInfoIndex; // SIZE_MAX means this structure is invalid. VkDeviceSize offset; VkDeviceSize size; } m_FreeSpaces[MAX_COUNT]; @@ -9538,13 +10835,14 @@ public: VkResult Init() { return VK_SUCCESS; } bool IsEmpty() const { return m_Metadata->IsEmpty(); } - void Free(VkDeviceSize offset) { m_Metadata->FreeAtOffset(offset); } - void SetAllocationUserData(VkDeviceSize offset, void* userData) { m_Metadata->SetAllocationUserData(offset, userData); } + void Free(VmaVirtualAllocation allocation) { m_Metadata->Free((VmaAllocHandle)allocation); } + void SetAllocationUserData(VmaVirtualAllocation allocation, void* userData) { m_Metadata->SetAllocationUserData((VmaAllocHandle)allocation, userData); } void Clear() { m_Metadata->Clear(); } const VkAllocationCallbacks* GetAllocationCallbacks() const; - void GetAllocationInfo(VkDeviceSize offset, VmaVirtualAllocationInfo& outInfo); - VkResult Allocate(const VmaVirtualAllocationCreateInfo& createInfo, VkDeviceSize& outOffset); + void GetAllocationInfo(VmaVirtualAllocation allocation, VmaVirtualAllocationInfo& outInfo); + VkResult Allocate(const VmaVirtualAllocationCreateInfo& createInfo, VmaVirtualAllocation& outAllocation, + VkDeviceSize* outOffset); void CalculateStats(VmaStatInfo& outStatInfo) const; #if VMA_STATS_STRING_ENABLED void BuildStatsString(bool detailedMap, VmaStringBuilder& sb) const; @@ -9563,13 +10861,16 @@ VmaVirtualBlock_T::VmaVirtualBlock_T(const VmaVirtualBlockCreateInfo& createInfo switch (algorithm) { case 0: - m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_Generic)(VK_NULL_HANDLE, true); + m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_Generic)(VK_NULL_HANDLE, 1, true); break; case VMA_VIRTUAL_BLOCK_CREATE_BUDDY_ALGORITHM_BIT: - m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_Buddy)(VK_NULL_HANDLE, true); + m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_Buddy)(VK_NULL_HANDLE, 1, true); break; case VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT: - m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_Linear)(VK_NULL_HANDLE, true); + m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_Linear)(VK_NULL_HANDLE, 1, true); + break; + case VMA_VIRTUAL_BLOCK_CREATE_TLSF_ALGORITHM_BIT: + m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_TLSF)(VK_NULL_HANDLE, 1, true); break; default: VMA_ASSERT(0); @@ -9592,17 +10893,16 @@ const VkAllocationCallbacks* VmaVirtualBlock_T::GetAllocationCallbacks() const return m_AllocationCallbacksSpecified ? 
&m_AllocationCallbacks : VMA_NULL; } -void VmaVirtualBlock_T::GetAllocationInfo(VkDeviceSize offset, VmaVirtualAllocationInfo& outInfo) +void VmaVirtualBlock_T::GetAllocationInfo(VmaVirtualAllocation allocation, VmaVirtualAllocationInfo& outInfo) { - m_Metadata->GetAllocationInfo(offset, outInfo); + m_Metadata->GetAllocationInfo((VmaAllocHandle)allocation, outInfo); } -VkResult VmaVirtualBlock_T::Allocate(const VmaVirtualAllocationCreateInfo& createInfo, VkDeviceSize& outOffset) +VkResult VmaVirtualBlock_T::Allocate(const VmaVirtualAllocationCreateInfo& createInfo, VmaVirtualAllocation& outAllocation, + VkDeviceSize* outOffset) { - outOffset = VK_WHOLE_SIZE; VmaAllocationRequest request = {}; if (m_Metadata->CreateAllocationRequest( - 1, // bufferImageGranularity createInfo.size, // allocSize VMA_MAX(createInfo.alignment, (VkDeviceSize)1), // allocAlignment (createInfo.flags & VMA_VIRTUAL_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0, // upperAddress @@ -9613,9 +10913,12 @@ VkResult VmaVirtualBlock_T::Allocate(const VmaVirtualAllocationCreateInfo& creat m_Metadata->Alloc(request, VMA_SUBALLOCATION_TYPE_UNKNOWN, // type - unimportant createInfo.pUserData); - outOffset = request.offset; + outAllocation = (VmaVirtualAllocation)request.allocHandle; + if(outOffset) + *outOffset = m_Metadata->GetAllocationOffset(request.allocHandle); return VK_SUCCESS; } + outAllocation = (VmaVirtualAllocation)VK_WHOLE_SIZE; return VK_ERROR_OUT_OF_DEVICE_MEMORY; } @@ -10033,7 +11336,8 @@ void VmaDeviceMemoryBlock::Init( VkDeviceMemory newMemory, VkDeviceSize newSize, uint32_t id, - uint32_t algorithm) + uint32_t algorithm, + VkDeviceSize bufferImageGranularity) { VMA_ASSERT(m_hMemory == VK_NULL_HANDLE); @@ -10046,18 +11350,22 @@ void VmaDeviceMemoryBlock::Init( { case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT: m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator->GetAllocationCallbacks(), - false); // isVirtual + bufferImageGranularity, false); // isVirtual break; case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT: m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator->GetAllocationCallbacks(), - false); // isVirtual + bufferImageGranularity, false); // isVirtual + break; + case VMA_POOL_CREATE_TLSF_ALGORITHM_BIT: + m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_TLSF)(hAllocator->GetAllocationCallbacks(), + bufferImageGranularity, false); // isVirtual break; default: VMA_ASSERT(0); // Fall-through. case 0: m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator->GetAllocationCallbacks(), - false); // isVirtual + bufferImageGranularity, false); // isVirtual } m_pMetadata->Init(newSize); } @@ -10267,7 +11575,7 @@ VmaAllocation_T::~VmaAllocation_T() void VmaAllocation_T::InitBlockAllocation( VmaDeviceMemoryBlock* block, - VkDeviceSize offset, + VmaAllocHandle allocHandle, VkDeviceSize alignment, VkDeviceSize size, uint32_t memoryTypeIndex, @@ -10283,7 +11591,7 @@ void VmaAllocation_T::InitBlockAllocation( m_MapCount = mapped ? 
MAP_COUNT_FLAG_PERSISTENT_MAP : 0; m_SuballocationType = (uint8_t)suballocationType; m_BlockAllocation.m_Block = block; - m_BlockAllocation.m_Offset = offset; + m_BlockAllocation.m_AllocHandle = allocHandle; } void VmaAllocation_T::InitDedicatedAllocation( @@ -10331,7 +11639,7 @@ void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData) void VmaAllocation_T::ChangeBlockAllocation( VmaAllocator hAllocator, VmaDeviceMemoryBlock* block, - VkDeviceSize offset) + VmaAllocHandle allocHandle) { VMA_ASSERT(block != VMA_NULL); VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK); @@ -10347,13 +11655,27 @@ void VmaAllocation_T::ChangeBlockAllocation( } m_BlockAllocation.m_Block = block; - m_BlockAllocation.m_Offset = offset; + m_BlockAllocation.m_AllocHandle = allocHandle; } -void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset) +void VmaAllocation_T::ChangeAllocHandle(VmaAllocHandle newAllocHandle) { VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK); - m_BlockAllocation.m_Offset = newOffset; + m_BlockAllocation.m_AllocHandle = newAllocHandle; +} + +VmaAllocHandle VmaAllocation_T::GetAllocHandle() const +{ + switch (m_Type) + { + case ALLOCATION_TYPE_BLOCK: + return m_BlockAllocation.m_AllocHandle; + case ALLOCATION_TYPE_DEDICATED: + return VMA_NULL; + default: + VMA_ASSERT(0); + return VMA_NULL; + } } VkDeviceSize VmaAllocation_T::GetOffset() const @@ -10361,7 +11683,7 @@ VkDeviceSize VmaAllocation_T::GetOffset() const switch (m_Type) { case ALLOCATION_TYPE_BLOCK: - return m_BlockAllocation.m_Offset; + return m_BlockAllocation.m_Block->m_pMetadata->GetAllocationOffset(m_BlockAllocation.m_AllocHandle); case ALLOCATION_TYPE_DEDICATED: return 0; default: @@ -10407,7 +11729,7 @@ void* VmaAllocation_T::GetMappedData() const { void* pBlockData = m_BlockAllocation.m_Block->GetMappedData(); VMA_ASSERT(pBlockData != VMA_NULL); - return (char*)pBlockData + m_BlockAllocation.m_Offset; + return (char*)pBlockData + GetOffset(); } else { @@ -10941,7 +12263,7 @@ void VmaBlockVector::Free( pBlock->Unmap(m_hAllocator, 1); } - pBlock->m_pMetadata->FreeAtOffset(hAllocation->GetOffset()); + pBlock->m_pMetadata->Free(hAllocation->GetAllocHandle()); VMA_HEAVY_ASSERT(pBlock->Validate()); VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex); @@ -11043,7 +12365,6 @@ VkResult VmaBlockVector::AllocateFromBlock( VmaAllocationRequest currRequest = {}; if (pBlock->m_pMetadata->CreateAllocationRequest( - m_BufferImageGranularity, size, alignment, isUpperAddress, @@ -11066,7 +12387,7 @@ VkResult VmaBlockVector::AllocateFromBlock( UpdateHasEmptyBlock(); (*pAllocation)->InitBlockAllocation( pBlock, - currRequest.offset, + currRequest.allocHandle, alignment, currRequest.size, // Not size, as actual allocation size may be larger than requested! 
m_MemoryTypeIndex, @@ -11081,7 +12402,7 @@ VkResult VmaBlockVector::AllocateFromBlock( } if (IsCorruptionDetectionEnabled()) { - VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, currRequest.size); + VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, (*pAllocation)->GetOffset(), currRequest.size); VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value."); } return VK_SUCCESS; @@ -11143,7 +12464,8 @@ VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIn mem, allocInfo.allocationSize, m_NextBlockId++, - m_Algorithm); + m_Algorithm, + m_BufferImageGranularity); m_Blocks.push_back(pBlock); if (pNewBlockIndex != VMA_NULL) @@ -11638,8 +12960,8 @@ void VmaBlockVector::CommitDefragmentations( { const VmaDefragmentationMove& move = pCtx->defragmentationMoves[i]; - move.pSrcBlock->m_pMetadata->FreeAtOffset(move.srcOffset); - move.hAllocation->ChangeBlockAllocation(m_hAllocator, move.pDstBlock, move.dstOffset); + move.pSrcBlock->m_pMetadata->Free(move.hAllocation->GetAllocHandle()); + move.hAllocation->ChangeBlockAllocation(m_hAllocator, move.pDstBlock, move.dstHandle); } pCtx->defragmentationMovesCommitted = pCtx->defragmentationMovesProcessed; @@ -11841,9 +13163,9 @@ VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound( for (size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex) { BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex]; + VmaBlockMetadata* pMetadata = pDstBlockInfo->m_pBlock->m_pMetadata; VmaAllocationRequest dstAllocRequest; - if (pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest( - m_pBlockVector->GetBufferImageGranularity(), + if (pMetadata->CreateAllocationRequest( size, alignment, false, // upperAddress @@ -11851,7 +13173,7 @@ VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound( strategy, &dstAllocRequest) && MoveMakesSense( - dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset)) + dstBlockIndex, pMetadata->GetAllocationOffset(dstAllocRequest.allocHandle), srcBlockIndex, srcOffset)) { // Reached limit on number of allocations or bytes to move. 
if ((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
@@ -11864,11 +13186,12 @@ VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
                 move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
                 move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
                 move.srcOffset = srcOffset;
-                move.dstOffset = dstAllocRequest.offset;
+                move.dstOffset = pMetadata->GetAllocationOffset(dstAllocRequest.allocHandle);
                 move.size = size;
                 move.hAllocation = allocInfo.m_hAllocation;
                 move.pSrcBlock = pSrcBlockInfo->m_pBlock;
                 move.pDstBlock = pDstBlockInfo->m_pBlock;
+                move.dstHandle = dstAllocRequest.allocHandle;
 
                 moves.push_back(move);
 
@@ -11876,8 +13199,8 @@ VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
 
                 if (freeOldAllocations)
                 {
-                    pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
-                    allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
+                    pSrcBlockInfo->m_pBlock->m_pMetadata->Free(allocInfo.m_hAllocation->GetAllocHandle());
+                    allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.allocHandle);
                 }
 
                 if (allocInfo.m_pChanged != VMA_NULL)
@@ -12081,7 +13404,7 @@ VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
 }
 
 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
-    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
+    VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove>>& moves,
     VkDeviceSize maxBytesToMove,
     uint32_t maxAllocationsToMove,
     VmaDefragmentationFlags flags)
@@ -12160,7 +13483,7 @@ VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
 
             VmaSuballocation suballoc = *srcSuballocIt;
             suballoc.offset = dstAllocOffset;
-            ((VmaAllocation)(suballoc.userData))->ChangeOffset(dstAllocOffset);
+            ((VmaAllocation)(suballoc.userData))->ChangeAllocHandle((VmaAllocHandle)dstAllocOffset);
             m_BytesMoved += srcAllocSize;
             ++m_AllocationsMoved;
@@ -12175,6 +13498,7 @@ VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
             move.dstBlockIndex = freeSpaceOrigBlockIndex;
             move.srcOffset = srcAllocOffset;
             move.dstOffset = dstAllocOffset;
+            move.dstHandle = (VmaAllocHandle)dstAllocOffset;
             move.size = srcAllocSize;
 
             moves.push_back(move);
@@ -12188,7 +13512,7 @@ VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
 
             VmaSuballocation suballoc = *srcSuballocIt;
             suballoc.offset = dstAllocOffset;
-            ((VmaAllocation)(suballoc.userData))->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
+            ((VmaAllocation)(suballoc.userData))->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, (VmaAllocHandle)dstAllocOffset);
             m_BytesMoved += srcAllocSize;
             ++m_AllocationsMoved;
@@ -12203,6 +13527,7 @@ VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
             move.dstBlockIndex = freeSpaceOrigBlockIndex;
             move.srcOffset = srcAllocOffset;
             move.dstOffset = dstAllocOffset;
+            move.dstHandle = (VmaAllocHandle)dstAllocOffset;
             move.size = srcAllocSize;
 
             moves.push_back(move);
@@ -12254,7 +13579,7 @@ VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
             else
             {
                 srcSuballocIt->offset = dstAllocOffset;
-                ((VmaAllocation)(srcSuballocIt->userData))->ChangeOffset(dstAllocOffset);
+                ((VmaAllocation)(srcSuballocIt->userData))->ChangeAllocHandle((VmaAllocHandle)dstAllocOffset);
                 dstOffset = dstAllocOffset + srcAllocSize;
                 m_BytesMoved += srcAllocSize;
                 ++m_AllocationsMoved;
@@ -12264,6 +13589,7 @@ VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
                 move.dstBlockIndex = dstOrigBlockIndex;
                 move.srcOffset = srcAllocOffset;
                 move.dstOffset = dstAllocOffset;
+                move.dstHandle = (VmaAllocHandle)dstAllocOffset;
                 move.size = srcAllocSize;
moves.push_back(move); @@ -12279,7 +13605,7 @@ VkResult VmaDefragmentationAlgorithm_Fast::Defragment( VmaSuballocation suballoc = *srcSuballocIt; suballoc.offset = dstAllocOffset; - ((VmaAllocation)(suballoc.userData))->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset); + ((VmaAllocation)(suballoc.userData))->ChangeBlockAllocation(m_hAllocator, pDstBlock, (VmaAllocHandle)dstAllocOffset); dstOffset = dstAllocOffset + srcAllocSize; m_BytesMoved += srcAllocSize; ++m_AllocationsMoved; @@ -12295,6 +13621,7 @@ VkResult VmaDefragmentationAlgorithm_Fast::Defragment( move.dstBlockIndex = dstOrigBlockIndex; move.srcOffset = srcAllocOffset; move.dstOffset = dstAllocOffset; + move.dstHandle = (VmaAllocHandle)dstAllocOffset; move.size = srcAllocSize; moves.push_back(move); @@ -15668,7 +16995,11 @@ VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory( allocator->Unmap(allocation); } -VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size) +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation( + VmaAllocator allocator, + VmaAllocation allocation, + VkDeviceSize offset, + VkDeviceSize size) { VMA_ASSERT(allocator && allocation); @@ -15681,7 +17012,11 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, V return res; } -VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size) +VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation( + VmaAllocator allocator, + VmaAllocation allocation, + VkDeviceSize offset, + VkDeviceSize size) { VMA_ASSERT(allocator && allocation); @@ -15744,7 +17079,9 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations( return res; } -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits) +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption( + VmaAllocator allocator, + uint32_t memoryTypeBits) { VMA_ASSERT(allocator); @@ -16306,29 +17643,33 @@ VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaIsVirtualBlockEmpty(VmaVirtualBlock VMA_N } VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo(VmaVirtualBlock VMA_NOT_NULL virtualBlock, - VkDeviceSize offset, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo) + VmaVirtualAllocation VMA_NOT_NULL allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo) { VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pVirtualAllocInfo != VMA_NULL); VMA_DEBUG_LOG("vmaGetVirtualAllocationInfo"); VMA_DEBUG_GLOBAL_MUTEX_LOCK; - virtualBlock->GetAllocationInfo(offset, *pVirtualAllocInfo); + virtualBlock->GetAllocationInfo(allocation, *pVirtualAllocInfo); } VMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate(VmaVirtualBlock VMA_NOT_NULL virtualBlock, - const VmaVirtualAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, VkDeviceSize* VMA_NOT_NULL pOffset) + const VmaVirtualAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, VmaVirtualAllocation* VMA_NOT_NULL pAllocation, + VkDeviceSize* VMA_NULLABLE pOffset) { - VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pCreateInfo != VMA_NULL && pOffset != VMA_NULL); + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pCreateInfo != VMA_NULL && pAllocation != VMA_NULL); VMA_DEBUG_LOG("vmaVirtualAllocate"); VMA_DEBUG_GLOBAL_MUTEX_LOCK; - return virtualBlock->Allocate(*pCreateInfo, *pOffset); + return virtualBlock->Allocate(*pCreateInfo, *pAllocation, pOffset); } -VMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree(VmaVirtualBlock VMA_NOT_NULL 
virtualBlock, VkDeviceSize offset)
+VMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree(VmaVirtualBlock VMA_NOT_NULL virtualBlock, VmaVirtualAllocation VMA_NULLABLE allocation)
 {
-    VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);
-    VMA_DEBUG_LOG("vmaVirtualFree");
-    VMA_DEBUG_GLOBAL_MUTEX_LOCK;
-    virtualBlock->Free(offset);
+    if(virtualBlock != VMA_NULL)
+    {
+        VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);
+        VMA_DEBUG_LOG("vmaVirtualFree");
+        VMA_DEBUG_GLOBAL_MUTEX_LOCK;
+        virtualBlock->Free(allocation);
+    }
 }
 
 VMA_CALL_PRE void VMA_CALL_POST vmaClearVirtualBlock(VmaVirtualBlock VMA_NOT_NULL virtualBlock)
@@ -16340,12 +17681,12 @@ VMA_CALL_PRE void VMA_CALL_POST vmaClearVirtualBlock(VmaVirtualBlock VMA_NOT_NUL
 }
 
 VMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
-    VkDeviceSize offset, void* VMA_NULLABLE pUserData)
+    VmaVirtualAllocation VMA_NOT_NULL allocation, void* VMA_NULLABLE pUserData)
 {
     VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);
     VMA_DEBUG_LOG("vmaSetVirtualAllocationUserData");
     VMA_DEBUG_GLOBAL_MUTEX_LOCK;
-    virtualBlock->SetAllocationUserData(offset, pUserData);
+    virtualBlock->SetAllocationUserData(allocation, pUserData);
 }
 
 VMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStats(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
@@ -17586,14 +18927,14 @@ VkResult res = vmaCreateVirtualBlock(&blockCreateInfo, &block);
 
 #VmaVirtualBlock object contains internal data structure that keeps track of free and occupied regions
 using the same code as the main Vulkan memory allocator.
-However, there is no "virtual allocation" object.
-When you request a new allocation, a `VkDeviceSize` number is returned.
-It is an offset inside the block where the allocation has been placed, but it also uniquely identifies the allocation within this block.
+Similarly to #VmaAllocation for standard GPU allocations, there is a #VmaVirtualAllocation type
+that represents an opaque handle to an allocation within the virtual block.
 
-In order to make an allocation:
+In order to make such an allocation:
 
 -# Fill in #VmaVirtualAllocationCreateInfo structure.
--# Call vmaVirtualAllocate(). Get new `VkDeviceSize offset` that identifies the allocation.
+-# Call vmaVirtualAllocate(). Get a new #VmaVirtualAllocation object that represents the allocation.
+   You can also receive `VkDeviceSize offset` that was assigned to the allocation.
 
 Example:
 
@@ -17601,11 +18942,12 @@ Example:
 VmaVirtualAllocationCreateInfo allocCreateInfo = {};
 allocCreateInfo.size = 4096; // 4 KB
 
-VkDeviceSize allocOffset;
-res = vmaVirtualAllocate(block, &allocCreateInfo, &allocOffset);
+VmaVirtualAllocation alloc;
+VkDeviceSize offset;
+res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, &offset);
 if(res == VK_SUCCESS)
 {
-    // Use the 4 KB of your memory starting at allocOffset.
+    // Use the 4 KB of your memory starting at offset.
 }
 else
 {
@@ -17616,8 +18958,8 @@ else
 \section virtual_allocator_deallocation Deallocation
 
 When no longer needed, an allocation can be freed by calling vmaVirtualFree().
-You can only pass to this function the exact offset that was previously returned by vmaVirtualAllocate()
-and not any other location within the memory.
+You can only pass to this function an allocation that was previously returned by vmaVirtualAllocate()
+called for the same #VmaVirtualBlock.
 
 When whole block is no longer needed, the block object can be released by calling vmaDestroyVirtualBlock().
 All allocations must be freed before the block is destroyed, which is checked internally by an assert.
@@ -17625,7 +18967,7 @@ However, if you don't want to call vmaVirtualFree() for each allocation, you can
 a feature not available in normal Vulkan memory allocator.
 Example:
 
 \code
-vmaVirtualFree(block, allocOffset);
+vmaVirtualFree(block, alloc);
 vmaDestroyVirtualBlock(block);
 \endcode
 
@@ -17643,20 +18985,20 @@ struct CustomAllocData
 };
 CustomAllocData* allocData = new CustomAllocData();
 allocData->m_AllocName = "My allocation 1";
-vmaSetVirtualAllocationUserData(block, allocOffset, allocData);
+vmaSetVirtualAllocationUserData(block, alloc, allocData);
 \endcode
 
-The pointer can later be fetched, along with allocation size, by passing the allocation offset to function
+The pointer can later be fetched, along with allocation offset and size, by passing the allocation handle to function
 vmaGetVirtualAllocationInfo() and inspecting returned structure #VmaVirtualAllocationInfo.
 If you allocated a new object to be used as the custom pointer, don't forget to delete that object before freeing the allocation!
 Example:
 
 \code
 VmaVirtualAllocationInfo allocInfo;
-vmaGetVirtualAllocationInfo(block, allocOffset, &allocInfo);
+vmaGetVirtualAllocationInfo(block, alloc, &allocInfo);
 delete (CustomAllocData*)allocInfo.pUserData;
 
-vmaVirtualFree(block, allocOffset);
+vmaVirtualFree(block, alloc);
 \endcode
 
 \section virtual_allocator_alignment_and_units Alignment and units
 
@@ -17670,8 +19012,8 @@ VmaVirtualAllocationCreateInfo allocCreateInfo = {};
 allocCreateInfo.size = 4096; // 4 KB
 allocCreateInfo.alignment = 4; // Returned offset must be a multiple of 4 B
 
-VkDeviceSize allocOffset;
-res = vmaVirtualAllocate(block, &allocCreateInfo, &allocOffset);
+VmaVirtualAllocation alloc;
+res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, nullptr);
 \endcode
 
 Alignments of different allocations made from one block may vary.
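A minimal end-to-end sketch, assuming the VMA_VIRTUAL_BLOCK_CREATE_TLSF_ALGORITHM_BIT flag added by this change and using arbitrary example sizes, might combine the handle-based calls shown above like this:

\code
// Sketch: virtual block opting into the TLSF algorithm, one allocation made and freed through the handle API.
VmaVirtualBlockCreateInfo blockCreateInfo = {};
blockCreateInfo.size = 1048576; // 1 MB - arbitrary example size
blockCreateInfo.flags = VMA_VIRTUAL_BLOCK_CREATE_TLSF_ALGORITHM_BIT;

VmaVirtualBlock block;
VkResult res = vmaCreateVirtualBlock(&blockCreateInfo, &block);
if(res == VK_SUCCESS)
{
    VmaVirtualAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.size = 4096;
    allocCreateInfo.alignment = 256;

    VmaVirtualAllocation alloc;
    VkDeviceSize offset;
    res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, &offset);
    if(res == VK_SUCCESS)
    {
        // The offset returned here can also be read back later from the handle.
        VmaVirtualAllocationInfo allocInfo;
        vmaGetVirtualAllocationInfo(block, alloc, &allocInfo);
        // allocInfo.offset == offset and allocInfo.size >= allocCreateInfo.size at this point.

        vmaVirtualFree(block, alloc);
    }
    vmaDestroyVirtualBlock(block);
}
\endcode

The same pattern works with the default, linear, and buddy algorithms; only the flags member of VmaVirtualBlockCreateInfo changes.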
@@ -17681,7 +19023,7 @@ It might be more convenient, but you need to make sure to use this new unit cons
 
 - VmaVirtualBlockCreateInfo::size
 - VmaVirtualAllocationCreateInfo::size and VmaVirtualAllocationCreateInfo::alignment
-- Using offset returned by vmaVirtualAllocate()
+- Using offset returned by vmaVirtualAllocate() or in VmaVirtualAllocationInfo::offset
 
 \section virtual_allocator_statistics Statistics
 
diff --git a/src/Tests.cpp b/src/Tests.cpp
index 1ce4ad4..c937c71 100644
--- a/src/Tests.cpp
+++ b/src/Tests.cpp
@@ -75,6 +75,8 @@ static const char* AlgorithmToStr(uint32_t algorithm)
         return "Linear";
     case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
         return "Buddy";
+    case VMA_POOL_CREATE_TLSF_ALGORITHM_BIT:
+        return "TLSF";
     case 0:
         return "Default";
     default:
@@ -2677,6 +2679,7 @@ static void TestVirtualBlocks()
 
     const VkDeviceSize blockSize = 16 * MEGABYTE;
     const VkDeviceSize alignment = 256;
+    VkDeviceSize offset;
 
     // # Create block 16 MB
 
@@ -2686,50 +2689,55 @@ static void TestVirtualBlocks()
     VmaVirtualBlock block;
     TEST(vmaCreateVirtualBlock(&blockCreateInfo, &block) == VK_SUCCESS && block);
 
-    // # Allocate 8 MB
+    // # Allocate 8 MB (also fetch offset from the allocation)
 
     VmaVirtualAllocationCreateInfo allocCreateInfo = {};
     allocCreateInfo.alignment = alignment;
     allocCreateInfo.pUserData = (void*)(uintptr_t)1;
     allocCreateInfo.size = 8 * MEGABYTE;
-    VkDeviceSize alloc0Offset;
-    TEST(vmaVirtualAllocate(block, &allocCreateInfo, &alloc0Offset) == VK_SUCCESS);
-    TEST(alloc0Offset < blockSize);
+    VmaVirtualAllocation allocation0;
+    TEST(vmaVirtualAllocate(block, &allocCreateInfo, &allocation0, &offset) == VK_SUCCESS);
 
     // # Validate the allocation
 
-    VmaVirtualAllocationInfo allocInfo = {};
-    vmaGetVirtualAllocationInfo(block, alloc0Offset, &allocInfo);
-    TEST(allocInfo.size == allocCreateInfo.size);
-    TEST(allocInfo.pUserData = allocCreateInfo.pUserData);
+    VmaVirtualAllocationInfo allocInfo0 = {};
+    vmaGetVirtualAllocationInfo(block, allocation0, &allocInfo0);
+    TEST(allocInfo0.offset < blockSize);
+    TEST(allocInfo0.offset == offset);
+    TEST(allocInfo0.size == allocCreateInfo.size);
+    TEST(allocInfo0.pUserData == allocCreateInfo.pUserData);
 
     // # Check SetUserData
 
-    vmaSetVirtualAllocationUserData(block, alloc0Offset, (void*)(uintptr_t)2);
-    vmaGetVirtualAllocationInfo(block, alloc0Offset, &allocInfo);
-    TEST(allocInfo.pUserData = (void*)(uintptr_t)2);
+    vmaSetVirtualAllocationUserData(block, allocation0, (void*)(uintptr_t)2);
+    vmaGetVirtualAllocationInfo(block, allocation0, &allocInfo0);
+    TEST(allocInfo0.pUserData == (void*)(uintptr_t)2);
 
-    // # Allocate 4 MB
+    // # Allocate 4 MB (also test passing null as pOffset during allocation)
 
     allocCreateInfo.size = 4 * MEGABYTE;
-    UINT64 alloc1Offset;
-    TEST(vmaVirtualAllocate(block, &allocCreateInfo, &alloc1Offset) == VK_SUCCESS);
-    TEST(alloc1Offset < blockSize);
-    TEST(alloc1Offset + 4 * MEGABYTE <= alloc0Offset || alloc0Offset + 8 * MEGABYTE <= alloc1Offset); // Check if they don't overlap.
+    VmaVirtualAllocation allocation1;
+    TEST(vmaVirtualAllocate(block, &allocCreateInfo, &allocation1, nullptr) == VK_SUCCESS);
+    VmaVirtualAllocationInfo allocInfo1 = {};
+    vmaGetVirtualAllocationInfo(block, allocation1, &allocInfo1);
+    TEST(allocInfo1.offset < blockSize);
+    TEST(allocInfo1.offset + 4 * MEGABYTE <= allocInfo0.offset || allocInfo0.offset + 8 * MEGABYTE <= allocInfo1.offset); // Check if they don't overlap.
// # Allocate another 8 MB - it should fail allocCreateInfo.size = 8 * MEGABYTE; - UINT64 alloc2Offset; - TEST(vmaVirtualAllocate(block, &allocCreateInfo, &alloc2Offset) < 0); - TEST(alloc2Offset == VK_WHOLE_SIZE); + VmaVirtualAllocation allocation2; + TEST(vmaVirtualAllocate(block, &allocCreateInfo, &allocation2, nullptr) < 0); + TEST(allocation2 == (VmaVirtualAllocation)VK_WHOLE_SIZE); // # Free the 4 MB block. Now allocation of 8 MB should succeed. - vmaVirtualFree(block, alloc1Offset); - TEST(vmaVirtualAllocate(block, &allocCreateInfo, &alloc2Offset) == VK_SUCCESS); - TEST(alloc2Offset < blockSize); - TEST(alloc2Offset + 4 * MEGABYTE <= alloc0Offset || alloc0Offset + 8 * MEGABYTE <= alloc2Offset); // Check if they don't overlap. + vmaVirtualFree(block, allocation1); + TEST(vmaVirtualAllocate(block, &allocCreateInfo, &allocation2, nullptr) == VK_SUCCESS); + VmaVirtualAllocationInfo allocInfo2 = {}; + vmaGetVirtualAllocationInfo(block, allocation2, &allocInfo2); + TEST(allocInfo2.offset < blockSize); + TEST(allocInfo2.offset + 4 * MEGABYTE <= allocInfo0.offset || allocInfo0.offset + 8 * MEGABYTE <= allocInfo2.offset); // Check if they don't overlap. // # Calculate statistics @@ -2753,34 +2761,36 @@ static void TestVirtualBlocks() // # Free alloc0, leave alloc2 unfreed. - vmaVirtualFree(block, alloc0Offset); + vmaVirtualFree(block, allocation0); // # Test alignment { constexpr size_t allocCount = 10; - VkDeviceSize allocOffset[allocCount] = {}; + VmaVirtualAllocation allocations[allocCount] = {}; for(size_t i = 0; i < allocCount; ++i) { const bool alignment0 = i == allocCount - 1; allocCreateInfo.size = i * 3 + 15; allocCreateInfo.alignment = alignment0 ? 0 : 8; - TEST(vmaVirtualAllocate(block, &allocCreateInfo, &allocOffset[i]) == VK_SUCCESS); + TEST(vmaVirtualAllocate(block, &allocCreateInfo, &allocations[i], nullptr) == VK_SUCCESS); if(!alignment0) { - TEST(allocOffset[i] % allocCreateInfo.alignment == 0); + VmaVirtualAllocationInfo info; + vmaGetVirtualAllocationInfo(block, allocations[i], &info); + TEST(info.offset % allocCreateInfo.alignment == 0); } } for(size_t i = allocCount; i--; ) { - vmaVirtualFree(block, allocOffset[i]); + vmaVirtualFree(block, allocations[i]); } } // # Final cleanup - vmaVirtualFree(block, alloc2Offset); + vmaVirtualFree(block, allocation2); vmaDestroyVirtualBlock(block); { @@ -2792,8 +2802,8 @@ static void TestVirtualBlocks() for(size_t i = 0; i < 8; ++i) { - VkDeviceSize offset = 0; - TEST(vmaVirtualAllocate(block, &allocCreateInfo, &offset) == VK_SUCCESS); + VmaVirtualAllocation allocation; + TEST(vmaVirtualAllocate(block, &allocCreateInfo, &allocation, nullptr) == VK_SUCCESS); } vmaClearVirtualBlock(block); @@ -2808,7 +2818,7 @@ static void TestVirtualBlocksAlgorithms() RandomNumberGenerator rand{3454335}; auto calcRandomAllocSize = [&rand]() -> VkDeviceSize { return rand.Generate() % 20 + 5; }; - for(size_t algorithmIndex = 0; algorithmIndex < 3; ++algorithmIndex) + for(size_t algorithmIndex = 0; algorithmIndex < 4; ++algorithmIndex) { // Create the block VmaVirtualBlockCreateInfo blockCreateInfo = {}; @@ -2818,6 +2828,7 @@ static void TestVirtualBlocksAlgorithms() { case 1: blockCreateInfo.flags = VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT; break; case 2: blockCreateInfo.flags = VMA_VIRTUAL_BLOCK_CREATE_BUDDY_ALGORITHM_BIT; break; + case 3: blockCreateInfo.flags = VMA_VIRTUAL_BLOCK_CREATE_TLSF_ALGORITHM_BIT; break; } VmaVirtualBlock block = nullptr; VkResult res = vmaCreateVirtualBlock(&blockCreateInfo, &block); @@ -2825,7 +2836,8 @@ static void 
TestVirtualBlocksAlgorithms()
 
     struct AllocData
     {
-        VkDeviceSize offset, requestedSize, allocationSize;
+        VmaVirtualAllocation allocation;
+        VkDeviceSize allocOffset, requestedSize, allocationSize;
     };
     std::vector<AllocData> allocations;
 
@@ -2843,12 +2855,13 @@ static void TestVirtualBlocksAlgorithms()
 
             AllocData alloc = {};
             alloc.requestedSize = allocCreateInfo.size;
-            res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc.offset);
+            res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc.allocation, nullptr);
             TEST(res == VK_SUCCESS);
 
             VmaVirtualAllocationInfo allocInfo;
-            vmaGetVirtualAllocationInfo(block, alloc.offset, &allocInfo);
+            vmaGetVirtualAllocationInfo(block, alloc.allocation, &allocInfo);
             TEST(allocInfo.size >= allocCreateInfo.size);
+            alloc.allocOffset = allocInfo.offset;
             alloc.allocationSize = allocInfo.size;
             allocations.push_back(alloc);
 
@@ -2858,7 +2871,7 @@ static void TestVirtualBlocksAlgorithms()
         for(size_t i = 0; i < 5; ++i)
        {
             const size_t index = rand.Generate() % allocations.size();
-            vmaVirtualFree(block, allocations[index].offset);
+            vmaVirtualFree(block, allocations[index].allocation);
             allocations.erase(allocations.begin() + index);
         }
 
@@ -2871,12 +2884,13 @@ static void TestVirtualBlocksAlgorithms()
 
             AllocData alloc = {};
             alloc.requestedSize = allocCreateInfo.size;
-            res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc.offset);
+            res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc.allocation, nullptr);
             TEST(res == VK_SUCCESS);
 
             VmaVirtualAllocationInfo allocInfo;
-            vmaGetVirtualAllocationInfo(block, alloc.offset, &allocInfo);
+            vmaGetVirtualAllocationInfo(block, alloc.allocation, &allocInfo);
             TEST(allocInfo.size >= allocCreateInfo.size);
+            alloc.allocOffset = allocInfo.offset;
             alloc.allocationSize = allocInfo.size;
             allocations.push_back(alloc);
 
@@ -2892,13 +2906,14 @@ static void TestVirtualBlocksAlgorithms()
 
             AllocData alloc = {};
             alloc.requestedSize = allocCreateInfo.size;
-            res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc.offset);
+            res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc.allocation, nullptr);
             TEST(res == VK_SUCCESS);
-            TEST(alloc.offset % 16 == 0);
 
             VmaVirtualAllocationInfo allocInfo;
-            vmaGetVirtualAllocationInfo(block, alloc.offset, &allocInfo);
+            vmaGetVirtualAllocationInfo(block, alloc.allocation, &allocInfo);
+            TEST(allocInfo.offset % 16 == 0);
             TEST(allocInfo.size >= allocCreateInfo.size);
+            alloc.allocOffset = allocInfo.offset;
             alloc.allocationSize = allocInfo.size;
             allocations.push_back(alloc);
 
@@ -2906,21 +2921,21 @@ static void TestVirtualBlocksAlgorithms()
         // Check if the allocations don't overlap
         std::sort(allocations.begin(), allocations.end(), [](const AllocData& lhs, const AllocData& rhs) {
-            return lhs.offset < rhs.offset; });
+            return lhs.allocOffset < rhs.allocOffset; });
         for(size_t i = 0; i < allocations.size() - 1; ++i)
         {
-            TEST(allocations[i+1].offset >= allocations[i].offset + allocations[i].allocationSize);
+            TEST(allocations[i+1].allocOffset >= allocations[i].allocOffset + allocations[i].allocationSize);
         }
 
         // Check pUserData
         {
             const AllocData& alloc = allocations.back();
             VmaVirtualAllocationInfo allocInfo = {};
-            vmaGetVirtualAllocationInfo(block, alloc.offset, &allocInfo);
+            vmaGetVirtualAllocationInfo(block, alloc.allocation, &allocInfo);
             TEST((uintptr_t)allocInfo.pUserData == alloc.requestedSize * 10);
 
-            vmaSetVirtualAllocationUserData(block, alloc.offset, (void*)(uintptr_t)666);
-            vmaGetVirtualAllocationInfo(block, alloc.offset, &allocInfo);
+            vmaSetVirtualAllocationUserData(block, alloc.allocation,
(void*)(uintptr_t)666); + vmaGetVirtualAllocationInfo(block, alloc.allocation, &allocInfo); TEST((uintptr_t)allocInfo.pUserData == 666); } @@ -4229,7 +4244,7 @@ static void BenchmarkAlgorithms(FILE* file) for(uint32_t emptyIndex = 0; emptyIndex < emptyCount; ++emptyIndex) { - for(uint32_t algorithmIndex = 0; algorithmIndex < 3; ++algorithmIndex) + for(uint32_t algorithmIndex = 0; algorithmIndex < 4; ++algorithmIndex) { uint32_t algorithm = 0; switch(algorithmIndex) @@ -4242,6 +4257,9 @@ static void BenchmarkAlgorithms(FILE* file) case 2: algorithm = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT; break; + case 3: + algorithm = VMA_POOL_CREATE_TLSF_ALGORITHM_BIT; + break; default: assert(0); }