Fix various typos

rumblehhh 2022-08-05 09:20:11 +01:00 committed by GitHub
parent f6d6e278a6
commit 177862376c


@@ -628,7 +628,7 @@ typedef enum VmaAllocationCreateFlagBits
VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = 0x00020000,
/** Allocation strategy that always chooses the lowest offset in available space.
This is not the most efficient strategy but achieves highly packed data.
-Used internally by defragmentation, not recomended in typical usage.
+Used internally by defragmentation, not recommended in typical usage.
*/
VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT = 0x00040000,
/** Alias to #VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT.
@@ -1905,7 +1905,7 @@ VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
/** \brief Returns current information about specified allocation.
-Current paramteres of given allocation are returned in `pAllocationInfo`.
+Current parameters of given allocation are returned in `pAllocationInfo`.
Although this function doesn't lock any mutex and should therefore be quite efficient,
you should avoid calling it too often.
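
A minimal sketch of a typical call to the function documented here (vmaGetAllocationInfo), assuming a valid VmaAllocator and VmaAllocation already exist:

VmaAllocationInfo allocInfo = {};
vmaGetAllocationInfo(allocator, allocation, &allocInfo);
// allocInfo now holds the current parameters of the allocation:
// memory type index, VkDeviceMemory handle, offset, size, pMappedData, pUserData.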
@@ -2158,7 +2158,7 @@ VMA_CALL_PRE void VMA_CALL_POST vmaEndDefragmentation(
\param allocator Allocator object.
\param context Context object that has been created by vmaBeginDefragmentation().
-\param[out] pPassInfo Computed informations for current pass.
+\param[out] pPassInfo Computed information for current pass.
\returns
- `VK_SUCCESS` if no more moves are possible. Then you can omit the call to vmaEndDefragmentationPass() and simply end the whole defragmentation.
- `VK_INCOMPLETE` if there are pending moves returned in `pPassInfo`. You need to perform them, call vmaEndDefragmentationPass(),
@@ -2173,7 +2173,7 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
\param allocator Allocator object.
\param context Context object that has been created by vmaBeginDefragmentation().
-\param pPassInfo Computed informations for current pass filled by vmaBeginDefragmentationPass() and possibly modified by you.
+\param pPassInfo Computed information for current pass filled by vmaBeginDefragmentationPass() and possibly modified by you.
Returns `VK_SUCCESS` if no more moves are possible or `VK_INCOMPLETE` if more defragmentations are possible.
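
Together, these two functions form a loop. A hedged sketch of that loop, assuming `defragCtx` was created by vmaBeginDefragmentation() and that the application performs the actual data copies itself:

for (;;)
{
    VmaDefragmentationPassMoveInfo passInfo = {};
    VkResult res = vmaBeginDefragmentationPass(allocator, defragCtx, &passInfo);
    if (res == VK_SUCCESS)
        break; // No more moves are possible.
    // res == VK_INCOMPLETE: perform the moves listed in passInfo.pMoves here
    // (copy each allocation's data from its old place to the new one),
    // then hand the results back to the library.
    if (vmaEndDefragmentationPass(allocator, defragCtx, &passInfo) == VK_SUCCESS)
        break; // Defragmentation finished on this pass.
}
vmaEndDefragmentation(allocator, defragCtx, nullptr);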
@@ -2682,7 +2682,7 @@ static void* vma_aligned_alloc(size_t alignment, size_t size)
// // For C++14, usr/include/malloc/_malloc.h declares aligned_alloc()) only
// // with the MacOSX11.0 SDK in Xcode 12 (which is what adds
// // MAC_OS_X_VERSION_10_16), even though the function is marked
-// // availabe for 10.15. That is why the preprocessor checks for 10.16 but
+// // available for 10.15. That is why the preprocessor checks for 10.16 but
// // the __builtin_available checks for 10.15.
// // People who use C++17 could call aligned_alloc with the 10.15 SDK already.
// if (__builtin_available(macOS 10.15, iOS 13, *))
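
The pattern described in this comment, shown as a standalone hedged sketch (simplified, not the exact code from this function):

#if defined(MAC_OS_X_VERSION_10_16)
    // Compile-time: only SDKs that define MAC_OS_X_VERSION_10_16 declare
    // aligned_alloc() for C++14. Run-time: the function exists from 10.15 on.
    if (__builtin_available(macOS 10.15, iOS 13, *))
        return aligned_alloc(alignment, size);
#endif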
@@ -6711,7 +6711,7 @@ uint32_t VmaBlockBufferImageGranularity::OffsetToPageIndex(VkDeviceSize offset)
void VmaBlockBufferImageGranularity::AllocPage(RegionInfo& page, uint8_t allocType)
{
-// When current alloc type is free then it can be overriden by new type
+// When current alloc type is free then it can be overridden by new type
if (page.allocCount == 0 || (page.allocCount > 0 && page.allocType == VMA_SUBALLOCATION_TYPE_FREE))
page.allocType = allocType;
@@ -10774,7 +10774,7 @@ void VmaBlockMetadata_TLSF::InsertFreeBlock(Block* block)
void VmaBlockMetadata_TLSF::MergeBlock(Block* block, Block* prev)
{
-VMA_ASSERT(block->prevPhysical == prev && "Cannot merge seperate physical regions!");
+VMA_ASSERT(block->prevPhysical == prev && "Cannot merge separate physical regions!");
VMA_ASSERT(!prev->IsFree() && "Cannot merge block that belongs to free list!");
block->offset = prev->offset;
@@ -10791,10 +10791,10 @@ VmaBlockMetadata_TLSF::Block* VmaBlockMetadata_TLSF::FindFreeBlock(VkDeviceSize
uint32_t innerFreeMap = m_InnerIsFreeBitmap[memoryClass] & (~0U << SizeToSecondIndex(size, memoryClass));
if (!innerFreeMap)
{
-// Check higher levels for avaiable blocks
+// Check higher levels for available blocks
uint32_t freeMap = m_IsFreeBitmap & (~0UL << (memoryClass + 1));
if (!freeMap)
-return VMA_NULL; // No more memory avaible
+return VMA_NULL; // No more memory available
// Find lowest free region
memoryClass = VMA_BITSCAN_LSB(freeMap);
@@ -13216,7 +13216,7 @@ VkResult VmaDefragmentationContext_T::DefragmentPassEnd(VmaDefragmentationPassMo
{
if (m_AlgorithmState != VMA_NULL)
{
-// Avoid unnecessary tries to allocate when new free block is avaiable
+// Avoid unnecessary tries to allocate when new free block is available
StateExtensive& state = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[vectorIndex];
if (state.firstFreeBlock != SIZE_MAX)
{
@@ -13292,7 +13292,7 @@ VkResult VmaDefragmentationContext_T::DefragmentPassEnd(VmaDefragmentationPassMo
}
default:
{
-// Move to the begining
+// Move to the beginning
for (const FragmentedBlock& block : immovableBlocks)
{
VmaBlockVector* vector = m_pBlockVectors[block.data];
@@ -13507,7 +13507,7 @@ bool VmaDefragmentationContext_T::ComputeDefragmentation_Balanced(VmaBlockVector
{
// Go over every allocation and try to fit it in previous blocks at lowest offsets,
// if not possible: realloc within single block to minimize offset (exclude offset == 0),
-but only if there are noticable gaps between them (some heuristic, ex. average size of allocation in block)
+but only if there are noticeable gaps between them (some heuristic, ex. average size of allocation in block)
VMA_ASSERT(m_AlgorithmState != VMA_NULL);
StateBalanced& vectorState = reinterpret_cast<StateBalanced*>(m_AlgorithmState)[index];
@@ -13590,7 +13590,7 @@ bool VmaDefragmentationContext_T::ComputeDefragmentation_Balanced(VmaBlockVector
}
}
-// No moves perfomed, update statistics to current vector state
+// No moves performed, update statistics to current vector state
if (startMoveCount == m_Moves.size() && !update)
{
vectorState.avgAllocSize = UINT64_MAX;
@@ -14517,7 +14517,7 @@ VkResult VmaAllocator_T::AllocateMemoryOfType(
blockVector.GetAllocationNextPtr());
if(res == VK_SUCCESS)
{
-// Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
+// Succeeded: AllocateDedicatedMemory function already filled pMemory, nothing more to do here.
VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
return VK_SUCCESS;
}
@@ -14558,7 +14558,7 @@ VkResult VmaAllocator_T::AllocateMemoryOfType(
blockVector.GetAllocationNextPtr());
if(res == VK_SUCCESS)
{
-// Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
+// Succeeded: AllocateDedicatedMemory function already filled pMemory, nothing more to do here.
VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
return VK_SUCCESS;
}
@@ -17764,7 +17764,7 @@ This will help the library decide about preferred memory type to ensure it has `
so you can map it.
For example, a staging buffer that will be filled via mapped pointer and then
-used as a source of transfer to the buffer decribed previously can be created like this.
+used as a source of transfer to the buffer described previously can be created like this.
It will likely end up in a memory type that is `HOST_VISIBLE` and `HOST_COHERENT`
but not `HOST_CACHED` (meaning uncached, write-combined) and not `DEVICE_LOCAL` (meaning system RAM).
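
A hedged sketch of such a staging buffer creation under the VMA 3.x API (variable names are illustrative):

VkBufferCreateInfo stagingBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
stagingBufCreateInfo.size = 65536;
stagingBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

VmaAllocationCreateInfo stagingAllocCreateInfo = {};
stagingAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
stagingAllocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
    VMA_ALLOCATION_CREATE_MAPPED_BIT;

VkBuffer stagingBuf;
VmaAllocation stagingAlloc;
VmaAllocationInfo stagingAllocInfo;
vmaCreateBuffer(allocator, &stagingBufCreateInfo, &stagingAllocCreateInfo,
    &stagingBuf, &stagingAlloc, &stagingAllocInfo);
// stagingAllocInfo.pMappedData can now be filled directly and the buffer
// used as the source of a vkCmdCopyBuffer() transfer.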
@@ -17788,7 +17788,7 @@ Usage values `VMA_MEMORY_USAGE_AUTO*` are legal to use only when the library kno
about the resource being created by having `VkBufferCreateInfo` / `VkImageCreateInfo` passed,
so they work with functions like: vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo() etc.
If you allocate raw memory using function vmaAllocateMemory(), you have to use other means of selecting
-memory type, as decribed below.
+memory type, as described below.
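
For illustration, a hedged sketch of one such means, selecting the memory type via explicit property flags when allocating raw memory (`device` and `buf` are assumed to be an existing VkDevice and VkBuffer):

VkMemoryRequirements memReq;
vkGetBufferMemoryRequirements(device, buf, &memReq);

VmaAllocationCreateInfo allocCreateInfo = {};
// Explicit property flags take the place of VMA_MEMORY_USAGE_AUTO* here.
allocCreateInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
    VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;

VmaAllocation alloc;
VmaAllocationInfo allocInfo;
vmaAllocateMemory(allocator, &memReq, &allocCreateInfo, &alloc, &allocInfo);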
\note
Old usage values (`VMA_MEMORY_USAGE_GPU_ONLY`, `VMA_MEMORY_USAGE_CPU_ONLY`,
@@ -17936,7 +17936,7 @@ object that you wanted to map.
\section memory_mapping_persistently_mapped_memory Persistently mapped memory
-Kepping your memory persistently mapped is generally OK in Vulkan.
+Keeping your memory persistently mapped is generally OK in Vulkan.
You don't need to unmap it before using its data on the GPU.
The library provides a special feature designed for that:
Allocations made with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag set in
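
A hedged sketch of a persistently mapped allocation created with that flag (`bufCreateInfo` is assumed to be a filled VkBufferCreateInfo):

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
    VMA_ALLOCATION_CREATE_MAPPED_BIT;

VkBuffer buf;
VmaAllocation alloc;
VmaAllocationInfo allocInfo;
vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
// The allocation stays mapped for its entire lifetime: allocInfo.pMappedData
// is valid without any vmaMapMemory()/vmaUnmapMemory() calls.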
@@ -18539,7 +18539,7 @@ especially the amount of memory allocated from Vulkan.
If you need to obtain basic statistics about memory usage per heap, together with current budget,
you can call function vmaGetHeapBudgets() and inspect structure #VmaBudget.
-This is useful to keep track of memory usage and stay withing budget
+This is useful to keep track of memory usage and stay within budget
(see also \ref staying_within_budget).
Example:
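
A minimal sketch of that query, assuming a valid allocator (`heapCount` would come from VkPhysicalDeviceMemoryProperties::memoryHeapCount):

VmaBudget budgets[VK_MAX_MEMORY_HEAPS] = {};
vmaGetHeapBudgets(allocator, budgets);
for (uint32_t heapIndex = 0; heapIndex < heapCount; ++heapIndex)
{
    printf("Heap %u: %llu B used of %llu B budget\n", heapIndex,
        (unsigned long long)budgets[heapIndex].usage,
        (unsigned long long)budgets[heapIndex].budget);
}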
@@ -18682,7 +18682,7 @@ VkResult res = vmaCreateVirtualBlock(&blockCreateInfo, &block);
#VmaVirtualBlock object contains an internal data structure that keeps track of free and occupied regions
using the same code as the main Vulkan memory allocator.
Similarly to #VmaAllocation for standard GPU allocations, there is #VmaVirtualAllocation type
-that represents an opaque handle to an allocation withing the virtual block.
+that represents an opaque handle to an allocation within the virtual block.
In order to make such allocation:
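
A hedged sketch of such an allocation, reusing the `block` created above:

VmaVirtualAllocationCreateInfo vAllocCreateInfo = {};
vAllocCreateInfo.size = 4096; // Size of the region, in whatever units the block uses.

VmaVirtualAllocation vAlloc;
VkDeviceSize offset;
VkResult res = vmaVirtualAllocate(block, &vAllocCreateInfo, &vAlloc, &offset);
// On VK_SUCCESS, `offset` is the position of the new region inside the block;
// vmaVirtualFree(block, vAlloc) releases it later.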
@@ -19081,7 +19081,7 @@ frequently read on GPU e.g. as a uniform buffer (also called "dynamic"), multipl
even if it means system RAM (not `DEVICE_LOCAL`) on systems with a discrete graphics card,
and make the device reach out to that resource directly.
- Reads performed by the device will then go through PCI Express bus.
-The performace of this access may be limited, but it may be fine depending on the size
+The performance of this access may be limited, but it may be fine depending on the size
of this resource (whether it is small enough to quickly end up in GPU cache) and the sparsity
of access.
-# On systems with unified memory (e.g. AMD APU or Intel integrated graphics, mobile chips),
@@ -19537,7 +19537,7 @@ which means version numbers follow convention: Major.Minor.Patch (e.g. 2.3.0), w
All changes between official releases are documented in file "CHANGELOG.md".
-\warning Backward compatiblity is considered on the level of C++ source code, not binary linkage.
+\warning Backward compatibility is considered on the level of C++ source code, not binary linkage.
Adding new members to existing structures is treated as backward compatible if initializing
the new members to binary zero results in the old behavior.
You should always fully initialize all library structures to zeros and not rely on their