Some GrTypes cleanup.

Remove some unused functions/macros. Move the two functions that were only
used by GrBufferAllocPool into that file. We only ever used GrSizeAlignUp
with power-of-2 alignments: require that, rename it to GrAlignTo, and move it
to GrTypesPriv.h (along with GrSizeDivRoundUp).

Change-Id: I1a7248952d1905f16f02de2028d65768b186acee
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/262061
Reviewed-by: Brian Osman <brianosman@google.com>
Commit-Queue: Brian Salomon <bsalomon@google.com>
commit e994380d00
parent 91b4059e9f
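For readers skimming the diff below: the old GrSizeAlignUp rounded up by dividing and multiplying, which works for any positive alignment; the new GrAlignTo requires a power-of-2 alignment, which permits a single mask operation. A minimal standalone sketch of the two formulations (the helper names here are illustrative, not Skia's):

#include <cstddef>

// Generic align-up: works for any positive alignment, costs a divide.
constexpr size_t align_up_any(size_t x, size_t alignment) {
    return ((x + alignment - 1) / alignment) * alignment;
}

// Power-of-2 align-up: the mask trick the new GrAlignTo uses.
constexpr size_t align_up_pow2(size_t x, size_t alignment) {
    return (x + alignment - 1) & ~(alignment - 1);
}

static_assert(align_up_any(13, 8) == 16, "");
static_assert(align_up_pow2(13, 8) == 16, "");
static_assert(align_up_pow2(16, 8) == 16, "");  // already aligned: unchanged

The two agree whenever the alignment is a power of 2; restricting to that case only drops inputs the commit message says were never used.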
include/gpu/GrTypes.h
@@ -127,54 +127,6 @@ template<typename TFlags> inline TFlags& operator&=(TFlags& a, GrTFlagsMask<TFlags> b) {
     friend X& operator |=(X&, X); \
     friend constexpr bool operator &(X, X)
 
-////////////////////////////////////////////////////////////////////////////////
-
-// compile time versions of min/max
-#define GR_CT_MAX(a, b) (((b) < (a)) ? (a) : (b))
-#define GR_CT_MIN(a, b) (((b) < (a)) ? (b) : (a))
-
-/**
- * divide, rounding up
- */
-static inline constexpr int32_t GrIDivRoundUp(int x, int y) {
-    SkASSERT(y > 0);
-    return (x + (y-1)) / y;
-}
-static inline constexpr uint32_t GrUIDivRoundUp(uint32_t x, uint32_t y) {
-    return (x + (y-1)) / y;
-}
-static inline constexpr size_t GrSizeDivRoundUp(size_t x, size_t y) { return (x + (y - 1)) / y; }
-
-/**
- * align up
- */
-static inline constexpr uint32_t GrUIAlignUp(uint32_t x, uint32_t alignment) {
-    return GrUIDivRoundUp(x, alignment) * alignment;
-}
-static inline constexpr size_t GrSizeAlignUp(size_t x, size_t alignment) {
-    return GrSizeDivRoundUp(x, alignment) * alignment;
-}
-
-/**
- * amount of pad needed to align up
- */
-static inline constexpr uint32_t GrUIAlignUpPad(uint32_t x, uint32_t alignment) {
-    return (alignment - x % alignment) % alignment;
-}
-static inline constexpr size_t GrSizeAlignUpPad(size_t x, size_t alignment) {
-    return (alignment - x % alignment) % alignment;
-}
-
-/**
- * align down
- */
-static inline constexpr uint32_t GrUIAlignDown(uint32_t x, uint32_t alignment) {
-    return (x / alignment) * alignment;
-}
-static inline constexpr size_t GrSizeAlignDown(size_t x, uint32_t alignment) {
-    return (x / alignment) * alignment;
-}
-
 ///////////////////////////////////////////////////////////////////////////////
 
 /**
include/private/GrTypesPriv.h
@@ -31,6 +31,20 @@ using GrStdSteadyClock = std::chrono::monotonic_clock;
 using GrStdSteadyClock = std::chrono::steady_clock;
 #endif
 
+/**
+ * divide, rounding up
+ */
+
+static inline constexpr size_t GrSizeDivRoundUp(size_t x, size_t y) { return (x + (y - 1)) / y; }
+
+/**
+ * align up to a power of 2
+ */
+static inline constexpr size_t GrAlignTo(size_t x, size_t alignment) {
+    SkASSERT(alignment && SkIsPow2(alignment));
+    return (x + alignment - 1) & ~(alignment - 1);
+}
+
 /**
  * Pixel configurations. This type conflates texture formats, CPU pixel formats, and
  * premultipliedness. We are moving away from it towards SkColorType and backend API (GL, Vulkan)
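Both new helpers are constexpr and are used below to size compile-time constants. A small self-contained usage sketch (the helpers restated without SkASSERT so the snippet compiles standalone; values invented for illustration):

#include <cstddef>

constexpr size_t GrSizeDivRoundUp(size_t x, size_t y) { return (x + (y - 1)) / y; }
constexpr size_t GrAlignTo(size_t x, size_t alignment) {
    return (x + alignment - 1) & ~(alignment - 1);
}

// Covering 30 texels with 4-texel blocks takes 8 blocks, not 7.
static_assert(GrSizeDivRoundUp(30, 4) == 8, "");
// A 30-byte size padded out to a 16-byte boundary becomes 32.
static_assert(GrAlignTo(30, 16) == 32, "");
// Already-aligned inputs pass through unchanged.
static_assert(GrAlignTo(32, 16) == 32, "");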
src/gpu/GrBufferAllocPool.cpp
@@ -178,6 +178,14 @@ void GrBufferAllocPool::validate(bool unusedBlockAllowed) const {
 }
 #endif
 
+static inline size_t align_up_pad(size_t x, size_t alignment) {
+    return (alignment - x % alignment) % alignment;
+}
+
+static inline size_t align_down(size_t x, uint32_t alignment) {
+    return (x / alignment) * alignment;
+}
+
 void* GrBufferAllocPool::makeSpace(size_t size,
                                    size_t alignment,
                                    sk_sp<const GrBuffer>* buffer,
@@ -190,7 +198,7 @@ void* GrBufferAllocPool::makeSpace(size_t size,
     if (fBufferPtr) {
         BufferBlock& back = fBlocks.back();
         size_t usedBytes = back.fBuffer->size() - back.fBytesFree;
-        size_t pad = GrSizeAlignUpPad(usedBytes, alignment);
+        size_t pad = align_up_pad(usedBytes, alignment);
         SkSafeMath safeMath;
         size_t alignedSize = safeMath.add(pad, size);
         if (!safeMath.ok()) {
@@ -245,7 +253,7 @@ void* GrBufferAllocPool::makeSpaceAtLeast(size_t minSize,
     if (fBufferPtr) {
         BufferBlock& back = fBlocks.back();
         size_t usedBytes = back.fBuffer->size() - back.fBytesFree;
-        size_t pad = GrSizeAlignUpPad(usedBytes, alignment);
+        size_t pad = align_up_pad(usedBytes, alignment);
         if ((minSize + pad) <= back.fBytesFree) {
             // Consume padding first, to make subsequent alignment math easier
             memset((void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes), 0, pad);
@@ -257,10 +265,10 @@
             // correctly)
             size_t size;
             if (back.fBytesFree >= fallbackSize) {
-                SkASSERT(GrSizeAlignDown(fallbackSize, alignment) == fallbackSize);
+                SkASSERT(align_down(fallbackSize, alignment) == fallbackSize);
                 size = fallbackSize;
             } else {
-                size = GrSizeAlignDown(back.fBytesFree, alignment);
+                size = align_down(back.fBytesFree, alignment);
             }
             *offset = usedBytes;
             *buffer = back.fBuffer;
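Note that GrBufferAllocPool gets general-alignment copies of these helpers rather than switching to the pow-2-only GrAlignTo, presumably because buffer offset alignments handed down from a 3D API need not be powers of two. A standalone sketch of how the two file-local helpers drive the cursor math in makeSpace/makeSpaceAtLeast (illustrative values, buffer plumbing omitted):

#include <cstddef>

static inline size_t align_up_pad(size_t x, size_t alignment) {
    return (alignment - x % alignment) % alignment;
}
static inline size_t align_down(size_t x, unsigned alignment) {
    return (x / alignment) * alignment;
}

int main() {
    size_t usedBytes = 37;   // bytes already consumed in the current block
    size_t alignment = 16;   // caller's required alignment (any value works)
    size_t pad = align_up_pad(usedBytes, alignment);  // 11: distance to offset 48
    size_t offset = usedBytes + pad;                  // 48: where the new allocation lands
    // makeSpaceAtLeast shrinks an oversized request to fit the block, kept aligned:
    size_t bytesFree = 100;
    size_t usable = align_down(bytesFree, alignment); // 96
    return (offset == 48 && usable == 96) ? 0 : 1;
}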
src/gpu/GrMemoryPool.cpp
@@ -20,7 +20,7 @@
 
 std::unique_ptr<GrMemoryPool> GrMemoryPool::Make(size_t preallocSize, size_t minAllocSize) {
     preallocSize = std::max(preallocSize, kMinAllocationSize);
-    static constexpr size_t kPoolSize = GrSizeAlignUp(sizeof(GrMemoryPool), kAlignment);
+    static constexpr size_t kPoolSize = GrAlignTo(sizeof(GrMemoryPool), kAlignment);
     size_t size = kPoolSize + preallocSize;
     void* mem = operator new(size);
     void* preallocStart = static_cast<char*>(mem) + kPoolSize;
@@ -68,7 +68,7 @@ GrMemoryPool::~GrMemoryPool() {
 void* GrMemoryPool::allocate(size_t size) {
     VALIDATE;
     size += kPerAllocPad;
-    size = GrSizeAlignUp(size, kAlignment);
+    size = GrAlignTo(size, kAlignment);
     if (fTail->fFreeSize < size) {
         size_t blockSize = size + kHeaderSize;
         blockSize = std::max(blockSize, fMinAllocSize);
@@ -220,17 +220,15 @@ void GrMemoryPool::validate() {
 
 ////////////////////////////////////////////////////////////////////////////////////////
 
-static constexpr size_t kOpPoolSize =
-        GrSizeAlignUp(sizeof(GrOpMemoryPool), GrMemoryPool::kAlignment);
+static constexpr size_t kOpPoolSize = GrAlignTo(sizeof(GrOpMemoryPool), GrMemoryPool::kAlignment);
 
 GrOpMemoryPool::~GrOpMemoryPool() { this->pool()->~GrMemoryPool(); }
 
 std::unique_ptr<GrOpMemoryPool> GrOpMemoryPool::Make(size_t preallocSize, size_t minAllocSize) {
     preallocSize = std::max(preallocSize, GrMemoryPool::kMinAllocationSize);
     static constexpr size_t kOpPoolSize =
-            GrSizeAlignUp(sizeof(GrOpMemoryPool), GrMemoryPool::kAlignment);
-    static constexpr size_t kPoolSize =
-            GrSizeAlignUp(sizeof(GrMemoryPool), GrMemoryPool::kAlignment);
+            GrAlignTo(sizeof(GrOpMemoryPool), GrMemoryPool::kAlignment);
+    static constexpr size_t kPoolSize = GrAlignTo(sizeof(GrMemoryPool), GrMemoryPool::kAlignment);
     size_t size = kOpPoolSize + kPoolSize + preallocSize;
     void* mem = operator new(size);
     void* memPoolPtr = static_cast<char*>(mem) + kOpPoolSize;
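GrOpMemoryPool::Make carves one heap allocation into consecutive aligned regions: the op-pool object, the inner pool object, then the preallocated storage. A simplified sketch of that layout computation (stand-in types and sizes, not the real classes):

#include <cstddef>
#include <new>

constexpr size_t GrAlignTo(size_t x, size_t alignment) {
    return (x + alignment - 1) & ~(alignment - 1);
}

struct Outer { char pad[24]; };  // stand-in for GrOpMemoryPool
struct Inner { char pad[40]; };  // stand-in for GrMemoryPool

int main() {
    constexpr size_t kAlignment = 8;
    constexpr size_t kOuterSize = GrAlignTo(sizeof(Outer), kAlignment);  // 24
    constexpr size_t kInnerSize = GrAlignTo(sizeof(Inner), kAlignment);  // 40
    size_t preallocSize = 256;
    // One block: [Outer][Inner][prealloc bytes], each region starting aligned.
    void* mem = operator new(kOuterSize + kInnerSize + preallocSize);
    void* innerPtr = static_cast<char*>(mem) + kOuterSize;
    void* preallocStart = static_cast<char*>(innerPtr) + kInnerSize;
    (void)preallocStart;
    operator delete(mem);
    return 0;
}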
src/gpu/GrMemoryPool.h
@@ -8,7 +8,7 @@
 #ifndef GrMemoryPool_DEFINED
 #define GrMemoryPool_DEFINED
 
-#include "include/gpu/GrTypes.h"
+#include "include/private/GrTypesPriv.h"
 
 #include "include/core/SkRefCnt.h"
 
@@ -120,8 +120,8 @@ private:
 
     friend class GrOpMemoryPool;
 
-    static constexpr size_t kHeaderSize = GrSizeAlignUp(sizeof(BlockHeader), kAlignment);
-    static constexpr size_t kPerAllocPad = GrSizeAlignUp(sizeof(AllocHeader), kAlignment);
+    static constexpr size_t kHeaderSize = GrAlignTo(sizeof(BlockHeader), kAlignment);
+    static constexpr size_t kPerAllocPad = GrAlignTo(sizeof(AllocHeader), kAlignment);
 };
 
 class GrOp;
src/gpu/GrTRecorder.h
@@ -108,7 +108,7 @@ GrTRecorder<TBase>::emplaceWithData(size_t extraDataSize, Args... args) {
     static constexpr size_t kTAlign = alignof(TItem);
     static constexpr size_t kHeaderAlign = alignof(Header);
     static constexpr size_t kAllocAlign = kTAlign > kHeaderAlign ? kTAlign : kHeaderAlign;
-    static constexpr size_t kTItemOffset = GrSizeAlignUp(sizeof(Header), kAllocAlign);
+    static constexpr size_t kTItemOffset = GrAlignTo(sizeof(Header), kAllocAlign);
     // We're assuming if we back up from kItemOffset by sizeof(Header) we will still be aligned.
     static_assert(sizeof(Header) % alignof(Header) == 0);
     const size_t totalSize = kTItemOffset + sizeof(TItem) + extraDataSize;
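The recorder places a Header directly before each recorded item, so the item's offset must satisfy the larger of the two alignments. A compile-time sketch with stand-in types:

#include <cstddef>

constexpr size_t GrAlignTo(size_t x, size_t alignment) {
    return (x + alignment - 1) & ~(alignment - 1);
}

struct Header { unsigned fTotalLength; };      // stand-in for the recorder's Header
struct alignas(16) TItem { float fData[4]; };  // stand-in for a recorded op

constexpr size_t kAllocAlign =
        alignof(TItem) > alignof(Header) ? alignof(TItem) : alignof(Header);
// The item lands at the first kAllocAlign boundary past the header: 16, not 4.
constexpr size_t kTItemOffset = GrAlignTo(sizeof(Header), kAllocAlign);
static_assert(kTItemOffset == 16, "");
// Backing up by sizeof(Header) from the item must still be Header-aligned.
static_assert(sizeof(Header) % alignof(Header) == 0, "");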
src/gpu/mtl/GrMtlBuffer.mm
@@ -44,7 +44,7 @@ GrMtlBuffer::GrMtlBuffer(GrMtlGpu* gpu, size_t size, GrGpuBufferType intendedType,
 #ifdef SK_BUILD_FOR_MAC
     // Mac requires 4-byte alignment for copies so we need
     // to ensure we have space for the extra data
-    size = GrSizeAlignUp(size, 4);
+    size = SkAlign4(size);
 #endif
     fMtlBuffer = size == 0 ? nil :
             [gpu->device() newBufferWithLength: size
@@ -82,7 +82,7 @@ bool GrMtlBuffer::onUpdateData(const void* src, size_t srcInBytes) {
     }
     SkASSERT(fMappedBuffer);
     if (!fIsDynamic) {
-        SkASSERT(GrSizeAlignUp(srcInBytes, 4) == fMappedBuffer.length);
+        SkASSERT(SkAlign4(srcInBytes) == fMappedBuffer.length);
     }
     memcpy(fMapPtr, src, srcInBytes);
     this->internalUnmap(srcInBytes);
@@ -136,7 +136,7 @@ void GrMtlBuffer::internalMap(size_t sizeInBytes) {
     }
 #ifdef SK_BUILD_FOR_MAC
     // Mac requires 4-byte alignment for copies so we pad this out
-    sizeInBytes = GrSizeAlignUp(sizeInBytes, 4);
+    sizeInBytes = SkAlign4(sizeInBytes);
 #endif
     fMappedBuffer =
             [this->mtlGpu()->device() newBufferWithLength: sizeInBytes
@@ -160,7 +160,7 @@ void GrMtlBuffer::internalUnmap(size_t sizeInBytes) {
     }
 #ifdef SK_BUILD_FOR_MAC
     // In both cases the size needs to be 4-byte aligned on Mac
-    sizeInBytes = GrSizeAlignUp(sizeInBytes, 4);
+    sizeInBytes = SkAlign4(sizeInBytes);
 #endif
     if (fIsDynamic) {
 #ifdef SK_BUILD_FOR_MAC
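SkAlign4 is the long-standing SkTypes helper for this fixed pow-2 case, so the Metal code no longer needs a GPU-specific align function. Its effect, restated as standalone arithmetic (the name Align4 here is illustrative, not Skia's macro):

#include <cstddef>

// Same arithmetic as Skia's SkAlign4: round up to the next multiple of 4.
constexpr size_t Align4(size_t x) { return (x + 3) & ~size_t{3}; }

static_assert(Align4(0) == 0, "");
static_assert(Align4(1) == 4, "");
static_assert(Align4(8) == 8, "");
static_assert(Align4(9) == 12, "");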
src/gpu/mtl/GrMtlResourceProvider.mm
@@ -239,7 +239,7 @@ id<MTLBuffer> GrMtlResourceProvider::BufferSuballocator::getAllocation(size_t size,
     *offset = modHead;
     // We're not sure what the usage of the next allocation will be --
     // to be safe we'll use 16 byte alignment.
-    fHead = GrSizeAlignUp(head + size, 16);
+    fHead = GrAlignTo(head + size, 16);
     return fBuffer;
 }
 
@@ -262,7 +262,7 @@ void GrMtlResourceProvider::BufferSuballocator::addCompletionHandler(
 id<MTLBuffer> GrMtlResourceProvider::getDynamicBuffer(size_t size, size_t* offset) {
 #ifdef SK_BUILD_FOR_MAC
     // Mac requires 4-byte alignment for didModifyRange:
-    size = GrSizeAlignUp(size, 4);
+    size = SkAlign4(size);
 #endif
     id<MTLBuffer> buffer = fBufferSuballocator->getAllocation(size, offset);
     if (buffer) {
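The suballocator hands out ranges of one shared MTLBuffer and conservatively rounds its head cursor up to 16 bytes so the next caller starts aligned whatever its needs. A heavily simplified sketch of that cursor discipline (the real class also tracks a tail, handles wraparound, and fences on command-buffer completion; none of that is shown):

#include <cstddef>

constexpr size_t GrAlignTo(size_t x, size_t alignment) {
    return (x + alignment - 1) & ~(alignment - 1);
}

struct Suballocator {
    size_t fHead = 0;
    size_t fSize = 1024;  // illustrative backing-buffer size

    // Returns the offset of a suballocation of `size` bytes, or fSize on failure.
    size_t getAllocation(size_t size) {
        size_t modHead = fHead % fSize;            // wrap the cursor into the buffer
        if (modHead + size > fSize) return fSize;  // real code retries after wraparound
        size_t offset = modHead;
        // Unknown next usage -- conservatively round the cursor to 16 bytes.
        fHead = GrAlignTo(fHead + size, 16);
        return offset;
    }
};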
tests/TransferPixelsTest.cpp
@@ -321,7 +321,7 @@ void basic_transfer_from_test(skiatest::Reporter* reporter, const sk_gpu_test::ContextInfo& ctxInfo,
 
     size_t bufferSize = fullBufferRowBytes * kTextureHeight;
     // Arbitrary starting offset for the partial read.
-    size_t partialReadOffset = GrSizeAlignUp(11, offsetAlignment);
+    size_t partialReadOffset = GrAlignTo(11, offsetAlignment);
     bufferSize = SkTMax(bufferSize, partialReadOffset + partialBufferRowBytes * kPartialHeight);
 
     sk_sp<GrGpuBuffer> buffer(resourceProvider->createBuffer(