Keep a scratch block around for reuse in GrBlockAllocator

This can optimize cases in an allocate-release loop that moves back
and forth across the end of one block and the start of another.
Previously, each such cycle would malloc a new block and then delete it.

It also enables reserve() in higher-level data collections without
blocking use of the bytes left in the current tail block.

Change-Id: Ide16e9038384fcb188164fc9620a8295f6880b9f
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/303268
Reviewed-by: Brian Salomon <bsalomon@google.com>
Reviewed-by: Robert Phillips <robertphillips@google.com>
Commit-Queue: Michael Ludwig <michaelludwig@google.com>
Michael Ludwig 2020-07-20 16:17:14 -04:00 committed by Skia Commit-Bot
parent dc945ea077
commit 68e5f29d84
6 changed files with 348 additions and 66 deletions
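
For illustration, here is a minimal sketch of the allocate-release churn described
above, written against the GrBlockAllocator API in this change. The 256-byte block
size, the loop, the churn() helper, and the include path are assumptions for the
example, not part of the patch.

    #include "src/gpu/GrBlockAllocator.h"  // include path assumed

    void churn() {
        GrSBlockAllocator<256> pool;

        for (int i = 0; i < 1000; ++i) {
            // Request one byte more than the current (head) block can hold, which
            // forces addBlock(). After the first iteration, addBlock() reuses the
            // stashed scratch block instead of calling operator new.
            pool->allocate<4>(pool->currentBlock()->avail<4>() + 1);

            // Release the block the allocation spilled into. With this change it is
            // kept as the scratch block (fHead.fPrev) rather than deleted, so the
            // next iteration can reuse it.
            pool->releaseBlock(pool->currentBlock());
        }
    }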

View File

@@ -46,7 +46,7 @@ GrBlockAllocator::Block::~Block() {
size_t GrBlockAllocator::totalSize() const {
// Use size_t since the sum across all blocks could exceed 'int', even though each block won't
size_t size = offsetof(GrBlockAllocator, fHead);
size_t size = offsetof(GrBlockAllocator, fHead) + this->scratchBlockSize();
for (const Block* b : this->blocks()) {
size += b->fSize;
}
@@ -55,7 +55,10 @@ size_t GrBlockAllocator::totalSize() const {
}
size_t GrBlockAllocator::totalUsableSpace() const {
size_t size = 0;
size_t size = this->scratchBlockSize();
if (size > 0) {
size -= kDataStart; // scratchBlockSize reports total block size, not usable size
}
for (const Block* b : this->blocks()) {
size += (b->fSize - kDataStart);
}
@@ -87,9 +90,14 @@ GrBlockAllocator::Block* GrBlockAllocator::findOwningBlock(const void* p) {
}
void GrBlockAllocator::releaseBlock(Block* block) {
if (block->fPrev) {
// Unlink block from the double-linked list of blocks
SkASSERT(block != &fHead);
if (block == &fHead) {
// Reset the cursor of the head block so that it can be reused if it becomes the new tail
block->fCursor = kDataStart;
block->fMetadata = 0;
// Unlike in reset(), we don't set the head's next block to null because there are
// potentially heap-allocated blocks that are still connected to it.
} else {
SkASSERT(block->fPrev);
block->fPrev->fNext = block->fNext;
if (block->fNext) {
SkASSERT(fTail != block);
@@ -99,14 +107,17 @@ void GrBlockAllocator::releaseBlock(Block* block) {
fTail = block->fPrev;
}
delete block;
// The released block becomes the new scratch block (if it's bigger); otherwise it is deleted
if (this->scratchBlockSize() < block->fSize) {
SkASSERT(block != fHead.fPrev); // sanity check, shouldn't already be the scratch block
if (fHead.fPrev) {
delete fHead.fPrev;
}
block->markAsScratch();
fHead.fPrev = block;
} else {
// Reset the cursor of the head block so that it can be reused
SkASSERT(block == &fHead);
block->fCursor = kDataStart;
block->fMetadata = 0;
// Unlike in reset(), we don't set the head's next block to null because there are
// potentially heap-allocated blocks that are still connected to it.
delete block;
}
}
// Decrement growth policy (opposite of addBlock()'s increment operations)
@@ -139,14 +150,15 @@ void GrBlockAllocator::reset() {
b->fNext = nullptr;
b->fCursor = kDataStart;
b->fMetadata = 0;
// For reset(), but NOT releaseBlock(), the head allocatorMetadata resets too
// For reset(), but NOT releaseBlock(), the head allocatorMetadata and scratch block
// are reset/destroyed.
b->fAllocatorMetadata = 0;
this->resetScratchSpace();
} else {
delete b;
}
}
SkASSERT(fTail == &fHead && fHead.fNext == nullptr &&
SkASSERT(fTail == &fHead && fHead.fNext == nullptr && fHead.fPrev == nullptr &&
fHead.metadata() == 0 && fHead.fCursor == kDataStart);
GrowthPolicy gp = static_cast<GrowthPolicy>(fGrowthPolicy);
@@ -154,6 +166,13 @@ void GrBlockAllocator::reset() {
fN1 = 1;
}
void GrBlockAllocator::resetScratchSpace() {
if (fHead.fPrev) {
delete fHead.fPrev;
fHead.fPrev = nullptr;
}
}
void GrBlockAllocator::addBlock(int minimumSize, int maxSize) {
SkASSERT(minimumSize > (int) sizeof(Block) && minimumSize <= maxSize);
@@ -161,6 +180,23 @@ void GrBlockAllocator::addBlock(int minimumSize, int maxSize) {
static constexpr int kMaxN = (1 << 23) - 1;
static_assert(2 * kMaxN <= std::numeric_limits<int32_t>::max()); // Growth policy won't overflow
auto alignAllocSize = [](int size) {
// Round to a nice boundary since the block isn't maxing out:
// if allocSize > 32K, aligns on 4K boundary otherwise aligns on max_align_t, to play
// nicely with jeMalloc (from SkArenaAlloc).
int mask = size > (1 << 15) ? ((1 << 12) - 1) : (kAddressAlign - 1);
return (size + mask) & ~mask;
};
int allocSize;
void* mem = nullptr;
if (this->scratchBlockSize() >= minimumSize) {
// Activate the scratch block instead of making a new block
SkASSERT(fHead.fPrev->isScratch());
allocSize = fHead.fPrev->fSize;
mem = fHead.fPrev;
fHead.fPrev = nullptr;
} else if (minimumSize < maxSize) {
// Calculate the 'next' size per growth policy sequence
GrowthPolicy gp = static_cast<GrowthPolicy>(fGrowthPolicy);
int nextN1 = fN0 + fN1;
@@ -179,22 +215,24 @@ void GrBlockAllocator::addBlock(int minimumSize, int maxSize) {
// However, must guard against overflow here, since all the size-based asserts prevented
// alignment/addition overflows, while multiplication requires 2x bits instead of x+1.
int sizeIncrement = fBlockIncrement * kAddressAlign;
int allocSize;
if (maxSize / sizeIncrement < nextN1) {
// The growth policy would overflow, so use the max. We've already confirmed that maxSize
// will be sufficient for the requested minimumSize
// The growth policy would overflow, so use the max. We've already confirmed that
// maxSize will be sufficient for the requested minimumSize
allocSize = maxSize;
} else {
allocSize = std::max(minimumSize, sizeIncrement * nextN1);
// Then round to a nice boundary since the block isn't maxing out:
// if allocSize > 32K, aligns on 4K boundary otherwise aligns on max_align_t, to play
// nicely with jeMalloc (from SkArenaAlloc).
int mask = allocSize > (1 << 15) ? ((1 << 12) - 1) : (kAddressAlign - 1);
allocSize = std::min((allocSize + mask) & ~mask, maxSize);
allocSize = std::min(alignAllocSize(std::max(minimumSize, sizeIncrement * nextN1)),
maxSize);
}
} else {
SkASSERT(minimumSize == maxSize);
// Still align on a nice boundary, no max clamping since that would just undo the alignment
allocSize = alignAllocSize(minimumSize);
}
// Create new block and append to the linked list of blocks in this allocator
void* mem = operator new(allocSize);
if (!mem) {
mem = operator new(allocSize);
}
fTail->fNext = new (mem) Block(fTail, allocSize);
fTail = fTail->fNext;
}
@@ -207,7 +245,13 @@ void GrBlockAllocator::validate() const {
blocks.push_back(block);
SkASSERT(kAssignedMarker == block->fSentinel);
if (block == &fHead) {
// The head block's fPrev may be non-null if it holds a scratch block, but that's not
// considered part of the linked list
SkASSERT(!prev && (!fHead.fPrev || fHead.fPrev->isScratch()));
} else {
SkASSERT(prev == block->fPrev);
}
if (prev) {
SkASSERT(prev->fNext == block);
}

View File

@@ -132,6 +132,9 @@ public:
template <size_t Align, size_t Padding>
int alignedOffset(int offset) const;
bool isScratch() const { return fCursor < 0; }
void markAsScratch() { fCursor = -1; }
SkDEBUGCODE(int fSentinel;) // known value to check for bad back pointers to blocks
Block* fNext; // doubly-linked list of blocks
@@ -259,6 +262,33 @@ public:
template <size_t Align, size_t Padding = 0>
ByteRange allocate(size_t size);
enum ReserveFlags : unsigned {
// If provided to reserve(), the input 'size' will only be aligned to max_align instead of
// being rounded up to the next size determined by the growth policy of the GrBlockAllocator.
kIgnoreGrowthPolicy_Flag = 0b01,
// If provided to reserve(), the number of available bytes of the current block will not
// be used to satisfy the reservation (assuming the contiguous range was long enough to
// begin with).
kIgnoreExistingBytes_Flag = 0b10,
kNo_ReserveFlags = 0b00
};
/**
* Ensure the block allocator has 'size' contiguous available bytes. After calling this
* function, currentBlock()->avail<Align, Padding>() may still report less than 'size' if the
* reserved space was added as a scratch block. This is done so that anything remaining in
* the current block can still be used if a smaller-than-size allocation is requested. If 'size'
* is requested by a subsequent allocation, the scratch block will automatically be activated
* and the request will not itself trigger any malloc.
*
* The optional 'flags' controls how the input size is allocated; by default it will attempt
* to use available contiguous bytes in the current block and will respect the growth policy
* of the allocator.
*/
template <size_t Align = 1, size_t Padding = 0>
void reserve(size_t size, ReserveFlags flags = kNo_ReserveFlags);
/**
* Return a pointer to the start of the current block. This will never be null.
*/
@@ -305,6 +335,10 @@ public:
*
* If 'block' represents the inline-allocated head block, its cursor and metadata are instead
* reset to their defaults.
*
* If the block is not the head block, it may be kept as a scratch block to be reused for
* subsequent allocation requests, instead of making an entirely new block. A scratch block is
* not visible when iterating over blocks but is reported in the total size of the allocator.
*/
void releaseBlock(Block* block);
@@ -314,6 +348,11 @@ public:
*/
void reset();
/**
* Remove any reserved scratch space, either from calling reserve() or releaseBlock().
*/
void resetScratchSpace();
template <bool Forward, bool Const> class BlockIter;
/**
@@ -338,6 +377,10 @@ public:
void validate() const;
#endif
#if GR_TEST_UTILS
int testingOnly_scratchBlockSize() const { return this->scratchBlockSize(); }
#endif
private:
static constexpr int kDataStart = sizeof(Block);
#ifdef SK_FORCE_8_BYTE_ALIGNMENT
@@ -369,6 +412,8 @@ private:
// that will preserve the static guarantees GrBlockAllocator makes.
void addBlock(int minSize, int maxSize);
int scratchBlockSize() const { return fHead.fPrev ? fHead.fPrev->fSize : 0; }
Block* fTail; // All non-head blocks are heap allocated; tail will never be null.
// All remaining state is packed into 64 bits to keep GrBlockAllocator at 16 bytes + head block
@@ -390,6 +435,9 @@ private:
// Inline head block, must be at the end so that it can utilize any additional reserved space
// from the initial allocation.
// The head block's prev pointer may be non-null, which signifies a scratch block that may be
// reused instead of allocating an entirely new block (this helps when allocate+release calls
// bounce back and forth across the capacity of a block).
alignas(kAddressAlign) Block fHead;
static_assert(kGrowthPolicyCount <= 4);
@@ -435,6 +483,8 @@ private:
///////////////////////////////////////////////////////////////////////////////////////////////////
// Template and inline implementations
GR_MAKE_BITFIELD_OPS(GrBlockAllocator::ReserveFlags)
template<size_t Align, size_t Padding>
constexpr size_t GrBlockAllocator::BlockOverhead() {
static_assert(GrAlignTo(kDataStart + Padding, Align) >= sizeof(Block));
@@ -457,6 +507,29 @@ constexpr size_t GrBlockAllocator::MaxBlockSize() {
return BlockOverhead<Align, Padding>() + kMaxAllocationSize;
}
template<size_t Align, size_t Padding>
void GrBlockAllocator::reserve(size_t size, ReserveFlags flags) {
if (size > kMaxAllocationSize) {
SK_ABORT("Allocation too large (%zu bytes requested)", size);
}
int iSize = (int) size;
if ((flags & kIgnoreExistingBytes_Flag) ||
this->currentBlock()->avail<Align, Padding>() < iSize) {
int blockSize = BlockOverhead<Align, Padding>() + iSize;
int maxSize = (flags & kIgnoreGrowthPolicy_Flag) ? blockSize
: MaxBlockSize<Align, Padding>();
SkASSERT((size_t) maxSize <= (MaxBlockSize<Align, Padding>()));
SkDEBUGCODE(auto oldTail = fTail;)
this->addBlock(blockSize, maxSize);
SkASSERT(fTail != oldTail);
// Releasing the just added block will move it into scratch space, allowing the original
// tail's bytes to be used first before the scratch block is activated.
this->releaseBlock(fTail);
}
}
template <size_t Align, size_t Padding>
GrBlockAllocator::ByteRange GrBlockAllocator::allocate(size_t size) {
// Amount of extra space for a new block to make sure the allocation can succeed.
@@ -599,6 +672,12 @@ public:
void advance(BlockT* block) {
fBlock = block;
fNext = block ? (Forward ? block->fNext : block->fPrev) : nullptr;
if (!Forward && fNext && fNext->isScratch()) {
// For reverse-iteration only, we need to stop at the head, not the scratch block
// possibly stashed in head->prev.
fNext = nullptr;
}
SkASSERT(!fNext || !fNext->isScratch());
}
BlockT* fBlock;
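
To make the reserve() contract documented above concrete, here is a rough usage
sketch. The reserveSketch() helper, the 256-byte pool, and the include path are
assumptions; the expected behavior mirrors the GrBlockAllocatorScratchBlockReserve
test further down.

    #include "src/gpu/GrBlockAllocator.h"  // include path assumed

    void reserveSketch() {
        GrSBlockAllocator<256> pool;
        int before = pool->currentBlock()->avail();

        // Reserving more than the current block can hold adds the space as a scratch
        // block, so the bytes still free in the current block stay usable and avail()
        // is unchanged.
        pool->reserve(before + 1);
        SkASSERT(pool->currentBlock()->avail() == before);

        // A small allocation still comes out of the current block...
        pool->allocate<1>(1);

        // ...while a request larger than what remains activates the scratch block
        // without triggering a new malloc.
        pool->allocate<1>(pool->currentBlock()->avail() + 1);
    }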

View File

@@ -67,7 +67,8 @@ public:
bool isEmpty() const {
// If size is the same as preallocSize, there aren't any heap blocks, so currentBlock()
// is the inline head block.
return 0 == this->size() && 0 == fAllocator.currentBlock()->metadata();
return fAllocator.currentBlock() == fAllocator.headBlock() &&
fAllocator.currentBlock()->metadata() == 0;
}
/**

View File

@@ -65,6 +65,19 @@ public:
return *new (this->pushItem()) T(std::forward<Args>(args)...);
}
/**
* Allocate, if needed, space to hold N more Ts before another malloc will occur.
*/
void reserve(int n) {
int avail = fAllocator->currentBlock()->template avail<alignof(T)>() / sizeof(T);
if (n > avail) {
int reserved = n - avail;
// Don't consider existing bytes since we've already determined how to split the N items
fAllocator->template reserve<alignof(T)>(
reserved * sizeof(T), GrBlockAllocator::kIgnoreExistingBytes_Flag);
}
}
/**
* Remove the last item, only call if count() != 0
*/
@@ -156,7 +169,7 @@ public:
* Use for-range loops by calling items() or ritems() instead to access all added items in order
*/
T& item(int i) {
SkASSERT(i >= 0 && i < fAllocator->metadata());
SkASSERT(i >= 0 && i < this->count());
// Iterate over blocks until we find the one that contains i.
for (auto* b : fAllocator->blocks()) {
@@ -236,6 +249,11 @@ public:
// Iterate from newest to oldest using a for-range loop.
RIter ritems() { return RIter(fAllocator.allocator()); }
CRIter ritems() const { return CRIter(fAllocator.allocator()); }
#if GR_TEST_UTILS
// For introspection
const GrBlockAllocator* allocator() const { return fAllocator.allocator(); }
#endif
};
/**

View File

@@ -38,13 +38,22 @@ static Block* get_block(GrSBlockAllocator<N>& pool, int blockIndex) {
return found;
}
// GrBlockAllocator holds on to the largest last-released block to reuse for new allocations,
// and this is still counted in its totalSize(). However, it's easier to reason about size - scratch
// in many of these tests.
template<size_t N>
static size_t total_size(GrSBlockAllocator<N>& pool) {
return pool->totalSize() - pool->testingOnly_scratchBlockSize();
}
template<size_t N>
static size_t add_block(GrSBlockAllocator<N>& pool) {
size_t currentSize = pool->totalSize();
while(pool->totalSize() == currentSize) {
size_t currentSize = total_size(pool);
GrBlockAllocator::Block* current = pool->currentBlock();
while(pool->currentBlock() == current) {
pool->template allocate<4>(pool->preallocSize() / 2);
}
return pool->totalSize() - currentSize;
return total_size(pool) - currentSize;
}
template<size_t N>
@@ -124,7 +133,7 @@ DEF_TEST(GrBlockAllocatorAlloc, r) {
validate_ptr(32, 96, p32, &p16);
// All of these allocations should be in the head block
REPORTER_ASSERT(r, pool->totalSize() == pool->preallocSize());
REPORTER_ASSERT(r, total_size(pool) == pool->preallocSize());
SkDEBUGCODE(pool->validate();)
// Requesting an allocation of avail() should not make a new block
@@ -137,21 +146,21 @@ DEF_TEST(GrBlockAllocatorAlloc, r) {
REPORTER_ASSERT(r, pool->currentBlock()->avail<4>() < 4);
auto pNextBlock = pool->allocate<4>(4);
validate_ptr(4, 4, pNextBlock, nullptr);
REPORTER_ASSERT(r, pool->totalSize() > pool->preallocSize());
REPORTER_ASSERT(r, total_size(pool) > pool->preallocSize());
// Allocating more than avail() makes another block
size_t currentSize = pool->totalSize();
size_t currentSize = total_size(pool);
size_t bigRequest = pool->currentBlock()->avail<4>() * 2;
auto pTooBig = pool->allocate<4>(bigRequest);
validate_ptr(4, bigRequest, pTooBig, nullptr);
REPORTER_ASSERT(r, pool->totalSize() > currentSize);
REPORTER_ASSERT(r, total_size(pool) > currentSize);
// Allocating more than the default growth policy (1024 in this case) will fulfill the request
REPORTER_ASSERT(r, pool->totalSize() - currentSize < 4096);
currentSize = pool->totalSize();
REPORTER_ASSERT(r, total_size(pool) - currentSize < 4096);
currentSize = total_size(pool);
auto pReallyTooBig = pool->allocate<4>(4096);
validate_ptr(4, 4096, pReallyTooBig, nullptr);
REPORTER_ASSERT(r, pool->totalSize() >= currentSize + 4096);
REPORTER_ASSERT(r, total_size(pool) >= currentSize + 4096);
SkDEBUGCODE(pool->validate();)
}
@@ -276,7 +285,7 @@ DEF_TEST(GrBlockAllocatorGrowthPolicy, r) {
GrSBlockAllocator<kInitSize> pool{(GrowthPolicy) gp};
SkDEBUGCODE(pool->validate();)
REPORTER_ASSERT(r, kExpectedSizes[gp][0] == pool->totalSize());
REPORTER_ASSERT(r, kExpectedSizes[gp][0] == total_size(pool));
for (int i = 1; i < kBlockCount; ++i) {
REPORTER_ASSERT(r, kExpectedSizes[gp][i] == add_block(pool));
}
@@ -327,7 +336,7 @@ DEF_TEST(GrBlockAllocatorReleaseBlock, r) {
void* firstAlloc = alloc_byte(pool);
size_t b1Size = pool->totalSize();
size_t b1Size = total_size(pool);
size_t b2Size = add_block(pool);
size_t b3Size = add_block(pool);
size_t b4Size = add_block(pool);
@@ -340,30 +349,31 @@ DEF_TEST(GrBlockAllocatorReleaseBlock, r) {
// Remove the 3 added blocks, but always remove the i = 1 to test intermediate removal (and
// on the last iteration, will test tail removal).
REPORTER_ASSERT(r, pool->totalSize() == b1Size + b2Size + b3Size + b4Size);
REPORTER_ASSERT(r, total_size(pool) == b1Size + b2Size + b3Size + b4Size);
pool->releaseBlock(get_block(pool, 1));
REPORTER_ASSERT(r, block_count(pool) == 3);
REPORTER_ASSERT(r, get_block(pool, 1)->metadata() == 3);
REPORTER_ASSERT(r, pool->totalSize() == b1Size + b3Size + b4Size);
REPORTER_ASSERT(r, total_size(pool) == b1Size + b3Size + b4Size);
pool->releaseBlock(get_block(pool, 1));
REPORTER_ASSERT(r, block_count(pool) == 2);
REPORTER_ASSERT(r, get_block(pool, 1)->metadata() == 4);
REPORTER_ASSERT(r, pool->totalSize() == b1Size + b4Size);
REPORTER_ASSERT(r, total_size(pool) == b1Size + b4Size);
pool->releaseBlock(get_block(pool, 1));
REPORTER_ASSERT(r, block_count(pool) == 1);
REPORTER_ASSERT(r, pool->totalSize() == b1Size);
REPORTER_ASSERT(r, total_size(pool) == b1Size);
// Since we're back to just the head block, if we add a new block, the growth policy should
// match the original sequence instead of continuing with "b5Size"
pool->resetScratchSpace();
size_t size = add_block(pool);
REPORTER_ASSERT(r, size == b2Size);
pool->releaseBlock(get_block(pool, 1));
// Explicitly release the head block and confirm it's reset
pool->releaseBlock(get_block(pool, 0));
REPORTER_ASSERT(r, pool->totalSize() == pool->preallocSize());
REPORTER_ASSERT(r, total_size(pool) == pool->preallocSize());
REPORTER_ASSERT(r, block_count(pool) == 1);
REPORTER_ASSERT(r, firstAlloc == alloc_byte(pool));
REPORTER_ASSERT(r, get_block(pool, 0)->metadata() == 0); // metadata reset too
@@ -415,6 +425,69 @@ DEF_TEST(GrBlockAllocatorIterateAndRelease, r) {
REPORTER_ASSERT(r, block_count(pool) == 1);
}
DEF_TEST(GrBlockAllocatorScratchBlockReserve, r) {
GrSBlockAllocator<256> pool;
size_t added = add_block(pool);
REPORTER_ASSERT(r, pool->testingOnly_scratchBlockSize() == 0);
size_t total = pool->totalSize();
pool->releaseBlock(pool->currentBlock());
// Total size shouldn't have changed, the released block should become scratch
REPORTER_ASSERT(r, pool->totalSize() == total);
REPORTER_ASSERT(r, (size_t) pool->testingOnly_scratchBlockSize() == added);
// But a reset definitely deletes any scratch block
pool->reset();
REPORTER_ASSERT(r, pool->testingOnly_scratchBlockSize() == 0);
// Reserving more than what's available adds a scratch block, and current block remains avail.
size_t avail = pool->currentBlock()->avail();
size_t reserve = avail + 1;
pool->reserve(reserve);
REPORTER_ASSERT(r, (size_t) pool->currentBlock()->avail() == avail);
// And rounds up to the fixed size of this pool's growth policy
REPORTER_ASSERT(r, (size_t) pool->testingOnly_scratchBlockSize() >= reserve &&
pool->testingOnly_scratchBlockSize() % 256 == 0);
// Allocating more than avail activates the scratch block (so totalSize doesn't change)
size_t preAllocTotalSize = pool->totalSize();
pool->allocate<1>(avail + 1);
REPORTER_ASSERT(r, (size_t) pool->testingOnly_scratchBlockSize() == 0);
REPORTER_ASSERT(r, pool->totalSize() == preAllocTotalSize);
// When reserving less than what's still available in the current block, no scratch block is
// added.
pool->reserve(pool->currentBlock()->avail());
REPORTER_ASSERT(r, pool->testingOnly_scratchBlockSize() == 0);
// Unless checking available bytes is disabled
pool->reserve(pool->currentBlock()->avail(), GrBlockAllocator::kIgnoreExistingBytes_Flag);
REPORTER_ASSERT(r, pool->testingOnly_scratchBlockSize() > 0);
// If kIgnoreGrowthPolicy is specified, the new scratch block should not have been rounded up
// to the growth policy's block size (which in this case is a fixed 256 bytes per block).
pool->resetScratchSpace();
pool->reserve(32, GrBlockAllocator::kIgnoreGrowthPolicy_Flag);
REPORTER_ASSERT(r, pool->testingOnly_scratchBlockSize() > 0 &&
pool->testingOnly_scratchBlockSize() < 256);
// When requesting an allocation larger than the current block and the scratch block, a new
// block is added, and the scratch block remains scratch.
GrBlockAllocator::Block* oldTail = pool->currentBlock();
avail = oldTail->avail();
size_t scratchAvail = 2 * avail;
pool->reserve(scratchAvail);
REPORTER_ASSERT(r, (size_t) pool->testingOnly_scratchBlockSize() >= scratchAvail); // sanity
// This allocation request is larger than both oldTail's available bytes and the scratch size,
// so we should add a new block and the scratch size should stay the same.
scratchAvail = pool->testingOnly_scratchBlockSize();
pool->allocate<1>(scratchAvail + 1);
REPORTER_ASSERT(r, pool->currentBlock() != oldTail);
REPORTER_ASSERT(r, (size_t) pool->testingOnly_scratchBlockSize() == scratchAvail);
}
// These tests ensure that the allocation padding mechanism works as intended
struct TestMeta {
int fX1;

View File

@@ -22,9 +22,13 @@ struct C {
int fID;
// Under the hood, GrTAllocator and GrBlockAllocator round up to max_align_t. If 'C' was just
// 4 bytes, that often means the internal blocks can squeeze a few extra instances in. This
// is fine, but makes predicting a little trickier, so make sure C is a bit bigger.
int fPadding[4];
static int gInstCnt;
};
int C::gInstCnt = 0;
}
@@ -156,6 +160,63 @@ static void run_allocator_test(GrTAllocator<C, N>* allocator, skiatest::Reporter
check_allocator(allocator, 100, 10, reporter);
}
template<int N>
static void run_reserve_test(skiatest::Reporter* reporter) {
constexpr int kItemsPerBlock = N + 4; // Make this a number > 1, even if N starting items == 1
GrTAllocator<C, N> list(kItemsPerBlock);
size_t initialSize = list.allocator()->totalSize();
// Should be able to add N instances of T w/o changing size from initialSize
for (int i = 0; i < N; ++i) {
list.push_back(C(i));
}
REPORTER_ASSERT(reporter, initialSize == list.allocator()->totalSize());
// Reserve room for 2*kItemsPerBlock items
list.reserve(2 * kItemsPerBlock);
REPORTER_ASSERT(reporter, list.count() == N); // count shouldn't change though
size_t reservedSize = list.allocator()->totalSize();
REPORTER_ASSERT(reporter, reservedSize >= initialSize + 2 * kItemsPerBlock * sizeof(C));
for (int i = 0; i < 2 * kItemsPerBlock; ++i) {
list.push_back(C(i));
}
REPORTER_ASSERT(reporter, reservedSize == list.allocator()->totalSize());
// Make the next block partially full (N > 0 but < kItemsPerBlock)
for (int i = 0; i < N; ++i) {
list.push_back(C(i));
}
// Reserve room again for 2*kItemsPerBlock, but reserve should automatically take into account
// the (kItemsPerBlock-N) items that are still available in the active block
list.reserve(2 * kItemsPerBlock);
int extraReservedCount = kItemsPerBlock + N;
// Because GrTAllocator normally allocates blocks sized for a fixed number of items, and
// extraReservedCount > items-per-block, the reservation is sized for extraReservedCount items
// instead of the growth policy's fixed block size.
REPORTER_ASSERT(reporter, (size_t) list.allocator()->testingOnly_scratchBlockSize() >=
extraReservedCount * sizeof(C));
reservedSize = list.allocator()->totalSize();
for (int i = 0; i < 2 * kItemsPerBlock; ++i) {
list.push_back(C(i));
}
REPORTER_ASSERT(reporter, reservedSize == list.allocator()->totalSize());
// If we reserve a count < items-per-block, it will use the fixed size from the growth policy.
list.reserve(2);
REPORTER_ASSERT(reporter, (size_t) list.allocator()->testingOnly_scratchBlockSize() >=
kItemsPerBlock * sizeof(C));
// Ensure the reservations didn't initialize any more C's than anticipated
int expectedInstanceCount = 2 * (N + 2 * kItemsPerBlock);
REPORTER_ASSERT(reporter, expectedInstanceCount == C::gInstCnt);
list.reset();
REPORTER_ASSERT(reporter, 0 == C::gInstCnt);
}
DEF_TEST(GrTAllocator, reporter) {
// Test combinations of allocators with and without stack storage and with different block sizes
GrTAllocator<C> a1(1);
@@ -175,4 +236,10 @@ DEF_TEST(GrTAllocator, reporter) {
GrTAllocator<C, 4> sa4;
run_allocator_test(&sa4, reporter);
run_reserve_test<1>(reporter);
run_reserve_test<2>(reporter);
run_reserve_test<3>(reporter);
run_reserve_test<4>(reporter);
run_reserve_test<5>(reporter);
}