Generalize iterator in GrTAllocator to be useful for other data types
This allows the iterator type/boilerplate to be reused for any other data collection that sits above GrBlockAllocator, as long as it's a fixed "type" with indices into a block. Also adds reverse iteration (which is useful for stack-like use cases).

Change-Id: Id9a205e8fb396a8558e360439240fd20c92c9700
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/302665
Commit-Queue: Michael Ludwig <michaelludwig@google.com>
Reviewed-by: Robert Phillips <robertphillips@google.com>
This commit is contained in: commit 26b4ffd362 (parent 3a22017402)
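For orientation (not part of the commit itself), a minimal sketch of how the resulting iteration API is meant to be used; the element type Effect and the visit() function are hypothetical, and the include path is assumed:

    #include "src/gpu/GrTAllocator.h"  // assumed path at the time of this change

    // Hypothetical element type; any T stored in a GrTAllocator works the same way.
    struct Effect { int fId = 0; };

    void visit(GrTAllocator<Effect>& effects) {
        // Forward iteration, oldest to newest.
        for (Effect& e : effects.items()) {
            e.fId++;
        }
        // Reverse iteration, newest to oldest -- the new capability added here,
        // handy for the stack-like (LIFO) use cases mentioned above.
        for (const Effect& e : effects.ritems()) {
            (void) e.fId;  // read-only pass
        }
    }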
@@ -12,6 +12,15 @@
 #include <type_traits>
 
+// Forward declarations for the iterators used by GrTAllocator
+using IndexFn = int (*)(const GrBlockAllocator::Block*);
+using NextFn = int (*)(const GrBlockAllocator::Block*, int);
+template<typename T, typename B> using ItemFn = T (*)(B*, int);
+template <typename T, bool Forward, bool Const, IndexFn Start, IndexFn End, NextFn Next,
+          ItemFn<T, typename std::conditional<Const, const GrBlockAllocator::Block,
+                                               GrBlockAllocator::Block>::type> Resolve>
+class BlockIndexIterator;
+
 /**
  * GrTAllocator manages dynamic storage for instances of T, reserving fixed blocks such that
  * allocation is amortized across every N instances. The optional StartingItems argument specifies
@@ -45,11 +54,9 @@ public:
     T& push_back() {
         return *new (this->pushItem()) T;
     }
 
     T& push_back(const T& t) {
         return *new (this->pushItem()) T(t);
     }
 
     T& push_back(T&& t) {
         return *new (this->pushItem()) T(std::move(t));
     }
@@ -66,18 +73,17 @@
         SkASSERT(fTotalCount > 0);
 
         GrBlockAllocator::Block* block = fAllocator->currentBlock();
-        int newCount = block->metadata() - 1;
 
         // Run dtor for the popped item
-        int releaseIndex;
-        GetItemAndOffset(block, newCount, &releaseIndex)->~T();
+        int releaseIndex = Last(block);
+        GetItem(block, releaseIndex).~T();
 
-        if (newCount == 0) {
+        if (releaseIndex == First(block)) {
             fAllocator->releaseBlock(block);
         } else {
             // Since this always follows LIFO, the block should always be able to release the memory
             SkAssertResult(block->release(releaseIndex, releaseIndex + sizeof(T)));
-            block->setMetadata(newCount);
+            block->setMetadata(Decrement(block, releaseIndex));
         }
 
         fTotalCount--;
@@ -89,11 +95,8 @@
     void reset() {
         // Invoke destructors in reverse order if not trivially destructible
         if /* constexpr */ (!std::is_trivially_destructible<T>::value) {
-            for (const auto* b : fAllocator->rblocks()) {
-                int c = b->metadata();
-                for (int i = c - 1; i >= 0; i--) {
-                    GetItem(b, i)->~T();
-                }
+            for (T& t : this->ritems()) {
+                t.~T();
             }
         }
 
@@ -108,10 +111,11 @@
 #ifdef SK_DEBUG
         // Confirm total count matches sum of block counts
         int count = 0;
-        int blockCount = 0;
         for (const auto* b :fAllocator->blocks()) {
-            count += b->metadata();
-            blockCount++;
+            if (b->metadata() == 0) {
+                continue; // skip empty
+            }
+            count += (sizeof(T) + Last(b) - First(b)) / sizeof(T);
         }
         SkASSERT(count == fTotalCount);
 #endif
@@ -129,148 +133,83 @@ public:
     T& front() {
         // This assumes that the head block actually have room to store the first item.
         static_assert(StartingItems >= 1);
-        SkASSERT(fTotalCount > 0);
-        return *(GetItem(fAllocator->headBlock(), 0));
+        SkASSERT(fTotalCount > 0 && fAllocator->headBlock()->metadata() > 0);
+        return GetItem(fAllocator->headBlock(), First(fAllocator->headBlock()));
     }
 
     /**
      * Access first item, only call if count() != 0
      */
     const T& front() const {
-        SkASSERT(fTotalCount > 0);
-        return *(GetItem(fAllocator->headBlock(), 0));
+        SkASSERT(fTotalCount > 0 && fAllocator->headBlock()->metadata() > 0);
+        return GetItem(fAllocator->headBlock(), First(fAllocator->headBlock()));
     }
 
     /**
      * Access last item, only call if count() != 0
      */
     T& back() {
-        SkASSERT(fTotalCount > 0);
-        int blockCount = fAllocator->currentBlock()->metadata();
-        return *(GetItem(fAllocator->currentBlock(), blockCount - 1));
+        SkASSERT(fTotalCount > 0 && fAllocator->currentBlock()->metadata() > 0);
+        return GetItem(fAllocator->currentBlock(), Last(fAllocator->currentBlock()));
     }
 
     /**
      * Access last item, only call if count() != 0
      */
     const T& back() const {
-        SkASSERT(fTotalCount > 0);
-        int blockCount = fAllocator->currentBlock()->metadata();
-        return *(GetItem(fAllocator->currentBlock(), blockCount - 1));
+        SkASSERT(fTotalCount > 0 && fAllocator->currentBlock()->metadata() > 0);
+        return GetItem(fAllocator->currentBlock(), Last(fAllocator->currentBlock()));
     }
 
-    template<bool Const>
-    class Iterator {
-        using BlockIter = typename GrBlockAllocator::BlockIter<true, Const>;
-        using C = typename std::conditional<Const, const T, T>::type;
-        using AllocatorT = typename std::conditional<Const, const GrTAllocator, GrTAllocator>::type;
-    public:
-        Iterator(AllocatorT* allocator) : fBlockIter(allocator->fAllocator.allocator()) {}
-
-        class Item {
-        public:
-            bool operator!=(const Item& other) const {
-                if (other.fBlock != fBlock) {
-                    // Treat an empty head block the same as the end block
-                    bool blockEmpty = !(*fBlock) || (*fBlock)->metadata() == 0;
-                    bool otherEmpty = !(*other.fBlock) || (*other.fBlock)->metadata() == 0;
-                    return !blockEmpty || !otherEmpty;
-                } else {
-                    // Same block, so differentiate by index into it (unless it's the "end" block
-                    // in which case ignore index).
-                    return SkToBool(*fBlock) && other.fIndex != fIndex;
-                }
-            }
-
-            C& operator*() const {
-                C* item = const_cast<C*>(static_cast<const C*>((*fBlock)->ptr(fIndex)));
-                SkDEBUGCODE(int offset;)
-                SkASSERT(item == GetItemAndOffset(*fBlock, fItem, &offset) && offset == fIndex);
-                return *item;
-            }
-
-            Item& operator++() {
-                const auto* block = *fBlock;
-                fItem++;
-                fIndex += sizeof(T);
-                if (fItem >= block->metadata()) {
-                    ++fBlock;
-                    fItem = 0;
-                    fIndex = StartingIndex(fBlock);
-                }
-                return *this;
-            }
-
-        private:
-            friend Iterator;
-            using BlockItem = typename BlockIter::Item;
-
-            Item(BlockItem block) : fBlock(block), fItem(0), fIndex(StartingIndex(block)) {}
-
-            static int StartingIndex(const BlockItem& block) {
-                return *block ? (*block)->template firstAlignedOffset<alignof(T)>() : 0;
-            }
-
-            BlockItem fBlock;
-            int fItem;
-            int fIndex;
-        };
-
-        Item begin() const { return Item(fBlockIter.begin()); }
-        Item end() const { return Item(fBlockIter.end()); }
-
-    private:
-        const BlockIter fBlockIter;
-    };
-
-    using Iter = Iterator<false>;
-    using CIter = Iterator<true>;
-
-    /**
-     * Prefer using a for-range loop when iterating over all allocated items, vs. index access.
-     */
-    Iter items() { return Iter(this); }
-    CIter items() const { return CIter(this); }
-
     /**
      * Access item by index. Not an operator[] since it should not be considered constant time.
      * Use for-range loops by calling items() or ritems() instead to access all added items in order
      */
     T& item(int i) {
-        // Iterate over blocks until we find the one that contains i.
         SkASSERT(i >= 0 && i < fTotalCount);
-        for (const auto* b : fAllocator->blocks()) {
-            int blockCount = b->metadata();
-            if (i < blockCount) {
-                return *GetItem(b, i);
+
+        // Iterate over blocks until we find the one that contains i.
+        for (auto* b : fAllocator->blocks()) {
+            if (b->metadata() == 0) {
+                continue; // skip empty
+            }
+
+            int start = First(b);
+            int end = Last(b) + sizeof(T); // exclusive
+            int index = start + i * sizeof(T);
+            if (index < end) {
+                return GetItem(b, index);
             } else {
-                i -= blockCount;
+                i -= (end - start) / sizeof(T);
             }
         }
         SkUNREACHABLE;
     }
     const T& item(int i) const {
-        return const_cast<GrTAllocator<T>*>(this)->item(i);
+        return const_cast<GrTAllocator*>(this)->item(i);
     }
 
 private:
     static constexpr size_t StartingSize =
             GrBlockAllocator::Overhead<alignof(T)>() + StartingItems * sizeof(T);
 
-    static T* GetItemAndOffset(const GrBlockAllocator::Block* block, int index, int* offset) {
-        SkASSERT(index >= 0 && index < block->metadata());
-        *offset = block->firstAlignedOffset<alignof(T)>() + index * sizeof(T);
-        return const_cast<T*>(static_cast<const T*>(block->ptr(*offset)));
+    static T& GetItem(GrBlockAllocator::Block* block, int index) {
+        return *static_cast<T*>(block->ptr(index));
     }
-
-    static T* GetItem(const GrBlockAllocator::Block* block, int index) {
-        int offset;
-        return GetItemAndOffset(block, index, &offset);
+    static const T& GetItem(const GrBlockAllocator::Block* block, int index) {
+        return *static_cast<const T*>(block->ptr(index));
     }
+    static int First(const GrBlockAllocator::Block* b) {
+        return b->firstAlignedOffset<alignof(T)>();
+    }
+    static int Last(const GrBlockAllocator::Block* b) {
+        return b->metadata();
+    }
+    static int Increment(const GrBlockAllocator::Block* b, int index) {
+        return index + sizeof(T);
+    }
+    static int Decrement(const GrBlockAllocator::Block* b, int index) {
+        return index - sizeof(T);
+    }
 
     void* pushItem() {
         // 'template' required because fAllocator is a template, calling a template member
         auto br = fAllocator->template allocate<alignof(T)>(sizeof(T));
-        br.fBlock->setMetadata(br.fBlock->metadata() + 1);
+        SkASSERT(br.fStart == br.fAlignedOffset ||
+                 br.fAlignedOffset == First(fAllocator->currentBlock()));
+        br.fBlock->setMetadata(br.fAlignedOffset);
         fTotalCount++;
         return br.fBlock->ptr(br.fAlignedOffset);
     }
@@ -282,6 +221,103 @@ private:
     // N represents the number of items, whereas GrSBlockAllocator takes total bytes, so must
     // account for the block allocator's size too.
     GrSBlockAllocator<StartingSize> fAllocator;
+
+public:
+    using Iter = BlockIndexIterator<T&, true, false, &First, &Last, &Increment, &GetItem>;
+    using CIter = BlockIndexIterator<const T&, true, true, &First, &Last, &Increment, &GetItem>;
+    using RIter = BlockIndexIterator<T&, false, false, &Last, &First, &Decrement, &GetItem>;
+    using CRIter = BlockIndexIterator<const T&, false, true, &Last, &First, &Decrement, &GetItem>;
+
+    /**
+     * Iterate over all items in allocation order (oldest to newest) using a for-range loop:
+     *
+     *   for (auto&& T : this->items()) {}
+     */
+    Iter items() { return Iter(fAllocator.allocator()); }
+    CIter items() const { return CIter(fAllocator.allocator()); }
+
+    // Iterate from newest to oldest using a for-range loop.
+    RIter ritems() { return RIter(fAllocator.allocator()); }
+    CRIter ritems() const { return CRIter(fAllocator.allocator()); }
 };
+
+/**
+ * BlockIndexIterator provides a reusable iterator template for collections built on top of a
+ * GrBlockAllocator, where each item is of the same type, and the index to an item can be iterated
+ * over in a known manner. It supports const and non-const, and forward and reverse, assuming it's
+ * provided with proper functions for starting, ending, and advancing.
+ */
+template <typename T,    // The element type (including any modifiers)
+          bool Forward,  // Are indices within a block increasing or decreasing with iteration?
+          bool Const,    // Whether or not T is const
+          IndexFn Start, // Returns the index of the first valid item in a block
+          IndexFn End,   // Returns the index of the last valid item (so it is inclusive)
+          NextFn Next,   // Returns the next index given the current index
+          ItemFn<T, typename std::conditional<Const, const GrBlockAllocator::Block,
+                                              GrBlockAllocator::Block>::type> Resolve>
+class BlockIndexIterator {
+    using BlockIter = typename GrBlockAllocator::BlockIter<Forward, Const>;
+public:
+    BlockIndexIterator(BlockIter iter) : fBlockIter(iter) {}
+
+    class Item {
+    public:
+        bool operator!=(const Item& other) const {
+            return other.fBlock != fBlock || (SkToBool(*fBlock) && other.fIndex != fIndex);
+        }
+
+        T operator*() const {
+            SkASSERT(*fBlock);
+            return Resolve(*fBlock, fIndex);
+        }
+
+        Item& operator++() {
+            const auto* block = *fBlock;
+            SkASSERT(block && block->metadata() > 0);
+            SkASSERT((Forward && Next(block, fIndex) > fIndex) ||
+                     (!Forward && Next(block, fIndex) < fIndex));
+            fIndex = Next(block, fIndex);
+            if ((Forward && fIndex > fEndIndex) || (!Forward && fIndex < fEndIndex)) {
+                ++fBlock;
+                this->setIndices();
+            }
+            return *this;
+        }
+
+    private:
+        friend BlockIndexIterator;
+        using BlockItem = typename BlockIter::Item;
+
+        Item(BlockItem block) : fBlock(block) {
+            this->setIndices();
+        }
+
+        void setIndices() {
+            // Skip empty blocks
+            while(*fBlock && (*fBlock)->metadata() == 0) {
+                ++fBlock;
+            }
+            if (*fBlock) {
+                fIndex = Start(*fBlock);
+                fEndIndex = End(*fBlock);
+            } else {
+                fIndex = 0;
+                fEndIndex = 0;
+            }
+
+            SkASSERT((Forward && fIndex <= fEndIndex) || (!Forward && fIndex >= fEndIndex));
+        }
+
+        BlockItem fBlock;
+        int fIndex;
+        int fEndIndex;
+    };
+
+    Item begin() const { return Item(fBlockIter.begin()); }
+    Item end() const { return Item(fBlockIter.end()); }
+
+private:
+    BlockIter fBlockIter;
+};
 
 #endif
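For illustration (not part of the change), a sketch of how another collection sitting on GrBlockAllocator could reuse BlockIndexIterator; MyIntList and its helpers are hypothetical and simply mirror the conventions GrTAllocator itself uses above, i.e. each block's metadata() stores the offset of the last item pushed into it:

    #include "src/gpu/GrTAllocator.h"  // brings in GrBlockAllocator and BlockIndexIterator (assumed path)

    // Hypothetical collection of ints stored directly in GrBlockAllocator blocks.
    class MyIntList {
    private:
        static int First(const GrBlockAllocator::Block* b) {
            return b->firstAlignedOffset<alignof(int)>();
        }
        static int Last(const GrBlockAllocator::Block* b) { return b->metadata(); }
        static int Next(const GrBlockAllocator::Block* b, int i) { return i + sizeof(int); }
        static int& Get(GrBlockAllocator::Block* b, int i) {
            return *static_cast<int*>(b->ptr(i));
        }

        GrSBlockAllocator<GrBlockAllocator::Overhead<alignof(int)>() + 16 * sizeof(int)> fAllocator;

    public:
        void push(int v) {
            auto br = fAllocator->allocate<alignof(int)>(sizeof(int));
            br.fBlock->setMetadata(br.fAlignedOffset);  // remember the last item's offset
            *static_cast<int*>(br.fBlock->ptr(br.fAlignedOffset)) = v;
        }

        // Reuse the generalized iterator: forward, non-const, int& elements.
        using Iter = BlockIndexIterator<int&, true, false, &First, &Last, &Next, &Get>;
        Iter items() { return Iter(fAllocator.allocator()); }
    };

    // Usage:
    //   MyIntList list;
    //   list.push(1); list.push(2);
    //   for (int& v : list.items()) { v *= 2; }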
@@ -52,6 +52,50 @@ static void check_allocator_helper(GrTAllocator<C, N>* allocator, int cnt, int p
     }
 }
 
+template<int N>
+static void check_iterator_helper(GrTAllocator<C, N>* allocator, const std::vector<C*>& expected,
+                                  skiatest::Reporter* reporter) {
+    const GrTAllocator<C, N>* cAlloc = allocator;
+    REPORTER_ASSERT(reporter, (size_t) allocator->count() == expected.size());
+    // Forward+const
+    int i = 0;
+    for (const C& c : cAlloc->items()) {
+        REPORTER_ASSERT(reporter, (uintptr_t) &c == (uintptr_t) expected[i]);
+        ++i;
+    }
+    REPORTER_ASSERT(reporter, (size_t) i == expected.size());
+
+    // Forward+non-const
+    i = 0;
+    for (C& c : allocator->items()) {
+        REPORTER_ASSERT(reporter, (uintptr_t) &c == (uintptr_t) expected[i]);
+        ++i;
+    }
+    REPORTER_ASSERT(reporter, (size_t) i == expected.size());
+
+    // Reverse+const
+    i = (int) expected.size() - 1;
+    for (const C& c : cAlloc->ritems()) {
+        REPORTER_ASSERT(reporter, (uintptr_t) &c == (uintptr_t) expected[i]);
+        --i;
+    }
+    REPORTER_ASSERT(reporter, i == -1);
+
+    // Reverse+non-const
+    i = (int) expected.size() - 1;
+    for (C& c : allocator->ritems()) {
+        REPORTER_ASSERT(reporter, (uintptr_t) &c == (uintptr_t) expected[i]);
+        --i;
+    }
+    REPORTER_ASSERT(reporter, i == -1);
+
+    // Also test random access
+    for (int i = 0; i < allocator->count(); ++i) {
+        REPORTER_ASSERT(reporter, (uintptr_t) &allocator->item(i) == (uintptr_t) expected[i]);
+        REPORTER_ASSERT(reporter, (uintptr_t) &cAlloc->item(i) == (uintptr_t) expected[i]);
+    }
+}
+
 // Adds cnt items to the allocator, tests the cnts and iterators, pops popCnt items and checks
 // again. Finally it resets the allocator and checks again.
 template<int N>
@@ -59,6 +103,7 @@ static void check_allocator(GrTAllocator<C, N>* allocator, int cnt, int popCnt,
                             skiatest::Reporter* reporter) {
     SkASSERT(allocator);
     SkASSERT(allocator->empty());
+    std::vector<C*> items;
     for (int i = 0; i < cnt; ++i) {
         // Try both variations of push_back().
         if (i % 1) {
@@ -66,9 +111,12 @@ static void check_allocator(GrTAllocator<C, N>* allocator, int cnt, int popCnt,
         } else {
             allocator->push_back() = C(i);
         }
+        items.push_back(&allocator->back());
     }
+    check_iterator_helper(allocator, items, reporter);
     check_allocator_helper(allocator, cnt, popCnt, reporter);
     allocator->reset();
+    check_iterator_helper(allocator, {}, reporter);
     check_allocator_helper(allocator, 0, 0, reporter);
 }
 