Store tasks to execute before VK secondary command buffers generically.

Repurpose GrTRecorder, which is currently unused, to store these
tasks. Reimplement it on top of SkArenaAlloc, using emplace methods
now that we have C++14.

Currently it stores copy and upload tasks. In the future it will store
transfer-out commands.

This removes the optimization that dropped queued copy-ins on
clear/discard. However, none of our existing tests exercised it.

Change-Id: I0474f77cc2d368461d542de50a7a0c5609312001
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/209643
Reviewed-by: Chris Dalton <csmartdalton@google.com>
Commit-Queue: Brian Salomon <bsalomon@google.com>
Brian Salomon 2019-04-23 15:24:31 -04:00 committed by Skia Commit-Bot
parent ba201aea74
commit 24d377eedf
8 changed files with 284 additions and 500 deletions
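
For orientation, here is a minimal sketch of how the reworked recorder is used
(the API matches the new GrTRecorder.h below; TaskBase and PrintTask are
hypothetical stand-ins for the Vulkan task types):

    // Hypothetical task hierarchy; mirrors how PreCommandBufferTask is used below.
    struct TaskBase {
        virtual ~TaskBase() = default;
        virtual void execute() = 0;
    };
    struct PrintTask : public TaskBase {
        PrintTask(int id) : fId(id) {}
        void execute() override { SkDebugf("task %d\n", fId); }
        int fId;
    };

    GrTRecorder<TaskBase> recorder(1024);  // initial arena reservation, in bytes
    recorder.emplace<PrintTask>(1);        // in-place append; one pointer of overhead
    recorder.emplace<PrintTask>(2);
    for (TaskBase& task : recorder) {      // forward iteration, insertion order
        task.execute();
    }
    recorder.reset();                      // destructs items in insertion order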

View File

@@ -134,50 +134,42 @@ template<typename TFlags> inline TFlags& operator&=(TFlags& a, GrTFlagsMask<TFlags> b);
 /**
  * divide, rounding up
  */
-static inline int32_t GrIDivRoundUp(int x, int y) {
+static inline constexpr int32_t GrIDivRoundUp(int x, int y) {
     SkASSERT(y > 0);
     return (x + (y-1)) / y;
 }
-static inline uint32_t GrUIDivRoundUp(uint32_t x, uint32_t y) {
+static inline constexpr uint32_t GrUIDivRoundUp(uint32_t x, uint32_t y) {
     return (x + (y-1)) / y;
 }
-static inline size_t GrSizeDivRoundUp(size_t x, size_t y) {
-    return (x + (y-1)) / y;
-}
-
-// compile time, evaluates Y multiple times
-#define GR_CT_DIV_ROUND_UP(X, Y) (((X) + ((Y)-1)) / (Y))
+static inline constexpr size_t GrSizeDivRoundUp(size_t x, size_t y) { return (x + (y - 1)) / y; }

 /**
  * align up
  */
-static inline uint32_t GrUIAlignUp(uint32_t x, uint32_t alignment) {
+static inline constexpr uint32_t GrUIAlignUp(uint32_t x, uint32_t alignment) {
     return GrUIDivRoundUp(x, alignment) * alignment;
 }
-static inline size_t GrSizeAlignUp(size_t x, size_t alignment) {
+static inline constexpr size_t GrSizeAlignUp(size_t x, size_t alignment) {
     return GrSizeDivRoundUp(x, alignment) * alignment;
 }
-
-// compile time, evaluates A multiple times
-#define GR_CT_ALIGN_UP(X, A) (GR_CT_DIV_ROUND_UP((X),(A)) * (A))

 /**
  * amount of pad needed to align up
  */
-static inline uint32_t GrUIAlignUpPad(uint32_t x, uint32_t alignment) {
+static inline constexpr uint32_t GrUIAlignUpPad(uint32_t x, uint32_t alignment) {
     return (alignment - x % alignment) % alignment;
 }
-static inline size_t GrSizeAlignUpPad(size_t x, size_t alignment) {
+static inline constexpr size_t GrSizeAlignUpPad(size_t x, size_t alignment) {
     return (alignment - x % alignment) % alignment;
 }

 /**
  * align down
  */
-static inline uint32_t GrUIAlignDown(uint32_t x, uint32_t alignment) {
+static inline constexpr uint32_t GrUIAlignDown(uint32_t x, uint32_t alignment) {
     return (x / alignment) * alignment;
 }
-static inline size_t GrSizeAlignDown(size_t x, uint32_t alignment) {
+static inline constexpr size_t GrSizeAlignDown(size_t x, uint32_t alignment) {
     return (x / alignment) * alignment;
 }
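
Because the helpers are now constexpr, the GR_CT_* macros become redundant and
their argument-reevaluation hazard goes away. A quick sanity check of the
arithmetic above (values follow directly from the formulas shown;
GR_STATIC_ASSERT is Skia's static_assert wrapper):

    GR_STATIC_ASSERT(GrSizeDivRoundUp(10, 4) == 3);  // (10 + 3) / 4
    GR_STATIC_ASSERT(GrSizeAlignUp(10, 8) == 16);    // 2 * 8
    GR_STATIC_ASSERT(GrSizeAlignUpPad(10, 8) == 6);  // (8 - 10 % 8) % 8
    GR_STATIC_ASSERT(GrUIAlignDown(10, 8) == 8);     // (10 / 8) * 8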

View File

@@ -119,8 +119,8 @@ protected:
     enum {
         // We assume this alignment is good enough for everybody.
         kAlignment   = 8,
-        kHeaderSize  = GR_CT_ALIGN_UP(sizeof(BlockHeader), kAlignment),
-        kPerAllocPad = GR_CT_ALIGN_UP(sizeof(AllocHeader), kAlignment),
+        kHeaderSize  = GrSizeAlignUp(sizeof(BlockHeader), kAlignment),
+        kPerAllocPad = GrSizeAlignUp(sizeof(AllocHeader), kAlignment),
     };
 };

View File

@@ -8,45 +8,31 @@
 #ifndef GrTRecorder_DEFINED
 #define GrTRecorder_DEFINED

-#include "SkTypes.h"
-#include <new>
-
-template<typename TBase, typename TAlign> class GrTRecorder;
-template<typename TItem> struct GrTRecorderAllocWrapper;
+#include "GrTypes.h"
+#include "SkArenaAlloc.h"
+#include "SkTLogic.h"

 /**
  * Records a list of items with a common base type, optional associated data, and
- * permanent memory addresses.
+ * permanent memory addresses. It supports forward iteration.
  *
- * This class preallocates its own chunks of memory for hosting objects, so new items can
- * be created without excessive calls to malloc().
- *
- * To create a new item and append it to the back of the list, use the following macros:
- *
- *     GrNEW_APPEND_TO_RECORDER(recorder, SubclassName, (args))
- *     GrNEW_APPEND_WITH_DATA_TO_RECORDER(recorder, SubclassName, (args), sizeOfData)
+ * This class allocates space for the stored items and associated data in a SkArenaAlloc.
+ * There is an overhead of 1 pointer for each stored item.
  *
  * Upon reset or delete, the items are destructed in the same order they were received,
  * not reverse (stack) order.
  *
- * @param TBase   Common base type of items in the list. If TBase is not a class with a
- *                virtual destructor, the client is responsible for invoking any necessary
- *                destructors.
- *
- *                For now, any subclass used in the list must have the same start address
- *                as TBase (or in other words, the types must be convertible via
- *                reinterpret_cast<>). Classes with multiple inheritance (or any subclass
- *                on an obscure compiler) may not be compatible. This is runtime asserted
- *                in debug builds.
- *
- * @param TAlign  A type whose size is the desired memory alignment for object allocations.
- *                This should be the largest known alignment requirement for all objects
- *                that may be stored in the list.
+ * @param TBase   Common base type of items in the list. It is assumed that the items are
+ *                trivially destructable or that TBase has a virtual destructor as ~TBase()
+ *                is called to destroy the items.
  */
-template<typename TBase, typename TAlign> class GrTRecorder : SkNoncopyable {
+template <typename TBase> class GrTRecorder {
+private:
+    template <bool IsConst> class IterImpl;
+
 public:
-    class Iter;
-    class ReverseIter;
+    using iterator = IterImpl<false>;
+    using const_iterator = IterImpl<true>;

     /**
      * Create a recorder.
@@ -54,338 +40,137 @@ public:
      * @param initialSizeInBytes  The amount of memory reserved by the recorder initially,
                                   and after calls to reset().
      */
-    GrTRecorder(int initialSizeInBytes)
-        : fHeadBlock(MemBlock::Alloc(LengthOf(initialSizeInBytes), nullptr)),
-          fTailBlock(fHeadBlock),
-          fLastItem(nullptr) {}
+    explicit GrTRecorder(size_t initialSizeInBytes) : fArena(initialSizeInBytes) {}
+    GrTRecorder(const GrTRecorder&) = delete;
+    GrTRecorder& operator=(const GrTRecorder&) = delete;

-    ~GrTRecorder() {
-        this->reset();
-        MemBlock::Free(fHeadBlock);
-    }
+    ~GrTRecorder() { this->reset(); }

-    bool empty() { return !fLastItem; }
+    bool empty() { return !SkToBool(fTail); }

+    /** The last item. Must not be empty. */
     TBase& back() {
         SkASSERT(!this->empty());
-        return *reinterpret_cast<TBase*>(fLastItem);
+        return *fTail->get();
     }

-    /**
-     * Removes and destroys the last block added to the recorder. It may not be called when the
-     * recorder is empty.
-     */
-    void pop_back();
+    /** Forward mutable iteration */
+    iterator begin() { return iterator(fHead); }
+    iterator end() { return iterator(nullptr); }

-    /**
-     * Destruct all items in the list and reset to empty.
-     */
+    /** Forward const iteration */
+    const_iterator begin() const { return const_iterator(fHead); }
+    const_iterator end() const { return const_iterator(nullptr); }
+
+    /** Destruct all items in the list and reset to empty. Frees memory allocated from arena. */
     void reset();

     /**
-     * Retrieve the extra data associated with an item that was allocated using
-     * GrNEW_APPEND_WITH_DATA_TO_RECORDER().
-     *
-     * @param item  The item whose data to retrieve. The pointer must be of the same type
-     *              that was allocated initally; it can't be a pointer to a base class.
-     *
-     * @return The item's associated data.
+     * Emplace a new TItem (which derives from TBase) in the recorder. This requires equivalence
+     * between reinterpret_cast<TBase*> and static_cast<TBase*> when operating on TItem*.
+     * Multiple inheritance may make this not true. It is runtime asserted.
      */
-    template<typename TItem> static const void* GetDataForItem(const TItem* item) {
-        const TAlign* ptr = reinterpret_cast<const TAlign*>(item);
-        return &ptr[length_of<TItem>::kValue];
-    }
-    template<typename TItem> static void* GetDataForItem(TItem* item) {
-        TAlign* ptr = reinterpret_cast<TAlign*>(item);
-        return &ptr[length_of<TItem>::kValue];
+    template <typename TItem, typename... Args> TItem& emplace(Args... args) {
+        return this->emplaceWithData<TItem, Args...>(0, std::forward<Args>(args)...);
     }

+    /**
+     * Emplace a new TItem (which derives from TBase) in the recorder with extra data space. The
+     * extra data immediately follows the stored item with no extra alignment. E.g.,
+     *     void* extraData = &recorder->emplaceWithData<Subclass>(dataSize, ...) + 1;
+     *
+     * This requires equivalence between reinterpret_cast<TBase*> and static_cast<TBase*> when
+     * operating on TItem*. Multiple inheritance may make this not true. It is runtime asserted.
+     */
+    template <typename TItem, typename... Args>
+    SK_WHEN((std::is_base_of<TBase, TItem>::value), TItem&)
+    emplaceWithData(size_t extraDataSize, Args... args);
+
 private:
-    template<typename TItem> struct length_of {
-        enum { kValue = (sizeof(TItem) + sizeof(TAlign) - 1) / sizeof(TAlign) };
-    };
-    static int LengthOf(int bytes) { return (bytes + sizeof(TAlign) - 1) / sizeof(TAlign); }
-
     struct Header {
-        int fTotalLength;  // The length of an entry including header, item, and data in TAligns.
-        int fPrevLength;   // Same but for the previous entry. Used for iterating backwards.
+        Header* fNext = nullptr;
+        // We always store the T immediately after the header (and ensure proper alignment). See
+        // emplaceWithData() implementation.
+        TBase* get() const { return reinterpret_cast<TBase*>(const_cast<Header*>(this) + 1); }
     };

-    template<typename TItem> void* alloc_back(int dataLength);
-
-    struct MemBlock : SkNoncopyable {
-        /** Allocates a new block and appends it to prev if not nullptr. The length param is in
-            units of TAlign. */
-        static MemBlock* Alloc(int length, MemBlock* prev) {
-            MemBlock* block = reinterpret_cast<MemBlock*>(
-                sk_malloc_throw(sizeof(TAlign) * (length_of<MemBlock>::kValue + length)));
-            block->fLength = length;
-            block->fBack = 0;
-            block->fNext = nullptr;
-            block->fPrev = prev;
-            if (prev) {
-                SkASSERT(nullptr == prev->fNext);
-                prev->fNext = block;
-            }
-            return block;
-        }
-
-        // Frees from this block forward. Also adjusts prev block's next ptr.
-        static void Free(MemBlock* block) {
-            if (block && block->fPrev) {
-                SkASSERT(block->fPrev->fNext == block);
-                block->fPrev->fNext = nullptr;
-            }
-            while (block) {
-                MemBlock* next = block->fNext;
-                sk_free(block);
-                block = next;
-            }
-        }
-
-        TAlign& operator [](int i) {
-            return reinterpret_cast<TAlign*>(this)[length_of<MemBlock>::kValue + i];
-        }
-
-        int       fLength;  // Length in units of TAlign of the block.
-        int       fBack;    // Offset, in TAligns, to unused portion of the memory block.
-        MemBlock* fNext;
-        MemBlock* fPrev;
-    };
-    MemBlock* const fHeadBlock;
-    MemBlock*       fTailBlock;
-
-    void* fLastItem;  // really a ptr to TBase
-
-    template<typename TItem> friend struct GrTRecorderAllocWrapper;
-
-    template <typename UBase, typename UAlign, typename UItem>
-    friend void* operator new(size_t, GrTRecorder<UBase, UAlign>&,
-                              const GrTRecorderAllocWrapper<UItem>&);
-
-    friend class Iter;
-    friend class ReverseIter;
+    SkArenaAlloc fArena;
+    Header* fHead = nullptr;
+    Header* fTail = nullptr;
 };

 ////////////////////////////////////////////////////////////////////////////////

-template<typename TBase, typename TAlign>
-void GrTRecorder<TBase, TAlign>::pop_back() {
-    SkASSERT(fLastItem);
-    Header* header = reinterpret_cast<Header*>(
-        reinterpret_cast<TAlign*>(fLastItem) - length_of<Header>::kValue);
-    fTailBlock->fBack -= header->fTotalLength;
-    reinterpret_cast<TBase*>(fLastItem)->~TBase();
-
-    int lastItemLength = header->fPrevLength;
-
-    if (!header->fPrevLength) {
-        // We popped the first entry in the recorder.
-        SkASSERT(0 == fTailBlock->fBack);
-        fLastItem = nullptr;
-        return;
-    }
-    while (!fTailBlock->fBack) {
-        // We popped the last entry in a block that isn't the head block. Move back a block but
-        // don't free it since we'll probably grow into it shortly.
-        fTailBlock = fTailBlock->fPrev;
-        SkASSERT(fTailBlock);
-    }
-    fLastItem = &(*fTailBlock)[fTailBlock->fBack - lastItemLength + length_of<Header>::kValue];
-}
-
-template<typename TBase, typename TAlign>
-template<typename TItem>
-void* GrTRecorder<TBase, TAlign>::alloc_back(int dataLength) {
-    // Find the header of the previous entry and get its length. We need to store that in the new
-    // header for backwards iteration (pop_back()).
-    int prevLength = 0;
-    if (fLastItem) {
-        Header* lastHeader = reinterpret_cast<Header*>(
-            reinterpret_cast<TAlign*>(fLastItem) - length_of<Header>::kValue);
-        prevLength = lastHeader->fTotalLength;
-    }
-
-    const int totalLength = length_of<Header>::kValue + length_of<TItem>::kValue + dataLength;
-
-    // Check if there is room in the current block and if not walk to next (allocating if
-    // necessary). Note that pop_back() and reset() can leave the recorder in a state where it
-    // has preallocated blocks hanging off the tail that are currently unused.
-    while (fTailBlock->fBack + totalLength > fTailBlock->fLength) {
-        if (!fTailBlock->fNext) {
-            fTailBlock = MemBlock::Alloc(SkTMax(2 * fTailBlock->fLength, totalLength), fTailBlock);
-        } else {
-            fTailBlock = fTailBlock->fNext;
-        }
-        SkASSERT(0 == fTailBlock->fBack);
-    }
-
-    Header* header = reinterpret_cast<Header*>(&(*fTailBlock)[fTailBlock->fBack]);
-    void* rawPtr = &(*fTailBlock)[fTailBlock->fBack + length_of<Header>::kValue];
-    header->fTotalLength = totalLength;
-    header->fPrevLength = prevLength;
-    fLastItem = rawPtr;
-    fTailBlock->fBack += totalLength;
-
-    // FIXME: We currently require that the base and subclass share the same start address.
-    // This is not required by the C++ spec, and is likely to not be true in the case of
-    // multiple inheritance or a base class that doesn't have virtual methods (when the
-    // subclass does). It would be ideal to find a more robust solution that comes at no
-    // extra cost to performance or code generality.
-    SkDEBUGCODE(void* baseAddr = fLastItem;
-                void* subclassAddr = rawPtr);
-    SkASSERT(baseAddr == subclassAddr);
-
-    return rawPtr;
+template <typename TBase>
+template <typename TItem, typename... Args>
+inline SK_WHEN((std::is_base_of<TBase, TItem>::value), TItem&)
+GrTRecorder<TBase>::emplaceWithData(size_t extraDataSize, Args... args) {
+    static constexpr size_t kTAlign = alignof(TItem);
+    static constexpr size_t kHeaderAlign = alignof(Header);
+    static constexpr size_t kAllocAlign = kTAlign > kHeaderAlign ? kTAlign : kHeaderAlign;
+    static constexpr size_t kTItemOffset = GrSizeAlignUp(sizeof(Header), kAllocAlign);
+    // We're assuming if we back up from kItemOffset by sizeof(Header) we will still be aligned.
+    GR_STATIC_ASSERT(sizeof(Header) % alignof(Header) == 0);
+    const size_t totalSize = kTItemOffset + sizeof(TItem) + extraDataSize;
+    auto alloc = reinterpret_cast<char*>(fArena.makeBytesAlignedTo(totalSize, kAllocAlign));
+    Header* header = new (alloc + kTItemOffset - sizeof(Header)) Header();
+    if (fTail) {
+        fTail->fNext = header;
+    }
+    fTail = header;
+    if (!fHead) {
+        fHead = header;
+    }
+    auto* item = new (alloc + kTItemOffset) TItem(std::forward<Args>(args)...);
+    // We require that we can reinterpret_cast between TBase* and TItem*. Could not figure out how
+    // to statically assert this. See proposal for std::is_initial_base_of here:
+    // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0466r0.pdf
+    SkASSERT(reinterpret_cast<uintptr_t>(item) ==
+             reinterpret_cast<uintptr_t>(static_cast<TBase*>(item)));
+    return *item;
 }

+template <typename TBase> inline void GrTRecorder<TBase>::reset() {
+    for (auto& i : *this) {
+        i.~TBase();
+    }
+    GR_STATIC_ASSERT(std::is_trivially_destructible<Header>::value);
+    fHead = fTail = nullptr;
+    fArena.reset();
+}
+
 /**
- * Iterates through a recorder from front to back. The initial state of the iterator is
- * to not have the front item loaded yet; next() must be called first. Usage model:
- *
- *     GrTRecorder<TBase, TAlign>::Iter iter(recorder);
- *     while (iter.next()) {
- *         iter->doSomething();
- *     }
+ * Iterates through a recorder front-to-back, const or not.
  */
-template<typename TBase, typename TAlign>
-class GrTRecorder<TBase, TAlign>::Iter {
+template <typename TBase> template <bool IsConst> class GrTRecorder<TBase>::IterImpl {
+private:
+    using T = typename std::conditional<IsConst, const TBase, TBase>::type;
+
 public:
-    Iter(GrTRecorder& recorder) : fBlock(recorder.fHeadBlock), fPosition(0), fItem(nullptr) {}
+    IterImpl() = default;

-    bool next() {
-        while (fPosition >= fBlock->fBack) {
-            SkASSERT(fPosition == fBlock->fBack);
-            if (!fBlock->fNext) {
-                return false;
-            }
-            fBlock = fBlock->fNext;
-            fPosition = 0;
-        }
-
-        Header* header = reinterpret_cast<Header*>(&(*fBlock)[fPosition]);
-        fItem = reinterpret_cast<TBase*>(&(*fBlock)[fPosition + length_of<Header>::kValue]);
-        fPosition += header->fTotalLength;
-        return true;
+    IterImpl operator++() {
+        fCurr = fCurr->fNext;
+        return *this;
     }

-    TBase* get() const {
-        SkASSERT(fItem);
-        return fItem;
+    IterImpl operator++(int) {
+        auto old = fCurr;
+        fCurr = fCurr->fNext;
+        return {old};
     }

-    TBase* operator->() const { return this->get(); }
+    T& operator*() const { return *fCurr->get(); }
+    T* operator->() const { return fCurr->get(); }
+
+    bool operator==(const IterImpl& that) const { return fCurr == that.fCurr; }
+    bool operator!=(const IterImpl& that) const { return !(*this == that); }

 private:
-    MemBlock* fBlock;
-    int       fPosition;
-    TBase*    fItem;
-};
+    IterImpl(Header* curr) : fCurr(curr) {}

-/**
- * Iterates through a recorder in reverse, from back to front. This version mirrors "Iter",
- * so the initial state is to have recorder.back() loaded already. (Note that this will
- * assert if the recorder is empty.) Usage model:
- *
- *     GrTRecorder<TBase, TAlign>::ReverseIter reverseIter(recorder);
- *     do {
- *         reverseIter->doSomething();
- *     } while (reverseIter.previous());
- */
-template<typename TBase, typename TAlign>
-class GrTRecorder<TBase, TAlign>::ReverseIter {
-public:
-    ReverseIter(GrTRecorder& recorder)
-        : fBlock(recorder.fTailBlock),
-          fItem(&recorder.back()) {
-        Header* lastHeader = reinterpret_cast<Header*>(
-            reinterpret_cast<TAlign*>(fItem) - length_of<Header>::kValue);
-        fPosition = fBlock->fBack - lastHeader->fTotalLength;
-    }
-
-    bool previous() {
-        Header* header = reinterpret_cast<Header*>(&(*fBlock)[fPosition]);
-
-        while (0 == fPosition) {
-            if (!fBlock->fPrev) {
-                // We've reached the front of the recorder.
-                return false;
-            }
-            fBlock = fBlock->fPrev;
-            fPosition = fBlock->fBack;
-        }
-
-        fPosition -= header->fPrevLength;
-        SkASSERT(fPosition >= 0);
-
-        fItem = reinterpret_cast<TBase*>(&(*fBlock)[fPosition + length_of<Header>::kValue]);
-        return true;
-    }
-
-    TBase* get() const { return fItem; }
-    TBase* operator->() const { return this->get(); }
-
-private:
-    MemBlock* fBlock;
-    int       fPosition;
-    TBase*    fItem;
-};
-
-template<typename TBase, typename TAlign>
-void GrTRecorder<TBase, TAlign>::reset() {
-    Iter iter(*this);
-    while (iter.next()) {
-        iter->~TBase();
-    }
-
-    // Assume the next time this recorder fills up it will use approximately the same
-    // amount of space as last time. Leave enough space for up to ~50% growth; free
-    // everything else.
-    if (fTailBlock->fBack <= fTailBlock->fLength / 2) {
-        MemBlock::Free(fTailBlock->fNext);
-    } else if (fTailBlock->fNext) {
-        MemBlock::Free(fTailBlock->fNext->fNext);
-        fTailBlock->fNext->fNext = nullptr;
-    }
-
-    for (MemBlock* block = fHeadBlock; block; block = block->fNext) {
-        block->fBack = 0;
-    }
-    fTailBlock = fHeadBlock;
-    fLastItem = nullptr;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-template<typename TItem> struct GrTRecorderAllocWrapper {
-    GrTRecorderAllocWrapper() : fDataLength(0) {}
-
-    template <typename TBase, typename TAlign>
-    GrTRecorderAllocWrapper(const GrTRecorder<TBase, TAlign>&, int sizeOfData)
-        : fDataLength(GrTRecorder<TBase, TAlign>::LengthOf(sizeOfData)) {}
-
-    const int fDataLength;
-};
-
-template <typename TBase, typename TAlign, typename TItem>
-void* operator new(size_t size, GrTRecorder<TBase, TAlign>& recorder,
-                   const GrTRecorderAllocWrapper<TItem>& wrapper) {
-    SkASSERT(size == sizeof(TItem));
-    return recorder.template alloc_back<TItem>(wrapper.fDataLength);
-}
-
-template <typename TBase, typename TAlign, typename TItem>
-void operator delete(void*, GrTRecorder<TBase, TAlign>&, const GrTRecorderAllocWrapper<TItem>&) {
-    // We only provide an operator delete to work around compiler warnings that can come
-    // up for an unmatched operator new when compiling with exceptions.
-    SK_ABORT("Invalid Operation");
-}
-
-#define GrNEW_APPEND_TO_RECORDER(recorder, type_name, args) \
-    (new (recorder, GrTRecorderAllocWrapper<type_name>()) type_name args)
-
-#define GrNEW_APPEND_WITH_DATA_TO_RECORDER(recorder, type_name, args, size_of_data) \
-    (new (recorder, GrTRecorderAllocWrapper<type_name>(recorder, size_of_data)) type_name args)
+    Header* fCurr = nullptr;

+    friend class GrTRecorder<TBase>; // To construct from Header.
+};
+
 #endif
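
To make the new entry layout concrete, here is a sketch of what
emplaceWithData() produces in the arena and how a caller reaches the extra
data (MyItem and n are hypothetical):

    // One arena entry, aligned to max(alignof(Header), alignof(TItem)):
    //
    //   [ pad | Header (fNext) | TItem | extraDataSize bytes ]
    //                          ^-- address returned by Header::get()
    //
    MyItem& item = recorder.emplaceWithData<MyItem>(n * sizeof(int), /*ctor arg*/ 42);
    int* extra = reinterpret_cast<int*>(&item + 1);  // the n ints follow the item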

View File

@@ -315,7 +315,7 @@ bool GrCCFiller::prepareToDraw(GrOnFlushResourceProvider* onFlushRP) {
     // QuadPointInstance[]. So, reinterpreting the instance data as QuadPointInstance[], we start
     // them on the first index that will not overwrite previous TriPointInstance data.
     int quadBaseIdx =
-            GR_CT_DIV_ROUND_UP(triEndIdx * sizeof(TriPointInstance), sizeof(QuadPointInstance));
+            GrSizeDivRoundUp(triEndIdx * sizeof(TriPointInstance), sizeof(QuadPointInstance));
     fBaseInstances[0].fWeightedTriangles = quadBaseIdx;
     fBaseInstances[1].fWeightedTriangles = fBaseInstances[0].fWeightedTriangles +
                                            fTotalPrimitiveCounts[0].fWeightedTriangles;

View File

@@ -573,8 +573,8 @@ bool GrCCStroker::prepareToDraw(GrOnFlushResourceProvider* onFlushRP) {
     fBaseInstances[1].fStrokes[0] = fInstanceCounts[0]->fStrokes[0];
     int endLinearStrokesIdx = fBaseInstances[1].fStrokes[0] + fInstanceCounts[1]->fStrokes[0];
-    int cubicStrokesIdx = GR_CT_DIV_ROUND_UP(endLinearStrokesIdx * sizeof(LinearStrokeInstance),
-                                             sizeof(CubicStrokeInstance));
+    int cubicStrokesIdx = GrSizeDivRoundUp(endLinearStrokesIdx * sizeof(LinearStrokeInstance),
+                                           sizeof(CubicStrokeInstance));
     for (int i = 1; i <= kMaxNumLinearSegmentsLog2; ++i) {
         for (int j = 0; j < kNumScissorModes; ++j) {
             fBaseInstances[j].fStrokes[i] = cubicStrokesIdx;
@@ -582,16 +582,16 @@ bool GrCCStroker::prepareToDraw(GrOnFlushResourceProvider* onFlushRP) {
         }
     }
-    int trianglesIdx = GR_CT_DIV_ROUND_UP(cubicStrokesIdx * sizeof(CubicStrokeInstance),
-                                          sizeof(TriangleInstance));
+    int trianglesIdx = GrSizeDivRoundUp(cubicStrokesIdx * sizeof(CubicStrokeInstance),
+                                        sizeof(TriangleInstance));
     fBaseInstances[0].fTriangles = trianglesIdx;
     fBaseInstances[1].fTriangles =
             fBaseInstances[0].fTriangles + fInstanceCounts[0]->fTriangles;
     int endTrianglesIdx =
             fBaseInstances[1].fTriangles + fInstanceCounts[1]->fTriangles;
-    int conicsIdx = GR_CT_DIV_ROUND_UP(endTrianglesIdx * sizeof(TriangleInstance),
-                                       sizeof(ConicInstance));
+    int conicsIdx =
+            GrSizeDivRoundUp(endTrianglesIdx * sizeof(TriangleInstance), sizeof(ConicInstance));
     fBaseInstances[0].fConics = conicsIdx;
     fBaseInstances[1].fConics = fBaseInstances[0].fConics + fInstanceCounts[0]->fConics;
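
These call sites repack one instance buffer in place: an end index counted in
units of one instance struct is converted into a base index in units of a
larger struct, and rounding up guarantees the new instances begin past the
bytes already written. With hypothetical sizes sizeof(LinearStrokeInstance) == 8
and sizeof(CubicStrokeInstance) == 20:

    // 7 linear strokes end at byte 56; the first cubic slot that cannot
    // overlap them is ceil(56 / 20) == 3.
    GR_STATIC_ASSERT(GrSizeDivRoundUp(7 * 8, 20) == 3);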

View File

@@ -78,11 +78,7 @@ void get_vk_load_store_ops(GrLoadOp loadOpIn, GrStoreOp storeOpIn,
     }
 }

-GrVkGpuRTCommandBuffer::GrVkGpuRTCommandBuffer(GrVkGpu* gpu)
-        : fCurrentCmdInfo(-1)
-        , fGpu(gpu)
-        , fLastPipelineState(nullptr) {
-}
+GrVkGpuRTCommandBuffer::GrVkGpuRTCommandBuffer(GrVkGpu* gpu) : fGpu(gpu) {}

 void GrVkGpuRTCommandBuffer::init() {
     GrVkRenderPass::LoadStoreOps vkColorOps(fVkColorLoadOp, fVkColorStoreOp);
@@ -164,22 +160,15 @@ void GrVkGpuRTCommandBuffer::submit() {
     GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);
     GrVkImage* targetImage = vkRT->msaaImage() ? vkRT->msaaImage() : vkRT;
     GrStencilAttachment* stencil = fRenderTarget->renderTargetPriv().getStencilAttachment();
+    auto currPreCmd = fPreCommandBufferTasks.begin();

     for (int i = 0; i < fCommandBufferInfos.count(); ++i) {
         CommandBufferInfo& cbInfo = fCommandBufferInfos[i];

-        for (int j = 0; j < cbInfo.fPreDrawUploads.count(); ++j) {
-            InlineUploadInfo& iuInfo = cbInfo.fPreDrawUploads[j];
-            iuInfo.fFlushState->doUpload(iuInfo.fUpload);
-        }
-
-        for (int j = 0; j < cbInfo.fPreCopies.count(); ++j) {
-            CopyInfo& copyInfo = cbInfo.fPreCopies[j];
-            fGpu->copySurface(fRenderTarget, fOrigin, copyInfo.fSrc.get(), copyInfo.fSrcOrigin,
-                              copyInfo.fSrcRect, copyInfo.fDstPoint, copyInfo.fShouldDiscardDst);
+        for (int c = 0; c < cbInfo.fNumPreCmds; ++c, ++currPreCmd) {
+            currPreCmd->execute(this);
         }

         // TODO: Many things create a scratch texture which adds the discard immediately, but then
         // don't draw to it right away. This causes the discard to be ignored and we get yelled at
         // for loading uninitialized data. However, once MDB lands with reordering, the discard will
@@ -260,6 +249,7 @@ void GrVkGpuRTCommandBuffer::submit() {
                                              &cbInfo.fColorClearValue, vkRT, fOrigin, iBounds);
         }
     }
+    SkASSERT(currPreCmd == fPreCommandBufferTasks.end());
 }

 void GrVkGpuRTCommandBuffer::set(GrRenderTarget* rt, GrSurfaceOrigin origin,
@@ -298,6 +288,7 @@ void GrVkGpuRTCommandBuffer::reset() {
         cbInfo.fRenderPass->unref(fGpu);
     }
     fCommandBufferInfos.reset();
+    fPreCommandBufferTasks.reset();

     fCurrentCmdInfo = -1;
@@ -341,9 +332,6 @@ void GrVkGpuRTCommandBuffer::discard() {
         oldRP->unref(fGpu);

         cbInfo.fBounds.join(fRenderTarget->getBoundsRect());
         cbInfo.fLoadStoreState = LoadStoreState::kStartsWithDiscard;
-        // If we are going to discard the whole render target then the results of any copies we did
-        // immediately before to the target won't matter, so just drop them.
-        cbInfo.fPreCopies.reset();
     }
 }
@@ -447,10 +435,6 @@ void GrVkGpuRTCommandBuffer::onClear(const GrFixedClip& clip, const SkPMColor4f& color) {
             cbInfo.fColorClearValue.color = {{color.fR, color.fG, color.fB, color.fA}};
             cbInfo.fLoadStoreState = LoadStoreState::kStartsWithClear;
-            // If we are going to clear the whole render target then the results of any copies we did
-            // immediately before to the target won't matter, so just drop them.
-            cbInfo.fPreCopies.reset();
-
             // Update command buffer bounds
             cbInfo.fBounds.join(fRenderTarget->getBoundsRect());
             return;
@@ -545,7 +529,21 @@ void GrVkGpuRTCommandBuffer::inlineUpload(GrOpFlushState* state,
     if (!fCommandBufferInfos[fCurrentCmdInfo].fIsEmpty) {
         this->addAdditionalRenderPass();
     }
-    fCommandBufferInfos[fCurrentCmdInfo].fPreDrawUploads.emplace_back(state, upload);
+
+    class InlineUpload : public PreCommandBufferTask {
+    public:
+        InlineUpload(GrOpFlushState* state, const GrDeferredTextureUploadFn& upload)
+                : fFlushState(state), fUpload(upload) {}
+        ~InlineUpload() override = default;
+
+        void execute(GrVkGpuRTCommandBuffer*) override { fFlushState->doUpload(fUpload); }
+
+    private:
+        GrOpFlushState* fFlushState;
+        GrDeferredTextureUploadFn fUpload;
+    };
+
+    fPreCommandBufferTasks.emplace<InlineUpload>(state, upload);
+    ++fCommandBufferInfos[fCurrentCmdInfo].fNumPreCmds;
 }

 void GrVkGpuRTCommandBuffer::copy(GrSurface* src, GrSurfaceOrigin srcOrigin, const SkIRect& srcRect,
@@ -555,9 +553,35 @@ void GrVkGpuRTCommandBuffer::copy(GrSurface* src, GrSurfaceOrigin srcOrigin, const SkIRect& srcRect,
         this->addAdditionalRenderPass();
     }

-    fCommandBufferInfos[fCurrentCmdInfo].fPreCopies.emplace_back(
+    class Copy : public PreCommandBufferTask {
+    public:
+        Copy(GrSurface* src, GrSurfaceOrigin srcOrigin, const SkIRect& srcRect,
+             const SkIPoint& dstPoint, bool shouldDiscardDst)
+                : fSrc(src)
+                , fSrcOrigin(srcOrigin)
+                , fSrcRect(srcRect)
+                , fDstPoint(dstPoint)
+                , fShouldDiscardDst(shouldDiscardDst) {}
+        ~Copy() override = default;
+
+        void execute(GrVkGpuRTCommandBuffer* cb) override {
+            cb->fGpu->copySurface(cb->fRenderTarget, cb->fOrigin, fSrc.get(), fSrcOrigin, fSrcRect,
+                                  fDstPoint, fShouldDiscardDst);
+        }
+
+    private:
+        using Src = GrPendingIOResource<GrSurface, kRead_GrIOType>;
+        Src fSrc;
+        GrSurfaceOrigin fSrcOrigin;
+        SkIRect fSrcRect;
+        SkIPoint fDstPoint;
+        bool fShouldDiscardDst;
+    };
+
+    fPreCommandBufferTasks.emplace<Copy>(
             src, srcOrigin, srcRect, dstPoint,
             LoadStoreState::kStartsWithDiscard == cbInfo.fLoadStoreState);
+    ++fCommandBufferInfos[fCurrentCmdInfo].fNumPreCmds;

     if (LoadStoreState::kLoadAndStore != cbInfo.fLoadStoreState) {
         // Change the render pass to do a load and store so we don't lose the results of our copy
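
The commit message notes that transfer-out commands will be stored the same way
in the future; under the new scheme that is just one more PreCommandBufferTask
subclass. A hypothetical sketch, not part of this CL (the destination type and
arguments are assumptions):

    // Hypothetical future task, following the InlineUpload/Copy pattern above.
    class TransferOut : public PreCommandBufferTask {
    public:
        TransferOut(sk_sp<GrGpuBuffer> dst, const SkIRect& srcRect)
                : fDst(std::move(dst)), fSrcRect(srcRect) {}
        void execute(GrVkGpuRTCommandBuffer* cb) override {
            // Record the surface-to-buffer transfer against cb->fGpu here, before
            // the secondary command buffer for this render pass is submitted.
        }
    private:
        sk_sp<GrGpuBuffer> fDst;  // hypothetical destination buffer
        SkIRect fSrcRect;
    };
    // fPreCommandBufferTasks.emplace<TransferOut>(std::move(dst), srcRect);
    // ++fCommandBufferInfos[fCurrentCmdInfo].fNumPreCmds;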

View File

@@ -12,6 +12,7 @@
 #include "GrColor.h"
 #include "GrMesh.h"
+#include "GrTRecorder.h"
 #include "GrTypes.h"
 #include "GrVkPipelineState.h"
 #include "vk/GrVkTypes.h"
@@ -148,28 +149,16 @@ private:
     void addAdditionalCommandBuffer();
     void addAdditionalRenderPass();

-    struct InlineUploadInfo {
-        InlineUploadInfo(GrOpFlushState* state, const GrDeferredTextureUploadFn& upload)
-                : fFlushState(state), fUpload(upload) {}
-        GrOpFlushState* fFlushState;
-        GrDeferredTextureUploadFn fUpload;
-    };
-
-    struct CopyInfo {
-        CopyInfo(GrSurface* src, GrSurfaceOrigin srcOrigin, const SkIRect& srcRect,
-                 const SkIPoint& dstPoint, bool shouldDiscardDst)
-                : fSrc(src)
-                , fSrcOrigin(srcOrigin)
-                , fSrcRect(srcRect)
-                , fDstPoint(dstPoint)
-                , fShouldDiscardDst(shouldDiscardDst) {}
-        using Src = GrPendingIOResource<GrSurface, kRead_GrIOType>;
-        Src fSrc;
-        GrSurfaceOrigin fSrcOrigin;
-        SkIRect fSrcRect;
-        SkIPoint fDstPoint;
-        bool fShouldDiscardDst;
+    class PreCommandBufferTask {
+    public:
+        virtual ~PreCommandBufferTask() = default;
+        virtual void execute(GrVkGpuRTCommandBuffer*) = 0;
+
+    protected:
+        PreCommandBufferTask() = default;
+        PreCommandBufferTask(const PreCommandBufferTask&) = delete;
+        PreCommandBufferTask& operator=(const PreCommandBufferTask&) = delete;
     };

     enum class LoadStoreState {
@@ -183,14 +172,11 @@ private:
         using SampledTexture = GrPendingIOResource<GrVkTexture, kRead_GrIOType>;

         const GrVkRenderPass*                 fRenderPass;
         SkTArray<GrVkSecondaryCommandBuffer*> fCommandBuffers;
+        int                                   fNumPreCmds = 0;
         VkClearValue                          fColorClearValue;
         SkRect                                fBounds;
         bool                                  fIsEmpty = true;
         LoadStoreState                        fLoadStoreState = LoadStoreState::kUnknown;
-        // The PreDrawUploads and PreCopies are sent to the GPU before submitting the secondary
-        // command buffer.
-        SkTArray<InlineUploadInfo>            fPreDrawUploads;
-        SkTArray<CopyInfo>                    fPreCopies;
         // Array of images that will be sampled and thus need to be transferred to sampled layout
         // before submitting the secondary command buffers. This must happen after we do any predraw
         // uploads or copies.
@@ -201,16 +187,16 @@ private:
         }
     };

     SkTArray<CommandBufferInfo> fCommandBufferInfos;
-    int                         fCurrentCmdInfo;
-
-    GrVkGpu*                    fGpu;
-    VkAttachmentLoadOp          fVkColorLoadOp;
-    VkAttachmentStoreOp         fVkColorStoreOp;
-    VkAttachmentLoadOp          fVkStencilLoadOp;
-    VkAttachmentStoreOp         fVkStencilStoreOp;
-    SkPMColor4f                 fClearColor;
-    GrVkPipelineState*          fLastPipelineState;
+    GrTRecorder<PreCommandBufferTask> fPreCommandBufferTasks{1024};
+    GrVkGpu*                          fGpu;
+    GrVkPipelineState*                fLastPipelineState = nullptr;
+    SkPMColor4f                       fClearColor;
+    VkAttachmentLoadOp                fVkColorLoadOp;
+    VkAttachmentStoreOp               fVkColorStoreOp;
+    VkAttachmentLoadOp                fVkStencilLoadOp;
+    VkAttachmentStoreOp               fVkStencilStoreOp;
+    int                               fCurrentCmdInfo = -1;

     typedef GrGpuRTCommandBuffer INHERITED;
 };

View File

@@ -14,7 +14,7 @@

 ////////////////////////////////////////////////////////////////////////////////

-static int activeRecorderItems = 0;
+static int gActiveRecorderItems = 0;

 class IntWrapper {
 public:
@@ -25,92 +25,43 @@ private:
     int fValue;
 };

-static void test_empty_back_and_pop(skiatest::Reporter* reporter) {
-    SkRandom rand;
-    for (int data = 0; data < 2; ++data) {
-        // Do this with different starting sizes to have different alignment between blocks and
-        // pops. We want to test poping the first guy off, guys in the middle of the block, and the
-        // first guy on a non-head block.
-        for (int j = 0; j < 8; ++j) {
-            GrTRecorder<IntWrapper, int> recorder(j);
-            REPORTER_ASSERT(reporter, recorder.empty());
-
-            for (int i = 0; i < 100; ++i) {
-                if (data) {
-                    REPORTER_ASSERT(reporter, i == *GrNEW_APPEND_TO_RECORDER(recorder,
-                                                                             IntWrapper, (i)));
-                } else {
-                    REPORTER_ASSERT(reporter, i ==
-                                    *GrNEW_APPEND_WITH_DATA_TO_RECORDER(recorder,
-                                                                        IntWrapper, (i),
-                                                                        rand.nextULessThan(10)));
-                }
-                REPORTER_ASSERT(reporter, !recorder.empty());
-                REPORTER_ASSERT(reporter, i == recorder.back());
-                if (0 == (i % 7)) {
-                    recorder.pop_back();
-                    if (i > 0) {
-                        REPORTER_ASSERT(reporter, !recorder.empty());
-                        REPORTER_ASSERT(reporter, i-1 == recorder.back());
-                    }
-                }
-            }
-
-            REPORTER_ASSERT(reporter, !recorder.empty());
-            recorder.reset();
-            REPORTER_ASSERT(reporter, recorder.empty());
-        }
-    }
-}
-
 struct ExtraData {
-    typedef GrTRecorder<ExtraData, int> Recorder;
+    typedef GrTRecorder<ExtraData> Recorder;

     ExtraData(int i) : fData(i) {
         int* extraData = this->extraData();
         for (int j = 0; j < i; j++) {
             extraData[j] = i;
         }
-        ++activeRecorderItems;
-    }
-    ~ExtraData() {
-        --activeRecorderItems;
-    }
-    int* extraData() {
-        return reinterpret_cast<int*>(Recorder::GetDataForItem(this));
+        ++gActiveRecorderItems;
     }
+    ~ExtraData() { --gActiveRecorderItems; }
+    int* extraData() { return reinterpret_cast<int*>(this + 1); }
     int fData;
 };

 static void test_extra_data(skiatest::Reporter* reporter) {
     ExtraData::Recorder recorder(0);
+    REPORTER_ASSERT(reporter, recorder.empty());
     for (int i = 0; i < 100; ++i) {
-        GrNEW_APPEND_WITH_DATA_TO_RECORDER(recorder, ExtraData, (i), i * sizeof(int));
+        recorder.emplaceWithData<ExtraData>(i * sizeof(int), i);
+        REPORTER_ASSERT(reporter, !recorder.empty());
     }
-    REPORTER_ASSERT(reporter, 100 == activeRecorderItems);
+    REPORTER_ASSERT(reporter, 100 == gActiveRecorderItems);

-    ExtraData::Recorder::Iter iter(recorder);
-    for (int i = 0; i < 100; ++i) {
-        REPORTER_ASSERT(reporter, iter.next());
+    auto iter = recorder.begin();
+    for (int i = 0; i < 100; ++i, ++iter) {
         REPORTER_ASSERT(reporter, i == iter->fData);
         for (int j = 0; j < i; j++) {
             REPORTER_ASSERT(reporter, i == iter->extraData()[j]);
         }
     }
-    REPORTER_ASSERT(reporter, !iter.next());
-
-    ExtraData::Recorder::ReverseIter reverseIter(recorder);
-    for (int i = 99; i >= 0; --i) {
-        REPORTER_ASSERT(reporter, i == reverseIter->fData);
-        for (int j = 0; j < i; j++) {
-            REPORTER_ASSERT(reporter, i == reverseIter->extraData()[j]);
-        }
-        REPORTER_ASSERT(reporter, reverseIter.previous() == !!i);
-    }
+    REPORTER_ASSERT(reporter, iter == recorder.end());

     recorder.reset();
-    REPORTER_ASSERT(reporter, 0 == activeRecorderItems);
+    REPORTER_ASSERT(reporter, 0 == gActiveRecorderItems);
+    REPORTER_ASSERT(reporter, recorder.begin() == recorder.end());
+    REPORTER_ASSERT(reporter, recorder.empty());
 }

 enum ClassType {
@@ -125,14 +76,14 @@ enum ClassType {

 class Base {
 public:
-    typedef GrTRecorder<Base, void*> Recorder;
+    typedef GrTRecorder<Base> Recorder;

     Base() {
         fMatrix.reset();
-        ++activeRecorderItems;
+        ++gActiveRecorderItems;
     }

-    virtual ~Base() { --activeRecorderItems; }
+    virtual ~Base() { --gActiveRecorderItems; }

     virtual ClassType getType() { return kBase_ClassType; }

@@ -179,7 +130,7 @@ private:
 class SubclassExtraData : public Base {
 public:
     SubclassExtraData(int length) : fLength(length) {
-        int* data = reinterpret_cast<int*>(Recorder::GetDataForItem(this));
+        int* data = reinterpret_cast<int*>(this + 1);
         for (int i = 0; i < fLength; ++i) {
             data[i] = ValueAt(i);
         }
@@ -189,7 +140,7 @@ public:
     virtual void validate(skiatest::Reporter* reporter) const {
         Base::validate(reporter);

-        const int* data = reinterpret_cast<const int*>(Recorder::GetDataForItem(this));
+        const int* data = reinterpret_cast<const int*>(this + 1);
         for (int i = 0; i < fLength; ++i) {
             REPORTER_ASSERT(reporter, ValueAt(i) == data[i]);
         }
@@ -218,8 +169,9 @@ public:
 private:
     uint32_t fCurrent;
 };
-static void test_subclasses_iters(skiatest::Reporter*, Order&, Base::Recorder::Iter&,
-                                  Base::Recorder::ReverseIter&, int = 0);
+
+static void test_subclasses_iter(skiatest::Reporter*, Order&, Base::Recorder::iterator&, int = 0);
+
 static void test_subclasses(skiatest::Reporter* reporter) {
     Base::Recorder recorder(1024);
@@ -227,23 +179,23 @@ static void test_subclasses(skiatest::Reporter* reporter) {
     for (int i = 0; i < 1000; i++) {
         switch (order.next()) {
             case kBase_ClassType:
-                GrNEW_APPEND_TO_RECORDER(recorder, Base, ());
+                recorder.emplace<Base>();
                 break;

             case kSubclass_ClassType:
-                GrNEW_APPEND_TO_RECORDER(recorder, Subclass, ());
+                recorder.emplace<Subclass>();
                 break;

             case kSubSubclass_ClassType:
-                GrNEW_APPEND_TO_RECORDER(recorder, SubSubclass, ());
+                recorder.emplace<SubSubclass>();
                 break;

             case kSubclassExtraData_ClassType:
-                GrNEW_APPEND_WITH_DATA_TO_RECORDER(recorder, SubclassExtraData, (i), sizeof(int) * i);
+                recorder.emplaceWithData<SubclassExtraData>(sizeof(int) * i, i);
                 break;

             case kSubclassEmpty_ClassType:
-                GrNEW_APPEND_TO_RECORDER(recorder, SubclassEmpty, ());
+                recorder.emplace<SubclassEmpty>();
                 break;

             default:
@@ -251,44 +203,89 @@ static void test_subclasses(skiatest::Reporter* reporter) {
                 break;
         }
     }
-    REPORTER_ASSERT(reporter, 1000 == activeRecorderItems);
+    REPORTER_ASSERT(reporter, 1000 == gActiveRecorderItems);

     order.reset();
-    Base::Recorder::Iter iter(recorder);
-    Base::Recorder::ReverseIter reverseIter(recorder);
-
-    test_subclasses_iters(reporter, order, iter, reverseIter);
-
-    REPORTER_ASSERT(reporter, !iter.next());
+    auto iter = recorder.begin();
+    test_subclasses_iter(reporter, order, iter);
+    REPORTER_ASSERT(reporter, iter == recorder.end());

     // Don't reset the recorder. It should automatically destruct all its items.
 }

-static void test_subclasses_iters(skiatest::Reporter* reporter, Order& order,
-                                  Base::Recorder::Iter& iter,
-                                  Base::Recorder::ReverseIter& reverseIter, int i) {
+static void test_subclasses_iter(skiatest::Reporter* reporter, Order& order,
                                  Base::Recorder::iterator& iter, int i) {
     if (i >= 1000) {
         return;
     }

     ClassType classType = order.next();

-    REPORTER_ASSERT(reporter, iter.next());
     REPORTER_ASSERT(reporter, classType == iter->getType());
     iter->validate(reporter);
-
-    test_subclasses_iters(reporter, order, iter, reverseIter, i + 1);
-
-    REPORTER_ASSERT(reporter, classType == reverseIter->getType());
-    reverseIter->validate(reporter);
-    REPORTER_ASSERT(reporter, reverseIter.previous() == !!i);
+    ++iter;
+    test_subclasses_iter(reporter, order, iter, i + 1);
 }

+struct AlignBase {
+    AlignBase() { ++gActiveRecorderItems; }
+    ~AlignBase() { --gActiveRecorderItems; }
+    char fValue;
+};
+struct alignas(16) Align16 : public AlignBase {};
+struct alignas(32) Align32 : public AlignBase {};
+struct alignas(64) Align64 : public AlignBase {};
+struct alignas(128) Align128 : public AlignBase {};
+
+static void test_alignment(skiatest::Reporter* reporter) {
+    GrTRecorder<AlignBase> recorder(0);
+    SkTArray<size_t> expectedAlignments;
+    SkRandom random;
+    for (int i = 0; i < 100; ++i) {
+        size_t dataSize = random.nextULessThan(20);
+        switch (random.nextULessThan(5)) {
+            case 0:
+                recorder.emplaceWithData<AlignBase>(dataSize);
+                expectedAlignments.push_back(alignof(AlignBase));
+                break;
+            case 1:
+                recorder.emplaceWithData<Align16>(dataSize);
+                expectedAlignments.push_back(16);
+                break;
+            case 2:
+                recorder.emplaceWithData<Align32>(dataSize);
+                expectedAlignments.push_back(32);
+                break;
+            case 3:
+                recorder.emplaceWithData<Align64>(dataSize);
+                expectedAlignments.push_back(64);
+                break;
+            case 4:
+                recorder.emplaceWithData<Align128>(dataSize);
+                expectedAlignments.push_back(128);
+                break;
+        }
+        recorder.back().fValue = i;
+    }
+
+    int i = 0;
+    for (const auto& x : recorder) {
+        REPORTER_ASSERT(reporter, x.fValue == i);
+        auto pointer = reinterpret_cast<uintptr_t>(&x);
+        auto mask = static_cast<uintptr_t>(expectedAlignments[i]) - 1;
+        REPORTER_ASSERT(reporter, !(pointer & mask));
+        i++;
+    }
+    REPORTER_ASSERT(reporter, i == 100);
+}
+
 DEF_GPUTEST(GrTRecorder, reporter, /* options */) {
-    test_empty_back_and_pop(reporter);
-
     test_extra_data(reporter);
-    REPORTER_ASSERT(reporter, 0 == activeRecorderItems); // test_extra_data should call reset().
+    REPORTER_ASSERT(reporter, 0 == gActiveRecorderItems); // test_extra_data should call reset().

     test_subclasses(reporter);
-    REPORTER_ASSERT(reporter, 0 == activeRecorderItems); // Ensure ~GrTRecorder invokes dtors.
+    REPORTER_ASSERT(reporter, 0 == gActiveRecorderItems); // Ensure ~GrTRecorder invokes dtors.
+
+    test_alignment(reporter);
+    REPORTER_ASSERT(reporter, 0 == gActiveRecorderItems); // Ensure ~GrTRecorder invokes dtors.
 }
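
The mask check in test_alignment() relies on alignments being powers of two:
an address is a multiple of A exactly when its low log2(A) bits are zero,
i.e. (pointer & (A - 1)) == 0. For instance:

    GR_STATIC_ASSERT((0x140 & (64 - 1)) == 0);  // 0x140 == 320 == 5 * 64
    GR_STATIC_ASSERT((0x148 & (64 - 1)) != 0);  // 0x148 == 328, not 64-aligned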