Revert "Support moving blocks from one allocator to another"

This reverts commit 0f064041bf.

Reason for revert: unit test failures on some Windows bots

Original change's description:
> Support moving blocks from one allocator to another
> 
> Uses this new capability and the previously implemented reserve()
> feature to implement efficient concatenation of GrTAllocators.
> 
> Change-Id: I2aff42eaf75e74e3b595d3069b6a271fa7329465
> Reviewed-on: https://skia-review.googlesource.com/c/skia/+/303665
> Commit-Queue: Michael Ludwig <michaelludwig@google.com>
> Reviewed-by: Robert Phillips <robertphillips@google.com>

TBR=robertphillips@google.com,csmartdalton@google.com,michaelludwig@google.com

Change-Id: I931f2462ecf6e04d40a671336d0de7d80efd313d
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/304604
Reviewed-by: Michael Ludwig <michaelludwig@google.com>
Commit-Queue: Michael Ludwig <michaelludwig@google.com>
Author: Michael Ludwig <michaelludwig@google.com>, 2020-07-21 15:22:48 +00:00 (committed by Skia Commit-Bot)
Parent: a6db510563
Commit: 423dd689df
5 changed files with 0 additions and 222 deletions


@@ -142,21 +142,6 @@ void GrBlockAllocator::releaseBlock(Block* block) {
    SkASSERT(fN1 >= 1 && fN0 >= 0);
}

void GrBlockAllocator::stealHeapBlocks(GrBlockAllocator* other) {
    Block* toSteal = other->fHead.fNext;
    if (toSteal) {
        // The other's next block connects back to this allocator's current tail, and its new tail
        // becomes the end of other's block linked list.
        SkASSERT(other->fTail != &other->fHead);
        toSteal->fPrev = fTail;
        fTail->fNext = toSteal;
        fTail = other->fTail;

        // The other allocator becomes just its inline head block
        other->fTail = &other->fHead;
        other->fHead.fNext = nullptr;
    } // else no block to steal
}

void GrBlockAllocator::reset() {
    for (Block* b : this->rblocks()) {
        if (b == &fHead) {


@@ -342,19 +342,6 @@
     */
    void releaseBlock(Block* block);

    /**
     * Detach every heap-allocated block owned by 'other' and concatenate them to this allocator's
     * list of blocks. This memory is now managed by this allocator. Since this only transfers
     * ownership of a Block, and a Block itself does not move, any previous allocations remain
     * valid and associated with their original Block instances. GrBlockAllocator-level functions
     * that accept allocated pointers (e.g. findOwningBlock) must now use this allocator and not
     * 'other' for these allocations.
     *
     * The head block of 'other' cannot be stolen, so higher-level allocators and memory structures
     * must handle that data differently.
     */
    void stealHeapBlocks(GrBlockAllocator* other);

    /**
     * Explicitly free all blocks (invalidating all allocations), and resets the head block to its
     * default state. The allocator-level metadata is reset to 0 as well.

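For orientation, a minimal usage sketch of the stealHeapBlocks() API declared above (not part of the diff; the block sizes are arbitrary, the snippet assumes the reverted change is applied, and it mirrors the GrBlockAllocatorStealBlocks test removed further down):

    GrSBlockAllocator<256> dst;
    GrSBlockAllocator<128> src;

    // Grow 'src' past its inline head block so it owns at least one heap block.
    src->allocate<1>(4 * 128);

    // Splice every heap block owned by 'src' onto the end of 'dst'. Allocations made
    // from 'src' remain valid, but findOwningBlock() must now be asked of 'dst'; 'src'
    // is left with only its inline head block.
    dst->stealHeapBlocks(src.allocator());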

@@ -65,15 +65,6 @@
        return *new (this->pushItem()) T(std::forward<Args>(args)...);
    }

    /**
     * Move all items from 'other' to the end of this collection. When this returns, 'other' will
     * be empty. Items in 'other' may be moved as part of compacting the pre-allocated start of
     * 'other' into this list (using T's move constructor or memcpy if T is trivially copyable), but
     * this is O(StartingItems) and not O(N). All other items are concatenated in O(1).
     */
    template <int SI>
    void concat(GrTAllocator<T, SI>&& other);

    /**
     * Allocate, if needed, space to hold N more Ts before another malloc will occur.
     */
@@ -202,10 +193,6 @@
    }

private:
    // Let other GrTAllocators have access (only ever used when T and S are the same but you cannot
    // have partial specializations declared as a friend...)
    template<typename S, int N> friend class GrTAllocator;

    static constexpr size_t StartingSize =
            GrBlockAllocator::Overhead<alignof(T)>() + StartingItems * sizeof(T);
@@ -269,66 +256,6 @@
#endif
};

template <typename T, int SI1>
template <int SI2>
void GrTAllocator<T, SI1>::concat(GrTAllocator<T, SI2>&& other) {
    // Manually move all items in other's head block into this list; all heap blocks from 'other'
    // will be appended to the block linked list (no per-item moves needed then).
    int headItemCount = 0;
    GrBlockAllocator::Block* headBlock = other.fAllocator->headBlock();
    SkDEBUGCODE(int oldCount = this->count();)
    if (headBlock->metadata() > 0) {
        int headStart = First(headBlock);
        int headEnd = Last(headBlock) + sizeof(T); // exclusive
        headItemCount = (headEnd - headStart) / sizeof(T);

        int avail = fAllocator->currentBlock()->template avail<alignof(T)>() / sizeof(T);
        if (headItemCount > avail) {
            // Make sure there is extra room for the items beyond what's already avail. Use the
            // kIgnoreGrowthPolicy_Flag to make this reservation as tight as possible since
            // 'other's heap blocks will be appended after it and any extra space is wasted.
            fAllocator->template reserve<alignof(T)>((headItemCount - avail) * sizeof(T),
                                                     GrBlockAllocator::kIgnoreExistingBytes_Flag |
                                                     GrBlockAllocator::kIgnoreGrowthPolicy_Flag);
        }

        if /*constexpr*/ (std::is_trivially_copyable<T>::value) {
            // memcpy all items at once (or twice between current and reserved space).
            SkASSERT(std::is_trivially_destructible<T>::value);
            auto copy = [](GrBlockAllocator::Block* src, int start, GrBlockAllocator* dst, int n) {
                auto target = dst->template allocate<alignof(T)>(n * sizeof(T));
                memcpy(target.fBlock->ptr(target.fAlignedOffset), src->ptr(start), n * sizeof(T));
                target.fBlock->setMetadata(target.fAlignedOffset + (n - 1) * sizeof(T));
            };
            copy(headBlock, headStart, fAllocator.allocator(), std::min(headItemCount, avail));
            if (headItemCount > avail) {
                copy(headBlock, headStart + avail * sizeof(T),
                     fAllocator.allocator(), headItemCount - avail);
            }
            fAllocator->setMetadata(fAllocator->metadata() + headItemCount);
        } else {
            // Move every item over one at a time
            for (int i = headStart; i < headEnd; i += sizeof(T)) {
                T& toMove = GetItem(headBlock, i);
                this->push_back(std::move(toMove));
                // Anything of interest should have been moved, but run this since T isn't
                // a trusted type.
                toMove.~T(); // NOLINT(bugprone-use-after-move): calling dtor always allowed
            }
        }

        other.fAllocator->releaseBlock(headBlock);
    }

    // other's head block must have been fully copied since it cannot be stolen
    SkASSERT(other.fAllocator->headBlock()->metadata() == 0 &&
             fAllocator->metadata() == oldCount + headItemCount);
    fAllocator->stealHeapBlocks(other.fAllocator.allocator());
    fAllocator->setMetadata(fAllocator->metadata() +
                            (other.fAllocator->metadata() - headItemCount));
    other.fAllocator->setMetadata(0);
}

/**
 * BlockIndexIterator provides a reusable iterator template for collections built on top of a
 * GrBlockAllocator, where each item is of the same type, and the index to an item can be iterated

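A minimal sketch of how the removed concat() above was meant to be called (not part of the diff; the element type, StartingItems values, and counts are arbitrary, and run_concat_test() further down exercises the same pattern):

    GrTAllocator<int, 4> listA;
    GrTAllocator<int, 4> listB;
    for (int i = 0; i < 10; ++i) {
        listA.push_back(i);
    }
    for (int i = 0; i < 10; ++i) {
        listB.push_back(10 + i);
    }

    // Items in listB's inline head block are copied/moved into listA (int is trivially
    // copyable, so the memcpy path applies); any heap blocks of listB are re-linked in
    // O(1) via stealHeapBlocks(). listB is left empty.
    listA.concat(std::move(listB));
    SkASSERT(listA.count() == 20 && listB.count() == 0);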

@@ -488,46 +488,6 @@ DEF_TEST(GrBlockAllocatorScratchBlockReserve, r) {
    REPORTER_ASSERT(r, (size_t) pool->testingOnly_scratchBlockSize() == scratchAvail);
}

DEF_TEST(GrBlockAllocatorStealBlocks, r) {
    GrSBlockAllocator<256> poolA;
    GrSBlockAllocator<128> poolB;

    add_block(poolA);
    add_block(poolA);
    add_block(poolA);

    add_block(poolB);
    add_block(poolB);

    char* bAlloc = (char*) alloc_byte(poolB);
    *bAlloc = 't';

    const GrBlockAllocator::Block* allocOwner = poolB->findOwningBlock(bAlloc);

    REPORTER_ASSERT(r, block_count(poolA) == 4);
    REPORTER_ASSERT(r, block_count(poolB) == 3);

    size_t aSize = poolA->totalSize();
    size_t bSize = poolB->totalSize();
    size_t theftSize = bSize - poolB->preallocSize();

    // This steal should move B's 2 heap blocks to A, bringing A to 6 and B to just its head
    poolA->stealHeapBlocks(poolB.allocator());
    REPORTER_ASSERT(r, block_count(poolA) == 6);
    REPORTER_ASSERT(r, block_count(poolB) == 1);

    REPORTER_ASSERT(r, poolB->preallocSize() == poolB->totalSize());
    REPORTER_ASSERT(r, poolA->totalSize() == aSize + theftSize);

    REPORTER_ASSERT(r, *bAlloc == 't');
    REPORTER_ASSERT(r, (uintptr_t) poolA->findOwningBlock(bAlloc) == (uintptr_t) allocOwner);
    REPORTER_ASSERT(r, !poolB->findOwningBlock(bAlloc));

    // Redoing the steal now that B is just a head block should be a no-op
    poolA->stealHeapBlocks(poolB.allocator());
    REPORTER_ASSERT(r, block_count(poolA) == 6);
    REPORTER_ASSERT(r, block_count(poolB) == 1);
}

// These tests ensure that the allocation padding mechanism works as intended
struct TestMeta {
    int fX1;


@@ -30,11 +30,6 @@ struct C {
    static int gInstCnt;
};
int C::gInstCnt = 0;

struct D {
    int fID;
};

}

// Checks that the allocator has the correct count, etc and that the element IDs are correct.
@@ -165,72 +160,6 @@ static void run_allocator_test(GrTAllocator<C, N>* allocator, skiatest::Reporter
    check_allocator(allocator, 100, 10, reporter);
}

template<int N1, int N2>
static void run_concat_test(skiatest::Reporter* reporter, int aCount, int bCount) {
    GrTAllocator<C, N1> listA;
    GrTAllocator<C, N2> listB;

    for (int i = 0; i < aCount; ++i) {
        listA.emplace_back(i);
    }
    for (int i = 0; i < bCount; ++i) {
        listB.emplace_back(aCount + i);
    }

    // Sanity check
    REPORTER_ASSERT(reporter, listA.count() == aCount && listB.count() == bCount);
    REPORTER_ASSERT(reporter, C::gInstCnt == aCount + bCount);

    // Concatenate B into A and verify.
    listA.concat(std::move(listB));
    REPORTER_ASSERT(reporter, listA.count() == aCount + bCount);
    // GrTAllocator guarantees the moved list is empty, but clang-tidy doesn't know about it; in
    // practice we won't really be using moved lists so this won't pollute our main code base with
    // lots of warning disables.
    REPORTER_ASSERT(reporter, listB.count() == 0); // NOLINT(bugprone-use-after-move)
    REPORTER_ASSERT(reporter, C::gInstCnt == aCount + bCount);

    int i = 0;
    for (const C& item : listA.items()) {
        // By construction of A and B originally, the concatenated id sequence is continuous
        REPORTER_ASSERT(reporter, i == item.fID);
        i++;
    }
    REPORTER_ASSERT(reporter, i == (aCount + bCount));
}

template<int N1, int N2>
static void run_concat_trivial_test(skiatest::Reporter* reporter, int aCount, int bCount) {
    static_assert(std::is_trivially_copyable<D>::value);

    // This is similar to run_concat_test(), except since D is trivial we can't verify the instance
    // counts that are tracked via ctor/dtor.
    GrTAllocator<D, N1> listA;
    GrTAllocator<D, N2> listB;
    for (int i = 0; i < aCount; ++i) {
        listA.push_back({i});
    }
    for (int i = 0; i < bCount; ++i) {
        listB.push_back({aCount + i});
    }

    // Sanity check
    REPORTER_ASSERT(reporter, listA.count() == aCount && listB.count() == bCount);

    // Concatenate B into A and verify.
    listA.concat(std::move(listB));
    REPORTER_ASSERT(reporter, listA.count() == aCount + bCount);
    REPORTER_ASSERT(reporter, listB.count() == 0); // NOLINT(bugprone-use-after-move): see above

    int i = 0;
    for (const D& item : listA.items()) {
        // By construction of A and B originally, the concatenated id sequence is continuous
        REPORTER_ASSERT(reporter, i == item.fID);
        i++;
    }
    REPORTER_ASSERT(reporter, i == (aCount + bCount));
}

template<int N>
static void run_reserve_test(skiatest::Reporter* reporter) {
@@ -313,14 +242,4 @@ DEF_TEST(GrTAllocator, reporter) {
    run_reserve_test<3>(reporter);
    run_reserve_test<4>(reporter);
    run_reserve_test<5>(reporter);

    run_concat_test<1, 1>(reporter, 10, 10);
    run_concat_test<5, 1>(reporter, 50, 10);
    run_concat_test<1, 5>(reporter, 10, 50);
    run_concat_test<5, 5>(reporter, 100, 100);

    run_concat_trivial_test<1, 1>(reporter, 10, 10);
    run_concat_trivial_test<5, 1>(reporter, 50, 10);
    run_concat_trivial_test<1, 5>(reporter, 10, 50);
    run_concat_trivial_test<5, 5>(reporter, 100, 100);
}