Make Gr[Op]MemoryPool allocate itself into its initial block.
Saves one heap allocation per DDL recorded.

Change-Id: I9393aedc3b48031cd2ea5f0160b107915077099a
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/259419
Commit-Queue: Brian Salomon <bsalomon@google.com>
Reviewed-by: Michael Ludwig <michaelludwig@google.com>
parent bb59dfa9e3
commit 6986c6539e
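The core of the change: instead of heap-allocating a GrMemoryPool and then having the pool heap-allocate its first (prealloc) block, GrMemoryPool::Make() now makes a single allocation that holds the pool object followed immediately by its prealloc space, and placement-news the pool into the front of it. A minimal sketch of the pattern, using made-up names rather than Skia's actual types:

#include <cstddef>
#include <memory>
#include <new>

// Round n up to the strictest fundamental alignment, mirroring what
// GrSizeAlignUp(..., kAlignment) does in the patch below.
constexpr size_t alignUp(size_t n) {
    constexpr size_t a = alignof(std::max_align_t);
    return (n + a - 1) & ~(a - 1);
}

class Pool {
public:
    static std::unique_ptr<Pool> Make(size_t preallocSize) {
        // One heap allocation holds the Pool object and its prealloc block.
        constexpr size_t kPoolSize = alignUp(sizeof(Pool));
        void* mem = ::operator new(kPoolSize + preallocSize);
        char* preallocStart = static_cast<char*>(mem) + kPoolSize;
        return std::unique_ptr<Pool>(new (mem) Pool(preallocStart, preallocSize));
    }

    // unique_ptr's deleter ends up here; free the combined allocation.
    void operator delete(void* p) { ::operator delete(p); }

private:
    Pool(char* start, size_t size) : fStart(start), fFree(size) {}

    char*  fStart;  // first byte of the embedded prealloc block
    size_t fFree;   // bytes still unused in that block
};

Destroying the unique_ptr runs the pool's destructor and then the class-scope operator delete, which frees the whole co-allocated region in one call, matching the single operator new in Make().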
@@ -21,12 +21,16 @@
 struct A {
     int gStuff[10];
 #if OVERRIDE_NEW
-    void* operator new (size_t size) { return gBenchPool.allocate(size); }
-    void operator delete (void* mem) { if (mem) { return gBenchPool.release(mem); } }
+    void* operator new(size_t size) { return gBenchPool->allocate(size); }
+    void operator delete(void* mem) {
+        if (mem) {
+            return gBenchPool->release(mem);
+        }
+    }
 #endif
-    static GrMemoryPool gBenchPool;
+    static std::unique_ptr<GrMemoryPool> gBenchPool;
 };
-GrMemoryPool A::gBenchPool(10 * (1 << 10), 10 * (1 << 10));
+std::unique_ptr<GrMemoryPool> A::gBenchPool = GrMemoryPool::Make(10 * (1 << 10), 10 * (1 << 10));
 
 /**
  * This benchmark creates and deletes objects in stack order
@@ -83,12 +87,16 @@ private:
 struct B {
     int gStuff[10];
 #if OVERRIDE_NEW
-    void* operator new (size_t size) { return gBenchPool.allocate(size); }
-    void operator delete (void* mem) { if (mem) { return gBenchPool.release(mem); } }
+    void* operator new(size_t size) { return gBenchPool->allocate(size); }
+    void operator delete(void* mem) {
+        if (mem) {
+            return gBenchPool->release(mem);
+        }
+    }
 #endif
-    static GrMemoryPool gBenchPool;
+    static std::unique_ptr<GrMemoryPool> gBenchPool;
 };
-GrMemoryPool B::gBenchPool(10 * (1 << 10), 10 * (1 << 10));
+std::unique_ptr<GrMemoryPool> B::gBenchPool = GrMemoryPool::Make(10 * (1 << 10), 10 * (1 << 10));
 
 /**
  * This benchmark creates objects and deletes them in random order
@@ -128,12 +136,16 @@ private:
 struct C {
     int gStuff[10];
 #if OVERRIDE_NEW
-    void* operator new (size_t size) { return gBenchPool.allocate(size); }
-    void operator delete (void* mem) { if (mem) { return gBenchPool.release(mem); } }
+    void* operator new(size_t size) { return gBenchPool->allocate(size); }
+    void operator delete(void* mem) {
+        if (mem) {
+            return gBenchPool->release(mem);
+        }
+    }
 #endif
-    static GrMemoryPool gBenchPool;
+    static std::unique_ptr<GrMemoryPool> gBenchPool;
 };
-GrMemoryPool C::gBenchPool(10 * (1 << 10), 10 * (1 << 10));
+std::unique_ptr<GrMemoryPool> C::gBenchPool = GrMemoryPool::Make(10 * (1 << 10), 10 * (1 << 10));
 
 /**
  * This benchmark creates objects and deletes them in queue order
@@ -18,26 +18,26 @@
     #define VALIDATE
 #endif
 
-void GrOpMemoryPool::release(std::unique_ptr<GrOp> op) {
-    GrOp* tmp = op.release();
-    SkASSERT(tmp);
-    tmp->~GrOp();
-    fMemoryPool.release(tmp);
+std::unique_ptr<GrMemoryPool> GrMemoryPool::Make(size_t preallocSize, size_t minAllocSize) {
+    preallocSize = std::max(preallocSize, kMinAllocationSize);
+    static constexpr size_t kPoolSize = GrSizeAlignUp(sizeof(GrMemoryPool), kAlignment);
+    size_t size = kPoolSize + preallocSize;
+    void* mem = operator new(size);
+    void* preallocStart = static_cast<char*>(mem) + kPoolSize;
+    return std::unique_ptr<GrMemoryPool>(
+            new (mem) GrMemoryPool(preallocStart, preallocSize, minAllocSize));
 }
 
-constexpr size_t GrMemoryPool::kSmallestMinAllocSize;
-
-GrMemoryPool::GrMemoryPool(size_t preallocSize, size_t minAllocSize) {
+GrMemoryPool::GrMemoryPool(void* preallocStart, size_t preallocSize, size_t minAllocSize) {
     SkDEBUGCODE(fAllocationCnt = 0);
     SkDEBUGCODE(fAllocBlockCnt = 0);
 
-    minAllocSize = SkTMax<size_t>(GrSizeAlignUp(minAllocSize, kAlignment), kSmallestMinAllocSize);
-    preallocSize = SkTMax<size_t>(GrSizeAlignUp(preallocSize, kAlignment), minAllocSize);
+    minAllocSize = std::max(minAllocSize, kMinAllocationSize);
 
     fMinAllocSize = minAllocSize;
    fSize = 0;
 
-    fHead = CreateBlock(preallocSize);
+    fHead = InitBlock(preallocStart, preallocSize);
     fTail = fHead;
     fHead->fNext = nullptr;
     fHead->fPrev = nullptr;
@@ -62,7 +62,7 @@ GrMemoryPool::~GrMemoryPool() {
     SkASSERT(0 == fAllocationCnt);
     SkASSERT(fHead == fTail);
     SkASSERT(0 == fHead->fLiveCount);
-    DeleteBlock(fHead);
+    SkASSERT(kAssignedMarker == fHead->fBlockSentinal);
 };
 
 void* GrMemoryPool::allocate(size_t size) {
@@ -71,7 +71,7 @@ void* GrMemoryPool::allocate(size_t size) {
     size = GrSizeAlignUp(size, kAlignment);
     if (fTail->fFreeSize < size) {
         size_t blockSize = size + kHeaderSize;
-        blockSize = SkTMax<size_t>(blockSize, fMinAllocSize);
+        blockSize = std::max(blockSize, fMinAllocSize);
         BlockHeader* block = CreateBlock(blockSize);
 
         block->fPrev = fTail;
@@ -149,11 +149,13 @@ void GrMemoryPool::release(void* p) {
 }
 
 GrMemoryPool::BlockHeader* GrMemoryPool::CreateBlock(size_t blockSize) {
-    blockSize = SkTMax<size_t>(blockSize, kHeaderSize);
-    BlockHeader* block =
-            reinterpret_cast<BlockHeader*>(sk_malloc_throw(blockSize));
-    // we assume malloc gives us aligned memory
-    SkASSERT(!(reinterpret_cast<intptr_t>(block) % kAlignment));
+    blockSize = std::max(blockSize, kHeaderSize);
+    return InitBlock(sk_malloc_throw(blockSize), blockSize);
+}
+
+auto GrMemoryPool::InitBlock(void* mem, size_t blockSize) -> BlockHeader* {
+    SkASSERT(!(reinterpret_cast<intptr_t>(mem) % kAlignment));
+    auto block = reinterpret_cast<BlockHeader*>(mem);
     SkDEBUGCODE(block->fBlockSentinal = kAssignedMarker);
     block->fLiveCount = 0;
     block->fFreeSize = blockSize - kHeaderSize;
@@ -215,3 +217,36 @@ void GrMemoryPool::validate() {
     SkASSERT(fAllocBlockCnt != 0 || fSize == 0);
 #endif
 }
+
+////////////////////////////////////////////////////////////////////////////////////////
+
+static constexpr size_t kOpPoolSize =
+        GrSizeAlignUp(sizeof(GrOpMemoryPool), GrMemoryPool::kAlignment);
+
+GrOpMemoryPool::~GrOpMemoryPool() { this->pool()->~GrMemoryPool(); }
+
+std::unique_ptr<GrOpMemoryPool> GrOpMemoryPool::Make(size_t preallocSize, size_t minAllocSize) {
+    preallocSize = std::max(preallocSize, GrMemoryPool::kMinAllocationSize);
+    static constexpr size_t kOpPoolSize =
+            GrSizeAlignUp(sizeof(GrOpMemoryPool), GrMemoryPool::kAlignment);
+    static constexpr size_t kPoolSize =
+            GrSizeAlignUp(sizeof(GrMemoryPool), GrMemoryPool::kAlignment);
+    size_t size = kOpPoolSize + kPoolSize + preallocSize;
+    void* mem = operator new(size);
+    void* memPoolPtr = static_cast<char*>(mem) + kOpPoolSize;
+    void* preallocStart = static_cast<char*>(mem) + kOpPoolSize + kPoolSize;
+    new (memPoolPtr) GrMemoryPool(preallocStart, preallocSize, minAllocSize);
+    return std::unique_ptr<GrOpMemoryPool>(new (mem) GrOpMemoryPool());
+}
+
+void GrOpMemoryPool::release(std::unique_ptr<GrOp> op) {
+    GrOp* tmp = op.release();
+    SkASSERT(tmp);
+    tmp->~GrOp();
+    this->pool()->release(tmp);
+}
+
+GrMemoryPool* GrOpMemoryPool::pool() const {
+    auto addr = reinterpret_cast<const char*>(this) + kOpPoolSize;
+    return reinterpret_cast<GrMemoryPool*>(const_cast<char*>(addr));
+}
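GrOpMemoryPool::Make goes one step further and packs three regions into a single allocation: the GrOpMemoryPool object, the GrMemoryPool object, and the memory pool's prealloc space; GrOpMemoryPool::pool() then recovers the embedded GrMemoryPool by re-applying the same fixed offset. A small stand-alone illustration of that offset arithmetic follows; Outer and Inner are hypothetical stand-ins for the Skia types, not the real classes:

#include <cassert>
#include <cstddef>
#include <new>

// Outer plays the role of GrOpMemoryPool, Inner the role of GrMemoryPool.
// The aligned sizes correspond to kOpPoolSize / kPoolSize in the patch.
struct Inner { char scratch[32]; };
struct Outer { char tag[8]; };

constexpr size_t alignUp(size_t n) {
    constexpr size_t a = alignof(std::max_align_t);
    return (n + a - 1) & ~(a - 1);
}
constexpr size_t kOuterSize = alignUp(sizeof(Outer));
constexpr size_t kInnerSize = alignUp(sizeof(Inner));

int main() {
    constexpr size_t kPrealloc = 1 << 10;

    // One allocation, three consecutive regions: Outer | Inner | prealloc space.
    void* mem = ::operator new(kOuterSize + kInnerSize + kPrealloc);
    Outer* outer = new (mem) Outer();
    Inner* inner = new (static_cast<char*>(mem) + kOuterSize) Inner();
    void*  preallocStart = static_cast<char*>(mem) + kOuterSize + kInnerSize;

    // pool()-style recovery: from the Outer object, Inner lives at a fixed offset.
    Inner* recovered = reinterpret_cast<Inner*>(
            reinterpret_cast<char*>(outer) + kOuterSize);
    assert(recovered == inner);
    assert(preallocStart == reinterpret_cast<char*>(inner) + kInnerSize);

    // Tear down in reverse order of construction, then free the one block.
    inner->~Inner();
    outer->~Outer();
    ::operator delete(mem);
    return 0;
}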
@@ -21,23 +21,27 @@
  * requests. It is optimized for allocate / release speed over memory
  * efficiency. The interface is designed to be used to implement operator new
  * and delete overrides. All allocations are expected to be released before the
- * pool's destructor is called. Allocations will be 8-byte aligned.
+ * pool's destructor is called. Allocations will be aligned to
+ * sizeof(std::max_align_t).
  */
 class GrMemoryPool {
 public:
+    // Guaranteed alignment of pointer returned by allocate().
+    static constexpr size_t kAlignment = alignof(std::max_align_t);
+    // Minimum size this class will allocate at once.
+    static constexpr size_t kMinAllocationSize = 1 << 10;
+
     /**
      * Prealloc size is the amount of space to allocate at pool creation
      * time and keep around until pool destruction. The min alloc size is
      * the smallest allowed size of additional allocations. Both sizes are
-     * adjusted to ensure that:
-     *   1. they are are 8-byte aligned
-     *   2. minAllocSize >= kSmallestMinAllocSize
-     *   3. preallocSize >= minAllocSize
+     * adjusted to ensure that they are at least as large as kMinAllocationSize.
      *
-     * Both sizes is what the pool will end up allocating from the system, and
+     * Both sizes are what the pool will end up allocating from the system, and
      * portions of the allocated memory is used for internal bookkeeping.
      */
-    GrMemoryPool(size_t preallocSize, size_t minAllocSize);
+    static std::unique_ptr<GrMemoryPool> Make(size_t preallocSize, size_t minAllocSize);
+    void operator delete(void* p) { ::operator delete(p); }
 
     ~GrMemoryPool();
 
@@ -66,15 +70,14 @@ public:
      */
     size_t preallocSize() const { return fHead->fSize; }
 
-    /**
-     * Minimum value of minAllocSize constructor argument.
-     */
-    constexpr static size_t kSmallestMinAllocSize = 1 << 10;
-
 private:
+    GrMemoryPool(void* preallocStart, size_t preallocSize, size_t minAllocSize);
+
     struct BlockHeader;
 
     static BlockHeader* CreateBlock(size_t size);
+    static BlockHeader* InitBlock(void* mem, size_t blockSize);
 
     static void DeleteBlock(BlockHeader* block);
 
@@ -115,39 +118,37 @@ private:
     SkTHashSet<int32_t> fAllocatedIDs;
 #endif
 
-protected:
-    enum {
-        // We assume this alignment is good enough for everybody.
-        kAlignment = 8,
-        kHeaderSize = GrSizeAlignUp(sizeof(BlockHeader), kAlignment),
-        kPerAllocPad = GrSizeAlignUp(sizeof(AllocHeader), kAlignment),
-    };
+    friend class GrOpMemoryPool;
+
+    static constexpr size_t kHeaderSize = GrSizeAlignUp(sizeof(BlockHeader), kAlignment);
+    static constexpr size_t kPerAllocPad = GrSizeAlignUp(sizeof(AllocHeader), kAlignment);
 };
 
 class GrOp;
 
 class GrOpMemoryPool {
 public:
-    GrOpMemoryPool(size_t preallocSize, size_t minAllocSize)
-            : fMemoryPool(preallocSize, minAllocSize) {
-    }
+    static std::unique_ptr<GrOpMemoryPool> Make(size_t preallocSize, size_t minAllocSize);
+    void operator delete(void* p) { ::operator delete(p); }
+
+    ~GrOpMemoryPool();
 
     template <typename Op, typename... OpArgs>
     std::unique_ptr<Op> allocate(OpArgs&&... opArgs) {
-        char* mem = (char*) fMemoryPool.allocate(sizeof(Op));
+        auto mem = this->pool()->allocate(sizeof(Op));
         return std::unique_ptr<Op>(new (mem) Op(std::forward<OpArgs>(opArgs)...));
     }
 
-    void* allocate(size_t size) {
-        return fMemoryPool.allocate(size);
-    }
+    void* allocate(size_t size) { return this->pool()->allocate(size); }
 
     void release(std::unique_ptr<GrOp> op);
 
-    bool isEmpty() const { return fMemoryPool.isEmpty(); }
+    bool isEmpty() const { return this->pool()->isEmpty(); }
 
 private:
-    GrMemoryPool fMemoryPool;
+    GrMemoryPool* pool() const;
+
+    GrOpMemoryPool() = default;
 };
 
 #endif
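With the header changes above, callers no longer construct a GrMemoryPool directly; they go through the factory and hold the pool by unique_ptr. A brief usage sketch; the include path and the sizes are illustrative, not prescribed by the change:

#include <memory>

#include "GrMemoryPool.h"  // illustrative include; use the project's actual path

void useThePool() {
    // One heap allocation creates the pool together with its 4 KiB prealloc block.
    std::unique_ptr<GrMemoryPool> pool = GrMemoryPool::Make(/*preallocSize=*/4096,
                                                            /*minAllocSize=*/4096);

    // Allocations come back aligned to alignof(std::max_align_t).
    void* p = pool->allocate(64);

    // Every allocation must be released before the pool is destroyed.
    pool->release(p);
}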
@@ -116,7 +116,7 @@ public:
 #endif
 
     GrMemoryPool* pool() const {
-        static GrMemoryPool* gPool = new GrMemoryPool(4096, 4096);
+        static GrMemoryPool* gPool = GrMemoryPool::Make(4096, 4096).release();
         return gPool;
     }
 };
@@ -122,7 +122,7 @@ GrOpMemoryPool* GrRecordingContext::opMemoryPool() {
         // DDL TODO: should the size of the memory pool be decreased in DDL mode? CPU-side memory
         // consumed in DDL mode vs. normal mode for a single skp might be a good metric of wasted
         // memory.
-        fOpMemoryPool = std::make_unique<GrOpMemoryPool>(16384, 16384);
+        fOpMemoryPool = GrOpMemoryPool::Make(16384, 16384);
     }
 
     return fOpMemoryPool.get();
@ -27,7 +27,7 @@ public:
|
|||||||
virtual ~A() {}
|
virtual ~A() {}
|
||||||
|
|
||||||
void* operator new(size_t size) {
|
void* operator new(size_t size) {
|
||||||
if (!gPool.get()) {
|
if (!gPool) {
|
||||||
return ::operator new(size);
|
return ::operator new(size);
|
||||||
} else {
|
} else {
|
||||||
return gPool->allocate(size);
|
return gPool->allocate(size);
|
||||||
@ -35,7 +35,7 @@ public:
|
|||||||
}
|
}
|
||||||
|
|
||||||
void operator delete(void* p) {
|
void operator delete(void* p) {
|
||||||
if (!gPool.get()) {
|
if (!gPool) {
|
||||||
::operator delete(p);
|
::operator delete(p);
|
||||||
} else {
|
} else {
|
||||||
return gPool->release(p);
|
return gPool->release(p);
|
||||||
@ -45,13 +45,10 @@ public:
|
|||||||
static A* Create(SkRandom* r);
|
static A* Create(SkRandom* r);
|
||||||
|
|
||||||
static void SetAllocator(size_t preallocSize, size_t minAllocSize) {
|
static void SetAllocator(size_t preallocSize, size_t minAllocSize) {
|
||||||
GrMemoryPool* pool = new GrMemoryPool(preallocSize, minAllocSize);
|
gPool = GrMemoryPool::Make(preallocSize, minAllocSize);
|
||||||
gPool.reset(pool);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void ResetAllocator() {
|
static void ResetAllocator() { gPool.reset(); }
|
||||||
gPool.reset(nullptr);
|
|
||||||
}
|
|
||||||
|
|
||||||
private:
|
private:
|
||||||
static std::unique_ptr<GrMemoryPool> gPool;
|
static std::unique_ptr<GrMemoryPool> gPool;
|
||||||
@ -246,9 +243,9 @@ private:
|
|||||||
};
|
};
|
||||||
|
|
||||||
DEF_TEST(GrMemoryPoolAPI, reporter) {
|
DEF_TEST(GrMemoryPoolAPI, reporter) {
|
||||||
constexpr size_t kSmallestMinAllocSize = GrMemoryPool::kSmallestMinAllocSize;
|
constexpr size_t kSmallestMinAllocSize = GrMemoryPool::kMinAllocationSize;
|
||||||
|
|
||||||
// Allocates memory until pool adds a new block (pool.size() changes).
|
// Allocates memory until pool adds a new block (pool->size() changes).
|
||||||
auto allocateMemory = [](GrMemoryPool& pool, AutoPoolReleaser& r) {
|
auto allocateMemory = [](GrMemoryPool& pool, AutoPoolReleaser& r) {
|
||||||
size_t origPoolSize = pool.size();
|
size_t origPoolSize = pool.size();
|
||||||
while (pool.size() == origPoolSize) {
|
while (pool.size() == origPoolSize) {
|
||||||
@ -256,65 +253,58 @@ DEF_TEST(GrMemoryPoolAPI, reporter) {
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
// Effective prealloc space capacity is >= kSmallestMinAllocSize.
|
// Effective prealloc space capacity is >= kMinAllocationSize.
|
||||||
{
|
{
|
||||||
GrMemoryPool pool(0, 0);
|
auto pool = GrMemoryPool::Make(0, 0);
|
||||||
REPORTER_ASSERT(reporter, pool.preallocSize() == kSmallestMinAllocSize);
|
REPORTER_ASSERT(reporter, pool->preallocSize() == kSmallestMinAllocSize);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Effective prealloc space capacity is >= minAllocSize.
|
// Effective block size capacity >= kMinAllocationSize.
|
||||||
{
|
{
|
||||||
constexpr size_t kMinAllocSize = kSmallestMinAllocSize * 2;
|
auto pool = GrMemoryPool::Make(kSmallestMinAllocSize, kSmallestMinAllocSize / 2);
|
||||||
GrMemoryPool pool(kSmallestMinAllocSize, kMinAllocSize);
|
AutoPoolReleaser r(*pool);
|
||||||
REPORTER_ASSERT(reporter, pool.preallocSize() == kMinAllocSize);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Effective block size capacity >= kSmallestMinAllocSize.
|
allocateMemory(*pool, r);
|
||||||
{
|
REPORTER_ASSERT(reporter, pool->size() == kSmallestMinAllocSize);
|
||||||
GrMemoryPool pool(kSmallestMinAllocSize, kSmallestMinAllocSize / 2);
|
|
||||||
AutoPoolReleaser r(pool);
|
|
||||||
|
|
||||||
allocateMemory(pool, r);
|
|
||||||
REPORTER_ASSERT(reporter, pool.size() == kSmallestMinAllocSize);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Pool allocates exactly preallocSize on creation.
|
// Pool allocates exactly preallocSize on creation.
|
||||||
{
|
{
|
||||||
constexpr size_t kPreallocSize = kSmallestMinAllocSize * 5;
|
constexpr size_t kPreallocSize = kSmallestMinAllocSize * 5;
|
||||||
GrMemoryPool pool(kPreallocSize, 0);
|
auto pool = GrMemoryPool::Make(kPreallocSize, 0);
|
||||||
REPORTER_ASSERT(reporter, pool.preallocSize() == kPreallocSize);
|
REPORTER_ASSERT(reporter, pool->preallocSize() == kPreallocSize);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Pool allocates exactly minAllocSize when it expands.
|
// Pool allocates exactly minAllocSize when it expands.
|
||||||
{
|
{
|
||||||
constexpr size_t kMinAllocSize = kSmallestMinAllocSize * 7;
|
constexpr size_t kMinAllocSize = kSmallestMinAllocSize * 7;
|
||||||
GrMemoryPool pool(0, kMinAllocSize);
|
auto pool = GrMemoryPool::Make(0, kMinAllocSize);
|
||||||
AutoPoolReleaser r(pool);
|
AutoPoolReleaser r(*pool);
|
||||||
|
|
||||||
allocateMemory(pool, r);
|
allocateMemory(*pool, r);
|
||||||
REPORTER_ASSERT(reporter, pool.size() == kMinAllocSize);
|
REPORTER_ASSERT(reporter, pool->size() == kMinAllocSize);
|
||||||
|
|
||||||
allocateMemory(pool, r);
|
allocateMemory(*pool, r);
|
||||||
REPORTER_ASSERT(reporter, pool.size() == 2 * kMinAllocSize);
|
REPORTER_ASSERT(reporter, pool->size() == 2 * kMinAllocSize);
|
||||||
}
|
}
|
||||||
|
|
||||||
// When asked to allocate amount > minAllocSize, pool allocates larger block
|
// When asked to allocate amount > minAllocSize, pool allocates larger block
|
||||||
// to accommodate all internal structures.
|
// to accommodate all internal structures.
|
||||||
{
|
{
|
||||||
constexpr size_t kMinAllocSize = kSmallestMinAllocSize * 2;
|
constexpr size_t kMinAllocSize = kSmallestMinAllocSize * 2;
|
||||||
GrMemoryPool pool(kSmallestMinAllocSize, kMinAllocSize);
|
auto pool = GrMemoryPool::Make(kSmallestMinAllocSize, kMinAllocSize);
|
||||||
AutoPoolReleaser r(pool);
|
AutoPoolReleaser r(*pool);
|
||||||
|
|
||||||
REPORTER_ASSERT(reporter, pool.size() == 0);
|
REPORTER_ASSERT(reporter, pool->size() == 0);
|
||||||
|
|
||||||
constexpr size_t hugeSize = 10 * kMinAllocSize;
|
constexpr size_t hugeSize = 10 * kMinAllocSize;
|
||||||
r.add(pool.allocate(hugeSize));
|
r.add(pool->allocate(hugeSize));
|
||||||
REPORTER_ASSERT(reporter, pool.size() > hugeSize);
|
REPORTER_ASSERT(reporter, pool->size() > hugeSize);
|
||||||
|
|
||||||
// Block size allocated to accommodate huge request doesn't include any extra
|
// Block size allocated to accommodate huge request doesn't include any extra
|
||||||
// space, so next allocation request allocates a new block.
|
// space, so next allocation request allocates a new block.
|
||||||
size_t hugeBlockSize = pool.size();
|
size_t hugeBlockSize = pool->size();
|
||||||
r.add(pool.allocate(0));
|
r.add(pool->allocate(0));
|
||||||
REPORTER_ASSERT(reporter, pool.size() == hugeBlockSize + kMinAllocSize);
|
REPORTER_ASSERT(reporter, pool->size() == hugeBlockSize + kMinAllocSize);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||