Allow SkSL compilers to reuse SkSL Pools without reallocating.
When a Program is freed, its Pool is no longer disposed of immediately; instead it is passed to Pool::Recycle, which holds onto it. The next call to Pool::Create satisfies the request by simply handing back the recycled pool. Only one pool is kept in recycle storage at a time; recycling several pools in a row frees all but one of them. To avoid holding onto Pool memory indefinitely, the recycle storage is cleaned up whenever a Compiler is destroyed.

Change-Id: I21c1ccde84507e344102d05506d869e62ca095a6
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/329175
Reviewed-by: Brian Osman <brianosman@google.com>
Reviewed-by: Ethan Nicholas <ethannicholas@google.com>
Commit-Queue: John Stiles <johnstiles@google.com>
Auto-Submit: John Stiles <johnstiles@google.com>
parent 53281c7121
commit 2d68ea3fbf
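The sketch below, with a hypothetical caller name, illustrates the intended pool lifecycle implied by the commit message and the hunks that follow: creation and thread attachment are now separate steps, and disposal goes through Pool::Recycle rather than the Pool destructor. It is a simplified illustration, not Skia code; the real call sites are Compiler::convertProgram and Program's destructor, shown in the hunks below.

    #include <memory>
    #include <utility>
    #include "src/sksl/SkSLPool.h"

    // Illustrative sketch only: "compileOneProgram" is a hypothetical caller.
    void compileOneProgram() {
        std::unique_ptr<SkSL::Pool> pool = SkSL::Pool::Create();  // may hand back a recycled pool
        pool->attachToThread();   // IRNode allocations on this thread now come from the pool

        // ... build, optimize, and eventually destroy the program's IRNodes ...

        pool->detachFromThread();              // stop serving allocations from this pool
        SkSL::Pool::Recycle(std::move(pool));  // stash the pool for the next Pool::Create() call
    }

    // Cleanup path, as in Compiler::~Compiler below: free any stashed pool.
    //     SkSL::Pool::FreeRecycledPool();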
@@ -228,7 +228,9 @@ Compiler::Compiler(Flags flags)
     fFragmentModule = this->parseModule(Program::kFragment_Kind, MODULE_DATA(frag), fGPUModule);
 }
 
-Compiler::~Compiler() {}
+Compiler::~Compiler() {
+    Pool::FreeRecycledPool();
+}
 
 const ParsedModule& Compiler::loadGeometryModule() {
     if (!fGeometryModule.fSymbols) {
@@ -1550,7 +1552,8 @@ std::unique_ptr<Program> Compiler::convertProgram(
 
     // Enable node pooling while converting and optimizing the program for a performance boost.
     // The Program will take ownership of the pool.
-    std::unique_ptr<Pool> pool = Pool::CreatePoolOnThread(2000);
+    std::unique_ptr<Pool> pool = Pool::Create();
+    pool->attachToThread();
     IRGenerator::IRBundle ir =
             fIRGenerator->convertProgram(kind, &settings, baseModule, /*isBuiltinCode=*/false,
                                          textPtr->c_str(), textPtr->size(), externalValues);
@@ -7,6 +7,7 @@
 
 #include "src/sksl/SkSLPool.h"
 
+#include "include/private/SkMutex.h"
 #include "src/sksl/ir/SkSLIRNode.h"
 
 #define VLOG(...) // printf(__VA_ARGS__)
@@ -79,6 +80,12 @@ static void set_thread_local_pool_data(PoolData* poolData) {
 
 #endif
 
+static Pool* sRecycledPool; // GUARDED_BY recycled_pool_mutex
+static SkMutex& recycled_pool_mutex() {
+    static SkMutex* mutex = new SkMutex;
+    return *mutex;
+}
+
 static PoolData* create_pool_data(int nodesInPool) {
     // Create a PoolData structure with extra space at the end for additional IRNode data.
     int numExtraIRNodes = nodesInPool - 1;
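A side note on the locking added above: recycled_pool_mutex() hands out a mutex that is heap-allocated on first use and never destroyed, presumably so the lock stays valid regardless of static-destruction order at process exit. A minimal standalone sketch of the same pattern with standard-library types (generic names, not Skia code):

    #include <mutex>

    // Function-local "leaked singleton": created on first use and never freed,
    // so it remains valid even for callers that run very late in process teardown.
    static std::mutex& recycle_mutex() {
        static std::mutex* mutex = new std::mutex;  // intentionally leaked
        return *mutex;
    }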
@@ -102,44 +109,42 @@ Pool::~Pool() {
         set_thread_local_pool_data(nullptr);
     }
 
-    // In debug mode, report any leaked nodes.
-#ifdef SK_DEBUG
-    ptrdiff_t nodeCount = fData->nodeCount();
-    std::vector<bool> freed(nodeCount);
-    for (IRNodeData* node = fData->fFreeListHead; node; node = node->fFreeListNext) {
-        ptrdiff_t nodeIndex = fData->nodeIndex(node);
-        freed[nodeIndex] = true;
-    }
-    bool foundLeaks = false;
-    for (int index = 0; index < nodeCount; ++index) {
-        if (!freed[index]) {
-            IRNode* leak = reinterpret_cast<IRNode*>(fData->fNodes[index].fBuffer);
-            SkDebugf("Node %d leaked: %s\n", index, leak->description().c_str());
-            foundLeaks = true;
-        }
-    }
-    if (foundLeaks) {
-        SkDEBUGFAIL("leaking SkSL pool nodes; if they are later freed, this will likely be fatal");
-    }
-#endif
+    this->checkForLeaks();
 
     VLOG("DELETE Pool:0x%016llX\n", (uint64_t)fData);
     free(fData);
 }
 
-std::unique_ptr<Pool> Pool::CreatePoolOnThread(int nodesInPool) {
-    auto pool = std::unique_ptr<Pool>(new Pool);
-    pool->fData = create_pool_data(nodesInPool);
-    pool->fData->fFreeListHead = &pool->fData->fNodes[0];
-    VLOG("CREATE Pool:0x%016llX\n", (uint64_t)pool->fData);
-    pool->attachToThread();
+std::unique_ptr<Pool> Pool::Create() {
+    constexpr int kNodesInPool = 2000;
+
+    SkAutoMutexExclusive lock(recycled_pool_mutex());
+    std::unique_ptr<Pool> pool;
+    if (sRecycledPool) {
+        pool = std::unique_ptr<Pool>(sRecycledPool);
+        sRecycledPool = nullptr;
+        VLOG("REUSE Pool:0x%016llX\n", (uint64_t)pool->fData);
+    } else {
+        pool = std::unique_ptr<Pool>(new Pool);
+        pool->fData = create_pool_data(kNodesInPool);
+        pool->fData->fFreeListHead = &pool->fData->fNodes[0];
+        VLOG("CREATE Pool:0x%016llX\n", (uint64_t)pool->fData);
+    }
     return pool;
 }
 
-void Pool::detachFromThread() {
-    VLOG("DETACH Pool:0x%016llX\n", (uint64_t)get_thread_local_pool_data());
-    SkASSERT(get_thread_local_pool_data() != nullptr);
-    set_thread_local_pool_data(nullptr);
+void Pool::Recycle(std::unique_ptr<Pool> pool) {
+    if (pool) {
+        pool->checkForLeaks();
+    }
+
+    SkAutoMutexExclusive lock(recycled_pool_mutex());
+    if (sRecycledPool) {
+        delete sRecycledPool;
+    }
+
+    VLOG("STASH Pool:0x%016llX\n", pool ? (uint64_t)pool->fData : 0ull);
+    sRecycledPool = pool.release();
 }
 
 void Pool::attachToThread() {
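A behavioral consequence of the single-slot cache above: only the most recently recycled pool is kept, and any pool already stashed is deleted when another one arrives. A hypothetical sequence (the variable names are illustrative, not from the source):

    std::unique_ptr<SkSL::Pool> poolA = SkSL::Pool::Create();  // fresh allocation
    std::unique_ptr<SkSL::Pool> poolB = SkSL::Pool::Create();  // fresh allocation

    SkSL::Pool::Recycle(std::move(poolA));  // poolA is stashed in sRecycledPool
    SkSL::Pool::Recycle(std::move(poolB));  // poolA is deleted; poolB is stashed

    std::unique_ptr<SkSL::Pool> reused = SkSL::Pool::Create();  // hands poolB back
    SkSL::Pool::FreeRecycledPool();  // nothing is stashed at this point, so this is a no-op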
@@ -148,6 +153,12 @@ void Pool::attachToThread() {
     set_thread_local_pool_data(fData);
 }
 
+void Pool::detachFromThread() {
+    VLOG("DETACH Pool:0x%016llX\n", (uint64_t)get_thread_local_pool_data());
+    SkASSERT(get_thread_local_pool_data() != nullptr);
+    set_thread_local_pool_data(nullptr);
+}
+
 void* Pool::AllocIRNode() {
     // Is a pool attached?
     PoolData* poolData = get_thread_local_pool_data();
@@ -192,4 +203,26 @@ void Pool::FreeIRNode(void* node_v) {
     ::operator delete(node_v);
 }
 
+void Pool::checkForLeaks() {
+#ifdef SK_DEBUG
+    ptrdiff_t nodeCount = fData->nodeCount();
+    std::vector<bool> freed(nodeCount);
+    for (IRNodeData* node = fData->fFreeListHead; node; node = node->fFreeListNext) {
+        ptrdiff_t nodeIndex = fData->nodeIndex(node);
+        freed[nodeIndex] = true;
+    }
+    bool foundLeaks = false;
+    for (int index = 0; index < nodeCount; ++index) {
+        if (!freed[index]) {
+            IRNode* leak = reinterpret_cast<IRNode*>(fData->fNodes[index].fBuffer);
+            SkDebugf("Node %d leaked: %s\n", index, leak->description().c_str());
+            foundLeaks = true;
+        }
+    }
+    if (foundLeaks) {
+        SkDEBUGFAIL("leaking SkSL pool nodes; if they are later freed, this will likely be fatal");
+    }
+#endif
+}
+
 } // namespace SkSL
@@ -19,21 +19,28 @@ class Pool {
 public:
     ~Pool();
 
-    // Creates a pool to store newly-created IRNodes during program creation and attaches it to the
-    // current thread. When your program is complete, call pool->detachFromThread() to transfer
-    // ownership of those nodes. Before destroying any of the program's nodes, reattach the pool via
-    // pool->attachToThread(). It is an error to call CreatePoolOnThread if a pool is already
-    // attached to the current thread.
-    static std::unique_ptr<Pool> CreatePoolOnThread(int nodesInPool);
+    // Creates a pool to store IRNodes during program creation. Call attachToThread() to start using
+    // the pool for IRNode allocations. When your program is complete, call pool->detachFromThread()
+    // to take ownership of the pool and its nodes. Before destroying any of the program's nodes,
+    // make sure to reattach the pool by calling pool->attachToThread() again.
+    static std::unique_ptr<Pool> Create();
 
-    // Once a pool has been created and the ephemeral work has completed, detach it from its thread.
+    // Gives up ownership of a pool; conceptually, this deletes it. In practice, on some platforms,
+    // it is expensive to free and reallocate pools, so this gives us an opportunity to reuse the
+    // allocation for future CreatePoolOnThread calls.
+    static void Recycle(std::unique_ptr<Pool> pool);
+
+    // Explicitly frees a previously recycled pool (if any), reclaiming the memory.
+    static void FreeRecycledPool() { Recycle(nullptr); }
+
+    // Attaches a pool to the current thread.
+    // It is an error to call this while a pool is already attached.
+    void attachToThread();
+
+    // Once you are done creating or destroying IRNodes in the pool, detach it from the thread.
     // It is an error to call this while no pool is attached.
     void detachFromThread();
 
-    // Reattaches a pool to the current thread. It is an error to call this while a pool is already
-    // attached.
-    void attachToThread();
-
     // Retrieves a node from the thread pool. If the pool is exhausted, this will allocate a node.
     static void* AllocIRNode();
 
@@ -42,6 +49,8 @@ public:
     static void FreeIRNode(void* node_v);
 
 private:
+    void checkForLeaks();
+
     Pool() = default;  // use CreatePoolOnThread to make a pool
     PoolData* fData = nullptr;
 };
@@ -188,6 +188,10 @@ struct Program {
         fSymbols.reset();
         fModifiers.reset();
         fPool->detachFromThread();
+
+        // We are done with the pool; recycle it so that it can be reused for future program
+        // compilation.
+        Pool::Recycle(std::move(fPool));
     }
 
     const std::vector<std::unique_ptr<ProgramElement>>& elements() const { return fElements; }