Reland "Update the SkSL pool interface to take an allocation size."
This is a reland of 22ef2257c8
This reland fixes an issue with ExternalValue nodes on 32-bit builds;
these should never be pooled, because we can't control their lifetimes.
Original change's description:
> Update the SkSL pool interface to take an allocation size.
>
> Since our end goal no longer has all IRNodes ending up as the exact same
> size in memory, it makes sense for the allocator function to take in a
> desired size. This also opens the door to separate "small" and "large"
> pools, if we want to add pooling support for large but semi-common
> things like Variables.
>
> Change-Id: If3dbe31588adeedede327c5967c344a19507b6fa
> Reviewed-on: https://skia-review.googlesource.com/c/skia/+/329961
> Reviewed-by: Brian Osman <brianosman@google.com>
> Commit-Queue: Brian Osman <brianosman@google.com>
> Auto-Submit: John Stiles <johnstiles@google.com>
Change-Id: If701ae4a5e18b66d4138bc09c9c8dc1a60579c90
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/330105
Reviewed-by: Brian Osman <brianosman@google.com>
Commit-Queue: Brian Osman <brianosman@google.com>
Auto-Submit: John Stiles <johnstiles@google.com>
This commit is contained in:
parent 7b14f2f6e9
commit 270b5c04c2
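The crux of the change is visible in the hunks below: IRNode's operator new forwards the true allocation size to the pool, and the pool decides what fits. A minimal self-contained sketch of that pattern (the Pool here is a stand-in for illustration only; the real SkSL::Pool consults a thread-local freelist first, while this one always defers to the heap):

#include <cstddef>
#include <new>

// Stand-in pool: always defers to the heap so the sketch stays
// self-contained. The real pool serves small sizes from a freelist.
struct Pool {
    static void* AllocIRNode(size_t size) { return ::operator new(size); }
    static void  FreeIRNode(void* ptr)    { ::operator delete(ptr); }
};

struct IRNode {
    virtual ~IRNode() = default;

    // Forwarding the true allocation size lets the pool decide what fits,
    // instead of the caller gating on size == sizeof(IRNode).
    static void* operator new(const size_t size) { return Pool::AllocIRNode(size); }
    static void  operator delete(void* ptr)      { Pool::FreeIRNode(ptr); }
};

struct BiggerNode : IRNode { char fExtra[200]; };

int main() {
    // operator new receives sizeof(BiggerNode), so an oversized node can be
    // routed to the heap (or a future "large" pool) transparently.
    IRNode* node = new BiggerNode();
    delete node;
}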
@@ -105,6 +105,17 @@ public:
         return String("external<") + this->name() + ">";
     }
 
+    // Disable IRNode pooling on external value nodes. ExternalValue node lifetimes are controlled
+    // by the calling code; we can't guarantee that they will be destroyed before a Program is
+    // freed. (In fact, it's very unlikely that they would be.)
+    static void* operator new(const size_t size) {
+        return ::operator new(size);
+    }
+
+    static void operator delete(void* ptr) {
+        ::operator delete(ptr);
+    }
+
 private:
     using INHERITED = Symbol;
 
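The opt-out above works because class-specific allocation functions hide inherited ones. A standalone sketch of the same pattern with toy types (PooledBase and Unpooled are illustrative names, not Skia classes):

#include <cstddef>
#include <cstdio>
#include <new>

struct PooledBase {
    static void* operator new(const size_t size) {
        std::printf("pooled allocation of %zu bytes\n", size);
        return ::operator new(size);  // real code would call Pool::AllocIRNode(size)
    }
    static void operator delete(void* ptr) { ::operator delete(ptr); }
};

struct Unpooled : PooledBase {
    // Redeclaring operator new/delete hides the inherited pooled pair, so
    // Unpooled instances never touch the pool and may safely outlive it.
    static void* operator new(const size_t size) { return ::operator new(size); }
    static void operator delete(void* ptr)       { ::operator delete(ptr); }
};

int main() {
    delete new PooledBase();  // prints: goes through the pooled hook
    delete new Unpooled();    // silent: bypasses the pool entirely
}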
@@ -14,9 +14,12 @@
 
 namespace SkSL {
 
+static constexpr int kSmallNodeSize = 120;
+static constexpr int kNodesInPool = 512;
+
 namespace { struct IRNodeData {
     union {
-        uint8_t fBuffer[sizeof(IRNode)];
+        uint8_t fBuffer[kSmallNodeSize];
         IRNodeData* fFreeListNext;
     };
 }; }
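The union is what makes the freelist free of overhead: while a slot is unallocated, its own bytes store the link to the next free slot. A self-contained sketch of the idiom (slot count and size are illustrative):

#include <cstdint>
#include <cstdio>

constexpr int kSlotSize  = 120;  // mirrors kSmallNodeSize above
constexpr int kSlotCount = 4;

struct Slot {
    union {
        uint8_t fBuffer[kSlotSize];  // storage while the slot is live
        Slot*   fFreeListNext;       // link while the slot is free
    };
};

int main() {
    static Slot slots[kSlotCount];

    // Thread every slot onto a freelist; a free slot's bytes hold the link,
    // so no bookkeeping lives outside the slots themselves.
    Slot* freeHead = nullptr;
    for (Slot& s : slots) {
        s.fFreeListNext = freeHead;
        freeHead = &s;
    }

    // Popping a slot is O(1).
    Slot* slot = freeHead;
    freeHead = slot->fFreeListNext;
    std::printf("allocated slot at %p\n", static_cast<void*>(slot->fBuffer));
}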
@@ -116,8 +119,6 @@ Pool::~Pool() {
 }
 
 std::unique_ptr<Pool> Pool::Create() {
-    constexpr int kNodesInPool = 512;
-
     SkAutoMutexExclusive lock(recycled_pool_mutex());
     std::unique_ptr<Pool> pool;
    if (sRecycledPool) {
@@ -159,23 +160,26 @@ void Pool::detachFromThread() {
     set_thread_local_pool_data(nullptr);
 }
 
-void* Pool::AllocIRNode() {
+void* Pool::AllocIRNode(size_t size) {
     // Is a pool attached?
     PoolData* poolData = get_thread_local_pool_data();
     if (poolData) {
-        // Does the pool contain a free node?
-        IRNodeData* node = poolData->fFreeListHead;
-        if (node) {
-            // Yes. Take a node from the freelist.
-            poolData->fFreeListHead = node->fFreeListNext;
-            VLOG("ALLOC Pool:0x%016llX Index:%04d 0x%016llX\n",
-                 (uint64_t)poolData, poolData->nodeIndex(node), (uint64_t)node);
-            return node->fBuffer;
+        // Can the requested size fit in a pool node?
+        if (size <= kSmallNodeSize) {
+            // Does the pool contain a free node?
+            IRNodeData* node = poolData->fFreeListHead;
+            if (node) {
+                // Yes. Take a node from the freelist.
+                poolData->fFreeListHead = node->fFreeListNext;
+                VLOG("ALLOC Pool:0x%016llX Index:%04d 0x%016llX\n",
+                     (uint64_t)poolData, poolData->nodeIndex(node), (uint64_t)node);
+                return node->fBuffer;
+            }
         }
     }
 
-    // The pool is detached or full; allocate nodes using malloc.
-    void* ptr = ::operator new(sizeof(IRNode));
+    // The pool can't be used for this allocation. Allocate nodes using the system allocator.
+    void* ptr = ::operator new(size);
     VLOG("ALLOC Pool:0x%016llX Index:____ malloc 0x%016llX\n",
          (uint64_t)poolData, (uint64_t)ptr);
     return ptr;
@@ -197,7 +201,7 @@ void Pool::FreeIRNode(void* node_v) {
         }
     }
 
-    // No pool is attached or the node was malloced; it must be freed.
+    // We couldn't associate this node with our pool. Free it using the system allocator.
     VLOG("FREE Pool:0x%016llX Index:____ free 0x%016llX\n",
          (uint64_t)poolData, (uint64_t)node_v);
     ::operator delete(node_v);
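The comment above implies that FreeIRNode must classify each pointer as pool-owned or heap-owned before releasing it. One conventional way to do that, shown here as a sketch rather than Skia's exact test, is an address-range check against the pool's slot array:

#include <cstdint>
#include <cstdio>
#include <new>

constexpr int kSlotSize  = 120;   // illustrative, mirroring kSmallNodeSize
constexpr int kSlotCount = 512;   // illustrative, mirroring kNodesInPool

struct Slot { uint8_t fBuffer[kSlotSize]; };

struct PoolData {
    Slot fSlots[kSlotCount];

    // True when ptr points into this pool's slot array. Anything else must
    // have come from ::operator new and needs ::operator delete instead.
    bool owns(const void* ptr) const {
        auto p     = reinterpret_cast<uintptr_t>(ptr);
        auto begin = reinterpret_cast<uintptr_t>(fSlots);
        auto end   = reinterpret_cast<uintptr_t>(fSlots + kSlotCount);
        return p >= begin && p < end;
    }
};

int main() {
    static PoolData pool;
    void* pooled = pool.fSlots[7].fBuffer;
    void* heaped = ::operator new(256);
    std::printf("pooled: %d, heaped: %d\n", pool.owns(pooled), pool.owns(heaped));
    ::operator delete(heaped);
}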
@@ -41,8 +41,9 @@ public:
     // It is an error to call this while no pool is attached.
     void detachFromThread();
 
-    // Retrieves a node from the thread pool. If the pool is exhausted, this will allocate a node.
-    static void* AllocIRNode();
+    // Retrieves a node from the thread pool. If the pool is exhausted, or if the requested size
+    // exceeds the size that we can deliver from a pool, this will just allocate memory.
+    static void* AllocIRNode(size_t size);
 
     // Releases a node that was created by AllocIRNode. This will return it to the pool, or free it,
     // as appropriate. Make sure to free all nodes, since some of them may be real allocations.
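Read together, these declarations suggest a usage pattern roughly like the following sketch. The attachToThread call and the commented include are assumptions inferred from the surrounding API (the header only shows detachFromThread and Create here), and the compiler integration is elided:

#include <memory>
// #include "SkSLPool.h"  // assumed header; the path is not shown in this diff

void compileWithPool() {
    // Create (or recycle) a pool and bind it to this thread. From here on,
    // IRNode::operator new serves suitably small allocations from the pool.
    std::unique_ptr<SkSL::Pool> pool = SkSL::Pool::Create();
    pool->attachToThread();

    // ... build and optimize the Program; small IRNodes come from the pool,
    // oversized ones fall back to the heap ...

    // Destroy every node before detaching: per the comment on FreeIRNode,
    // some of them may be real heap allocations, and pooled nodes must not
    // outlive the pool.
    pool->detachFromThread();
}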
@@ -67,10 +67,7 @@ public:
 
     // Override operator new and delete to allow us to control allocation behavior.
     static void* operator new(const size_t size) {
-        if (size == sizeof(IRNode)) {
-            return Pool::AllocIRNode();
-        }
-        return ::operator new(size);
+        return Pool::AllocIRNode(size);
     }
 
     static void operator delete(void* ptr) {