Reland "Add pooling support on iOS."

This reverts commit f73091bb81.

Reason for revert: rolling forward after fixing prior CL

Original change's description:
> Revert "Add pooling support on iOS."
>
> This reverts commit 38a93e622b.
>
> Reason for revert: need revert first pool change
>
> Original change's description:
> > Add pooling support on iOS.
> >
> > This replaces the `thread_local` attribute with `pthread_setspecific`
> > and `pthread_getspecific`. I don't have easy access to iOS 8/9 for
> > testing purposes, but on Mac OS X, this implementation works and
> > benchmarks the same as the `thread_local` implementation.
> >
> > Change-Id: I86db88c24d59d946adb66141b32733ebf5261c76
> > Reviewed-on: https://skia-review.googlesource.com/c/skia/+/328837
> > Reviewed-by: Brian Osman <brianosman@google.com>
> > Commit-Queue: Brian Osman <brianosman@google.com>
> > Auto-Submit: John Stiles <johnstiles@google.com>
>
> TBR=brianosman@google.com,adlai@google.com,johnstiles@google.com
>
> Change-Id: Ic06f9e32e524b2be601ee21a5da605fd19aaa64b
> No-Presubmit: true
> No-Tree-Checks: true
> No-Try: true
> Reviewed-on: https://skia-review.googlesource.com/c/skia/+/329164
> Reviewed-by: Greg Daniel <egdaniel@google.com>
> Commit-Queue: Greg Daniel <egdaniel@google.com>

TBR=egdaniel@google.com,brianosman@google.com,adlai@google.com,johnstiles@google.com

Change-Id: I0e021e9304ee88d6a29739c287eb515abff8b8a4
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/329173
Commit-Queue: John Stiles <johnstiles@google.com>
Auto-Submit: John Stiles <johnstiles@google.com>
Reviewed-by: John Stiles <johnstiles@google.com>
This commit is contained in:
John Stiles 2020-10-22 11:35:18 -04:00 committed by Skia Commit-Bot
parent 4fd41ffd54
commit 0bb9ec5437

View File

@@ -13,26 +13,6 @@
 namespace SkSL {
-#if defined(SK_BUILD_FOR_IOS) && \
-    (!defined(__IPHONE_9_0) || __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_9_0)
-// iOS did not support for C++11 `thread_local` variables until iOS 9.
-// Pooling is not supported here; we allocate all nodes directly.
-struct PoolData {};
-Pool::~Pool() {}
-std::unique_ptr<Pool> Pool::CreatePoolOnThread(int nodesInPool) {
-    auto pool = std::unique_ptr<Pool>(new Pool);
-    pool->fData = nullptr;
-    return pool;
-}
-void Pool::detachFromThread() {}
-void Pool::attachToThread() {}
-void* Pool::AllocIRNode() { return ::operator new(sizeof(IRNode)); }
-void Pool::FreeIRNode(void* node) { ::operator delete(node); }
-#else  // !defined(SK_BUILD_FOR_IOS)...
 namespace { struct IRNodeData {
     union {
         uint8_t fBuffer[sizeof(IRNode)];
@@ -53,15 +33,52 @@ struct PoolData {
     // Accessors.
     ptrdiff_t nodeCount() { return fNodesEnd - fNodes; }
-    ptrdiff_t nodeIndex(IRNodeData* node) {
+    int nodeIndex(IRNodeData* node) {
         SkASSERT(node >= fNodes);
         SkASSERT(node < fNodesEnd);
-        return node - fNodes;
+        return SkToInt(node - fNodes);
     }
 };
+#if defined(SK_BUILD_FOR_IOS) && \
+    (!defined(__IPHONE_9_0) || __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_9_0)
+#include <pthread.h>
+static pthread_key_t get_pthread_key() {
+    static pthread_key_t sKey = []{
+        pthread_key_t key;
+        int result = pthread_key_create(&key, /*destructor=*/nullptr);
+        if (result != 0) {
+            SK_ABORT("pthread_key_create failure: %d", result);
+        }
+        return key;
+    }();
+    return sKey;
+}
+static PoolData* get_thread_local_pool_data() {
+    return static_cast<PoolData*>(pthread_getspecific(get_pthread_key()));
+}
+static void set_thread_local_pool_data(PoolData* poolData) {
+    pthread_setspecific(get_pthread_key(), poolData);
+}
+#else
 static thread_local PoolData* sPoolData = nullptr;
+static PoolData* get_thread_local_pool_data() {
+    return sPoolData;
+}
+static void set_thread_local_pool_data(PoolData* poolData) {
+    sPoolData = poolData;
+}
+#endif
 static PoolData* create_pool_data(int nodesInPool) {
     // Create a PoolData structure with extra space at the end for additional IRNode data.
     int numExtraIRNodes = nodesInPool - 1;
@@ -80,9 +97,9 @@ static PoolData* create_pool_data(int nodesInPool) {
 }
 Pool::~Pool() {
-    if (sPoolData == fData) {
+    if (get_thread_local_pool_data() == fData) {
         SkDEBUGFAIL("SkSL pool is being destroyed while it is still attached to the thread");
-        sPoolData = nullptr;
+        set_thread_local_pool_data(nullptr);
     }
     // In debug mode, report any leaked nodes.
@@ -120,27 +137,28 @@ std::unique_ptr<Pool> Pool::CreatePoolOnThread(int nodesInPool) {
 }
 void Pool::detachFromThread() {
-    VLOG("DETACH Pool:0x%016llX\n", (uint64_t)sPoolData);
-    SkASSERT(sPoolData != nullptr);
-    sPoolData = nullptr;
+    VLOG("DETACH Pool:0x%016llX\n", (uint64_t)get_thread_local_pool_data());
+    SkASSERT(get_thread_local_pool_data() != nullptr);
+    set_thread_local_pool_data(nullptr);
 }
 void Pool::attachToThread() {
     VLOG("ATTACH Pool:0x%016llX\n", (uint64_t)fData);
-    SkASSERT(sPoolData == nullptr);
-    sPoolData = fData;
+    SkASSERT(get_thread_local_pool_data() == nullptr);
+    set_thread_local_pool_data(fData);
 }
 void* Pool::AllocIRNode() {
     // Is a pool attached?
-    if (sPoolData) {
+    PoolData* poolData = get_thread_local_pool_data();
+    if (poolData) {
         // Does the pool contain a free node?
-        IRNodeData* node = sPoolData->fFreeListHead;
+        IRNodeData* node = poolData->fFreeListHead;
         if (node) {
             // Yes. Take a node from the freelist.
-            sPoolData->fFreeListHead = node->fFreeListNext;
+            poolData->fFreeListHead = node->fFreeListNext;
             VLOG("ALLOC Pool:0x%016llX Index:%04d 0x%016llX\n",
-                 (uint64_t)sPoolData, (int)(node - &sPoolData->fNodes[0]), (uint64_t)node);
+                 (uint64_t)poolData, poolData->nodeIndex(node), (uint64_t)node);
             return node->fBuffer;
         }
     }
@@ -148,31 +166,30 @@ void* Pool::AllocIRNode() {
     // The pool is detached or full; allocate nodes using malloc.
     void* ptr = ::operator new(sizeof(IRNode));
     VLOG("ALLOC Pool:0x%016llX Index:____ malloc 0x%016llX\n",
-         (uint64_t)sPoolData, (uint64_t)ptr);
+         (uint64_t)poolData, (uint64_t)ptr);
     return ptr;
 }
 void Pool::FreeIRNode(void* node_v) {
     // Is a pool attached?
-    if (sPoolData) {
+    PoolData* poolData = get_thread_local_pool_data();
+    if (poolData) {
         // Did this node come from our pool?
         auto* node = static_cast<IRNodeData*>(node_v);
-        if (node >= &sPoolData->fNodes[0] && node < sPoolData->fNodesEnd) {
+        if (node >= &poolData->fNodes[0] && node < poolData->fNodesEnd) {
             // Yes. Push it back onto the freelist.
             VLOG("FREE Pool:0x%016llX Index:%04d 0x%016llX\n",
-                 (uint64_t)sPoolData, (int)(node - &sPoolData->fNodes[0]), (uint64_t)node);
+                 (uint64_t)poolData, poolData->nodeIndex(node), (uint64_t)node);
-            node->fFreeListNext = sPoolData->fFreeListHead;
+            node->fFreeListNext = poolData->fFreeListHead;
-            sPoolData->fFreeListHead = node;
+            poolData->fFreeListHead = node;
             return;
         }
     }
     // No pool is attached or the node was malloced; it must be freed.
     VLOG("FREE Pool:0x%016llX Index:____ free 0x%016llX\n",
-         (uint64_t)sPoolData, (uint64_t)node_v);
+         (uint64_t)poolData, (uint64_t)node_v);
     ::operator delete(node_v);
 }
-#endif  // !defined(SK_BUILD_FOR_IOS)...
 } // namespace SkSL