Revert "Replace pooling mechanism with GrMemoryPool."
This reverts commit 67e1cf4b1d.
Reason for revert: iOS 8
Original change's description:
> Replace pooling mechanism with GrMemoryPool.
>
> This change is a wash for tests that could fit inside the previous
> hard-coded pool (512 nodes) and appears to be a 5% improvement for
> sksl_large. Larger programs would hypothetically show an even more
> significant improvement.
>
> When SK_SUPPORT_GPU is disabled, we disable pooling entirely and fall
> back to the system allocator. This is necessary because SkSL can exist
> without Ganesh (such as in the wasm+CanvasKit build).
>
> Nanobench: http://screen/4xJEzdGducRxGeq
>
> Change-Id: I71dc702a84ab5c163673e35ec651003d7d45dacd
> Reviewed-on: https://skia-review.googlesource.com/c/skia/+/330219
> Commit-Queue: John Stiles <johnstiles@google.com>
> Reviewed-by: Brian Osman <brianosman@google.com>
> Auto-Submit: John Stiles <johnstiles@google.com>
TBR=brianosman@google.com,ethannicholas@google.com,johnstiles@google.com
Change-Id: I26dbd7f2d5348dd717c39fd0780ee5d140292e9a
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/330416
Reviewed-by: John Stiles <johnstiles@google.com>
Commit-Queue: John Stiles <johnstiles@google.com>
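For context, the pooling mechanism (both the GrMemoryPool version being removed and the hand-rolled version being restored) is driven from outside the pool itself: a caller creates a Pool, attaches it to the current thread, builds a program (every IRNode::operator new then draws from the pool), destroys the nodes, and detaches. A minimal sketch of that lifecycle, using the Pool API restored in the diff below; the convertProgram call and its arguments are illustrative assumptions, not part of this change:

    // Sketch only: the compiler calls are assumed for illustration; the Pool
    // calls match the API restored by this revert.
    std::unique_ptr<SkSL::Pool> pool = SkSL::Pool::Create();
    pool->attachToThread();                // IRNode::operator new now uses the pool
    auto program = compiler.convertProgram(kind, source, settings);
    program = nullptr;                     // destroys the IRNodes, emptying the pool
    pool->detachFromThread();
    SkSL::Pool::Recycle(std::move(pool));  // stash the empty pool for the next compile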
Parent: f933e4fb56 · Commit: b3cc5fdf53
BUILD.gn
@@ -560,8 +560,6 @@ if (skia_compile_processors || skia_compile_sksl_tests) {
     "src/core/SkMath.cpp",
     "src/core/SkSemaphore.cpp",
     "src/core/SkThreadID.cpp",
-    "src/gpu/GrBlockAllocator.cpp",
-    "src/gpu/GrMemoryPool.cpp",
     "src/ports/SkMemory_malloc.cpp",
     "src/sksl/SkSLMain.cpp",
   ]
gn/sksl.gni
@@ -36,7 +36,6 @@ skia_sksl_sources = [
   "$_src/sksl/SkSLLexer.cpp",
   "$_src/sksl/SkSLLexer.h",
   "$_src/sksl/SkSLMemoryLayout.h",
-  "$_src/sksl/SkSLMemoryPool.h",
   "$_src/sksl/SkSLParser.cpp",
   "$_src/sksl/SkSLParser.h",
   "$_src/sksl/SkSLPool.cpp",
src/sksl/SkSLMemoryPool.h (deleted)
@@ -1,43 +0,0 @@
-/*
- * Copyright 2016 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#ifndef SKSL_MEMORYPOOL
-#define SKSL_MEMORYPOOL
-
-#include <memory>
-
-#include "include/core/SkTypes.h"
-
-#if SK_SUPPORT_GPU
-
-#include "src/gpu/GrMemoryPool.h"
-
-namespace SkSL {
-using MemoryPool = ::GrMemoryPool;
-}
-
-#else
-
-// When Ganesh is disabled, GrMemoryPool is not linked in. We include a minimal class which mimics
-// the GrMemoryPool interface but simply redirects to the system allocator.
-namespace SkSL {
-
-class MemoryPool {
-public:
-    static std::unique_ptr<MemoryPool> Make(size_t, size_t) {
-        return std::make_unique<MemoryPool>();
-    }
-    void reportLeaks() const {}
-    bool isEmpty() const { return true; }
-    void* allocate(size_t size) { return ::operator new(size); }
-    void release(void* p) { ::operator delete(p); }
-};
-
-} // namespace SkSL
-
-#endif // SK_SUPPORT_GPU
-#endif // SKSL_MEMORYPOOL
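The header deleted above was the only glue between SkSL's pool and Ganesh: with SK_SUPPORT_GPU set, SkSL::MemoryPool is just an alias for ::GrMemoryPool, and otherwise the small shim forwards allocate/release straight to ::operator new/delete so that builds without Ganesh (such as wasm+CanvasKit) still link. Removing it returns SkSL to the self-contained subpools restored below.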
src/sksl/SkSLPool.cpp
@@ -10,11 +10,132 @@
 #include <bitset>
 
 #include "include/private/SkMutex.h"
+#include "src/sksl/ir/SkSLIRNode.h"
 
 #define VLOG(...) // printf(__VA_ARGS__)
 
 namespace SkSL {
 
+namespace {
+
+template <int kNodeSize, int kNumNodes>
+class Subpool {
+public:
+    Subpool() {
+        // Initializes each node in the pool as a free node. The free nodes form a singly-linked
+        // list, each pointing to the next free node in sequence.
+        for (int index = 0; index < kNumNodes - 1; ++index) {
+            fNodes[index].fFreeListNext = &fNodes[index + 1];
+        }
+        fNodes[kNumNodes - 1].fFreeListNext = nullptr;
+    }
+
+    void* poolBegin() {
+        return &fNodes[0];
+    }
+
+    void* poolEnd() {
+        return &fNodes[kNumNodes];
+    }
+
+    void* alloc() {
+        // Does the pool contain a free node?
+        if (!fFreeListHead) {
+            return nullptr;
+        }
+        // Yes. Take a node from the freelist.
+        auto* node = fFreeListHead;
+        fFreeListHead = node->fFreeListNext;
+        return node->fBuffer;
+    }
+
+    void free(void* node_v) {
+        SkASSERT(this->isValidNodePtrInPool(node_v));
+
+        // Push a node back onto the freelist.
+        auto* node = static_cast<Subpool::Node*>(node_v);
+        node->fFreeListNext = fFreeListHead;
+        fFreeListHead = node;
+    }
+
+    bool isValidNodePtrInPool(void* node_v) {
+        // Verify that the pointer exists in our subpool at all.
+        if (node_v < this->poolBegin()) {
+            return false;
+        }
+        if (node_v >= this->poolEnd()) {
+            return false;
+        }
+        // Verify that the pointer points to the start of a node, not the middle.
+        intptr_t offsetInPool = (intptr_t)node_v - (intptr_t)this->poolBegin();
+        return (offsetInPool % kNodeSize) == 0;
+    }
+
+    void checkForLeaks() {
+#ifdef SK_DEBUG
+        // Walk the free list and mark each node. We should encounter every item in the pool.
+        std::bitset<kNumNodes> freed;
+        for (Node* node = fFreeListHead; node; node = node->fFreeListNext) {
+            ptrdiff_t nodeIndex = this->nodeIndex(node);
+            freed[nodeIndex] = true;
+        }
+        // Look for any bit left unset above, and report it as a leak.
+        bool foundLeaks = false;
+        for (int index = 0; index < kNumNodes; ++index) {
+            if (!freed[index]) {
+                SkDebugf("Node %d leaked: ", index);
+                IRNode* leak = reinterpret_cast<IRNode*>(fNodes[index].fBuffer);
+                SkDebugf("%s\n", leak->description().c_str());
+                foundLeaks = true;
+            }
+        }
+        if (foundLeaks) {
+            SkDEBUGFAIL("leaking SkSL pool nodes; if they are later freed, this will "
+                        "likely be fatal");
+        }
+#endif
+    }
+
+    // Accessors.
+    constexpr int nodeCount() { return kNumNodes; }
+
+    int nodeIndex(void* node_v) {
+        SkASSERT(this->isValidNodePtrInPool(node_v));
+
+        auto* node = static_cast<Subpool::Node*>(node_v);
+        return SkToInt(node - fNodes);
+    }
+
+private:
+    struct Node {
+        union {
+            uint8_t fBuffer[kNodeSize];
+            Node* fFreeListNext;
+        };
+    };
+
+    // This holds the first free node in the pool. It will be null when the pool is exhausted.
+    Node* fFreeListHead = fNodes;
+
+    // Our pooled data lives here.
+    Node fNodes[kNumNodes];
+};
+
+static constexpr int kSmallNodeSize = 120;
+static constexpr int kNumSmallNodes = 480;
+using SmallSubpool = Subpool<kSmallNodeSize, kNumSmallNodes>;
+
+static constexpr int kLargeNodeSize = 240;
+static constexpr int kNumLargeNodes = 20;
+using LargeSubpool = Subpool<kLargeNodeSize, kNumLargeNodes>;
+
+}  // namespace
+
+struct PoolData {
+    SmallSubpool fSmall;
+    LargeSubpool fLarge;
+};
+
 #if defined(SK_BUILD_FOR_IOS) && \
     (!defined(__IPHONE_9_0) || __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_9_0)
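For scale, the restored fixed-size pools hold 480 small nodes of 120 bytes (57,600 bytes) plus 20 large nodes of 240 bytes (4,800 bytes): 500 pooled IRNodes in roughly 61 KiB per PoolData, close in footprint to the 64 KiB preallocation that the GrMemoryPool version requests in Pool::Create() below.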
@@ -32,24 +153,24 @@ static pthread_key_t get_pthread_key() {
     return sKey;
 }
 
-static MemoryPool* get_thread_local_memory_pool() {
-    return static_cast<MemoryPool*>(pthread_getspecific(get_pthread_key()));
+static PoolData* get_thread_local_pool_data() {
+    return static_cast<PoolData*>(pthread_getspecific(get_pthread_key()));
 }
 
-static void set_thread_local_memory_pool(MemoryPool* memPool) {
-    pthread_setspecific(get_pthread_key(), memPool);
+static void set_thread_local_pool_data(PoolData* poolData) {
+    pthread_setspecific(get_pthread_key(), poolData);
 }
 
 #else
 
-static thread_local MemoryPool* sMemPool = nullptr;
+static thread_local PoolData* sPoolData = nullptr;
 
-static MemoryPool* get_thread_local_memory_pool() {
-    return sMemPool;
+static PoolData* get_thread_local_pool_data() {
+    return sPoolData;
 }
 
-static void set_thread_local_memory_pool(MemoryPool* memPool) {
-    sMemPool = memPool;
+static void set_thread_local_pool_data(PoolData* poolData) {
+    sPoolData = poolData;
 }
 
 #endif
@@ -61,15 +182,16 @@ static SkMutex& recycled_pool_mutex() {
 }
 
 Pool::~Pool() {
-    if (get_thread_local_memory_pool() == fMemPool.get()) {
+    if (get_thread_local_pool_data() == fData) {
         SkDEBUGFAIL("SkSL pool is being destroyed while it is still attached to the thread");
-        set_thread_local_memory_pool(nullptr);
+        set_thread_local_pool_data(nullptr);
     }
 
-    fMemPool->reportLeaks();
-    SkASSERT(fMemPool->isEmpty());
+    fData->fSmall.checkForLeaks();
+    fData->fLarge.checkForLeaks();
 
-    VLOG("DELETE Pool:0x%016llX\n", (uint64_t)fMemPool.get());
+    VLOG("DELETE Pool:0x%016llX\n", (uint64_t)fData);
+    delete fData;
 }
 
 std::unique_ptr<Pool> Pool::Create() {
@@ -78,19 +200,19 @@ std::unique_ptr<Pool> Pool::Create() {
     if (sRecycledPool) {
         pool = std::unique_ptr<Pool>(sRecycledPool);
         sRecycledPool = nullptr;
-        VLOG("REUSE Pool:0x%016llX\n", (uint64_t)pool->fMemPool.get());
+        VLOG("REUSE Pool:0x%016llX\n", (uint64_t)pool->fData);
     } else {
         pool = std::unique_ptr<Pool>(new Pool);
-        pool->fMemPool = MemoryPool::Make(/*preallocSize=*/65536, /*minAllocSize=*/32768);
-        VLOG("CREATE Pool:0x%016llX\n", (uint64_t)pool->fMemPool.get());
+        pool->fData = new PoolData;
+        VLOG("CREATE Pool:0x%016llX\n", (uint64_t)pool->fData);
     }
     return pool;
 }
 
 void Pool::Recycle(std::unique_ptr<Pool> pool) {
     if (pool) {
-        pool->fMemPool->reportLeaks();
-        SkASSERT(pool->fMemPool->isEmpty());
+        pool->fData->fSmall.checkForLeaks();
+        pool->fData->fLarge.checkForLeaks();
     }
 
     SkAutoMutexExclusive lock(recycled_pool_mutex());
@@ -98,49 +220,77 @@ void Pool::Recycle(std::unique_ptr<Pool> pool) {
         delete sRecycledPool;
     }
 
-    VLOG("STASH Pool:0x%016llX\n", pool ? (uint64_t)pool->fMemPool.get() : 0ull);
+    VLOG("STASH Pool:0x%016llX\n", pool ? (uint64_t)pool->fData : 0ull);
     sRecycledPool = pool.release();
 }
 
 void Pool::attachToThread() {
-    VLOG("ATTACH Pool:0x%016llX\n", (uint64_t)fMemPool.get());
-    SkASSERT(get_thread_local_memory_pool() == nullptr);
-    set_thread_local_memory_pool(fMemPool.get());
+    VLOG("ATTACH Pool:0x%016llX\n", (uint64_t)fData);
+    SkASSERT(get_thread_local_pool_data() == nullptr);
+    set_thread_local_pool_data(fData);
 }
 
 void Pool::detachFromThread() {
-    VLOG("DETACH Pool:0x%016llX\n", (uint64_t)get_thread_local_memory_pool());
-    SkASSERT(get_thread_local_memory_pool() != nullptr);
-    set_thread_local_memory_pool(nullptr);
+    VLOG("DETACH Pool:0x%016llX\n", (uint64_t)get_thread_local_pool_data());
+    SkASSERT(get_thread_local_pool_data() != nullptr);
+    set_thread_local_pool_data(nullptr);
 }
 
 void* Pool::AllocIRNode(size_t size) {
     // Is a pool attached?
-    MemoryPool* memPool = get_thread_local_memory_pool();
-    if (memPool) {
-        void* node = memPool->allocate(size);
-        VLOG("ALLOC Pool:0x%016llX 0x%016llX\n", (uint64_t)memPool, (uint64_t)node);
-        return node;
-    }
-
-    // There's no pool attached. Allocate nodes using the system allocator.
-    void* node = ::operator new(size);
-    VLOG("ALLOC Pool:__________________ 0x%016llX\n", (uint64_t)node);
-    return node;
+    PoolData* poolData = get_thread_local_pool_data();
+    if (poolData) {
+        if (size <= kSmallNodeSize) {
+            // The node will fit in the small pool.
+            auto* node = poolData->fSmall.alloc();
+            if (node) {
+                VLOG("ALLOC Pool:0x%016llX Index:S%03d 0x%016llX\n",
+                     (uint64_t)poolData, poolData->fSmall.nodeIndex(node), (uint64_t)node);
+                return node;
+            }
+        } else if (size <= kLargeNodeSize) {
+            // Try to allocate a large node.
+            auto* node = poolData->fLarge.alloc();
+            if (node) {
+                VLOG("ALLOC Pool:0x%016llX Index:L%03d 0x%016llX\n",
+                     (uint64_t)poolData, poolData->fLarge.nodeIndex(node), (uint64_t)node);
+                return node;
+            }
+        }
+    }
+
+    // The pool can't be used for this allocation. Allocate nodes using the system allocator.
+    void* ptr = ::operator new(size);
+    VLOG("ALLOC Pool:0x%016llX Index:____ malloc 0x%016llX\n",
+         (uint64_t)poolData, (uint64_t)ptr);
+    return ptr;
 }
 
 void Pool::FreeIRNode(void* node) {
     // Is a pool attached?
-    MemoryPool* memPool = get_thread_local_memory_pool();
-    if (memPool) {
-        VLOG("FREE Pool:0x%016llX 0x%016llX\n", (uint64_t)memPool, (uint64_t)node);
-        memPool->release(node);
-        return;
-    }
-
-    // There's no pool attached. Free it using the system allocator.
-    VLOG("FREE Pool:__________________ 0x%016llX\n", (uint64_t)node);
+    PoolData* poolData = get_thread_local_pool_data();
+    if (poolData) {
+        // Did this node come from either of our pools?
+        if (node >= poolData->fSmall.poolBegin()) {
+            if (node < poolData->fSmall.poolEnd()) {
+                poolData->fSmall.free(node);
+                VLOG("FREE Pool:0x%016llX Index:S%03d 0x%016llX\n",
+                     (uint64_t)poolData, poolData->fSmall.nodeIndex(node), (uint64_t)node);
+                return;
+            } else if (node < poolData->fLarge.poolEnd()) {
+                poolData->fLarge.free(node);
+                VLOG("FREE Pool:0x%016llX Index:L%03d 0x%016llX\n",
+                     (uint64_t)poolData, poolData->fLarge.nodeIndex(node), (uint64_t)node);
+                return;
+            }
+        }
+    }
+
+    // We couldn't associate this node with our pool. Free it using the system allocator.
+    VLOG("FREE Pool:0x%016llX Index:____ free 0x%016llX\n",
+         (uint64_t)poolData, (uint64_t)node);
     ::operator delete(node);
 }
 
 }  // namespace SkSL
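One subtlety in the restored FreeIRNode: after confirming node >= fSmall.poolBegin(), it distinguishes the two pools only by comparing against fSmall.poolEnd() and fLarge.poolEnd(). That classification works because fLarge is declared immediately after fSmall inside PoolData (and both node sizes are pointer-aligned, so no padding separates them), so the large pool's storage begins where the small pool's ends; any pointer outside both ranges falls through to ::operator delete.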
src/sksl/SkSLPool.h
@@ -10,16 +10,10 @@
 
 #include <memory>
 
-#include "src/sksl/SkSLMemoryPool.h"
-
 namespace SkSL {
 
-/**
- * Efficiently allocates memory for IRNodes in an SkSL program. Optimized for allocate/release
- * performance over memory efficiency.
- *
- * All allocated IRNodes must be released back to the pool before it can be destroyed or recycled.
- */
+class IRNode;
+struct PoolData;
 
 class Pool {
 public:
@@ -59,7 +53,7 @@ private:
     void checkForLeaks();
 
     Pool() = default;  // use Create to make a pool
-    std::unique_ptr<SkSL::MemoryPool> fMemPool;
+    PoolData* fData = nullptr;
 };
 
 }  // namespace SkSL
src/sksl/ir/SkSLIRNode.h
@@ -65,7 +65,7 @@ public:
     // purposes
     int fOffset;
 
-    // Override operator new and delete to allow us to use a memory pool.
+    // Override operator new and delete to allow us to control allocation behavior.
     static void* operator new(const size_t size) {
         return Pool::AllocIRNode(size);
     }
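The IRNode hunk above relies on C++ class-scoped allocation functions: declaring static operator new and operator delete inside a class makes every new/delete of that class (and its subclasses) route through them instead of the global allocator. A self-contained sketch of the pattern; AllocFromPool/ReturnToPool are illustrative stand-ins for Pool::AllocIRNode/Pool::FreeIRNode, not Skia APIs:

    #include <cstdio>
    #include <new>

    // Stand-ins for the pool hooks; SkSL's versions consult the thread-local
    // pool first and fall back to the system allocator.
    static void* AllocFromPool(std::size_t size) { return ::operator new(size); }
    static void ReturnToPool(void* ptr) { ::operator delete(ptr); }

    struct PooledBase {
        static void* operator new(std::size_t size) { return AllocFromPool(size); }
        static void operator delete(void* ptr) { ReturnToPool(ptr); }
    };

    struct Node : PooledBase { int fValue = 0; };

    int main() {
        Node* n = new Node();  // routed through PooledBase::operator new
        delete n;              // routed through PooledBase::operator delete
        std::printf("pooled allocation ok\n");
        return 0;
    }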