Poison the GrMemoryPool's sentinel field when ASAN is enabled.

The fSentinel field can now serve as an ASAN barrier to catch wild
writes across pooled nodes.
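
(Illustration only, not part of this CL: a standalone sketch of what a
poisoned sentinel buys, using the raw sanitizer interface that SkASAN.h
wraps. The Slot type, its fields, and main() are hypothetical; build with
-fsanitize=address.)

    // Hypothetical Slot type; the guard word sits flush against the payload.
    #include <sanitizer/asan_interface.h>
    #include <cstdio>

    struct alignas(8) Slot {
        int fStart;      // stand-in for ordinary header bookkeeping
        int fSentinel;   // guard word: poisoned, never legally read or written
        char payload[32];
    };

    int main() {
        static Slot slot{};

        // Mark the sentinel bytes off-limits (this is what sk_asan_poison_memory_region wraps).
        __asan_poison_memory_region(&slot.fSentinel, sizeof(slot.fSentinel));
        std::printf("sentinel poisoned? %d\n", __asan_address_is_poisoned(&slot.fSentinel));

        // A wild write that strays off the front of payload lands on the sentinel and
        // triggers an ASAN use-after-poison report:
        //   slot.payload[-1] = 0;   // uncomment to see the report

        // Unpoison before the bytes are legitimately reused, as GrMemoryPool::release() does.
        __asan_unpoison_memory_region(&slot.fSentinel, sizeof(slot.fSentinel));
        return 0;
    }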

It's also been moved to the very end of the Header so that it
immediately precedes the actual pooled node; this will make it easier to
catch wild writes that come before the front of the object. (Padding
between nodes, when there is any, should already be able to catch wild
writes that extend off the back end of an object.)
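
(Likewise illustration only: a hypothetical Header laid out like the
patched one, showing why a trailing sentinel occupies the bytes
immediately in front of the node and how release() can get back to the
header. Field names mirror the patch; the storage buffer and sizes are
made up.)

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <new>

    struct Header {
        int fStart;
        int fEnd;
        int fSentinel;   // last member: ends exactly where the pooled node begins
    };
    static_assert(offsetof(Header, fSentinel) + sizeof(int) == sizeof(Header),
                  "sentinel is the final field of the header");

    int main() {
        // A header placed directly in front of its node, roughly as allocate() arranges.
        alignas(Header) static char storage[sizeof(Header) + 64];
        Header* hdr  = new (storage) Header{};
        char*   node = storage + sizeof(Header);

        // The sentinel's bytes are the bytes just before the node, so a wild write off
        // the front of the node (e.g. node[-1]) hits the sentinel before anything else.
        assert(reinterpret_cast<char*>(&hdr->fSentinel) + sizeof(hdr->fSentinel) == node);

        // release(p) recovers the header with the same arithmetic the patch uses:
        Header* recovered =
                reinterpret_cast<Header*>(reinterpret_cast<intptr_t>(node) - sizeof(Header));
        assert(recovered == hdr);
        std::printf("header %p  sentinel %p  node %p\n",
                    (void*)hdr, (void*)&hdr->fSentinel, (void*)node);
        return 0;
    }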

Change-Id: Ibf20dbdc1bb45e012f4971a1cd39e5c94a5a938f
Bug: skia:10885
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/332176
Auto-Submit: John Stiles <johnstiles@google.com>
Commit-Queue: Brian Osman <brianosman@google.com>
Reviewed-by: Brian Osman <brianosman@google.com>
Authored by John Stiles on 2020-11-04 09:59:36 -05:00; committed by Skia Commit-Bot
parent 3e251dc3c2
commit 5c7e1a15fb
3 changed files with 24 additions and 12 deletions

src/core/SkASAN.h

@@ -22,7 +22,7 @@
 #endif
 // Typically declared in LLVM's asan_interface.h.
-#if SK_SANITIZE_ADDRESS
+#ifdef SK_SANITIZE_ADDRESS
 extern "C" {
     void __asan_poison_memory_region(void const volatile *addr, size_t size);
     void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
@@ -33,13 +33,13 @@ extern "C" {
 // unpoison chunks of arena memory as they are parceled out. Consider leaving gaps between blocks
 // to detect buffer overrun.
 static inline void sk_asan_poison_memory_region(void const volatile *addr, size_t size) {
-#if SK_SANITIZE_ADDRESS
+#ifdef SK_SANITIZE_ADDRESS
     __asan_poison_memory_region(addr, size);
 #endif
 }
 static inline void sk_asan_unpoison_memory_region(void const volatile *addr, size_t size) {
-#if SK_SANITIZE_ADDRESS
+#ifdef SK_SANITIZE_ADDRESS
     __asan_unpoison_memory_region(addr, size);
 #endif
 }

src/gpu/GrMemoryPool.cpp

@@ -8,6 +8,7 @@
 #include "src/gpu/GrMemoryPool.h"
 #include "include/private/SkTPin.h"
+#include "src/core/SkASAN.h"
 #include "src/gpu/ops/GrOp.h"
 #ifdef SK_DEBUG
@@ -69,8 +70,13 @@ void* GrMemoryPool::allocate(size_t size) {
     // Update live count within the block
     alloc.fBlock->setMetadata(alloc.fBlock->metadata() + 1);
-#ifdef SK_DEBUG
+#if defined(SK_SANITIZE_ADDRESS)
+    sk_asan_poison_memory_region(&header->fSentinel, sizeof(header->fSentinel));
+#elif defined(SK_DEBUG)
     header->fSentinel = GrBlockAllocator::kAssignedMarker;
+#endif
+#if defined(SK_DEBUG)
     header->fID = []{
         static std::atomic<int> nextID{1};
         return nextID++;
@@ -89,16 +95,20 @@ void GrMemoryPool::release(void* p) {
     // NOTE: if we needed it, (p - block) would equal the original alignedOffset value returned by
     // GrBlockAllocator::allocate()
     Header* header = reinterpret_cast<Header*>(reinterpret_cast<intptr_t>(p) - sizeof(Header));
+#if defined(SK_SANITIZE_ADDRESS)
+    sk_asan_unpoison_memory_region(&header->fSentinel, sizeof(header->fSentinel));
+#elif defined(SK_DEBUG)
     SkASSERT(GrBlockAllocator::kAssignedMarker == header->fSentinel);
-    GrBlockAllocator::Block* block = fAllocator.owningBlock<kAlignment>(header, header->fStart);
-#ifdef SK_DEBUG
     header->fSentinel = GrBlockAllocator::kFreedMarker;
+#endif
+#if defined(SK_DEBUG)
     fAllocatedIDs.remove(header->fID);
     fAllocationCount--;
 #endif
+    GrBlockAllocator::Block* block = fAllocator.owningBlock<kAlignment>(header, header->fStart);
     int alive = block->metadata();
     if (alive == 1) {
         // This was last allocation in the block, so remove it

src/gpu/GrMemoryPool.h

@@ -106,12 +106,14 @@ private:
     // Per-allocation overhead so that GrMemoryPool can always identify the block owning each and
     // release all occupied bytes, including any resulting from alignment padding.
     struct Header {
-#ifdef SK_DEBUG
-        int fSentinel; // known value to check for memory stomping (e.g., (CD)*)
-        int fID;       // ID that can be used to track down leaks by clients.
-#endif
         int fStart;
         int fEnd;
+#if defined(SK_DEBUG)
+        int fID;       // ID that can be used to track down leaks by clients.
+#endif
+#if defined(SK_DEBUG) || defined(SK_SANITIZE_ADDRESS)
+        int fSentinel; // set to a known value to check for memory stomping; poisoned in ASAN mode
+#endif
     };
     GrMemoryPool(size_t preallocSize, size_t minAllocSize);