Make SkSmallAllocator obey the RAII invariants and move to heap structures when needed.
The biggest change is to the API which allowed code to bypass the destruction invariants. This destruction bypass feature was needed in only one use, and is totally encapsulated using createWithIniterT. BUG=skia: GOLD_TRYBOT_URL= https://gold.skia.org/search?issue=2488523003 Review-Url: https://codereview.chromium.org/2488523003
This commit is contained in:
parent
28930b4648
commit
d5dc657b8c
@ -896,14 +896,15 @@ SkBlitter* SkBlitter::Choose(const SkPixmap& device,
|
||||
size_t contextSize = shader->contextSize(rec);
|
||||
if (contextSize) {
|
||||
// Try to create the ShaderContext
|
||||
void* storage = allocator->reserveT<SkShader::Context>(contextSize);
|
||||
shaderContext = shader->createContext(rec, storage);
|
||||
shaderContext = allocator->createWithIniterT<SkShader::Context>(
|
||||
contextSize,
|
||||
[&rec, shader](void* storage) {
|
||||
return shader->createContext(rec, storage);
|
||||
});
|
||||
if (!shaderContext) {
|
||||
allocator->freeLast();
|
||||
return allocator->createT<SkNullBlitter>();
|
||||
}
|
||||
SkASSERT(shaderContext);
|
||||
SkASSERT((void*) shaderContext == storage);
|
||||
} else {
|
||||
return allocator->createT<SkNullBlitter>();
|
||||
}
|
||||
|
@ -496,9 +496,11 @@ public:
|
||||
}
|
||||
|
||||
if (SkDrawLooper* looper = paint.getLooper()) {
|
||||
void* buffer = fLooperContextAllocator.reserveT<SkDrawLooper::Context>(
|
||||
looper->contextSize());
|
||||
fLooperContext = looper->createContext(canvas, buffer);
|
||||
fLooperContext = fLooperContextAllocator.createWithIniterT<SkDrawLooper::Context>(
|
||||
looper->contextSize(),
|
||||
[&](void* buffer) {
|
||||
return looper->createContext(canvas, buffer);
|
||||
});
|
||||
fIsSimple = false;
|
||||
} else {
|
||||
fLooperContext = nullptr;
|
||||
|
@ -15,9 +15,12 @@
|
||||
bool SkDrawLooper::canComputeFastBounds(const SkPaint& paint) const {
|
||||
SkCanvas canvas;
|
||||
SkSmallAllocator<1, 32> allocator;
|
||||
void* buffer = allocator.reserveT<SkDrawLooper::Context>(this->contextSize());
|
||||
|
||||
SkDrawLooper::Context* context = this->createContext(&canvas, buffer);
|
||||
SkDrawLooper::Context* context = allocator.createWithIniterT<SkDrawLooper::Context>(
|
||||
this->contextSize(),
|
||||
[&](void* buffer) {
|
||||
return this->createContext(&canvas, buffer);
|
||||
});
|
||||
for (;;) {
|
||||
SkPaint p(paint);
|
||||
if (context->next(&canvas, &p)) {
|
||||
@ -39,10 +42,13 @@ void SkDrawLooper::computeFastBounds(const SkPaint& paint, const SkRect& s,
|
||||
|
||||
SkCanvas canvas;
|
||||
SkSmallAllocator<1, 32> allocator;
|
||||
void* buffer = allocator.reserveT<SkDrawLooper::Context>(this->contextSize());
|
||||
|
||||
*dst = src; // catch case where there are no loops
|
||||
SkDrawLooper::Context* context = this->createContext(&canvas, buffer);
|
||||
SkDrawLooper::Context* context = allocator.createWithIniterT<SkDrawLooper::Context>(
|
||||
this->contextSize(),
|
||||
[&](void* buffer) {
|
||||
return this->createContext(&canvas, buffer);
|
||||
});
|
||||
for (bool firstTime = true;; firstTime = false) {
|
||||
SkPaint p(paint);
|
||||
if (context->next(&canvas, &p)) {
|
||||
|
@ -99,8 +99,7 @@ SkBlitter* SkRasterPipelineBlitter::Create(const SkPixmap& dst,
|
||||
paint.getBlendMode(),
|
||||
paint_color(dst, paint));
|
||||
auto earlyOut = [&] {
|
||||
blitter->~SkRasterPipelineBlitter();
|
||||
alloc->freeLast();
|
||||
alloc->deleteLast();
|
||||
return nullptr;
|
||||
};
|
||||
|
||||
|
@ -8,78 +8,90 @@
|
||||
#ifndef SkSmallAllocator_DEFINED
|
||||
#define SkSmallAllocator_DEFINED
|
||||
|
||||
#include "SkTDArray.h"
|
||||
#include "SkTArray.h"
|
||||
#include "SkTypes.h"
|
||||
|
||||
#include <new>
|
||||
#include <utility>
|
||||
|
||||
/*
|
||||
* Template class for allocating small objects without additional heap memory
|
||||
* allocations. kMaxObjects is a hard limit on the number of objects that can
|
||||
* be allocated using this class. After that, attempts to create more objects
|
||||
* with this class will assert and return nullptr.
|
||||
* allocations.
|
||||
*
|
||||
* kTotalBytes is the total number of bytes provided for storage for all
|
||||
* objects created by this allocator. If an object to be created is larger
|
||||
* than the storage (minus storage already used), it will be allocated on the
|
||||
* heap. This class's destructor will handle calling the destructor for each
|
||||
* object it allocated and freeing its memory.
|
||||
*
|
||||
 * Currently the class always aligns each allocation to 16 bytes to be safe, but a future version
|
||||
* may reduce this to only the alignment that is required per alloc.
|
||||
*/
|
||||
template<uint32_t kMaxObjects, size_t kTotalBytes>
|
||||
template<uint32_t kExpectedObjects, size_t kTotalBytes>
|
||||
class SkSmallAllocator : SkNoncopyable {
|
||||
public:
|
||||
SkSmallAllocator()
|
||||
: fStorageUsed(0)
|
||||
, fNumObjects(0)
|
||||
{}
|
||||
|
||||
~SkSmallAllocator() {
|
||||
// Destruct in reverse order, in case an earlier object points to a
|
||||
// later object.
|
||||
while (fNumObjects > 0) {
|
||||
fNumObjects--;
|
||||
Rec* rec = &fRecs[fNumObjects];
|
||||
rec->fKillProc(rec->fObj);
|
||||
// Safe to do if fObj is in fStorage, since fHeapStorage will
|
||||
// point to nullptr.
|
||||
sk_free(rec->fHeapStorage);
|
||||
while (fRecs.count() > 0) {
|
||||
this->deleteLast();
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Create a new object of type T. Its lifetime will be handled by this
|
||||
* SkSmallAllocator.
|
||||
* Note: If kMaxObjects have been created by this SkSmallAllocator, nullptr
|
||||
* will be returned.
|
||||
*/
|
||||
template<typename T, typename... Args>
|
||||
T* createT(Args&&... args) {
|
||||
void* buf = this->reserveT<T>();
|
||||
if (nullptr == buf) {
|
||||
return nullptr;
|
||||
}
|
||||
void* buf = this->reserve(sizeof(T), DefaultDestructor<T>);
|
||||
return new (buf) T(std::forward<Args>(args)...);
|
||||
}
|
||||
|
||||
/*
|
||||
* Reserve a specified amount of space (must be enough space for one T).
|
||||
* The space will be in fStorage if there is room, or on the heap otherwise.
|
||||
* Either way, this class will call ~T() in its destructor and free the heap
|
||||
* allocation if necessary.
|
||||
* Unlike createT(), this method will not call the constructor of T.
|
||||
 * Create a new object of the given size using initer to initialize the memory. The initer function has
|
||||
* the signature T* initer(void* storage). If initer is unable to initialize the memory it
|
||||
 * should return nullptr, in which case SkSmallAllocator will free the memory.
|
||||
*/
|
||||
template<typename T> void* reserveT(size_t storageRequired = sizeof(T)) {
|
||||
SkASSERT(fNumObjects < kMaxObjects);
|
||||
SkASSERT(storageRequired >= sizeof(T));
|
||||
if (kMaxObjects == fNumObjects) {
|
||||
return nullptr;
|
||||
template <typename T, typename Initer>
|
||||
T* createWithIniterT(size_t size, Initer initer) {
|
||||
SkASSERT(size >= sizeof(T));
|
||||
|
||||
void* storage = this->reserve(size, DefaultDestructor<T>);
|
||||
T* candidate = initer(storage);
|
||||
if (!candidate) {
|
||||
// Initializing didn't work out, so free the memory.
|
||||
this->freeLast();
|
||||
}
|
||||
|
||||
return candidate;
|
||||
}
|
||||
|
||||
/*
|
||||
* Free the last object allocated and call its destructor. This can be called multiple times
|
||||
* removing objects from the pool in reverse order.
|
||||
*/
|
||||
void deleteLast() {
|
||||
SkASSERT(fRecs.count() > 0);
|
||||
Rec& rec = fRecs.back();
|
||||
rec.fDestructor(rec.fObj);
|
||||
this->freeLast();
|
||||
}
|
||||
|
||||
private:
|
||||
using Destructor = void(*)(void*);
|
||||
struct Rec {
|
||||
size_t fStorageSize; // 0 if allocated on heap
|
||||
char* fObj;
|
||||
Destructor fDestructor;
|
||||
};
|
||||
|
||||
// Used to call the destructor for allocated objects.
|
||||
template<typename T>
|
||||
static void DefaultDestructor(void* ptr) {
|
||||
static_cast<T*>(ptr)->~T();
|
||||
}
|
||||
|
||||
// Reserve storageRequired from fStorage if possible otherwise allocate on the heap.
|
||||
void* reserve(size_t storageRequired, Destructor destructor) {
|
||||
const size_t storageRemaining = sizeof(fStorage) - fStorageUsed;
|
||||
Rec* rec = &fRecs[fNumObjects];
|
||||
Rec& rec = fRecs.push_back();
|
||||
if (storageRequired > storageRemaining) {
|
||||
// Allocate on the heap. Ideally we want to avoid this situation.
|
||||
|
||||
@ -87,53 +99,30 @@ public:
|
||||
// and storage remaining is 3392. Increasing the base storage
|
||||
// causes google 3 tests to fail.
|
||||
|
||||
rec->fStorageSize = 0;
|
||||
rec->fHeapStorage = sk_malloc_throw(storageRequired);
|
||||
rec->fObj = static_cast<void*>(rec->fHeapStorage);
|
||||
rec.fStorageSize = 0;
|
||||
rec.fObj = new char [storageRequired];
|
||||
} else {
|
||||
// There is space in fStorage.
|
||||
rec->fStorageSize = storageRequired;
|
||||
rec->fHeapStorage = nullptr;
|
||||
rec->fObj = static_cast<void*>(fStorage + fStorageUsed);
|
||||
rec.fStorageSize = storageRequired;
|
||||
rec.fObj = &fStorage[fStorageUsed];
|
||||
fStorageUsed += storageRequired;
|
||||
}
|
||||
rec->fKillProc = DestroyT<T>;
|
||||
fNumObjects++;
|
||||
return rec->fObj;
|
||||
rec.fDestructor = destructor;
|
||||
return rec.fObj;
|
||||
}
|
||||
|
||||
/*
|
||||
* Free the memory reserved last without calling the destructor.
|
||||
* Can be used in a nested way, i.e. after reserving A and B, calling
|
||||
* freeLast once will free B and calling it again will free A.
|
||||
*/
|
||||
void freeLast() {
|
||||
SkASSERT(fNumObjects > 0);
|
||||
Rec* rec = &fRecs[fNumObjects - 1];
|
||||
sk_free(rec->fHeapStorage);
|
||||
fStorageUsed -= rec->fStorageSize;
|
||||
|
||||
fNumObjects--;
|
||||
Rec& rec = fRecs.back();
|
||||
if (0 == rec.fStorageSize) {
|
||||
delete [] rec.fObj;
|
||||
}
|
||||
fStorageUsed -= rec.fStorageSize;
|
||||
fRecs.pop_back();
|
||||
}
|
||||
|
||||
private:
|
||||
struct Rec {
|
||||
size_t fStorageSize; // 0 if allocated on heap
|
||||
void* fObj;
|
||||
void* fHeapStorage;
|
||||
void (*fKillProc)(void*);
|
||||
};
|
||||
|
||||
// Used to call the destructor for allocated objects.
|
||||
template<typename T>
|
||||
static void DestroyT(void* ptr) {
|
||||
static_cast<T*>(ptr)->~T();
|
||||
}
|
||||
|
||||
alignas(16) char fStorage[kTotalBytes];
|
||||
size_t fStorageUsed; // Number of bytes used so far.
|
||||
uint32_t fNumObjects;
|
||||
Rec fRecs[kMaxObjects];
|
||||
size_t fStorageUsed {0}; // Number of bytes used so far.
|
||||
SkSTArray<kExpectedObjects, Rec, true> fRecs;
|
||||
char fStorage[kTotalBytes];
|
||||
};
|
||||
|
||||
#endif // SkSmallAllocator_DEFINED
|
||||
|
@ -60,8 +60,11 @@ static void test_frontToBack(skiatest::Reporter* reporter) {
|
||||
SkPaint paint;
|
||||
auto looper(looperBuilder.detach());
|
||||
SkSmallAllocator<1, 32> allocator;
|
||||
void* buffer = allocator.reserveT<SkDrawLooper::Context>(looper->contextSize());
|
||||
SkDrawLooper::Context* context = looper->createContext(&canvas, buffer);
|
||||
SkDrawLooper::Context* context = allocator.createWithIniterT<SkDrawLooper::Context>(
|
||||
looper->contextSize(),
|
||||
[&](void* buffer) {
|
||||
return looper->createContext(&canvas, buffer);
|
||||
});
|
||||
|
||||
// The back layer should come first.
|
||||
REPORTER_ASSERT(reporter, context->next(&canvas, &paint));
|
||||
@ -100,8 +103,11 @@ static void test_backToFront(skiatest::Reporter* reporter) {
|
||||
SkPaint paint;
|
||||
auto looper(looperBuilder.detach());
|
||||
SkSmallAllocator<1, 32> allocator;
|
||||
void* buffer = allocator.reserveT<SkDrawLooper::Context>(looper->contextSize());
|
||||
SkDrawLooper::Context* context = looper->createContext(&canvas, buffer);
|
||||
SkDrawLooper::Context* context = allocator.createWithIniterT<SkDrawLooper::Context>(
|
||||
looper->contextSize(),
|
||||
[&](void* buffer) {
|
||||
return looper->createContext(&canvas, buffer);
|
||||
});
|
||||
|
||||
// The back layer should come first.
|
||||
REPORTER_ASSERT(reporter, context->next(&canvas, &paint));
|
||||
@ -140,8 +146,11 @@ static void test_mixed(skiatest::Reporter* reporter) {
|
||||
SkPaint paint;
|
||||
sk_sp<SkDrawLooper> looper(looperBuilder.detach());
|
||||
SkSmallAllocator<1, 32> allocator;
|
||||
void* buffer = allocator.reserveT<SkDrawLooper::Context>(looper->contextSize());
|
||||
SkDrawLooper::Context* context = looper->createContext(&canvas, buffer);
|
||||
SkDrawLooper::Context* context = allocator.createWithIniterT<SkDrawLooper::Context>(
|
||||
looper->contextSize(),
|
||||
[&](void* buffer) {
|
||||
return looper->createContext(&canvas, buffer);
|
||||
});
|
||||
|
||||
// The back layer should come first.
|
||||
REPORTER_ASSERT(reporter, context->next(&canvas, &paint));
|
||||
|
@ -30,7 +30,7 @@ int CountingClass::kCount;
|
||||
template<uint32_t kMaxObjects, size_t kBytes> void test_allocator(skiatest::Reporter* reporter) {
|
||||
{
|
||||
SkSmallAllocator<kMaxObjects, kBytes> alloc;
|
||||
for (uint32_t i = 0; i < kMaxObjects; ++i) {
|
||||
for (uint32_t i = 0; i < kMaxObjects + 1; ++i) {
|
||||
CountingClass* c = alloc.template createT<CountingClass>();
|
||||
REPORTER_ASSERT(reporter, c != nullptr);
|
||||
REPORTER_ASSERT(reporter, CountingClass::GetCount() == static_cast<int>(i+1));
|
||||
@ -43,18 +43,15 @@ template<uint32_t kMaxObjects, size_t kBytes> void test_allocator(skiatest::Repo
|
||||
// were created in fStorage or on the heap.
|
||||
DEF_TEST(SmallAllocator_destructor, reporter) {
|
||||
// Four times as many bytes as objects will never require any heap
|
||||
// allocations (since SkAlign4(sizeof(CountingClass)) == 4 and the allocator
|
||||
// will stop once it reaches kMaxObjects).
|
||||
// allocations (since SkAlign4(sizeof(CountingClass)) == 4).
|
||||
test_allocator<5, 20>(reporter);
|
||||
test_allocator<10, 40>(reporter);
|
||||
test_allocator<20, 80>(reporter);
|
||||
|
||||
#ifndef SK_DEBUG
|
||||
// Allowing less bytes than objects means some will be allocated on the
|
||||
// heap. Don't run these in debug where we assert.
|
||||
test_allocator<50, 20>(reporter);
|
||||
test_allocator<100, 20>(reporter);
|
||||
#endif
|
||||
}
|
||||
|
||||
class Dummy {
|
||||
@ -81,3 +78,16 @@ DEF_TEST(SmallAllocator_pointer, reporter) {
|
||||
REPORTER_ASSERT(reporter, container != nullptr);
|
||||
REPORTER_ASSERT(reporter, container->getDummy() == &d);
|
||||
}
|
||||
|
||||
// Test that using a createWithIniterT works as expected.
|
||||
DEF_TEST(SmallAllocator_initer, reporter) {
|
||||
SkSmallAllocator<1, 8> alloc;
|
||||
Dummy d;
|
||||
DummyContainer* container = alloc.createWithIniterT<DummyContainer>(
|
||||
sizeof(DummyContainer),
|
||||
[&](void* storage) {
|
||||
return new (storage) DummyContainer(&d);
|
||||
});
|
||||
REPORTER_ASSERT(reporter, container != nullptr);
|
||||
REPORTER_ASSERT(reporter, container->getDummy() == &d);
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user