Revert of Make SkSmallAllocator obey the RAII invariants and be expandable (patchset #6 id:100001 of https://codereview.chromium.org/2488523003/ )

Reason for revert:
Crashing Mac Perf and Test bots.

This is a flaky but extremely likely crash.  I've seen only one Mac Perf or Test bot with this patch that didn't crash.

This should be easy to reproduce:
  $ gn gen out --args=is_debug=false
  $ ninja -C out dm
  $ out/dm -m xfermodes3 --config gpu

This is crashing every time I run it on my laptop, and never when I revert this CL.

Building in release and running --config gpu probably don't matter.

Original issue's description:
> Make SkSmallAllocator obey the RAII invariants and move to heap structures when needed.
>
> The biggest change is to the API which allowed code to bypass the
> destruction invariants. This destruction bypass feature was needed in
> only one use, and is totally encapsulated using createWithIniterT.
>
> BUG=skia:
> GOLD_TRYBOT_URL= https://gold.skia.org/search?issue=2488523003
>
> Committed: https://skia.googlesource.com/skia/+/d5dc657b8c3ac916f98005dafdedafe02f023449

TBR=bungeman@google.com,herb@google.com
# Skipping CQ checks because the original CL landed less than 1 day ago.
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=skia:

Review-Url: https://codereview.chromium.org/2485853005
mtklein 2016-11-09 14:50:31 -08:00 committed by Commit bot
parent 27dcee1643
commit 7c591161d0
7 changed files with 100 additions and 116 deletions
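
For context before the per-file diffs: the reverted CL routed construction through the allocator via a lambda (createWithIniterT), while this revert restores the older reserve-then-construct pattern in which the caller cleans up on failure. A minimal sketch of the two call shapes, using the names from the SkBlitter hunk below; this illustrates the shapes only and is not a compilable excerpt:

  // Reverted pattern: the allocator reserves storage, runs the initer, and
  // frees the reserved storage itself if the initer returns nullptr.
  shaderContext = allocator->createWithIniterT<SkShader::Context>(
      contextSize,
      [&rec, shader](void* storage) {
          return shader->createContext(rec, storage);   // may return nullptr
      });

  // Restored pattern: the caller reserves raw storage, constructs into it,
  // and must call freeLast() itself when construction fails.
  void* storage = allocator->reserveT<SkShader::Context>(contextSize);
  shaderContext = shader->createContext(rec, storage);
  if (!shaderContext) {
      allocator->freeLast();   // release the slot without running ~Context
  }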


@@ -896,15 +896,14 @@ SkBlitter* SkBlitter::Choose(const SkPixmap& device,
size_t contextSize = shader->contextSize(rec);
if (contextSize) {
// Try to create the ShaderContext
shaderContext = allocator->createWithIniterT<SkShader::Context>(
contextSize,
[&rec, shader](void* storage) {
return shader->createContext(rec, storage);
});
void* storage = allocator->reserveT<SkShader::Context>(contextSize);
shaderContext = shader->createContext(rec, storage);
if (!shaderContext) {
allocator->freeLast();
return allocator->createT<SkNullBlitter>();
}
SkASSERT(shaderContext);
SkASSERT((void*) shaderContext == storage);
} else {
return allocator->createT<SkNullBlitter>();
}


@@ -496,11 +496,9 @@ public:
}
if (SkDrawLooper* looper = paint.getLooper()) {
fLooperContext = fLooperContextAllocator.createWithIniterT<SkDrawLooper::Context>(
looper->contextSize(),
[&](void* buffer) {
return looper->createContext(canvas, buffer);
});
void* buffer = fLooperContextAllocator.reserveT<SkDrawLooper::Context>(
looper->contextSize());
fLooperContext = looper->createContext(canvas, buffer);
fIsSimple = false;
} else {
fLooperContext = nullptr;


@@ -15,12 +15,9 @@
bool SkDrawLooper::canComputeFastBounds(const SkPaint& paint) const {
SkCanvas canvas;
SkSmallAllocator<1, 32> allocator;
void* buffer = allocator.reserveT<SkDrawLooper::Context>(this->contextSize());
SkDrawLooper::Context* context = allocator.createWithIniterT<SkDrawLooper::Context>(
this->contextSize(),
[&](void* buffer) {
return this->createContext(&canvas, buffer);
});
SkDrawLooper::Context* context = this->createContext(&canvas, buffer);
for (;;) {
SkPaint p(paint);
if (context->next(&canvas, &p)) {
@@ -42,13 +39,10 @@ void SkDrawLooper::computeFastBounds(const SkPaint& paint, const SkRect& s,
SkCanvas canvas;
SkSmallAllocator<1, 32> allocator;
void* buffer = allocator.reserveT<SkDrawLooper::Context>(this->contextSize());
*dst = src; // catch case where there are no loops
SkDrawLooper::Context* context = allocator.createWithIniterT<SkDrawLooper::Context>(
this->contextSize(),
[&](void* buffer) {
return this->createContext(&canvas, buffer);
});
SkDrawLooper::Context* context = this->createContext(&canvas, buffer);
for (bool firstTime = true;; firstTime = false) {
SkPaint p(paint);
if (context->next(&canvas, &p)) {


@@ -99,7 +99,8 @@ SkBlitter* SkRasterPipelineBlitter::Create(const SkPixmap& dst,
paint.getBlendMode(),
paint_color(dst, paint));
auto earlyOut = [&] {
alloc->deleteLast();
blitter->~SkRasterPipelineBlitter();
alloc->freeLast();
return nullptr;
};
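
The only behavioral change in this hunk is who runs the destructor: the reverted deleteLast() destroyed the object and released its slot in one call, while the restored API requires the caller to invoke the destructor before freeLast(). A sketch of the two early-out shapes, using the names from the hunk above (illustrative, not a verbatim excerpt):

  // Reverted: the allocator destroys the partially-built blitter and frees its slot.
  auto earlyOut = [&] {
      alloc->deleteLast();                  // runs ~SkRasterPipelineBlitter(), then frees
      return nullptr;
  };

  // Restored: the caller destroys explicitly, then releases the storage only.
  auto earlyOut = [&] {
      blitter->~SkRasterPipelineBlitter();  // explicit destructor call
      alloc->freeLast();                    // frees storage without calling any destructor
      return nullptr;
  };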


@@ -8,90 +8,78 @@
#ifndef SkSmallAllocator_DEFINED
#define SkSmallAllocator_DEFINED
#include "SkTArray.h"
#include "SkTDArray.h"
#include "SkTypes.h"
#include <new>
#include <utility>
/*
* Template class for allocating small objects without additional heap memory
* allocations.
* allocations. kMaxObjects is a hard limit on the number of objects that can
* be allocated using this class. After that, attempts to create more objects
* with this class will assert and return nullptr.
*
* kTotalBytes is the total number of bytes provided for storage for all
* objects created by this allocator. If an object to be created is larger
* than the storage (minus storage already used), it will be allocated on the
* heap. This class's destructor will handle calling the destructor for each
* object it allocated and freeing its memory.
*
Currently the class always aligns each allocation to 16 bytes to be safe, but in the
future this may be reduced to only the alignment that is required per alloc.
*/
template<uint32_t kExpectedObjects, size_t kTotalBytes>
template<uint32_t kMaxObjects, size_t kTotalBytes>
class SkSmallAllocator : SkNoncopyable {
public:
SkSmallAllocator()
: fStorageUsed(0)
, fNumObjects(0)
{}
~SkSmallAllocator() {
// Destruct in reverse order, in case an earlier object points to a
// later object.
while (fRecs.count() > 0) {
this->deleteLast();
while (fNumObjects > 0) {
fNumObjects--;
Rec* rec = &fRecs[fNumObjects];
rec->fKillProc(rec->fObj);
// Safe to do if fObj is in fStorage, since fHeapStorage will
// point to nullptr.
sk_free(rec->fHeapStorage);
}
}
/*
* Create a new object of type T. Its lifetime will be handled by this
* SkSmallAllocator.
* Note: If kMaxObjects have been created by this SkSmallAllocator, nullptr
* will be returned.
*/
template<typename T, typename... Args>
T* createT(Args&&... args) {
void* buf = this->reserve(sizeof(T), DefaultDestructor<T>);
void* buf = this->reserveT<T>();
if (nullptr == buf) {
return nullptr;
}
return new (buf) T(std::forward<Args>(args)...);
}
/*
* Create a new object of size using initer to initialize the memory. The initer function has
* the signature T* initer(void* storage). If initer is unable to initialize the memory it
should return nullptr, in which case SkSmallAllocator will free the memory.
* Reserve a specified amount of space (must be enough space for one T).
* The space will be in fStorage if there is room, or on the heap otherwise.
* Either way, this class will call ~T() in its destructor and free the heap
* allocation if necessary.
* Unlike createT(), this method will not call the constructor of T.
*/
template <typename T, typename Initer>
T* createWithIniterT(size_t size, Initer initer) {
SkASSERT(size >= sizeof(T));
void* storage = this->reserve(size, DefaultDestructor<T>);
T* candidate = initer(storage);
if (!candidate) {
// Initializing didn't work out, so free the memory.
this->freeLast();
template<typename T> void* reserveT(size_t storageRequired = sizeof(T)) {
SkASSERT(fNumObjects < kMaxObjects);
SkASSERT(storageRequired >= sizeof(T));
if (kMaxObjects == fNumObjects) {
return nullptr;
}
return candidate;
}
/*
* Free the last object allocated and call its destructor. This can be called multiple times
* removing objects from the pool in reverse order.
*/
void deleteLast() {
SkASSERT(fRecs.count() > 0);
Rec& rec = fRecs.back();
rec.fDestructor(rec.fObj);
this->freeLast();
}
private:
using Destructor = void(*)(void*);
struct Rec {
size_t fStorageSize; // 0 if allocated on heap
char* fObj;
Destructor fDestructor;
};
// Used to call the destructor for allocated objects.
template<typename T>
static void DefaultDestructor(void* ptr) {
static_cast<T*>(ptr)->~T();
}
// Reserve storageRequired from fStorage if possible otherwise allocate on the heap.
void* reserve(size_t storageRequired, Destructor destructor) {
const size_t storageRemaining = sizeof(fStorage) - fStorageUsed;
Rec& rec = fRecs.push_back();
Rec* rec = &fRecs[fNumObjects];
if (storageRequired > storageRemaining) {
// Allocate on the heap. Ideally we want to avoid this situation.
@@ -99,30 +87,53 @@ private:
// and storage remaining is 3392. Increasing the base storage
// causes google 3 tests to fail.
rec.fStorageSize = 0;
rec.fObj = new char [storageRequired];
rec->fStorageSize = 0;
rec->fHeapStorage = sk_malloc_throw(storageRequired);
rec->fObj = static_cast<void*>(rec->fHeapStorage);
} else {
// There is space in fStorage.
rec.fStorageSize = storageRequired;
rec.fObj = &fStorage[fStorageUsed];
rec->fStorageSize = storageRequired;
rec->fHeapStorage = nullptr;
rec->fObj = static_cast<void*>(fStorage + fStorageUsed);
fStorageUsed += storageRequired;
}
rec.fDestructor = destructor;
return rec.fObj;
rec->fKillProc = DestroyT<T>;
fNumObjects++;
return rec->fObj;
}
/*
* Free the memory reserved last without calling the destructor.
* Can be used in a nested way, i.e. after reserving A and B, calling
* freeLast once will free B and calling it again will free A.
*/
void freeLast() {
Rec& rec = fRecs.back();
if (0 == rec.fStorageSize) {
delete [] rec.fObj;
}
fStorageUsed -= rec.fStorageSize;
fRecs.pop_back();
SkASSERT(fNumObjects > 0);
Rec* rec = &fRecs[fNumObjects - 1];
sk_free(rec->fHeapStorage);
fStorageUsed -= rec->fStorageSize;
fNumObjects--;
}
size_t fStorageUsed {0}; // Number of bytes used so far.
SkSTArray<kExpectedObjects, Rec, true> fRecs;
char fStorage[kTotalBytes];
private:
struct Rec {
size_t fStorageSize; // 0 if allocated on heap
void* fObj;
void* fHeapStorage;
void (*fKillProc)(void*);
};
// Used to call the destructor for allocated objects.
template<typename T>
static void DestroyT(void* ptr) {
static_cast<T*>(ptr)->~T();
}
alignas(16) char fStorage[kTotalBytes];
size_t fStorageUsed; // Number of bytes used so far.
uint32_t fNumObjects;
Rec fRecs[kMaxObjects];
};
#endif // SkSmallAllocator_DEFINED
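
To make the restored contract concrete, here is a small usage sketch against the header above. Widget, MakeWidget, and example() are hypothetical stand-ins, not Skia code:

  #include "SkSmallAllocator.h"
  #include <new>

  struct Widget {
      Widget() {}
      ~Widget() {}
  };

  // Illustrative factory in the style of SkShader::createContext: it either
  // placement-constructs into the caller's storage or returns nullptr.
  static Widget* MakeWidget(void* storage, bool ok) {
      return ok ? new (storage) Widget() : nullptr;
  }

  static void example() {
      SkSmallAllocator<2, 64> allocator;

      // createT constructs in place (falling back to the heap if fStorage is full);
      // the object is destroyed automatically when the allocator goes out of scope.
      Widget* a = allocator.createT<Widget>();
      SkASSERT(a);

      // reserveT hands back raw storage and records ~Widget as that slot's
      // cleanup, so if construction fails the caller must call freeLast() to
      // keep the allocator from later destroying uninitialized memory.
      void* storage = allocator.reserveT<Widget>(sizeof(Widget));
      Widget* b = MakeWidget(storage, /*ok=*/true);
      if (!b) {
          allocator.freeLast();
      }
  }   // any objects still alive are destroyed here, newest first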


@@ -60,11 +60,8 @@ static void test_frontToBack(skiatest::Reporter* reporter) {
SkPaint paint;
auto looper(looperBuilder.detach());
SkSmallAllocator<1, 32> allocator;
SkDrawLooper::Context* context = allocator.createWithIniterT<SkDrawLooper::Context>(
looper->contextSize(),
[&](void* buffer) {
return looper->createContext(&canvas, buffer);
});
void* buffer = allocator.reserveT<SkDrawLooper::Context>(looper->contextSize());
SkDrawLooper::Context* context = looper->createContext(&canvas, buffer);
// The back layer should come first.
REPORTER_ASSERT(reporter, context->next(&canvas, &paint));
@@ -103,11 +100,8 @@ static void test_backToFront(skiatest::Reporter* reporter) {
SkPaint paint;
auto looper(looperBuilder.detach());
SkSmallAllocator<1, 32> allocator;
SkDrawLooper::Context* context = allocator.createWithIniterT<SkDrawLooper::Context>(
looper->contextSize(),
[&](void* buffer) {
return looper->createContext(&canvas, buffer);
});
void* buffer = allocator.reserveT<SkDrawLooper::Context>(looper->contextSize());
SkDrawLooper::Context* context = looper->createContext(&canvas, buffer);
// The back layer should come first.
REPORTER_ASSERT(reporter, context->next(&canvas, &paint));
@@ -146,11 +140,8 @@ static void test_mixed(skiatest::Reporter* reporter) {
SkPaint paint;
sk_sp<SkDrawLooper> looper(looperBuilder.detach());
SkSmallAllocator<1, 32> allocator;
SkDrawLooper::Context* context = allocator.createWithIniterT<SkDrawLooper::Context>(
looper->contextSize(),
[&](void* buffer) {
return looper->createContext(&canvas, buffer);
});
void* buffer = allocator.reserveT<SkDrawLooper::Context>(looper->contextSize());
SkDrawLooper::Context* context = looper->createContext(&canvas, buffer);
// The back layer should come first.
REPORTER_ASSERT(reporter, context->next(&canvas, &paint));


@@ -30,7 +30,7 @@ int CountingClass::kCount;
template<uint32_t kMaxObjects, size_t kBytes> void test_allocator(skiatest::Reporter* reporter) {
{
SkSmallAllocator<kMaxObjects, kBytes> alloc;
for (uint32_t i = 0; i < kMaxObjects + 1; ++i) {
for (uint32_t i = 0; i < kMaxObjects; ++i) {
CountingClass* c = alloc.template createT<CountingClass>();
REPORTER_ASSERT(reporter, c != nullptr);
REPORTER_ASSERT(reporter, CountingClass::GetCount() == static_cast<int>(i+1));
@@ -43,15 +43,18 @@ template<uint32_t kMaxObjects, size_t kBytes> void test_allocator(skiatest::Repo
// were created in fStorage or on the heap.
DEF_TEST(SmallAllocator_destructor, reporter) {
// Four times as many bytes as objects will never require any heap
// allocations (since SkAlign4(sizeof(CountingClass)) == 4).
// allocations (since SkAlign4(sizeof(CountingClass)) == 4 and the allocator
// will stop once it reaches kMaxObjects).
test_allocator<5, 20>(reporter);
test_allocator<10, 40>(reporter);
test_allocator<20, 80>(reporter);
#ifndef SK_DEBUG
// Allowing fewer bytes than objects means some will be allocated on the
// heap. Don't run these in debug where we assert.
test_allocator<50, 20>(reporter);
test_allocator<100, 20>(reporter);
#endif
}
class Dummy {
@@ -78,16 +81,3 @@ DEF_TEST(SmallAllocator_pointer, reporter) {
REPORTER_ASSERT(reporter, container != nullptr);
REPORTER_ASSERT(reporter, container->getDummy() == &d);
}
// Test that using a createWithIniterT works as expected.
DEF_TEST(SmallAllocator_initer, reporter) {
SkSmallAllocator<1, 8> alloc;
Dummy d;
DummyContainer* container = alloc.createWithIniterT<DummyContainer>(
sizeof(DummyContainer),
[&](void* storage) {
return new (storage) DummyContainer(&d);
});
REPORTER_ASSERT(reporter, container != nullptr);
REPORTER_ASSERT(reporter, container->getDummy() == &d);
}
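
The SmallAllocator_initer test above is deleted outright because the initer API no longer exists after this revert. A hypothetical equivalent under the restored reserveT API, shown only to illustrate the pattern (it is not part of this commit), might read:

  // Hypothetical: exercises reserveT + placement new the way the deleted test
  // exercised createWithIniterT; reuses the Dummy/DummyContainer helpers above.
  DEF_TEST(SmallAllocator_reserve, reporter) {
      SkSmallAllocator<1, 8> alloc;
      Dummy d;
      void* storage = alloc.reserveT<DummyContainer>(sizeof(DummyContainer));
      DummyContainer* container = new (storage) DummyContainer(&d);
      REPORTER_ASSERT(reporter, container != nullptr);
      REPORTER_ASSERT(reporter, container->getDummy() == &d);
  }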