Implement a benchmark for GrResourceCache

Adds "grresourcecache_add" and "grresourcecache_find" bench tests to exercise GrResourceCache::add and GrResourceCache::find. The tests work only with GPU backends, since GrResourceCache needs a GrGpu.

Modifies the bench tests to override the SkBenchmark::isSuitableFor(Backend) function, which specifies what kind of backend a test is intended for. This replaces the previous "fIsRendering" flag, which could only indicate that a test did no rendering.

Adds SkCanvas::getGrContext() to get the GrContext that the canvas ends up drawing to. The member function solves a common use-case, one that is also needed by the benchmark added here.

R=mtklein@google.com, bsalomon@google.com

Author: kkinnunen@nvidia.com

Review URL: https://codereview.chromium.org/73643005

git-svn-id: http://skia.googlecode.com/svn/trunk@12334 2bbb7eff-a529-9590-31e7-b0007b416f81
Parent: bf6426120a
Commit: 644629c1c7
@@ -33,7 +33,10 @@ public:
         for (int i = 0; i < U32COUNT; ++i) {
             fData[i] = rand.nextU();
         }
-        fIsRendering = false;
     }

+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
+    }
+
 protected:

@@ -29,7 +29,10 @@ public:
             fname++; // skip the slash
         }
         fName.printf("decode_%s_%s", gConfigName[c], fname);
-        fIsRendering = false;
     }

+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
+    }
+
 protected:

@@ -32,9 +32,10 @@ GrMemoryPool A::gPool(10 * (1 << 10), 10 * (1 << 10));
  */
 class GrMemoryPoolBenchStack : public SkBenchmark {
 public:
-    GrMemoryPoolBenchStack() {
-        fIsRendering = false;
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
+
 protected:
     virtual const char* onGetName() {
         return "grmemorypool_stack";

@@ -83,9 +84,10 @@ private:
  */
 class GrMemoryPoolBenchRandom : public SkBenchmark {
 public:
-    GrMemoryPoolBenchRandom() {
-        fIsRendering = false;
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
+
 protected:
     virtual const char* onGetName() {
         return "grmemorypool_random";

@@ -120,9 +122,10 @@ class GrMemoryPoolBenchQueue : public SkBenchmark {
         M = 4 * (1 << 10),
     };
 public:
-    GrMemoryPoolBenchQueue() {
-        fIsRendering = false;
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
+
 protected:
     virtual const char* onGetName() {
         return "grmemorypool_queue";
bench/GrResourceCacheBench.cpp (new file, 242 lines)
@@ -0,0 +1,242 @@
/*
 * Copyright 2013 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#if SK_SUPPORT_GPU

#include "GrContext.h"
#include "GrResource.h"
#include "GrResourceCache.h"
#include "GrStencilBuffer.h"
#include "GrTexture.h"
#include "SkBenchmark.h"
#include "SkCanvas.h"

enum {
    CACHE_SIZE_COUNT = 2048,
    CACHE_SIZE_BYTES = 2 * 1024 * 1024,
};

class StencilResource : public GrResource {
public:
    SK_DECLARE_INST_COUNT(StencilResource);
    StencilResource(GrGpu* gpu, int id)
        : INHERITED(gpu, false),
          fID(id) {
    }
    ~StencilResource() {
        this->release();
    }

    virtual size_t sizeInBytes() const SK_OVERRIDE {
        return 100 + ((fID % 1 == 0) ? -5 : 6);
    }

    static GrResourceKey ComputeKey(int width, int height, int sampleCnt) {
        return GrStencilBuffer::ComputeKey(width, height, sampleCnt);
    }

    int fID;

private:
    typedef GrResource INHERITED;
};

class TextureResource : public GrResource {
public:
    SK_DECLARE_INST_COUNT(TextureResource);
    TextureResource(GrGpu* gpu, int id)
        : INHERITED(gpu, false),
          fID(id) {
    }
    ~TextureResource() {
        this->release();
    }

    virtual size_t sizeInBytes() const SK_OVERRIDE {
        return 100 + ((fID % 1 == 0) ? -40 : 33);
    }

    static GrResourceKey ComputeKey(const GrTextureDesc& desc) {
        return GrTexture::ComputeScratchKey(desc);
    }

    int fID;

private:
    typedef GrResource INHERITED;
};

SK_DEFINE_INST_COUNT(StencilResource)
SK_DEFINE_INST_COUNT(TextureResource)

static void get_stencil(int i, int* w, int* h, int* s) {
    *w = i % 1024;
    *h = i * 2 % 1024;
    *s = i % 1 == 0 ? 0 : 4;
}

static void get_texture_desc(int i, GrTextureDesc* desc) {
    desc->fFlags = kRenderTarget_GrTextureFlagBit |
        kNoStencil_GrTextureFlagBit;
    desc->fWidth = i % 1024;
    desc->fHeight = i * 2 % 1024;
    desc->fConfig = static_cast<GrPixelConfig>(i % (kLast_GrPixelConfig + 1));
    desc->fSampleCnt = i % 1 == 0 ? 0 : 4;
}

static void populate_cache(GrResourceCache* cache, GrGpu* gpu, int resourceCount) {
    for (int i = 0; i < resourceCount; ++i) {
        int w, h, s;
        get_stencil(i, &w, &h, &s);
        GrResourceKey key = GrStencilBuffer::ComputeKey(w, h, s);
        GrResource* resource = SkNEW_ARGS(StencilResource, (gpu, i));
        cache->purgeAsNeeded(1, resource->sizeInBytes());
        cache->addResource(key, resource);
        resource->unref();
    }

    for (int i = 0; i < resourceCount; ++i) {
        GrTextureDesc desc;
        get_texture_desc(i, &desc);
        GrResourceKey key = TextureResource::ComputeKey(desc);
        GrResource* resource = SkNEW_ARGS(TextureResource, (gpu, i));
        cache->purgeAsNeeded(1, resource->sizeInBytes());
        cache->addResource(key, resource);
        resource->unref();
    }
}

static void check_cache_contents_or_die(GrResourceCache* cache, int k) {
    // Benchmark find calls that succeed.
    {
        GrTextureDesc desc;
        get_texture_desc(k, &desc);
        GrResourceKey key = TextureResource::ComputeKey(desc);
        GrResource* item = cache->find(key);
        if (NULL == item) {
            GrCrash("cache add does not work as expected");
            return;
        }
        if (static_cast<TextureResource*>(item)->fID != k) {
            GrCrash("cache add does not work as expected");
            return;
        }
    }
    {
        int w, h, s;
        get_stencil(k, &w, &h, &s);
        GrResourceKey key = StencilResource::ComputeKey(w, h, s);
        GrResource* item = cache->find(key);
        if (NULL == item) {
            GrCrash("cache add does not work as expected");
            return;
        }
        if (static_cast<TextureResource*>(item)->fID != k) {
            GrCrash("cache add does not work as expected");
            return;
        }
    }

    // Benchmark also find calls that always fail.
    {
        GrTextureDesc desc;
        get_texture_desc(k, &desc);
        desc.fHeight |= 1;
        GrResourceKey key = TextureResource::ComputeKey(desc);
        GrResource* item = cache->find(key);
        if (NULL != item) {
            GrCrash("cache add does not work as expected");
            return;
        }
    }
    {
        int w, h, s;
        get_stencil(k, &w, &h, &s);
        h |= 1;
        GrResourceKey key = StencilResource::ComputeKey(w, h, s);
        GrResource* item = cache->find(key);
        if (NULL != item) {
            GrCrash("cache add does not work as expected");
            return;
        }
    }
}

class GrResourceCacheBenchAdd : public SkBenchmark {
    enum {
        RESOURCE_COUNT = CACHE_SIZE_COUNT / 2,
        DUPLICATE_COUNT = CACHE_SIZE_COUNT / 4,
    };

public:
    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
        return backend == kGPU_Backend;
    }

protected:
    virtual const char* onGetName() SK_OVERRIDE {
        return "grresourcecache_add";
    }

    virtual void onDraw(SkCanvas* canvas) SK_OVERRIDE {
        GrGpu* gpu = canvas->getGrContext()->getGpu();

        for (int i = 0; i < this->getLoops(); ++i) {
            GrResourceCache cache(CACHE_SIZE_COUNT, CACHE_SIZE_BYTES);
            populate_cache(&cache, gpu, DUPLICATE_COUNT);
            populate_cache(&cache, gpu, RESOURCE_COUNT);

            // Check that cache works.
            for (int k = 0; k < RESOURCE_COUNT; k += 33) {
                check_cache_contents_or_die(&cache, k);
            }
            cache.purgeAllUnlocked();
        }
    }

private:
    typedef SkBenchmark INHERITED;
};

class GrResourceCacheBenchFind : public SkBenchmark {
    enum {
        RESOURCE_COUNT = (CACHE_SIZE_COUNT / 2) - 100,
        DUPLICATE_COUNT = 100
    };

public:
    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
        return backend == kGPU_Backend;
    }

protected:
    virtual const char* onGetName() SK_OVERRIDE {
        return "grresourcecache_find";
    }

    virtual void onDraw(SkCanvas* canvas) SK_OVERRIDE {
        GrGpu* gpu = canvas->getGrContext()->getGpu();
        GrResourceCache cache(CACHE_SIZE_COUNT, CACHE_SIZE_BYTES);
        populate_cache(&cache, gpu, DUPLICATE_COUNT);
        populate_cache(&cache, gpu, RESOURCE_COUNT);

        for (int i = 0; i < this->getLoops(); ++i) {
            for (int k = 0; k < RESOURCE_COUNT; ++k) {
                check_cache_contents_or_die(&cache, k);
            }
        }
    }

private:
    typedef SkBenchmark INHERITED;
};

DEF_BENCH( return new GrResourceCacheBenchAdd(); )
DEF_BENCH( return new GrResourceCacheBenchFind(); )

#endif
@@ -26,7 +26,10 @@ public:
         , fStream()
         , fValid(false) {
         fName.append(SkOSPath::SkBasename(filename));
-        fIsRendering = false;
     }

+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
+    }
+
 protected:

@@ -20,7 +20,10 @@ public:
         fName.printf("interp_%s", name);
         fFx = 3.3f;
         fDx = 0.1257f;
-        fIsRendering = false;
     }

+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
+    }
+
     virtual void performTest(int16_t dst[], float x, float dx, int count) = 0;

@@ -29,8 +29,10 @@ public:
         for (int i = 0; i < kBuffer; ++i) {
             fSrc[i] = rand.nextSScalar1();
         }
-
-        fIsRendering = false;
     }

+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
+    }
+
     virtual void performTest(float* SK_RESTRICT dst,

@@ -281,7 +283,10 @@ public:
             fProc = gRec[index].fProc;
             fName = gRec[index].fName;
         }
-        fIsRendering = false;
     }

+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
+    }
+
 protected:

@@ -346,7 +351,10 @@ public:
         } else {
             fName = "floor_std";
         }
-        fIsRendering = false;
     }

+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
+    }
+
     virtual void process(float) {}

@@ -404,7 +412,10 @@ public:
         } else {
             fName = "clz_intrinsic";
         }
-        fIsRendering = false;
     }

+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
+    }
+
     // just so the compiler doesn't remove our loops

@@ -457,7 +468,10 @@ public:
         }

         fName = "point_normalize";
-        fIsRendering = false;
     }

+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
+    }
+
     // just so the compiler doesn't remove our loops

@@ -501,7 +515,10 @@ public:
             fData[i%N] = rand.nextSScalar1();
         }

-        fIsRendering = false;
     }

+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
+    }
+
 protected:

@@ -534,7 +551,10 @@ class DivModBench : public SkBenchmark {
 public:
     explicit DivModBench(const char* name) {
         fName.printf("divmod_%s", name);
-        fIsRendering = false;
     }

+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
+    }
+
 protected:

@@ -15,7 +15,10 @@ class Matrix44Bench : public SkBenchmark {
 public:
     Matrix44Bench(const char name[]) {
         fName.printf("matrix44_%s", name);
-        fIsRendering = false;
     }

+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
+    }
+
     virtual void performTest() = 0;

@@ -16,7 +16,10 @@ class MatrixBench : public SkBenchmark {
 public:
     MatrixBench(const char name[]) {
         fName.printf("matrix_%s", name);
-        fIsRendering = false;
     }

+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
+    }
+
     virtual void performTest() = 0;

@@ -19,7 +19,10 @@ public:
     ChunkAllocBench(size_t minSize) {
         fMinSize = minSize;
         fName.printf("chunkalloc_" SK_SIZE_T_SPECIFIER, minSize);
-        fIsRendering = false;
     }

+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
+    }
+
 protected:

@@ -85,7 +88,10 @@ public:
             fName.appendf("_w");
         }
         fName.appendf("_"SK_SIZE_T_SPECIFIER, num);
-        fIsRendering = false;
     }

+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
+    }
+
 protected:

@@ -34,7 +34,10 @@ public:
         fMaxSize = maxSize;
         fName.printf("memset%d_" SK_SIZE_T_SPECIFIER "_" SK_SIZE_T_SPECIFIER,
                      type, minSize, maxSize);
-        fIsRendering = false;
     }

+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
+    }
+
     virtual void performTest() = 0;

@@ -9,9 +9,10 @@

 class MutexBench : public SkBenchmark {
 public:
-    MutexBench() {
-        fIsRendering = false;
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
+
 protected:
     virtual const char* onGetName() {
         return "mutex";

@@ -216,8 +216,8 @@ private:

 class RandomPathBench : public SkBenchmark {
 public:
-    RandomPathBench() {
-        fIsRendering = false;
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }

 protected:

@@ -761,7 +761,6 @@ public:
     };

     ConservativelyContainsBench(Type type) {
-        fIsRendering = false;
         fParity = false;
         fName = "conservatively_contains_";
         switch (type) {

@@ -780,6 +779,10 @@ public:
         }
     }

+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
+    }
+
 private:
     virtual const char* onGetName() SK_OVERRIDE {
         return fName.c_str();

@@ -903,7 +906,10 @@ public:
         for (int i = 0; i < CONICS; ++i) {
            rand_conic(&fConics[i], rand);
         }
-        fIsRendering = false;
     }

+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
+    }
+
 protected:

@@ -54,8 +54,10 @@ public:
                 break;
             }
         }
-
-        fIsRendering = false;
     }

+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
+    }
+
 protected:

@@ -18,7 +18,10 @@ class PictureRecordBench : public SkBenchmark {
 public:
     PictureRecordBench(const char name[]) {
         fName.printf("picture_record_%s", name);
-        fIsRendering = false;
     }

+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
+    }
+
     enum {

@@ -34,8 +34,12 @@ public:
         if (fBulkLoad) {
             fName.append("_bulk");
         }
-        fIsRendering = false;
     }

+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
+    }
+
     virtual ~BBoxBuildBench() {
         fTree->unref();
     }

@@ -84,8 +88,12 @@ public:
         if (fBulkLoad) {
             fName.append("_bulk");
         }
-        fIsRendering = false;
     }

+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
+    }
+
     virtual ~BBoxQueryBench() {
         fTree->unref();
     }

@@ -16,9 +16,10 @@ enum {

 class RefCntBench_Stack : public SkBenchmark {
 public:
-    RefCntBench_Stack() {
-        fIsRendering = false;
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
+
 protected:
     virtual const char* onGetName() {
         return "ref_cnt_stack";

@@ -53,9 +54,10 @@ SK_DEFINE_INST_COUNT(PlacedRefCnt)

 class RefCntBench_Heap : public SkBenchmark {
 public:
-    RefCntBench_Heap() {
-        fIsRendering = false;
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
+
 protected:
     virtual const char* onGetName() {
         return "ref_cnt_heap";

@@ -79,9 +81,10 @@ private:

 class RefCntBench_New : public SkBenchmark {
 public:
-    RefCntBench_New() {
-        fIsRendering = false;
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
+
 protected:
     virtual const char* onGetName() {
         return "ref_cnt_new";

@@ -106,9 +109,10 @@ private:

 class WeakRefCntBench_Stack : public SkBenchmark {
 public:
-    WeakRefCntBench_Stack() {
-        fIsRendering = false;
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
+
 protected:
     virtual const char* onGetName() {
         return "ref_cnt_stack_weak";

@@ -136,9 +140,10 @@ public:

 class WeakRefCntBench_Heap : public SkBenchmark {
 public:
-    WeakRefCntBench_Heap() {
-        fIsRendering = false;
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
+
 protected:
     virtual const char* onGetName() {
         return "ref_cnt_heap_weak";

@@ -162,9 +167,10 @@ private:

 class WeakRefCntBench_New : public SkBenchmark {
 public:
-    WeakRefCntBench_New() {
-        fIsRendering = false;
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
+
 protected:
     virtual const char* onGetName() {
         return "ref_cnt_new_weak";

@@ -97,7 +97,10 @@ public:
             fA.op(randrect(rand), SkRegion::kXOR_Op);
             fB.op(randrect(rand), SkRegion::kXOR_Op);
         }
-        fIsRendering = false;
     }

+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
+    }
+
 protected:

@@ -43,8 +43,10 @@ public:
         }

         fB.setRect(0, 0, H, W);
-
-        fIsRendering = false;
     }

+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
+    }
+
 protected:

@@ -16,7 +16,10 @@ class ScalarBench : public SkBenchmark {
 public:
     ScalarBench(const char name[]) {
         fName.printf("scalar_%s", name);
-        fIsRendering = false;
     }

+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
+    }
+
     virtual void performTest() = 0;

@@ -143,7 +146,10 @@ public:
             fPts[i].fX = rand.nextSScalar1();
             fPts[i].fY = rand.nextSScalar1();
         }
-        fIsRendering = false;
     }

+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
+    }
+
 protected:
@@ -21,7 +21,6 @@ SkBenchmark::SkBenchmark() {
     fForceAA = true;
     fForceFilter = false;
     fDither = SkTriState::kDefault;
-    fIsRendering = true;
     fOrMask = fClearMask = 0;
     fLoops = 1;
 }

@@ -49,6 +49,19 @@ public:
     const char* getName();
     SkIPoint getSize();

+    enum Backend {
+        kNonRendering_Backend,
+        kRaster_Backend,
+        kGPU_Backend,
+        kPDF_Backend,
+    };
+
+    // Call to determine whether the benchmark is intended for
+    // the rendering mode.
+    virtual bool isSuitableFor(Backend backend) {
+        return backend != kNonRendering_Backend;
+    }
+
     // Call before draw, allows the benchmark to do setup work outside of the
     // timer. When a benchmark is repeatedly drawn, this should be called once
     // before the initial draw.

@@ -77,13 +90,6 @@ public:
         fDither = state;
     }

-    /** If true; the benchmark does rendering; if false, the benchmark
-        doesn't, and so need not be re-run in every different rendering
-        mode. */
-    bool isRendering() {
-        return fIsRendering;
-    }
-
     /** Assign masks for paint-flags. These will be applied when setupPaint()
      *  is called.
      *

@@ -120,8 +126,6 @@ protected:
     virtual void onPostDraw() {}

     virtual SkIPoint onGetSize();
-    /// Defaults to true.
-    bool fIsRendering;

 private:
     int fForceAlpha;
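Because isSuitableFor() takes the new Backend enum rather than a boolean, suitability can now be expressed per backend instead of just rendering vs. non-rendering. As an illustration of the flexibility this buys, a hypothetical bench that runs under raster and GPU configs but skips PDF could look like the sketch below (the class and bench name are invented, not part of the commit):

    class RasterAndGpuBench : public SkBenchmark {  // hypothetical example
    public:
        virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
            // The base-class default above already accepts every rendering
            // backend; this override additionally rules out kPDF_Backend.
            return kRaster_Backend == backend || kGPU_Backend == backend;
        }
    protected:
        virtual const char* onGetName() SK_OVERRIDE { return "raster_and_gpu"; }
        virtual void onDraw(SkCanvas* canvas) SK_OVERRIDE { /* ... */ }
    };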
@@ -33,7 +33,10 @@ public:
         } else {
             fName.append("_write_zeroes");
         }
-        fIsRendering = false;
     }

+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
+    }
+
 protected:

@@ -104,10 +104,13 @@ class SortBench : public SkBenchmark {

 public:
     SortBench(Type t, SortType s) : fType(t), fSortProc(gSorts[s].fProc) {
-        fIsRendering = false;
         fName.printf("sort_%s_%s", gSorts[s].fName, gRec[t].fName);
     }

+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
+    }
+
 protected:
     virtual const char* onGetName() SK_OVERRIDE {
         return fName.c_str();

@@ -12,7 +12,9 @@

 class WriterBench : public SkBenchmark {
 public:
-    WriterBench() { fIsRendering = false; }
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
+    }

 protected:
     virtual const char* onGetName() SK_OVERRIDE {

@@ -63,8 +63,8 @@ private:

 class XferCreateBench : public SkBenchmark {
 public:
-    XferCreateBench() {
-        fIsRendering = false;
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }

 protected:
@@ -157,27 +157,20 @@ static void performScale(SkCanvas* canvas, int w, int h) {
     canvas->translate(-x, -y);
 }

-enum Backend {
-    kNonRendering_Backend,
-    kRaster_Backend,
-    kGPU_Backend,
-    kPDF_Backend,
-};
-
 static SkBaseDevice* make_device(SkBitmap::Config config, const SkIPoint& size,
-                                 Backend backend, int sampleCount, GrContext* context) {
+                                 SkBenchmark::Backend backend, int sampleCount, GrContext* context) {
     SkBaseDevice* device = NULL;
     SkBitmap bitmap;
     bitmap.setConfig(config, size.fX, size.fY);

     switch (backend) {
-        case kRaster_Backend:
+        case SkBenchmark::kRaster_Backend:
             bitmap.allocPixels();
             erase(bitmap);
             device = SkNEW_ARGS(SkBitmapDevice, (bitmap));
             break;
 #if SK_SUPPORT_GPU
-        case kGPU_Backend: {
+        case SkBenchmark::kGPU_Backend: {
             GrTextureDesc desc;
             desc.fConfig = kSkia8888_GrPixelConfig;
             desc.fFlags = kRenderTarget_GrTextureFlagBit;

@@ -192,7 +185,7 @@ static SkBaseDevice* make_device(SkBitmap::Config config, const SkIPoint& size,
             break;
         }
 #endif
-        case kPDF_Backend:
+        case SkBenchmark::kPDF_Backend:
         default:
             SkDEBUGFAIL("unsupported");
     }

@@ -223,22 +216,22 @@ static const struct Config {
     SkBitmap::Config config;
     const char* name;
     int sampleCount;
-    Backend backend;
+    SkBenchmark::Backend backend;
     GLContextType contextType;
     bool runByDefault;
 } gConfigs[] = {
-    { SkBitmap::kNo_Config, "NONRENDERING", 0, kNonRendering_Backend, kNative, true},
-    { SkBitmap::kARGB_8888_Config, "8888", 0, kRaster_Backend, kNative, true},
-    { SkBitmap::kRGB_565_Config, "565", 0, kRaster_Backend, kNative, true},
+    { SkBitmap::kNo_Config, "NONRENDERING", 0, SkBenchmark::kNonRendering_Backend, kNative, true},
+    { SkBitmap::kARGB_8888_Config, "8888", 0, SkBenchmark::kRaster_Backend, kNative, true},
+    { SkBitmap::kRGB_565_Config, "565", 0, SkBenchmark::kRaster_Backend, kNative, true},
 #if SK_SUPPORT_GPU
-    { SkBitmap::kARGB_8888_Config, "GPU", 0, kGPU_Backend, kNative, true},
-    { SkBitmap::kARGB_8888_Config, "MSAA4", 4, kGPU_Backend, kNative, false},
-    { SkBitmap::kARGB_8888_Config, "MSAA16", 16, kGPU_Backend, kNative, false},
+    { SkBitmap::kARGB_8888_Config, "GPU", 0, SkBenchmark::kGPU_Backend, kNative, true},
+    { SkBitmap::kARGB_8888_Config, "MSAA4", 4, SkBenchmark::kGPU_Backend, kNative, false},
+    { SkBitmap::kARGB_8888_Config, "MSAA16", 16, SkBenchmark::kGPU_Backend, kNative, false},
 #if SK_ANGLE
-    { SkBitmap::kARGB_8888_Config, "ANGLE", 0, kGPU_Backend, kANGLE, true},
+    { SkBitmap::kARGB_8888_Config, "ANGLE", 0, SkBenchmark::kGPU_Backend, kANGLE, true},
 #endif // SK_ANGLE
-    { SkBitmap::kARGB_8888_Config, "Debug", 0, kGPU_Backend, kDebug, kIsDebug},
-    { SkBitmap::kARGB_8888_Config, "NULLGPU", 0, kGPU_Backend, kNull, true},
+    { SkBitmap::kARGB_8888_Config, "Debug", 0, SkBenchmark::kGPU_Backend, kDebug, kIsDebug},
+    { SkBitmap::kARGB_8888_Config, "NULLGPU", 0, SkBenchmark::kGPU_Backend, kNull, true},
 #endif // SK_SUPPORT_GPU
 };

@@ -349,7 +342,7 @@ int tool_main(int argc, char** argv) {
     // Non-rendering configs only run in normal mode
     for (int i = 0; i < configs.count(); ++i) {
         const Config& config = gConfigs[configs[i]];
-        if (kNonRendering_Backend == config.backend) {
+        if (SkBenchmark::kNonRendering_Backend == config.backend) {
             configs.remove(i, 1);
             --i;
         }

@@ -364,7 +357,7 @@ int tool_main(int argc, char** argv) {
     for (int i = 0; i < configs.count(); ++i) {
         const Config& config = gConfigs[configs[i]];

-        if (kGPU_Backend == config.backend) {
+        if (SkBenchmark::kGPU_Backend == config.backend) {
             GrContext* context = gContextFactory.get(config.contextType);
             if (NULL == context) {
                 logger.logError(SkStringPrintf(

@@ -426,7 +419,7 @@ int tool_main(int argc, char** argv) {
     for (size_t i = 0; i < SK_ARRAY_COUNT(gConfigs); ++i) {
 #if SK_SUPPORT_GPU
         const Config& config = gConfigs[i];
-        if (kGPU_Backend != config.backend) {
+        if (SkBenchmark::kGPU_Backend != config.backend) {
             continue;
         }
         GrContext* context = gContextFactory.get(config.contextType);

@@ -479,14 +472,14 @@ int tool_main(int argc, char** argv) {
             const int configIndex = configs[i];
             const Config& config = gConfigs[configIndex];

-            if ((kNonRendering_Backend == config.backend) == bench->isRendering()) {
+            if (!bench->isSuitableFor(config.backend)) {
                 continue;
             }

             GrContext* context = NULL;
 #if SK_SUPPORT_GPU
             SkGLContextHelper* glContext = NULL;
-            if (kGPU_Backend == config.backend) {
+            if (SkBenchmark::kGPU_Backend == config.backend) {
                 context = gContextFactory.get(config.contextType);
                 if (NULL == context) {
                     continue;

@@ -502,7 +495,7 @@ int tool_main(int argc, char** argv) {
             const SkPicture::RecordingFlags kRecordFlags =
                 SkPicture::kUsePathBoundsForClip_RecordingFlag;

-            if (kNonRendering_Backend != config.backend) {
+            if (SkBenchmark::kNonRendering_Backend != config.backend) {
                 device.reset(make_device(config.config,
                                          dim,
                                          config.backend,

@@ -552,7 +545,7 @@ int tool_main(int argc, char** argv) {

 #if SK_SUPPORT_GPU
             SkGLContextHelper* contextHelper = NULL;
-            if (kGPU_Backend == config.backend) {
+            if (SkBenchmark::kGPU_Backend == config.backend) {
                 contextHelper = gContextFactory.getGLContext(config.contextType);
             }
             BenchTimer timer(contextHelper);

@@ -664,7 +657,7 @@ int tool_main(int argc, char** argv) {
         } while (!kIsDebug && !converged);
         if (FLAGS_verbose) { SkDebugf("\n"); }

-        if (FLAGS_outDir.count() && kNonRendering_Backend != config.backend) {
+        if (FLAGS_outDir.count() && SkBenchmark::kNonRendering_Backend != config.backend) {
             saveFile(bench->getName(),
                      config.name,
                      FLAGS_outDir[0],
@@ -201,7 +201,7 @@ protected:
         this->drawCase4(canvas, kCol2X, kRow3Y, SkCanvas::kNone_DrawBitmapRectFlag, SkPaint::kHigh_FilterLevel);

 #if SK_SUPPORT_GPU
-        GrContext* ctx = GM::GetGr(canvas);
+        GrContext* ctx = canvas->getGrContext();
         int oldMaxTextureSize = 0;
         if (NULL != ctx) {
             // shrink the max texture size so all our textures can be reasonably sized

gm/gm.cpp (13 lines changed)
@@ -63,18 +63,5 @@ void GM::drawSizeBounds(SkCanvas* canvas, SkColor color) {
     canvas->drawRect(r, paint);
 }

-#if SK_SUPPORT_GPU
-// canvas could almost be a const&, but accessRenderTarget isn't const.
-/*static*/ GrContext* GM::GetGr(SkCanvas* canvas) {
-    SkASSERT(NULL != canvas);
-    SkBaseDevice* device = canvas->getTopDevice();
-    GrRenderTarget* renderTarget = device->accessRenderTarget();
-    if (NULL != renderTarget) {
-        return renderTarget->getContext();
-    }
-    return NULL;
-}
-#endif
-
 // need to explicitly declare this, or we get some weird infinite loop llist
 template GMRegistry* SkTRegistry<GM*(*)(void*)>::gHead;

gm/gm.h (4 lines changed)
@@ -99,10 +99,6 @@ namespace skiagm {
         fCanvasIsDeferred = isDeferred;
     }

-#if SK_SUPPORT_GPU
-    static GrContext* GetGr(/*very nearly const*/ SkCanvas*);
-#endif
-
     const SkMatrix& getStarterMatrix() { return fStarterMatrix; }
     void setStarterMatrix(const SkMatrix& matrix) {
         fStarterMatrix = matrix;
@@ -186,7 +186,7 @@ protected:
         SkAutoTUnref<SkSurface> surf2(SkSurface::NewPicture(info.fWidth, info.fHeight));
         SkAutoTUnref<SkSurface> surf3(SkSurface::NewPicture(info.fWidth, info.fHeight));
 #if SK_SUPPORT_GPU
-        GrContext* ctx = GM::GetGr(canvas);
+        GrContext* ctx = canvas->getGrContext();

         SkAutoTUnref<SkSurface> surf4(SkSurface::NewRenderTarget(ctx, info, 0));
 #endif

@@ -40,7 +40,7 @@ protected:
     virtual void onDraw(SkCanvas* canvas) {
         SkBaseDevice* device = canvas->getTopDevice();
         GrRenderTarget* target = device->accessRenderTarget();
-        GrContext* ctx = GM::GetGr(canvas);
+        GrContext* ctx = canvas->getGrContext();
         if (ctx && target) {
             SkAutoTArray<SkPMColor> gTextureData((2 * S) * (2 * S));
             static const int stride = 2 * S;

@@ -31,6 +31,7 @@
     '../bench/GameBench.cpp',
     '../bench/GradientBench.cpp',
     '../bench/GrMemoryPoolBench.cpp',
+    '../bench/GrResourceCacheBench.cpp',
     '../bench/ImageCacheBench.cpp',
     '../bench/ImageDecodeBench.cpp',
     '../bench/InterpBench.cpp',
@@ -29,6 +29,7 @@ class SkMetaData;
 class SkPicture;
 class SkRRect;
 class SkSurface_Base;
+class GrContext;

 /** \class SkCanvas

@@ -109,6 +110,12 @@ public:
                                                  int width, int height,
                                                  bool isOpaque);

+    /**
+     * Return the GPU context of the device that is associated with the canvas.
+     * For a canvas with non-GPU device, NULL is returned.
+     */
+    GrContext* getGrContext();
+
     ///////////////////////////////////////////////////////////////////////////

     /**

@@ -26,6 +26,10 @@
 #include "SkTLazy.h"
 #include "SkUtils.h"

+#if SK_SUPPORT_GPU
+#include "GrRenderTarget.h"
+#endif
+
 SK_DEFINE_INST_COUNT(SkBounder)
 SK_DEFINE_INST_COUNT(SkCanvas)
 SK_DEFINE_INST_COUNT(SkDrawFilter)

@@ -1570,6 +1574,20 @@ SkBaseDevice* SkCanvas::createCompatibleDevice(SkBitmap::Config config,
     }
 }

+GrContext* SkCanvas::getGrContext() {
+#if SK_SUPPORT_GPU
+    SkBaseDevice* device = this->getTopDevice();
+    if (NULL != device) {
+        GrRenderTarget* renderTarget = device->accessRenderTarget();
+        if (NULL != renderTarget) {
+            return renderTarget->getContext();
+        }
+    }
+#endif
+
+    return NULL;
+}
+
 //////////////////////////////////////////////////////////////////////////////
 // These are the virtual drawing methods
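The GM call sites changed above show the intended usage of the new accessor: call getGrContext() and check for NULL before doing any GPU-specific work, so the same code path remains safe for raster and PDF canvases. A minimal sketch of that pattern, with a hypothetical helper name:

    // Hypothetical caller; mirrors the pattern used by the GM changes above.
    static void maybe_do_gpu_work(SkCanvas* canvas) {
    #if SK_SUPPORT_GPU
        GrContext* ctx = canvas->getGrContext();
        if (NULL != ctx) {
            // GPU-backed canvas: the context can be queried or adjusted here,
            // e.g. the bitmaprect GM temporarily shrinks the max texture size.
        }
    #endif
        // A raster or PDF canvas falls through with no GPU work attempted.
    }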