// skia2/bench/benchmain.cpp
//
// Imported from the Skia repository (trunk@1982, 2011-07-28). The originating
// commit was an automatic update of all copyright notices to reflect new
// license terms; review: http://codereview.appspot.com/4816058
/*
* Copyright 2011 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#if SK_SUPPORT_GPU
#include "GrContext.h"
#include "GrContextFactory.h"
#include "GrRenderTarget.h"
#include "SkGpuDevice.h"
#include "gl/GrGLDefines.h"
#else
class GrContext;
#endif // SK_SUPPORT_GPU
#include "BenchTimer.h"
#include "SkBenchLogger.h"
#include "SkBenchmark.h"
#include "SkBitmapDevice.h"
#include "SkCanvas.h"
#include "SkColorPriv.h"
#include "SkCommandLineFlags.h"
#include "SkDeferredCanvas.h"
#include "SkGraphics.h"
#include "SkImageEncoder.h"
#include "SkOSFile.h"
#include "SkPicture.h"
#include "SkString.h"
#include <limits>
// How each bench is driven; selected by the --mode flag.
enum BenchMode {
    kNormal_BenchMode,          // draw directly to a normal canvas
    kDeferred_BenchMode,        // draw through an SkDeferredCanvas
    kDeferredSilent_BenchMode,  // deferred, with silent (no-op) playback
    kRecord_BenchMode,          // record into an SkPicture
    kPictureRecord_BenchMode    // replay one SkPicture into another
};
// Flag strings accepted by --mode. Must stay in the same order as BenchMode:
// the parser maps index i here to static_cast<BenchMode>(i).
const char* BenchMode_Name[] = {
    "normal", "deferred", "deferredSilent", "record", "picturerecord"
};

// Sentinel --config value meaning "run every config marked runByDefault".
static const char kDefaultsConfigStr[] = "defaults";
///////////////////////////////////////////////////////////////////////////////
// Resets a bitmap to a known background: transparent for alpha-only (A8)
// bitmaps, white for everything else.
static void erase(SkBitmap& bm) {
    const SkColor background = (SkBitmap::kA8_Config == bm.config())
            ? SK_ColorTRANSPARENT
            : SK_ColorWHITE;
    bm.eraseColor(background);
}
// Walks the global registry of benchmarks, instantiating each one in turn.
class Iter {
public:
    Iter() : fBench(BenchRegistry::Head()) {}

    // Returns a newly created benchmark (caller takes ownership), or NULL
    // once the registry is exhausted.
    SkBenchmark* next() {
        if (NULL == fBench) {
            return NULL;
        }
        BenchRegistry::Factory factory = fBench->factory();
        fBench = fBench->next();
        return factory();
    }

private:
    const BenchRegistry* fBench;
};
class AutoPrePostDraw {
public:
AutoPrePostDraw(SkBenchmark* bench) : fBench(bench) {
fBench->preDraw();
}
~AutoPrePostDraw() {
fBench->postDraw();
}
private:
SkBenchmark* fBench;
};
// Copies `name` into *path, replacing characters that are awkward in file
// names ('/', '\', ' ', ':') with '-'.
static void make_filename(const char name[], SkString* path) {
    path->set(name);
    char* out = path->writable_str();
    for (int i = 0; name[i]; i++) {
        if (strchr("/\\ :", name[i])) {
            out[i] = '-';
        }
    }
}
// Writes the rendered bench output as <dir>/<sanitized name>_<config>.png.
// Silently does nothing if the bitmap can't be converted to 8888.
static void saveFile(const char name[], const char config[], const char dir[],
                     const SkBitmap& bm) {
    SkBitmap copy;
    if (!bm.copyTo(&copy, SkBitmap::kARGB_8888_Config)) {
        return;
    }

    if (bm.config() == SkBitmap::kA8_Config) {
        // turn alpha into gray-scale
        size_t size = copy.getSize() >> 2;  // byte count / 4 == pixel count
        SkPMColor* p = copy.getAddr32(0, 0);
        for (size_t i = 0; i < size; i++) {
            // Extract the alpha byte, invert it (opaque -> black), replicate
            // it into all four byte lanes, then force the alpha channel to
            // fully opaque.
            int c = (*p >> SK_A32_SHIFT) & 0xFF;
            c = 255 - c;
            c |= (c << 24) | (c << 16) | (c << 8);
            *p++ = c | (SK_A32_MASK << SK_A32_SHIFT);
        }
    }

    SkString filename;
    make_filename(name, &filename);
    filename.appendf("_%s.png", config);
    SkString path = SkOSPath::SkPathJoin(dir, filename.c_str());
    // Delete any stale file first so the encoder writes a fresh copy.
    ::remove(path.c_str());
    SkImageEncoder::EncodeFile(path.c_str(), copy, SkImageEncoder::kPNG_Type, 100);
}
// Applies a non-trivial clip: intersect with one interior rect, then XOR with
// an overlapping one, so the resulting clip region is not a simple rectangle.
static void performClip(SkCanvas* canvas, int w, int h) {
    SkRect rect = SkRect::MakeLTRB(SkIntToScalar(10), SkIntToScalar(10),
                                   SkIntToScalar(w*2/3), SkIntToScalar(h*2/3));
    canvas->clipRect(rect, SkRegion::kIntersect_Op);

    rect = SkRect::MakeLTRB(SkIntToScalar(w/3), SkIntToScalar(h/3),
                            SkIntToScalar(w-10), SkIntToScalar(h-10));
    canvas->clipRect(rect, SkRegion::kXOR_Op);
}
// Rotates the canvas 35 degrees about its center.
static void performRotate(SkCanvas* canvas, int w, int h) {
    const SkScalar centerX = SkIntToScalar(w) / 2;
    const SkScalar centerY = SkIntToScalar(h) / 2;
    canvas->translate(centerX, centerY);
    canvas->rotate(SkIntToScalar(35));
    canvas->translate(-centerX, -centerY);
}
// Scales the canvas by 99% about its center.
static void performScale(SkCanvas* canvas, int w, int h) {
    const SkScalar centerX = SkIntToScalar(w) / 2;
    const SkScalar centerY = SkIntToScalar(h) / 2;
    canvas->translate(centerX, centerY);
    // just enough so we can't take the sprite case
    canvas->scale(SK_Scalar1 * 99/100, SK_Scalar1 * 99/100);
    canvas->translate(-centerX, -centerY);
}
// Which rendering backend a config draws with.
enum Backend {
    kNonRendering_Backend,  // bench computes but never draws
    kRaster_Backend,        // CPU raster via SkBitmapDevice
    kGPU_Backend,           // GPU via SkGpuDevice (requires SK_SUPPORT_GPU)
    kPDF_Backend,           // declared but not handled by make_device
};
// Creates a drawing device for the given backend, or returns NULL on failure.
// `context` is only consulted for kGPU_Backend (must be non-NULL there);
// `sampleCount` is the MSAA sample count for GPU render targets.
// kPDF_Backend (and anything else) is unsupported and asserts in debug builds.
static SkBaseDevice* make_device(SkBitmap::Config config, const SkIPoint& size,
                                 Backend backend, int sampleCount, GrContext* context) {
    SkBaseDevice* device = NULL;
    SkBitmap bitmap;
    bitmap.setConfig(config, size.fX, size.fY);

    switch (backend) {
        case kRaster_Backend:
            bitmap.allocPixels();
            erase(bitmap);  // start from a known background
            device = SkNEW_ARGS(SkBitmapDevice, (bitmap));
            break;
#if SK_SUPPORT_GPU
        case kGPU_Backend: {
            // Uncached texture so every bench gets a fresh render target.
            GrTextureDesc desc;
            desc.fConfig = kSkia8888_GrPixelConfig;
            desc.fFlags = kRenderTarget_GrTextureFlagBit;
            desc.fWidth = size.fX;
            desc.fHeight = size.fY;
            desc.fSampleCnt = sampleCount;
            SkAutoTUnref<GrTexture> texture(context->createUncachedTexture(desc, NULL, 0));
            if (!texture) {
                return NULL;
            }
            device = SkNEW_ARGS(SkGpuDevice, (context, texture.get()));
            break;
        }
#endif
        case kPDF_Backend:
        default:
            SkDEBUGFAIL("unsupported");
    }
    return device;
}
#if SK_SUPPORT_GPU
// Single shared factory so GPU configs reuse contexts across benches.
GrContextFactory gContextFactory;
typedef GrContextFactory::GLContextType GLContextType;
static const GLContextType kNative = GrContextFactory::kNative_GLContextType;
#if SK_ANGLE
static const GLContextType kANGLE = GrContextFactory::kANGLE_GLContextType;
#else
// Without ANGLE support, alias kANGLE to the native context type.
static const GLContextType kANGLE = kNative;
#endif
static const GLContextType kDebug = GrContextFactory::kDebug_GLContextType;
static const GLContextType kNull = GrContextFactory::kNull_GLContextType;
#else
// No GPU support: stub the context-type names so gConfigs still compiles.
typedef int GLContextType;
static const GLContextType kNative = 0, kANGLE = 0, kDebug = 0, kNull = 0;
#endif

// True in Debug builds; used to suppress timing output (see tool_main).
#ifdef SK_DEBUG
static const bool kIsDebug = true;
#else
static const bool kIsDebug = false;
#endif
// Table of runnable configurations. `name` is what --config matches against;
// `runByDefault` controls membership in the "defaults" config set.
static const struct Config {
    SkBitmap::Config config;   // pixel format for the device
    const char* name;          // flag string / output label
    int sampleCount;           // MSAA samples (GPU configs only)
    Backend backend;
    GLContextType contextType; // which GL context to use (GPU configs only)
    bool runByDefault;
} gConfigs[] = {
    { SkBitmap::kNo_Config, "NONRENDERING", 0, kNonRendering_Backend, kNative, true},
    { SkBitmap::kARGB_8888_Config, "8888", 0, kRaster_Backend, kNative, true},
    { SkBitmap::kRGB_565_Config, "565", 0, kRaster_Backend, kNative, true},
#if SK_SUPPORT_GPU
    { SkBitmap::kARGB_8888_Config, "GPU", 0, kGPU_Backend, kNative, true},
    { SkBitmap::kARGB_8888_Config, "MSAA4", 4, kGPU_Backend, kNative, false},
    { SkBitmap::kARGB_8888_Config, "MSAA16", 16, kGPU_Backend, kNative, false},
#if SK_ANGLE
    { SkBitmap::kARGB_8888_Config, "ANGLE", 0, kGPU_Backend, kANGLE, true},
#endif // SK_ANGLE
    // The Debug GL config only runs by default in Debug builds.
    { SkBitmap::kARGB_8888_Config, "Debug", 0, kGPU_Backend, kDebug, kIsDebug},
    { SkBitmap::kARGB_8888_Config, "NULLGPU", 0, kGPU_Backend, kNull, true},
#endif // SK_SUPPORT_GPU
};
// Command-line flags (SkCommandLineFlags); parsed in tool_main.
DEFINE_string(outDir, "", "If given, image of each bench will be put in outDir.");
DEFINE_string(timers, "cg", "Timers to display. "
              "Options: w(all) W(all, truncated) c(pu) C(pu, truncated) g(pu)");
DEFINE_bool(rotate, false, "Rotate canvas before bench run?");
DEFINE_bool(scale, false, "Scale canvas before bench run?");
DEFINE_bool(clip, false, "Clip canvas before bench run?");
DEFINE_bool(forceAA, true, "Force anti-aliasing?");
DEFINE_bool(forceFilter, false, "Force bitmap filtering?");
DEFINE_string(forceDither, "default", "Force dithering: true, false, or default?");
DEFINE_bool(forceBlend, false, "Force alpha blending?");
DEFINE_int32(gpuCacheBytes, -1, "GPU cache size limit in bytes. 0 to disable cache.");
DEFINE_int32(gpuCacheCount, -1, "GPU cache size limit in object count. 0 to disable cache.");
DEFINE_string(match, "", "[~][^]substring[$] [...] of test name to run.\n"
                         "Multiple matches may be separated by spaces.\n"
                         "~ causes a matching test to always be skipped\n"
                         "^ requires the start of the test to match\n"
                         "$ requires the end of the test to match\n"
                         "^ and $ requires an exact match\n"
                         "If a test does not match any list entry,\n"
                         "it is skipped unless some list entry starts with ~\n");
DEFINE_string(mode, "normal",
              "normal: draw to a normal canvas;\n"
              "deferred: draw to a deferred canvas;\n"
              "deferredSilent: deferred with silent playback;\n"
              "record: draw to an SkPicture;\n"
              "picturerecord: draw from an SkPicture to an SkPicture.\n");
DEFINE_string(config, kDefaultsConfigStr,
              "Run configs given. By default, runs the configs marked \"runByDefault\" in gConfigs.");
DEFINE_string(logFile, "", "Also write stdout here.");
DEFINE_int32(minMs, 20, "Shortest time we'll allow a benchmark to run.");
DEFINE_int32(maxMs, 4000, "Longest time we'll allow a benchmark to run.");
DEFINE_double(error, 0.01,
              "Ratio of subsequent bench measurements must drop within 1±error to converge.");
DEFINE_string(timeFormat, "%9.2f", "Format to print results, in milliseconds per 1000 loops.");
DEFINE_bool2(verbose, v, false, "Print more.");
// Decides whether a bench's timing has settled. The first two arguments are
// milliseconds per loop iteration (previous and current samples); the last is
// the current overall runtime in milliseconds.
static bool HasConverged(double prevPerLoop, double currPerLoop, double currRaw) {
    if (currRaw < FLAGS_minMs) {
        // Not enough total runtime yet to trust the measurement.
        return false;
    }
    // Converged when consecutive per-loop times agree to within FLAGS_error.
    const double ratio = currPerLoop / prevPerLoop;
    return (1 - FLAGS_error) < ratio && ratio < (1 + FLAGS_error);
}
int tool_main(int argc, char** argv);
// Bench driver: parses flags, picks configs, then runs every registered
// benchmark in every applicable config, doubling the loop count until the
// per-loop time converges (see HasConverged).
int tool_main(int argc, char** argv) {
#if SK_ENABLE_INST_COUNT
    gPrintInstCount = true;
#endif
    SkAutoGraphics ag;
    SkCommandLineFlags::Parse(argc, argv);

    // First, parse some flags.
    SkBenchLogger logger;
    if (FLAGS_logFile.count()) {
        logger.SetLogFile(FLAGS_logFile[0]);
    }

    const uint8_t alpha = FLAGS_forceBlend ? 0x80 : 0xFF;
    SkTriState::State dither = SkTriState::kDefault;
    // NOTE(review): 3 is presumably the number of SkTriState values
    // (default/true/false) — confirm against SkTriState::Name's length.
    for (size_t i = 0; i < 3; i++) {
        if (strcmp(SkTriState::Name[i], FLAGS_forceDither[0]) == 0) {
            dither = static_cast<SkTriState::State>(i);
        }
    }

    // Map the --mode string onto a BenchMode value (defaults to normal).
    BenchMode benchMode = kNormal_BenchMode;
    for (size_t i = 0; i < SK_ARRAY_COUNT(BenchMode_Name); i++) {
        if (strcmp(FLAGS_mode[0], BenchMode_Name[i]) == 0) {
            benchMode = static_cast<BenchMode>(i);
        }
    }

    // Build the list of config indices (into gConfigs) to run.
    SkTDArray<int> configs;
    bool runDefaultConfigs = false;
    // Try user-given configs first.
    for (int i = 0; i < FLAGS_config.count(); i++) {
        for (size_t j = 0; j < SK_ARRAY_COUNT(gConfigs); j++) {
            if (0 == strcmp(FLAGS_config[i], gConfigs[j].name)) {
                *configs.append() = j;
            } else if (0 == strcmp(FLAGS_config[i], kDefaultsConfigStr)) {
                runDefaultConfigs = true;
            }
        }
    }
    // If there weren't any, fill in with defaults.
    if (runDefaultConfigs) {
        for (size_t i = 0; i < SK_ARRAY_COUNT(gConfigs); ++i) {
            if (gConfigs[i].runByDefault) {
                *configs.append() = i;
            }
        }
    }
    // Filter out things we can't run.
    if (kNormal_BenchMode != benchMode) {
        // Non-rendering configs only run in normal mode
        for (int i = 0; i < configs.count(); ++i) {
            const Config& config = gConfigs[configs[i]];
            if (kNonRendering_Backend == config.backend) {
                configs.remove(i, 1);
                --i;
            }
        }
    }
#if SK_SUPPORT_GPU
    // Drop GPU configs whose context can't be created or whose sample count
    // exceeds what the hardware supports.
    for (int i = 0; i < configs.count(); ++i) {
        const Config& config = gConfigs[configs[i]];
        if (kGPU_Backend == config.backend) {
            GrContext* context = gContextFactory.get(config.contextType);
            if (NULL == context) {
                logger.logError(SkStringPrintf(
                    "Error creating GrContext for config %s. Config will be skipped.\n",
                    config.name));
                configs.remove(i);
                --i;
                continue;
            }
            if (config.sampleCount > context->getMaxSampleCount()){
                logger.logError(SkStringPrintf(
                    "Sample count (%d) for config %s is unsupported. Config will be skipped.\n",
                    config.sampleCount, config.name));
                configs.remove(i);
                --i;
                continue;
            }
        }
    }
#endif

    // All flags should be parsed now. Report our settings.
    if (kIsDebug) {
        logger.logError("bench was built in Debug mode, so we're going to hide the times."
                        " It's for your own good!\n");
    }
    SkString str("skia bench:");
    str.appendf(" mode=%s", FLAGS_mode[0]);
    str.appendf(" alpha=0x%02X antialias=%d filter=%d dither=%s",
                alpha, FLAGS_forceAA, FLAGS_forceFilter, SkTriState::Name[dither]);
    str.appendf(" rotate=%d scale=%d clip=%d", FLAGS_rotate, FLAGS_scale, FLAGS_clip);
#if defined(SK_SCALAR_IS_FIXED)
    str.append(" scalar=fixed");
#else
    str.append(" scalar=float");
#endif
#if defined(SK_BUILD_FOR_WIN32)
    str.append(" system=WIN32");
#elif defined(SK_BUILD_FOR_MAC)
    str.append(" system=MAC");
#elif defined(SK_BUILD_FOR_ANDROID)
    str.append(" system=ANDROID");
#elif defined(SK_BUILD_FOR_UNIX)
    str.append(" system=UNIX");
#else
    str.append(" system=other");
#endif
#if defined(SK_DEBUG)
    str.append(" DEBUG");
#endif
    str.append("\n");
    logger.logProgress(str);

    // Set texture cache limits if non-default.
    for (size_t i = 0; i < SK_ARRAY_COUNT(gConfigs); ++i) {
#if SK_SUPPORT_GPU
        const Config& config = gConfigs[i];
        if (kGPU_Backend != config.backend) {
            continue;
        }
        GrContext* context = gContextFactory.get(config.contextType);
        if (NULL == context) {
            continue;
        }
        size_t bytes;
        int count;
        context->getTextureCacheLimits(&count, &bytes);
        if (-1 != FLAGS_gpuCacheBytes) {
            bytes = static_cast<size_t>(FLAGS_gpuCacheBytes);
        }
        if (-1 != FLAGS_gpuCacheCount) {
            count = FLAGS_gpuCacheCount;
        }
        context->setTextureCacheLimits(count, bytes);
#endif
    }

    // Find the longest name of the benches we're going to run to make the output pretty.
    Iter names;
    SkBenchmark* bench;
    int longestName = 0;
    while ((bench = names.next()) != NULL) {
        SkAutoTUnref<SkBenchmark> benchUnref(bench);
        if (SkCommandLineFlags::ShouldSkip(FLAGS_match, bench->getName())) {
            continue;
        }
        const int length = strlen(bench->getName());
        longestName = length > longestName ? length : longestName;
    }

    // Run each bench in each configuration it supports and we asked for.
    Iter iter;
    while ((bench = iter.next()) != NULL) {
        SkAutoTUnref<SkBenchmark> benchUnref(bench);
        if (SkCommandLineFlags::ShouldSkip(FLAGS_match, bench->getName())) {
            continue;
        }

        bench->setForceAlpha(alpha);
        bench->setForceAA(FLAGS_forceAA);
        bench->setForceFilter(FLAGS_forceFilter);
        bench->setDither(dither);
        AutoPrePostDraw appd(bench);

        bool loggedBenchName = false;
        for (int i = 0; i < configs.count(); ++i) {
            const int configIndex = configs[i];
            const Config& config = gConfigs[configIndex];

            // Rendering benches need a rendering backend, and vice versa.
            if ((kNonRendering_Backend == config.backend) == bench->isRendering()) {
                continue;
            }

            GrContext* context = NULL;
#if SK_SUPPORT_GPU
            SkGLContextHelper* glContext = NULL;
            if (kGPU_Backend == config.backend) {
                context = gContextFactory.get(config.contextType);
                if (NULL == context) {
                    continue;
                }
                glContext = gContextFactory.getGLContext(config.contextType);
            }
#endif

            SkAutoTUnref<SkBaseDevice> device;
            SkAutoTUnref<SkCanvas> canvas;
            SkPicture recordFrom, recordTo;
            const SkIPoint dim = bench->getSize();
            const SkPicture::RecordingFlags kRecordFlags =
                SkPicture::kUsePathBoundsForClip_RecordingFlag;

            // Rendering configs get a device and a canvas; non-rendering
            // benches draw to a NULL canvas.
            if (kNonRendering_Backend != config.backend) {
                device.reset(make_device(config.config,
                                         dim,
                                         config.backend,
                                         config.sampleCount,
                                         context));
                if (!device.get()) {
                    logger.logError(SkStringPrintf(
                        "Device creation failure for config %s. Will skip.\n", config.name));
                    continue;
                }

                switch(benchMode) {
                    case kDeferredSilent_BenchMode:
                    case kDeferred_BenchMode:
                        canvas.reset(SkDeferredCanvas::Create(device.get()));
                        break;
                    case kRecord_BenchMode:
                        canvas.reset(SkRef(recordTo.beginRecording(dim.fX, dim.fY, kRecordFlags)));
                        break;
                    case kPictureRecord_BenchMode:
                        // Pre-record the bench once; the timed loop replays it.
                        bench->draw(recordFrom.beginRecording(dim.fX, dim.fY, kRecordFlags));
                        recordFrom.endRecording();
                        canvas.reset(SkRef(recordTo.beginRecording(dim.fX, dim.fY, kRecordFlags)));
                        break;
                    case kNormal_BenchMode:
                        canvas.reset(new SkCanvas(device.get()));
                        break;
                    default:
                        SkASSERT(false);
                }
            }

            if (NULL != canvas) {
                canvas->clear(SK_ColorWHITE);
                if (FLAGS_clip) { performClip(canvas, dim.fX, dim.fY); }
                if (FLAGS_scale) { performScale(canvas, dim.fX, dim.fY); }
                if (FLAGS_rotate) { performRotate(canvas, dim.fX, dim.fY); }
            }

            // Log the bench name once, the first time any config runs it.
            if (!loggedBenchName) {
                loggedBenchName = true;
                SkString str;
                str.printf("running bench [%3d %3d] %*s ",
                           dim.fX, dim.fY, longestName, bench->getName());
                logger.logProgress(str);
            }

#if SK_SUPPORT_GPU
            SkGLContextHelper* contextHelper = NULL;
            if (kGPU_Backend == config.backend) {
                contextHelper = gContextFactory.getGLContext(config.contextType);
            }
            BenchTimer timer(contextHelper);
#else
            BenchTimer timer;
#endif

            // Timed loop: double the loop count each pass until converged.
            double previous = std::numeric_limits<double>::infinity();
            bool converged = false;
            bench->setLoops(0);
            if (FLAGS_verbose) { SkDebugf("%s %s: ", bench->getName(), config.name); }
            do {
                // Ramp up 1 -> 4 -> 16 -> ... -> ~1 billion.
                const int loops = bench->getLoops();
                if (loops >= (1<<30) || timer.fWall > FLAGS_maxMs) {
                    // If you find it takes more than a billion loops to get up to 20ms of runtime,
                    // you've got a computer clocked at several THz or have a broken benchmark. ;)
                    // "1B ought to be enough for anybody."
                    logger.logError(SkStringPrintf(
                        "Can't get %s %s to converge in %dms.\n",
                        bench->getName(), config.name, FLAGS_maxMs));
                    break;
                }
                bench->setLoops(loops == 0 ? 1 : loops * 2);

                if ((benchMode == kRecord_BenchMode || benchMode == kPictureRecord_BenchMode)) {
                    // Clear the recorded commands so that they do not accumulate.
                    canvas.reset(recordTo.beginRecording(dim.fX, dim.fY, kRecordFlags));
                }

                timer.start();
                if (NULL != canvas) {
                    canvas->save();
                }
                if (benchMode == kPictureRecord_BenchMode) {
                    recordFrom.draw(canvas);
                } else {
                    bench->draw(canvas);
                }

                if (kDeferredSilent_BenchMode == benchMode) {
                    static_cast<SkDeferredCanvas*>(canvas.get())->silentFlush();
                } else if (NULL != canvas) {
                    canvas->flush();
                }

                if (NULL != canvas) {
                    canvas->restore();
                }

                // Stop truncated timers before GL calls complete, and stop the full timers after.
                timer.truncatedEnd();
#if SK_SUPPORT_GPU
                if (NULL != glContext) {
                    context->flush();
                    SK_GL(*glContext, Finish());
                }
#endif
                timer.end();

                const double current = timer.fWall / bench->getLoops();
                // NOTE(review): the empty SkDebugf below looks like a leftover
                // (perhaps once a marker for a regression) — confirm intent.
                if (FLAGS_verbose && current > previous) { SkDebugf(""); }
                if (FLAGS_verbose) { SkDebugf("%.3g ", current); }
                converged = HasConverged(previous, current, timer.fWall);
                previous = current;
            } while (!kIsDebug && !converged);
            if (FLAGS_verbose) { SkDebugf("\n"); }

            if (FLAGS_outDir.count() && kNonRendering_Backend != config.backend) {
                saveFile(bench->getName(),
                         config.name,
                         FLAGS_outDir[0],
                         device->accessBitmap(false));
            }

            if (kIsDebug) {
                // Let's not mislead ourselves by looking at Debug build bench times!
                continue;
            }

            // Normalize to ms per 1000 iterations.
            const double normalize = 1000.0 / bench->getLoops();
            const struct { char shortName; const char* longName; double ms; } times[] = {
                {'w', "msecs", normalize * timer.fWall},
                {'W', "Wmsecs", normalize * timer.fTruncatedWall},
                {'c', "cmsecs", normalize * timer.fCpu},
                {'C', "Cmsecs", normalize * timer.fTruncatedCpu},
                {'g', "gmsecs", normalize * timer.fGpu},
            };

            // Print only the timers selected by --timers, skipping zeros
            // (e.g. GPU time on raster configs).
            SkString result;
            result.appendf(" %s:", config.name);
            for (size_t i = 0; i < SK_ARRAY_COUNT(times); i++) {
                if (strchr(FLAGS_timers[0], times[i].shortName) && times[i].ms > 0) {
                    result.appendf(" %s = ", times[i].longName);
                    result.appendf(FLAGS_timeFormat[0], times[i].ms);
                }
            }
            logger.logProgress(result);
        }
        if (loggedBenchName) {
            logger.logProgress("\n");
        }
    }
#if SK_SUPPORT_GPU
    gContextFactory.destroyContexts();
#endif
    return 0;
}
#if !defined(SK_BUILD_FOR_IOS) && !defined(SK_BUILD_FOR_NACL)
// Standalone entry point: strip the const from argv and hand off to tool_main.
int main(int argc, char * const argv[]) {
    return tool_main(argc, const_cast<char**>(argv));
}
#endif