Create module system for VisualBench

The newly created VisualLightweightBenchModule is just the old VisualBench.cpp, but gutted to only include timing code.

Future CLs will harden this abstraction, but for this CL the module owns a backpointer to the VisualBench window for a couple of calls.

BUG=skia:

Review URL: https://codereview.chromium.org/1304083007
This commit is contained in:
joshualitt 2015-09-08 07:08:11 -07:00 committed by Commit bot
parent 8fd97eab1d
commit 189aef7834
8 changed files with 421 additions and 295 deletions

View File

@ -24,12 +24,8 @@
],
'sources': [
'../gm/gm.cpp',
'../tools/VisualBench/VisualBench.h',
'../tools/VisualBench/VisualBench.cpp',
'../tools/VisualBench/VisualBenchmarkStream.h',
'../tools/VisualBench/VisualBenchmarkStream.cpp',
'../tools/VisualBench/VisualSKPBench.h',
'../tools/VisualBench/VisualSKPBench.cpp',
'<!@(python find.py ../tools/VisualBench "*.cpp")',
'<!@(python find.py ../tools/VisualBench "*.h")',
'<!@(python find.py ../bench "*.cpp")',
],
'sources!': [

View File

@ -12,72 +12,23 @@
#include "SkApplication.h"
#include "SkCanvas.h"
#include "SkCommandLineFlags.h"
#include "SkForceLinking.h"
#include "SkGraphics.h"
#include "SkGr.h"
#include "SkImageDecoder.h"
#include "SkOSFile.h"
#include "SkStream.h"
#include "Stats.h"
#include "VisualLightweightBenchModule.h"
#include "gl/GrGLInterface.h"
__SK_FORCE_IMAGE_DECODER_LINKING;

// Timing granularity:
//   Between samples we reset the context.
//   Between frames we swap buffers.
//   Between flushes we call flush on GrContext.
DEFINE_int32(gpuFrameLag, 5, "Overestimate of maximum number of frames GPU allows to lag.");
DEFINE_int32(samples, 10, "Number of times to time each skp.");
DEFINE_int32(frames, 5, "Number of frames of each skp to render per sample.");
DEFINE_double(loopMs, 5, "Target loop time in milliseconds.");
DEFINE_int32(msaa, 0, "Number of msaa samples.");
DEFINE_bool2(fullscreen, f, true, "Run fullscreen.");
DEFINE_bool2(verbose, v, false, "enable verbose output from the test driver.");
DEFINE_string(key, "", "");  // dummy to enable gm tests that have platform-specific names
DEFINE_string(outResultsFile, "", "If given, write results here as JSON.");
DEFINE_string(properties, "",
              "Space-separated key/value pairs to add to JSON identifying this run.");
// Formats a duration (in milliseconds) for display: raw integer nanoseconds
// when --verbose (easier to post-process), otherwise a human-readable string.
static SkString humanize(double ms) {
    if (FLAGS_verbose) {
        return SkStringPrintf("%llu", (uint64_t)(ms*1e6));
    }
    return HumanizeMs(ms);
}
// Convenience for passing a formatted time straight to SkDebugf.
#define HUMANIZE(time) humanize(time).c_str()
// Constructs the VisualBench window: parses command-line flags, sets up the
// GPU backend, and prepares the benchmark stream and (optional) JSON writer.
VisualBench::VisualBench(void* hwnd, int argc, char** argv)
    : INHERITED(hwnd)
    , fCurrentSample(0)
    , fCurrentFrame(0)
    , fLoops(1)
    , fState(kPreWarmLoops_State)
    , fBenchmark(nullptr)
    , fResults(new ResultsWriter) {
    // NOTE(review): the line above and the line below both terminate the
    // initializer list and open the body -- this looks like old and new diff
    // lines rendered together; only one can be the real code. Verify against
    // the repository before relying on this listing.
    , fModule(new VisualLightweightBenchModule(this)) {
    SkCommandLineFlags::Parse(argc, argv);
    this->setTitle();
    this->setupBackend();
    fBenchmarkStream.reset(new VisualBenchmarkStream);
    // Print header
    SkDebugf("curr/maxrss\tloops\tflushes\tmin\tmedian\tmean\tmax\tstddev\tbench\n");
    // setup json logging if required
    if (!FLAGS_outResultsFile.isEmpty()) {
        fResults.reset(new NanoJSONResultsWriter(FLAGS_outResultsFile[0]));
    }
    // --properties arrive as space-separated key/value pairs; i indexes the
    // value of each pair, i-1 the key.
    if (1 == FLAGS_properties.count() % 2) {
        SkDebugf("ERROR: --properties must be passed with an even number of arguments.\n");
    } else {
        for (int i = 1; i < FLAGS_properties.count(); i += 2) {
            fResults->property(FLAGS_properties[i - 1], FLAGS_properties[i]);
        }
    }
}
VisualBench::~VisualBench() {
@ -140,192 +91,13 @@ void VisualBench::setupRenderTarget() {
}
}
// Draws fLoops iterations of the current benchmark, flushes the canvas, and
// presents the frame via the window system.
inline void VisualBench::renderFrame(SkCanvas* canvas) {
    fBenchmark->draw(fLoops, canvas);
    canvas->flush();
    INHERITED::present();
}
// Records the minimum measurement for the just-finished benchmark to the JSON
// results writer and prints a one-line summary (or every raw sample when
// --verbose is set).
void VisualBench::printStats() {
    const SkTArray<double>& measurements = fRecords.back().fMeasurements;
    const char* shortName = fBenchmark->getUniqueName();
    // update log
    // Note: We currently log only the minimum. It would be interesting to log more information
    SkString configName;
    if (FLAGS_msaa > 0) {
        configName.appendf("msaa_%d", FLAGS_msaa);
    } else {
        configName.appendf("gpu");
    }
    fResults->config(configName.c_str());
    fResults->configOption("name", fBenchmark->getUniqueName());
    SkASSERT(measurements.count());
    Stats stats(measurements);
    fResults->metric("min_ms", stats.min);
    // Print output
    if (FLAGS_verbose) {
        // Dump every raw sample, followed by the bench name.
        for (int i = 0; i < measurements.count(); i++) {
            SkDebugf("%s ", HUMANIZE(measurements[i]));
        }
        SkDebugf("%s\n", shortName);
    } else {
        // Columns match the header printed in the constructor.
        const double stdDevPercent = 100 * sqrt(stats.var) / stats.mean;
        SkDebugf("%4d/%-4dMB\t%d\t%s\t%s\t%s\t%s\t%.0f%%\t%s\n",
                 sk_tools::getCurrResidentSetSizeMB(),
                 sk_tools::getMaxResidentSetSizeMB(),
                 fLoops,
                 HUMANIZE(stats.min),
                 HUMANIZE(stats.median),
                 HUMANIZE(stats.mean),
                 HUMANIZE(stats.max),
                 stdDevPercent,
                 shortName);
    }
}
// Pulls the next benchmark from the stream if there is no current one.
// Returns false when the stream is exhausted (i.e. we are done).
bool VisualBench::advanceRecordIfNecessary(SkCanvas* canvas) {
    if (fBenchmark) {
        return true;
    }
    fBenchmark.reset(fBenchmarkStream->next());
    if (!fBenchmark) {
        return false;
    }
    // Fresh white canvas and a fresh measurement record for the new bench.
    canvas->clear(0xffffffff);
    fBenchmark->preDraw();
    fRecords.push_back();
    // Log bench name
    fResults->bench(fBenchmark->getUniqueName(), fBenchmark->getSize().fX,
                    fBenchmark->getSize().fY);
    return true;
}
// Advances the timing state machine.
inline void VisualBench::nextState(State nextState) {
    fState = nextState;
}

// Gives the benchmark its per-canvas hook, then enters the given state.
void VisualBench::perCanvasPreDraw(SkCanvas* canvas, State nextState) {
    fBenchmark->perCanvasPreDraw(canvas);
    fCurrentFrame = 0;
    this->nextState(nextState);
}

// Renders FLAGS_gpuFrameLag frames so the GPU pipeline reaches a steady state,
// then starts the wall timer and transitions to nextState.
void VisualBench::preWarm(State nextState) {
    if (fCurrentFrame >= FLAGS_gpuFrameLag) {
        // we currently time across all frames to make sure we capture all GPU work
        this->nextState(nextState);
        fCurrentFrame = 0;
        fTimer.start();
    } else {
        fCurrentFrame++;
    }
}
// Per-frame entry point: renders one frame, then dispatches on the timing
// state machine.
// NOTE(review): this listing contains BOTH the old in-window state switch and
// the new fModule->draw(canvas) delegation -- old and new diff lines appear to
// be rendered together. In the post-CL code only the module call should
// remain; verify against the repository.
void VisualBench::draw(SkCanvas* canvas) {
    if (!this->advanceRecordIfNecessary(canvas)) {
        SkDebugf("Exiting VisualBench successfully\n");
        this->closeWindow();
        return;
    }
    this->renderFrame(canvas);
    switch (fState) {
        case kPreWarmLoopsPerCanvasPreDraw_State: {
            this->perCanvasPreDraw(canvas, kPreWarmLoops_State);
            break;
        }
        case kPreWarmLoops_State: {
            this->preWarm(kTuneLoops_State);
            break;
        }
        case kTuneLoops_State: {
            this->tuneLoops();
            break;
        }
        case kPreWarmTimingPerCanvasPreDraw_State: {
            this->perCanvasPreDraw(canvas, kPreWarmTiming_State);
            break;
        }
        case kPreWarmTiming_State: {
            this->preWarm(kTiming_State);
            break;
        }
        case kTiming_State: {
            this->timing(canvas);
            break;
        }
    }
    fModule->draw(canvas);
    // Invalidate the window to force a redraw. Poor man's animation mechanism.
    this->inval(nullptr);
}
// Stops the wall timer and returns the elapsed wall time in milliseconds
// (the unit WallTimer::fWall reports, per its use against FLAGS_loopMs).
inline double VisualBench::elapsed() {
    fTimer.end();
    return fTimer.fWall;
}

// Resets per-measurement state: frame counter, timer, and the GrContext
// (samples are independent; see the comment block above the flags).
void VisualBench::resetTimingState() {
    fCurrentFrame = 0;
    fTimer = WallTimer();
    this->resetContext();
}

// Rescales fLoops so one timed pass lands near the FLAGS_loopMs target.
void VisualBench::scaleLoops(double elapsedMs) {
    // Scale back the number of loops
    fLoops = (int)ceil(fLoops * FLAGS_loopMs / elapsedMs);
}
// Doubles fLoops until one pass exceeds FLAGS_loopMs, then scales it to the
// target and moves on to the timing prewarm.
inline void VisualBench::tuneLoops() {
    if (1 << 30 == fLoops) {
        // We're about to wrap. Something's wrong with the bench.
        SkDebugf("InnerLoops wrapped\n");
        fLoops = 0;
        // NOTE(review): state is not advanced here, so we stay in
        // kTuneLoops_State with fLoops == 0 -- confirm the intended recovery.
    } else {
        double elapsedMs = this->elapsed();
        if (elapsedMs > FLAGS_loopMs) {
            // Enough work per pass: lock in the loop count and start timing.
            this->scaleLoops(elapsedMs);
            this->nextState(kPreWarmTimingPerCanvasPreDraw_State);
        } else {
            fLoops *= 2;
            this->nextState(kPreWarmLoops_State);
        }
        this->resetTimingState();
    }
}
// Stores one per-iteration time: elapsed wall time normalized by the number
// of frames and inner loops it covered.
void VisualBench::recordMeasurement() {
    double measurement = this->elapsed() / (FLAGS_frames * fLoops);
    fRecords.back().fMeasurements.push_back(measurement);
}

// Tears down the finished benchmark and resets counters for the next one.
void VisualBench::postDraw(SkCanvas* canvas) {
    fBenchmark->perCanvasPostDraw(canvas);
    fBenchmark.reset(nullptr);
    fCurrentSample = 0;
    fLoops = 1;
}
// Counts frames within a sample; after FLAGS_frames frames, records one
// measurement and either finishes the bench or starts the next sample.
inline void VisualBench::timing(SkCanvas* canvas) {
    if (fCurrentFrame >= FLAGS_frames) {
        this->recordMeasurement();
        // NOTE(review): post-increment compare means FLAGS_samples + 1
        // measurements are recorded per bench -- confirm that is intended.
        if (fCurrentSample++ >= FLAGS_samples) {
            this->printStats();
            this->postDraw(canvas);
            this->nextState(kPreWarmLoopsPerCanvasPreDraw_State);
        } else {
            this->nextState(kPreWarmTimingPerCanvasPreDraw_State);
        }
        this->resetTimingState();
    } else {
        fCurrentFrame++;
    }
}

// Window resize invalidates the backing render target; rebuild it.
void VisualBench::onSizeChange() {
    this->setupRenderTarget();
}

View File

@ -11,12 +11,11 @@
#include "SkWindow.h"
#include "ResultsWriter.h"
#include "SkPicture.h"
#include "SkString.h"
#include "SkSurface.h"
#include "Timer.h"
#include "VisualBenchmarkStream.h"
#include "VisualFlags.h"
#include "VisualModule.h"
#include "gl/SkGLContext.h"
class GrContext;
@ -32,6 +31,8 @@ public:
VisualBench(void* hwnd, int argc, char** argv);
~VisualBench() override;
void reset() { this->resetContext(); }
protected:
SkSurface* createSurface() override;
@ -40,73 +41,19 @@ protected:
void onSizeChange() override;
private:
/*
* The heart of visual bench is an event driven timing loop.
* kPreWarmLoopsPerCanvasPreDraw_State: Before we begin timing, Benchmarks have a hook to
* access the canvas. Then we prewarm before the autotune
* loops step.
* kPreWarmLoops_State: We prewarm the gpu before auto tuning to enter a steady
* work state
* kTuneLoops_State: Then we tune the loops of the benchmark to ensure we
* are doing a measurable amount of work
* kPreWarmTimingPerCanvasPreDraw_State: Because reset the context after tuning loops to ensure
* coherent state, we need to give the benchmark
* another hook
* kPreWarmTiming_State: We prewarm the gpu again to enter a steady state
* kTiming_State: Finally we time the benchmark. When finished timing
* if we have enough samples then we'll start the next
* benchmark in the kPreWarmLoopsPerCanvasPreDraw_State.
* otherwise, we enter the
* kPreWarmTimingPerCanvasPreDraw_State for another sample
* In either case we reset the context.
*/
enum State {
kPreWarmLoopsPerCanvasPreDraw_State,
kPreWarmLoops_State,
kTuneLoops_State,
kPreWarmTimingPerCanvasPreDraw_State,
kPreWarmTiming_State,
kTiming_State,
};
void setTitle();
bool setupBackend();
void resetContext();
void setupRenderTarget();
bool onHandleChar(SkUnichar unichar) override;
void printStats();
bool advanceRecordIfNecessary(SkCanvas*);
inline void renderFrame(SkCanvas*);
inline void nextState(State);
void perCanvasPreDraw(SkCanvas*, State);
void preWarm(State nextState);
void scaleLoops(double elapsedMs);
inline void tuneLoops();
inline void timing(SkCanvas*);
inline double elapsed();
void resetTimingState();
void postDraw(SkCanvas*);
void recordMeasurement();
struct Record {
SkTArray<double> fMeasurements;
};
int fCurrentSample;
int fCurrentFrame;
int fLoops;
SkTArray<Record> fRecords;
WallTimer fTimer;
State fState;
SkAutoTDelete<VisualBenchmarkStream> fBenchmarkStream;
SkAutoTUnref<Benchmark> fBenchmark;
// support framework
SkAutoTDelete<VisualModule> fModule;
SkAutoTUnref<SkSurface> fSurface;
SkAutoTUnref<GrContext> fContext;
SkAutoTUnref<GrRenderTarget> fRenderTarget;
AttachmentInfo fAttachmentInfo;
SkAutoTUnref<const GrGLInterface> fInterface;
SkAutoTDelete<ResultsWriter> fResults;
typedef SkOSWindow INHERITED;
};

View File

@ -0,0 +1,10 @@
/*
* Copyright 2015 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "VisualFlags.h"
// Defined here (rather than in VisualBench.cpp) because --msaa is consumed by
// both the window setup and the timing module.
DEFINE_int32(msaa, 0, "Number of msaa samples.");

View File

@ -0,0 +1,15 @@
/*
* Copyright 2015 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef VisualFlags_DEFINED
#define VisualFlags_DEFINED
#include "SkCommandLineFlags.h"
// Command-line flags shared by VisualBench and its modules.
// The matching DEFINE_int32 lives in VisualFlags.cpp.
DECLARE_int32(msaa);
#endif

View File

@ -0,0 +1,256 @@
/*
* Copyright 2015 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*
*/
#include "VisualLightweightBenchModule.h"
#include "ProcStats.h"
#include "SkApplication.h"
#include "SkCanvas.h"
#include "SkCommandLineFlags.h"
#include "SkForceLinking.h"
#include "SkGraphics.h"
#include "SkGr.h"
#include "SkImageDecoder.h"
#include "SkOSFile.h"
#include "SkStream.h"
#include "Stats.h"
#include "gl/GrGLInterface.h"
__SK_FORCE_IMAGE_DECODER_LINKING;

// Timing granularity:
//   Between samples we reset the context.
//   Between frames we swap buffers.
DEFINE_int32(gpuFrameLag, 5, "Overestimate of maximum number of frames GPU allows to lag.");
DEFINE_int32(samples, 10, "Number of times to time each skp.");
DEFINE_int32(frames, 5, "Number of frames of each skp to render per sample.");
DEFINE_double(loopMs, 5, "Target loop time in milliseconds.");
DEFINE_bool2(verbose, v, false, "enable verbose output from the test driver.");
DEFINE_string(key, "", "");  // dummy to enable gm tests that have platform-specific names
DEFINE_string(outResultsFile, "", "If given, write results here as JSON.");
DEFINE_string(properties, "",
              "Space-separated key/value pairs to add to JSON identifying this run.");
// Formats a duration (in milliseconds) for display: raw integer nanoseconds
// when --verbose (easier to post-process), otherwise a human-readable string.
static SkString humanize(double ms) {
    return FLAGS_verbose ? SkStringPrintf("%llu", (uint64_t)(ms * 1e6))
                         : HumanizeMs(ms);
}
// Convenience for passing a formatted time straight to SkDebugf.
#define HUMANIZE(time) humanize(time).c_str()
// Constructs the timing module. Takes a ref on the owning VisualBench window
// (see SkRef below) -- the backpointer is a temporary crutch; see the TODO in
// the header. Sets up the benchmark stream and the (optional) JSON writer.
// Assumes command-line flags were already parsed by the owner.
VisualLightweightBenchModule::VisualLightweightBenchModule(VisualBench* owner)
    : fCurrentSample(0)
    , fCurrentFrame(0)
    , fLoops(1)
    , fState(kPreWarmLoops_State)
    , fBenchmark(nullptr)
    , fOwner(SkRef(owner))
    , fResults(new ResultsWriter) {
    fBenchmarkStream.reset(new VisualBenchmarkStream);
    // Print header
    SkDebugf("curr/maxrss\tloops\tflushes\tmin\tmedian\tmean\tmax\tstddev\tbench\n");
    // setup json logging if required
    if (!FLAGS_outResultsFile.isEmpty()) {
        fResults.reset(new NanoJSONResultsWriter(FLAGS_outResultsFile[0]));
    }
    // --properties arrive as space-separated key/value pairs; i indexes the
    // value of each pair, i-1 the key. Odd counts are reported and skipped.
    if (1 == FLAGS_properties.count() % 2) {
        SkDebugf("ERROR: --properties must be passed with an even number of arguments.\n");
    } else {
        for (int i = 1; i < FLAGS_properties.count(); i += 2) {
            fResults->property(FLAGS_properties[i - 1], FLAGS_properties[i]);
        }
    }
}
// Draws fLoops iterations of the current benchmark, flushes the canvas, and
// asks the owning window to present the frame.
inline void VisualLightweightBenchModule::renderFrame(SkCanvas* canvas) {
    fBenchmark->draw(fLoops, canvas);
    canvas->flush();
    fOwner->present();
}
// Records the minimum measurement for the just-finished benchmark to the JSON
// results writer and prints a one-line summary (or every raw sample when
// --verbose is set).
void VisualLightweightBenchModule::printStats() {
    const SkTArray<double>& measurements = fRecords.back().fMeasurements;
    const char* shortName = fBenchmark->getUniqueName();
    // update log
    // Note: We currently log only the minimum. It would be interesting to log more information
    SkString configName;
    if (FLAGS_msaa > 0) {
        configName.appendf("msaa_%d", FLAGS_msaa);
    } else {
        configName.appendf("gpu");
    }
    fResults->config(configName.c_str());
    fResults->configOption("name", fBenchmark->getUniqueName());
    SkASSERT(measurements.count());
    Stats stats(measurements);
    fResults->metric("min_ms", stats.min);
    // Print output
    if (FLAGS_verbose) {
        // Dump every raw sample, followed by the bench name.
        for (int i = 0; i < measurements.count(); i++) {
            SkDebugf("%s ", HUMANIZE(measurements[i]));
        }
        SkDebugf("%s\n", shortName);
    } else {
        // Columns match the header printed in the constructor.
        const double stdDevPercent = 100 * sqrt(stats.var) / stats.mean;
        SkDebugf("%4d/%-4dMB\t%d\t%s\t%s\t%s\t%s\t%.0f%%\t%s\n",
                 sk_tools::getCurrResidentSetSizeMB(),
                 sk_tools::getMaxResidentSetSizeMB(),
                 fLoops,
                 HUMANIZE(stats.min),
                 HUMANIZE(stats.median),
                 HUMANIZE(stats.mean),
                 HUMANIZE(stats.max),
                 stdDevPercent,
                 shortName);
    }
}
// Pulls the next benchmark from the stream if there is no current one.
// Returns false when the stream is exhausted (i.e. we are done).
bool VisualLightweightBenchModule::advanceRecordIfNecessary(SkCanvas* canvas) {
    if (fBenchmark) {
        return true;
    }
    fBenchmark.reset(fBenchmarkStream->next());
    if (!fBenchmark) {
        return false;
    }
    // Fresh white canvas and a fresh measurement record for the new bench.
    canvas->clear(0xffffffff);
    fBenchmark->preDraw();
    fRecords.push_back();
    // Log bench name
    fResults->bench(fBenchmark->getUniqueName(), fBenchmark->getSize().fX,
                    fBenchmark->getSize().fY);
    return true;
}
// Advances the timing state machine.
inline void VisualLightweightBenchModule::nextState(State nextState) {
    fState = nextState;
}

// Gives the benchmark its per-canvas hook, then enters the given state.
void VisualLightweightBenchModule::perCanvasPreDraw(SkCanvas* canvas, State nextState) {
    fBenchmark->perCanvasPreDraw(canvas);
    fCurrentFrame = 0;
    this->nextState(nextState);
}
// Renders FLAGS_gpuFrameLag frames so the GPU pipeline reaches a steady state,
// then starts the wall timer and transitions to nextState.
void VisualLightweightBenchModule::preWarm(State nextState) {
    if (fCurrentFrame < FLAGS_gpuFrameLag) {
        // Still warming up; count this frame and keep going.
        ++fCurrentFrame;
        return;
    }
    // we currently time across all frames to make sure we capture all GPU work
    this->nextState(nextState);
    fCurrentFrame = 0;
    fTimer.start();
}
// Module entry point, called once per window paint: renders one frame of the
// current benchmark, then dispatches on the timing state machine (see the
// State enum docs in the header for the full lifecycle).
void VisualLightweightBenchModule::draw(SkCanvas* canvas) {
    if (!this->advanceRecordIfNecessary(canvas)) {
        // Benchmark stream exhausted -- shut down the window.
        SkDebugf("Exiting VisualBench successfully\n");
        fOwner->closeWindow();
        return;
    }
    this->renderFrame(canvas);
    switch (fState) {
        case kPreWarmLoopsPerCanvasPreDraw_State: {
            this->perCanvasPreDraw(canvas, kPreWarmLoops_State);
            break;
        }
        case kPreWarmLoops_State: {
            this->preWarm(kTuneLoops_State);
            break;
        }
        case kTuneLoops_State: {
            this->tuneLoops();
            break;
        }
        case kPreWarmTimingPerCanvasPreDraw_State: {
            this->perCanvasPreDraw(canvas, kPreWarmTiming_State);
            break;
        }
        case kPreWarmTiming_State: {
            this->preWarm(kTiming_State);
            break;
        }
        case kTiming_State: {
            this->timing(canvas);
            break;
        }
    }
}
// Stops the wall timer and returns the elapsed wall time in milliseconds
// (the unit WallTimer::fWall reports, per its use against FLAGS_loopMs).
inline double VisualLightweightBenchModule::elapsed() {
    fTimer.end();
    return fTimer.fWall;
}

// Resets per-measurement state: frame counter, timer, and -- via the owner --
// the GrContext (samples are independent; see the comment above the flags).
void VisualLightweightBenchModule::resetTimingState() {
    fCurrentFrame = 0;
    fTimer = WallTimer();
    fOwner->reset();
}

// Rescales fLoops so one timed pass lands near the FLAGS_loopMs target.
void VisualLightweightBenchModule::scaleLoops(double elapsedMs) {
    // Scale back the number of loops
    fLoops = (int)ceil(fLoops * FLAGS_loopMs / elapsedMs);
}
// Doubles fLoops until one pass exceeds FLAGS_loopMs, then scales it to the
// target and moves on to the timing prewarm.
inline void VisualLightweightBenchModule::tuneLoops() {
    if (1 << 30 == fLoops) {
        // We're about to wrap. Something's wrong with the bench.
        SkDebugf("InnerLoops wrapped\n");
        fLoops = 0;
        // NOTE(review): state is not advanced here, so we stay in
        // kTuneLoops_State with fLoops == 0 -- confirm the intended recovery.
    } else {
        double elapsedMs = this->elapsed();
        if (elapsedMs > FLAGS_loopMs) {
            // Enough work per pass: lock in the loop count and start timing.
            this->scaleLoops(elapsedMs);
            this->nextState(kPreWarmTimingPerCanvasPreDraw_State);
        } else {
            fLoops *= 2;
            this->nextState(kPreWarmLoops_State);
        }
        this->resetTimingState();
    }
}
// Stores one per-iteration time: elapsed wall time normalized by the number
// of frames and inner loops it covered.
void VisualLightweightBenchModule::recordMeasurement() {
    double measurement = this->elapsed() / (FLAGS_frames * fLoops);
    fRecords.back().fMeasurements.push_back(measurement);
}

// Tears down the finished benchmark and resets counters for the next one.
void VisualLightweightBenchModule::postDraw(SkCanvas* canvas) {
    fBenchmark->perCanvasPostDraw(canvas);
    fBenchmark.reset(nullptr);
    fCurrentSample = 0;
    fLoops = 1;
}
// Counts frames within a sample; after FLAGS_frames frames, records one
// measurement and either finishes the bench or starts the next sample.
inline void VisualLightweightBenchModule::timing(SkCanvas* canvas) {
    if (fCurrentFrame >= FLAGS_frames) {
        this->recordMeasurement();
        // NOTE(review): post-increment compare means FLAGS_samples + 1
        // measurements are recorded per bench -- confirm that is intended.
        if (fCurrentSample++ >= FLAGS_samples) {
            this->printStats();
            this->postDraw(canvas);
            this->nextState(kPreWarmLoopsPerCanvasPreDraw_State);
        } else {
            this->nextState(kPreWarmTimingPerCanvasPreDraw_State);
        }
        this->resetTimingState();
    } else {
        fCurrentFrame++;
    }
}

View File

@ -0,0 +1,98 @@
/*
* Copyright 2015 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*
*/
#ifndef VisualLightweightBenchModule_DEFINED
#define VisualLightweightBenchModule_DEFINED

#include "VisualModule.h"

#include "ResultsWriter.h"
#include "SkPicture.h"
#include "Timer.h"
#include "VisualBench.h"
#include "VisualBenchmarkStream.h"

class SkCanvas;

/*
 * This module is designed to be a minimal overhead timing module for VisualBench
 */
class VisualLightweightBenchModule : public VisualModule {
public:
    // TODO get rid of backpointer
    // Takes a ref on 'owner'; the module calls back into the window for
    // present()/reset()/closeWindow().
    VisualLightweightBenchModule(VisualBench* owner);

    // Renders one frame of the current benchmark and advances the timing
    // state machine; called once per window paint.
    void draw(SkCanvas* canvas) override;

private:
    /*
     * The heart of visual bench is an event driven timing loop.
     * kPreWarmLoopsPerCanvasPreDraw_State:  Before we begin timing, Benchmarks have a hook to
     *                                       access the canvas.  Then we prewarm before the autotune
     *                                       loops step.
     * kPreWarmLoops_State:                  We prewarm the gpu before auto tuning to enter a steady
     *                                       work state
     * kTuneLoops_State:                     Then we tune the loops of the benchmark to ensure we
     *                                       are doing a measurable amount of work
     * kPreWarmTimingPerCanvasPreDraw_State: Because we reset the context after tuning loops to
     *                                       ensure coherent state, we need to give the benchmark
     *                                       another hook
     * kPreWarmTiming_State:                 We prewarm the gpu again to enter a steady state
     * kTiming_State:                        Finally we time the benchmark.  When finished timing
     *                                       if we have enough samples then we'll start the next
     *                                       benchmark in the kPreWarmLoopsPerCanvasPreDraw_State.
     *                                       otherwise, we enter the
     *                                       kPreWarmTimingPerCanvasPreDraw_State for another sample
     *                                       In either case we reset the context.
     */
    enum State {
        kPreWarmLoopsPerCanvasPreDraw_State,
        kPreWarmLoops_State,
        kTuneLoops_State,
        kPreWarmTimingPerCanvasPreDraw_State,
        kPreWarmTiming_State,
        kTiming_State,
    };
    // Note: the window-management declarations (setTitle, setupBackend,
    // setupRenderTarget) carried over from VisualBench had no definitions in
    // this module's .cpp and have been removed.
    void printStats();
    bool advanceRecordIfNecessary(SkCanvas*);
    inline void renderFrame(SkCanvas*);
    inline void nextState(State);
    void perCanvasPreDraw(SkCanvas*, State);
    void preWarm(State nextState);
    void scaleLoops(double elapsedMs);
    inline void tuneLoops();
    inline void timing(SkCanvas*);
    inline double elapsed();
    void resetTimingState();
    void postDraw(SkCanvas*);
    void recordMeasurement();

    // One Record per benchmark; holds the raw per-sample timings.
    struct Record {
        SkTArray<double> fMeasurements;
    };

    int fCurrentSample;  // samples collected so far for the current benchmark
    int fCurrentFrame;   // frames rendered in the current prewarm/timing phase
    int fLoops;          // inner-loop count, autotuned in kTuneLoops_State
    SkTArray<Record> fRecords;
    WallTimer fTimer;
    State fState;
    SkAutoTDelete<VisualBenchmarkStream> fBenchmarkStream;
    SkAutoTUnref<Benchmark> fBenchmark;

    // support framework
    SkAutoTUnref<VisualBench> fOwner;  // back-pointer to the owning window (ref'd)
    SkAutoTDelete<ResultsWriter> fResults;

    typedef VisualModule INHERITED;
};

#endif

View File

@ -0,0 +1,32 @@
/*
* Copyright 2015 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*
*/
#ifndef VisualModule_DEFINED
#define VisualModule_DEFINED
#include "SkRefCnt.h"
class SkCanvas;
/*
 * VisualModule is the base class for all of the various types of activities VisualBench supports.
 *
 * The common theme tying these all together is they need to display an image to the screen.
 * Later on, some modules will also be interactive.
 */
class VisualModule : public SkRefCnt {
public:
    virtual ~VisualModule() {}
    // Called once per frame by the owning window; implementations render
    // their content to 'canvas'.
    virtual void draw(SkCanvas* canvas)=0;
private:
    typedef SkRefCnt INHERITED;
};
#endif
#endif