Fix alpha textures in NV ES3 contexts on Windows.

Make unit tests iterate over all the rendering GL context types rather than using kNative.

Fix the extension printing when gStartupSpew is set.

R=jvanverth@google.com

Author: bsalomon@google.com

Review URL: https://codereview.chromium.org/398183002
Authored by bsalomon on 2014-07-17 10:50:59 -07:00; committed by Commit bot
Parent: e57452debd
Commit: e904c09a3a

11 changed files with 407 additions and 308 deletions
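
All of the test changes below share one pattern: instead of asking the factory only for kNative_GLContextType, each test now walks every GL context type the factory knows about and skips the non-rendering ones. A minimal sketch of that loop follows, using only the factory API that appears in the diffs; the helper name run_for_all_rendering_contexts, the include paths, and the do_test_with_context callback are placeholders for illustration, not code from this commit.

    #include "GrContextFactory.h"  // assumed include paths for this sketch
    #include "Test.h"

    #if SK_SUPPORT_GPU
    // Iterate over every GL context type the factory can create and run the
    // test body only on the ones that can actually render.
    static void run_for_all_rendering_contexts(skiatest::Reporter* reporter,
                                               GrContextFactory* factory) {
        for (int i = 0; i < GrContextFactory::kGLContextTypeCnt; ++i) {
            GrContextFactory::GLContextType glCtxType = (GrContextFactory::GLContextType) i;
            if (!GrContextFactory::IsRenderingGLContext(glCtxType)) {
                continue;  // e.g. null/debug contexts cannot render
            }
            GrContext* context = factory->get(glCtxType);
            if (NULL == context) {
                continue;  // this context type is not available on this machine
            }
            do_test_with_context(reporter, context);  // placeholder for the per-test body
        }
    }
    #endif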


@@ -65,6 +65,8 @@ public:
         return fInterface->hasExtension(ext);
     }
 
+    const GrGLExtensions& extensions() const { return fInterface->fExtensions; }
+
     /**
      * Reset the information
      */


@@ -219,6 +219,9 @@ GrGLVendor GrGLGetVendorFromString(const char* vendorString) {
         if (0 == strcmp(vendorString, "Qualcomm")) {
             return kQualcomm_GrGLVendor;
         }
+        if (0 == strcmp(vendorString, "NVIDIA Corporation")) {
+            return kNVIDIA_GrGLVendor;
+        }
     }
     return kOther_GrGLVendor;
 }


@@ -34,6 +34,7 @@ enum GrGLVendor {
     kImagination_GrGLVendor,
     kIntel_GrGLVendor,
     kQualcomm_GrGLVendor,
+    kNVIDIA_GrGLVendor,
     kOther_GrGLVendor
 };
 


@ -137,9 +137,7 @@ GrGpuGL::GrGpuGL(const GrGLContext& ctx, GrContext* context)
GrPrintf("------ RENDERER %s\n", renderer); GrPrintf("------ RENDERER %s\n", renderer);
GrPrintf("------ VERSION %s\n", version); GrPrintf("------ VERSION %s\n", version);
GrPrintf("------ EXTENSIONS\n"); GrPrintf("------ EXTENSIONS\n");
#if 0 // TODO: Reenable this after GrGLInterface's extensions can be accessed safely. ctx.extensions().print();
ctx.extensions().print();
#endif
GrPrintf("\n"); GrPrintf("\n");
GrPrintf(this->glCaps().dump().c_str()); GrPrintf(this->glCaps().dump().c_str());
} }
@@ -574,13 +572,21 @@ bool GrGpuGL::uploadTexData(const GrGLTexture::Desc& desc,
     }
 
     GrGLenum internalFormat;
-    GrGLenum externalFormat;
-    GrGLenum externalType;
+    GrGLenum externalFormat = 0x0;  // suppress warning
+    GrGLenum externalType = 0x0;    // suppress warning
+
     // glTexStorage requires sized internal formats on both desktop and ES. ES2 requires an unsized
     // format for glTexImage, unlike ES3 and desktop. However, we allow the driver to decide the
     // size of the internal format whenever possible and so only use a sized internal format when
     // using texture storage.
-    if (!this->configToGLFormats(dataConfig, useTexStorage, &internalFormat,
+    bool useSizedFormat = useTexStorage;
+    // At least some versions of the desktop ES3 drivers for NVIDIA won't accept GL_RED in
+    // glTexImage2D for the internal format but will accept GL_R8.
+    if (!useSizedFormat && kNVIDIA_GrGLVendor == this->glContext().vendor() &&
+        kGLES_GrGLStandard == this->glStandard() && this->glVersion() >= GR_GL_VER(3, 0)) {
+        useSizedFormat = true;
+    }
+    if (!this->configToGLFormats(dataConfig, useSizedFormat, &internalFormat,
                                  &externalFormat, &externalType)) {
         return false;
     }
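
For readers unfamiliar with the driver quirk being worked around above: the difference is only in the internal-format argument handed to glTexImage2D when uploading single-channel (alpha/red) data. A rough sketch, not taken from the commit, assuming GL ES3 headers, a current context, and a bound 2D texture; w, h, and pixels stand in for a real width, height, and tightly packed buffer.

    #include <GLES3/gl3.h>  // assumed ES3 header; adjust for your GL loader

    // The affected NVIDIA ES3 drivers reject the unsized internal format GL_RED in
    // glTexImage2D but accept the sized GL_R8, which is what forcing
    // useSizedFormat = true in the hunk above ends up selecting.
    static void upload_r8(GLsizei w, GLsizei h, const void* pixels, bool useSizedFormat) {
        GLenum internalFormat = useSizedFormat ? GL_R8 : GL_RED;
        glTexImage2D(GL_TEXTURE_2D, 0, internalFormat, w, h, 0,
                     GL_RED, GL_UNSIGNED_BYTE, pixels);
    }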


@@ -681,68 +681,83 @@ static PixelPtr get_surface_ptr(SkSurface* surface, bool useGpu) {
 static void TestDeferredCanvasSurface(skiatest::Reporter* reporter, GrContextFactory* factory) {
     SkImageInfo imageSpec = SkImageInfo::MakeN32Premul(10, 10);
-    SkSurface* surface;
     bool useGpu = NULL != factory;
+    int cnt;
 #if SK_SUPPORT_GPU
     if (useGpu) {
-        GrContext* context = factory->get(GrContextFactory::kNative_GLContextType);
-        if (NULL == context) {
-            return;
-        }
-        surface = SkSurface::NewRenderTarget(context, imageSpec);
+        cnt = GrContextFactory::kGLContextTypeCnt;
     } else {
-        surface = SkSurface::NewRaster(imageSpec);
+        cnt = 1;
     }
 #else
     SkASSERT(!useGpu);
-    surface = SkSurface::NewRaster(imageSpec);
+    cnt = 1;
 #endif
+    for (int i = 0; i < cnt; ++i) {
+        SkSurface* surface;
+#if SK_SUPPORT_GPU
+        if (useGpu) {
+            GrContextFactory::GLContextType glCtxType = (GrContextFactory::GLContextType) i;
+            if (!GrContextFactory::IsRenderingGLContext(glCtxType)) {
+                continue;
+            }
+            GrContext* context = factory->get(glCtxType);
+            if (NULL == context) {
+                return;
+            }
+            surface = SkSurface::NewRenderTarget(context, imageSpec);
+        } else
+#endif
+        {
+            surface = SkSurface::NewRaster(imageSpec);
+        }
         SkASSERT(NULL != surface);
         SkAutoTUnref<SkSurface> aur(surface);
         SkAutoTUnref<SkDeferredCanvas> canvas(SkDeferredCanvas::Create(surface));
 
         SkImage* image1 = canvas->newImageSnapshot();
         SkAutoTUnref<SkImage> aur_i1(image1);
         PixelPtr pixels1 = get_surface_ptr(surface, useGpu);
         // The following clear would normally trigger a copy on write, but
         // it won't because rendering is deferred.
         canvas->clear(SK_ColorBLACK);
         // Obtaining a snapshot directly from the surface (as opposed to the
         // SkDeferredCanvas) will not trigger a flush of deferred draw operations
         // and will therefore return the same image as the previous snapshot.
         SkImage* image2 = surface->newImageSnapshot();
         SkAutoTUnref<SkImage> aur_i2(image2);
         // Images identical because of deferral
         REPORTER_ASSERT(reporter, image1->uniqueID() == image2->uniqueID());
         // Now we obtain a snapshot via the deferred canvas, which triggers a flush.
         // Because there is a pending clear, this will generate a different image.
         SkImage* image3 = canvas->newImageSnapshot();
         SkAutoTUnref<SkImage> aur_i3(image3);
         REPORTER_ASSERT(reporter, image1->uniqueID() != image3->uniqueID());
         // Verify that backing store is now a different buffer because of copy on
         // write
         PixelPtr pixels2 = get_surface_ptr(surface, useGpu);
         REPORTER_ASSERT(reporter, pixels1 != pixels2);
         // Verify copy-on write with a draw operation that gets deferred by
         // the in order draw buffer.
         SkPaint paint;
         canvas->drawPaint(paint);
         SkImage* image4 = canvas->newImageSnapshot();  // implicit flush
         SkAutoTUnref<SkImage> aur_i4(image4);
         REPORTER_ASSERT(reporter, image4->uniqueID() != image3->uniqueID());
         PixelPtr pixels3 = get_surface_ptr(surface, useGpu);
         REPORTER_ASSERT(reporter, pixels2 != pixels3);
         // Verify that a direct canvas flush with a pending draw does not trigger
         // a copy on write when the surface is not sharing its buffer with an
         // SkImage.
         canvas->clear(SK_ColorWHITE);
         canvas->flush();
         PixelPtr pixels4 = get_surface_ptr(surface, useGpu);
         canvas->drawPaint(paint);
         canvas->flush();
         PixelPtr pixels5 = get_surface_ptr(surface, useGpu);
         REPORTER_ASSERT(reporter, pixels4 == pixels5);
+    }
 }
 
 static void TestDeferredCanvasSetSurface(skiatest::Reporter* reporter, GrContextFactory* factory) {
@@ -750,42 +765,57 @@ static void TestDeferredCanvasSetSurface(skiatest::Reporter* reporter, GrContextFactory* factory) {
     SkSurface* surface;
     SkSurface* alternateSurface;
     bool useGpu = NULL != factory;
+    int cnt;
 #if SK_SUPPORT_GPU
     if (useGpu) {
-        GrContext* context = factory->get(GrContextFactory::kNative_GLContextType);
-        if (NULL == context) {
-            return;
-        }
-        surface = SkSurface::NewRenderTarget(context, imageSpec);
-        alternateSurface = SkSurface::NewRenderTarget(context, imageSpec);
+        cnt = GrContextFactory::kGLContextTypeCnt;
     } else {
-        surface = SkSurface::NewRaster(imageSpec);
-        alternateSurface = SkSurface::NewRaster(imageSpec);
+        cnt = 1;
     }
 #else
     SkASSERT(!useGpu);
-    surface = SkSurface::NewRaster(imageSpec);
-    alternateSurface = SkSurface::NewRaster(imageSpec);
+    cnt = 1;
 #endif
+
+    for (int i = 0; i < cnt; ++i) {
+#if SK_SUPPORT_GPU
+        if (useGpu) {
+            GrContextFactory::GLContextType glCtxType = (GrContextFactory::GLContextType) i;
+            if (!GrContextFactory::IsRenderingGLContext(glCtxType)) {
+                continue;
+            }
+            GrContext* context = factory->get(glCtxType);
+            if (NULL == context) {
+                continue;
+            }
+            surface = SkSurface::NewRenderTarget(context, imageSpec);
+            alternateSurface = SkSurface::NewRenderTarget(context, imageSpec);
+        } else
+#endif
+        {
+            surface = SkSurface::NewRaster(imageSpec);
+            alternateSurface = SkSurface::NewRaster(imageSpec);
+        }
         SkASSERT(NULL != surface);
         SkASSERT(NULL != alternateSurface);
         SkAutoTUnref<SkSurface> aur1(surface);
         SkAutoTUnref<SkSurface> aur2(alternateSurface);
         PixelPtr pixels1 = get_surface_ptr(surface, useGpu);
         PixelPtr pixels2 = get_surface_ptr(alternateSurface, useGpu);
         SkAutoTUnref<SkDeferredCanvas> canvas(SkDeferredCanvas::Create(surface));
         SkAutoTUnref<SkImage> image1(canvas->newImageSnapshot());
         canvas->setSurface(alternateSurface);
         SkAutoTUnref<SkImage> image2(canvas->newImageSnapshot());
         REPORTER_ASSERT(reporter, image1->uniqueID() != image2->uniqueID());
         // Verify that none of the above operations triggered a surface copy on write.
         REPORTER_ASSERT(reporter, get_surface_ptr(surface, useGpu) == pixels1);
         REPORTER_ASSERT(reporter, get_surface_ptr(alternateSurface, useGpu) == pixels2);
         // Verify that a flushed draw command will trigger a copy on write on alternateSurface.
         canvas->clear(SK_ColorWHITE);
         canvas->flush();
         REPORTER_ASSERT(reporter, get_surface_ptr(surface, useGpu) == pixels1);
         REPORTER_ASSERT(reporter, get_surface_ptr(alternateSurface, useGpu) != pixels2);
+    }
 }
 
 static void TestDeferredCanvasCreateCompatibleDevice(skiatest::Reporter* reporter) {


@@ -147,6 +147,10 @@ bool GrGpuGL::programUnitTest(int maxStages) {
     dummyDesc.fHeight = 22;
     SkAutoTUnref<GrTexture> dummyTexture2(this->createTexture(dummyDesc, NULL, 0));
 
+    if (!dummyTexture1 || ! dummyTexture2) {
+        return false;
+    }
+
     static const int NUM_TESTS = 512;
 
     SkRandom random;


@@ -49,92 +49,100 @@ static void create_layers(skiatest::Reporter* reporter,
 // locking & unlocking textures).
 // TODO: need to add checks on VRAM usage!
 DEF_GPUTEST(GpuLayerCache, reporter, factory) {
-    GrContext* context = factory->get(GrContextFactory::kNative_GLContextType);
-    if (NULL == context) {
-        return;
-    }
+    for (int i= 0; i < GrContextFactory::kGLContextTypeCnt; ++i) {
+        GrContextFactory::GLContextType glCtxType = (GrContextFactory::GLContextType) i;
+
+        if (!GrContextFactory::IsRenderingGLContext(glCtxType)) {
+            continue;
+        }
+
+        GrContext* context = factory->get(glCtxType);
+
+        if (NULL == context) {
+            continue;
+        }
 
         SkPictureRecorder recorder;
         recorder.beginRecording(1, 1);
         SkAutoTUnref<const SkPicture> picture(recorder.endRecording());
 
         GrLayerCache cache(context);
 
         create_layers(reporter, &cache, *picture);
 
         // Lock the layers making them all 512x512
         GrTextureDesc desc;
         desc.fWidth = 512;
         desc.fHeight = 512;
         desc.fConfig = kSkia8888_GrPixelConfig;
 
         for (int i = 0; i < kNumLayers; ++i) {
             GrCachedLayer* layer = cache.findLayer(picture, i);
             REPORTER_ASSERT(reporter, NULL != layer);
 
             bool foundInCache = cache.lock(layer, desc);
             REPORTER_ASSERT(reporter, !foundInCache);
             foundInCache = cache.lock(layer, desc);
             REPORTER_ASSERT(reporter, foundInCache);
 
             REPORTER_ASSERT(reporter, NULL != layer->texture());
 #if USE_ATLAS
             // The first 4 layers should be in the atlas (and thus have non-empty
             // rects)
             if (i < 4) {
                 REPORTER_ASSERT(reporter, layer->isAtlased());
             } else {
 #endif
                 REPORTER_ASSERT(reporter, !layer->isAtlased());
 #if USE_ATLAS
             }
 #endif
         }
 
         // Unlock the textures
         for (int i = 0; i < kNumLayers; ++i) {
             GrCachedLayer* layer = cache.findLayer(picture, i);
             REPORTER_ASSERT(reporter, NULL != layer);
             cache.unlock(layer);
         }
 
         for (int i = 0; i < kNumLayers; ++i) {
             GrCachedLayer* layer = cache.findLayer(picture, i);
             REPORTER_ASSERT(reporter, NULL != layer);
 
 #if USE_ATLAS
             // The first 4 layers should be in the atlas (and thus do not
             // currently unlock). The final layer should be unlocked.
             if (i < 4) {
                 REPORTER_ASSERT(reporter, NULL != layer->texture());
                 REPORTER_ASSERT(reporter, layer->isAtlased());
             } else {
 #endif
                 REPORTER_ASSERT(reporter, NULL == layer->texture());
                 REPORTER_ASSERT(reporter, !layer->isAtlased());
 #if USE_ATLAS
             }
 #endif
         }
 
         // Free them all SkGpuDevice-style. This will not free up the
         // atlas' texture but will eliminate all the layers.
         cache.purge(picture);
         REPORTER_ASSERT(reporter, GetNumLayers::NumLayers(&cache) == 0);
         // TODO: add VRAM/resource cache check here
 #if 0
         // Re-create the layers
         create_layers(reporter, &cache, picture);
 
         // Free them again GrContext-style. This should free up everything.
         cache.freeAll();
         REPORTER_ASSERT(reporter, GetNumLayers::NumLayers(&cache) == 0);
         // TODO: add VRAM/resource cache check here
 #endif
+    }
 }
 
 #endif


@@ -16,17 +16,17 @@ DEF_GPUTEST(GrContextFactory, reporter, factory) {
     // Before we ask for a context, we expect the GL context to not be there.
     REPORTER_ASSERT(reporter,
-                    NULL == factory->getGLContext(GrContextFactory::kNative_GLContextType));
+                    NULL == factory->getGLContext(GrContextFactory::kNull_GLContextType));
 
     // After we ask for a context, we expect that the GL context to be there.
-    factory->get(GrContextFactory::kNative_GLContextType);
+    factory->get(GrContextFactory::kNull_GLContextType);
     REPORTER_ASSERT(reporter,
-                    factory->getGLContext(GrContextFactory::kNative_GLContextType) != NULL);
+                    factory->getGLContext(GrContextFactory::kNull_GLContextType) != NULL);
 
     // If we did not ask for a context with the particular GL context, we would
     // expect the particular GL context to not be there.
     REPORTER_ASSERT(reporter,
-                    NULL == factory->getGLContext(GrContextFactory::kNull_GLContextType));
+                    NULL == factory->getGLContext(GrContextFactory::kDebug_GLContextType));
 }
 
 #endif


@@ -92,17 +92,29 @@ void rasterToGpu(skiatest::Reporter* reporter, GrContext* context) {
 }
 
 DEF_GPUTEST(ImageNewShader_GPU, reporter, factory) {
-    GrContext* context = factory->get(GrContextFactory::kNative_GLContextType);
+    for (int i= 0; i < GrContextFactory::kGLContextTypeCnt; ++i) {
+        GrContextFactory::GLContextType glCtxType = (GrContextFactory::GLContextType) i;
+        if (!GrContextFactory::IsRenderingGLContext(glCtxType)) {
+            continue;
+        }
+
+        GrContext* context = factory->get(glCtxType);
+
+        if (NULL == context) {
+            continue;
+        }
 
         //  GPU -> GPU
         gpuToGpu(reporter, context);
 
         //  GPU -> RASTER
         gpuToRaster(reporter, context);
 
         //  RASTER -> GPU
         rasterToGpu(reporter, context);
+    }
 }
 
 #endif


@@ -780,129 +780,140 @@ static void test_gpu_veto(skiatest::Reporter* reporter) {
 static void test_gpu_picture_optimization(skiatest::Reporter* reporter,
                                           GrContextFactory* factory) {
-    GrContext* context = factory->get(GrContextFactory::kNative_GLContextType);
+    for (int i= 0; i < GrContextFactory::kGLContextTypeCnt; ++i) {
+        GrContextFactory::GLContextType glCtxType = (GrContextFactory::GLContextType) i;
+
+        if (!GrContextFactory::IsRenderingGLContext(glCtxType)) {
+            continue;
+        }
+
+        GrContext* context = factory->get(glCtxType);
+
+        if (NULL == context) {
+            continue;
+        }
 
         static const int kWidth = 100;
         static const int kHeight = 100;
 
         SkAutoTUnref<SkPicture> pict;
 
         // create a picture with the structure:
         // 1)
         //      SaveLayer
         //      Restore
         // 2)
         //      SaveLayer
         //          Translate
         //          SaveLayer w/ bound
         //          Restore
         //      Restore
         // 3)
         //      SaveLayer w/ copyable paint
         //      Restore
         // 4)
         //      SaveLayer w/ non-copyable paint
         //      Restore
 
         {
             SkPictureRecorder recorder;
 
             SkCanvas* c = recorder.beginRecording(kWidth, kHeight);
             // 1)
             c->saveLayer(NULL, NULL);
             c->restore();
 
             // 2)
             c->saveLayer(NULL, NULL);
             c->translate(kWidth/2, kHeight/2);
             SkRect r = SkRect::MakeXYWH(0, 0, kWidth/2, kHeight/2);
             c->saveLayer(&r, NULL);
             c->restore();
             c->restore();
 
             // 3)
             {
                 SkPaint p;
                 p.setColor(SK_ColorRED);
                 c->saveLayer(NULL, &p);
                 c->restore();
             }
 
             // 4)
             // TODO: this case will need to be removed once the paint's are immutable
             {
                 SkPaint p;
                 SkAutoTUnref<SkColorFilter> cf(SkLumaColorFilter::Create());
                 p.setImageFilter(SkColorFilterImageFilter::Create(cf.get()))->unref();
                 c->saveLayer(NULL, &p);
                 c->restore();
             }
 
             pict.reset(recorder.endRecording());
         }
 
         // Now test out the SaveLayer extraction
         {
             SkImageInfo info = SkImageInfo::MakeN32Premul(kWidth, kHeight);
             SkAutoTUnref<SkSurface> surface(SkSurface::NewScratchRenderTarget(context, info));
 
             SkCanvas* canvas = surface->getCanvas();
 
             canvas->EXPERIMENTAL_optimize(pict);
 
             SkPicture::AccelData::Key key = GPUAccelData::ComputeAccelDataKey();
 
             const SkPicture::AccelData* data = pict->EXPERIMENTAL_getAccelData(key);
             REPORTER_ASSERT(reporter, NULL != data);
 
             const GPUAccelData *gpuData = static_cast<const GPUAccelData*>(data);
             REPORTER_ASSERT(reporter, 5 == gpuData->numSaveLayers());
 
             const GPUAccelData::SaveLayerInfo& info0 = gpuData->saveLayerInfo(0);
             // The parent/child layer appear in reverse order
             const GPUAccelData::SaveLayerInfo& info1 = gpuData->saveLayerInfo(2);
             const GPUAccelData::SaveLayerInfo& info2 = gpuData->saveLayerInfo(1);
             const GPUAccelData::SaveLayerInfo& info3 = gpuData->saveLayerInfo(3);
 //            const GPUAccelData::SaveLayerInfo& info4 = gpuData->saveLayerInfo(4);
 
             REPORTER_ASSERT(reporter, info0.fValid);
             REPORTER_ASSERT(reporter, kWidth == info0.fSize.fWidth && kHeight == info0.fSize.fHeight);
             REPORTER_ASSERT(reporter, info0.fCTM.isIdentity());
             REPORTER_ASSERT(reporter, 0 == info0.fOffset.fX && 0 == info0.fOffset.fY);
             REPORTER_ASSERT(reporter, NULL != info0.fPaint);
             REPORTER_ASSERT(reporter, !info0.fIsNested && !info0.fHasNestedLayers);
 
             REPORTER_ASSERT(reporter, info1.fValid);
             REPORTER_ASSERT(reporter, kWidth == info1.fSize.fWidth && kHeight == info1.fSize.fHeight);
             REPORTER_ASSERT(reporter, info1.fCTM.isIdentity());
             REPORTER_ASSERT(reporter, 0 == info1.fOffset.fX && 0 == info1.fOffset.fY);
             REPORTER_ASSERT(reporter, NULL != info1.fPaint);
             REPORTER_ASSERT(reporter, !info1.fIsNested && info1.fHasNestedLayers); // has a nested SL
 
             REPORTER_ASSERT(reporter, info2.fValid);
             REPORTER_ASSERT(reporter, kWidth/2 == info2.fSize.fWidth &&
                                       kHeight/2 == info2.fSize.fHeight); // bound reduces size
             REPORTER_ASSERT(reporter, info2.fCTM.isIdentity());          // translated
             REPORTER_ASSERT(reporter, kWidth/2 == info2.fOffset.fX &&
                                       kHeight/2 == info2.fOffset.fY);
             REPORTER_ASSERT(reporter, NULL != info1.fPaint);
             REPORTER_ASSERT(reporter, info2.fIsNested && !info2.fHasNestedLayers); // is nested
 
             REPORTER_ASSERT(reporter, info3.fValid);
             REPORTER_ASSERT(reporter, kWidth == info3.fSize.fWidth && kHeight == info3.fSize.fHeight);
             REPORTER_ASSERT(reporter, info3.fCTM.isIdentity());
             REPORTER_ASSERT(reporter, 0 == info3.fOffset.fX && 0 == info3.fOffset.fY);
             REPORTER_ASSERT(reporter, NULL != info3.fPaint);
             REPORTER_ASSERT(reporter, !info3.fIsNested && !info3.fHasNestedLayers);
 
 #if 0 // needs more though for GrGatherCanvas
             REPORTER_ASSERT(reporter, !info4.fValid); // paint is/was uncopyable
             REPORTER_ASSERT(reporter, kWidth == info4.fSize.fWidth && kHeight == info4.fSize.fHeight);
             REPORTER_ASSERT(reporter, 0 == info4.fOffset.fX && 0 == info4.fOffset.fY);
             REPORTER_ASSERT(reporter, info4.fCTM.isIdentity());
             REPORTER_ASSERT(reporter, NULL == info4.fPaint); // paint is/was uncopyable
             REPORTER_ASSERT(reporter, !info4.fIsNested && !info4.fHasNestedLayers);
 #endif
         }
+    }
 }


@@ -170,37 +170,53 @@ static void test_canvaspeek(skiatest::Reporter* reporter,
     const SkColor color = SK_ColorRED;
     const SkPMColor pmcolor = SkPreMultiplyColor(color);
 
-    GrContext* context = NULL;
+    int cnt;
 #if SK_SUPPORT_GPU
-    context = factory->get(GrContextFactory::kNative_GLContextType);
+    cnt = GrContextFactory::kGLContextTypeCnt;
+#else
+    cnt = 1;
 #endif
 
+    for (int i= 0; i < cnt; ++i) {
+        GrContext* context = NULL;
+#if SK_SUPPORT_GPU
+        GrContextFactory::GLContextType glCtxType = (GrContextFactory::GLContextType) i;
+        if (!GrContextFactory::IsRenderingGLContext(glCtxType)) {
+            continue;
+        }
+        context = factory->get(glCtxType);
+
+        if (NULL == context) {
+            continue;
+        }
+#endif
         for (size_t i = 0; i < SK_ARRAY_COUNT(gRec); ++i) {
             SkImageInfo info, requestInfo;
             size_t rowBytes;
 
             SkAutoTUnref<SkSurface> surface(createSurface(gRec[i].fType, context,
                                                           &requestInfo));
             surface->getCanvas()->clear(color);
 
             const void* addr = surface->getCanvas()->peekPixels(&info, &rowBytes);
             bool success = (NULL != addr);
             REPORTER_ASSERT(reporter, gRec[i].fPeekShouldSucceed == success);
 
             SkImageInfo info2;
             size_t rb2;
             const void* addr2 = surface->peekPixels(&info2, &rb2);
 
             if (success) {
                 REPORTER_ASSERT(reporter, requestInfo == info);
                 REPORTER_ASSERT(reporter, requestInfo.minRowBytes() <= rowBytes);
                 REPORTER_ASSERT(reporter, pmcolor == *(const SkPMColor*)addr);
 
                 REPORTER_ASSERT(reporter, addr2 == addr);
                 REPORTER_ASSERT(reporter, info2 == info);
                 REPORTER_ASSERT(reporter, rb2 == rowBytes);
             } else {
                 REPORTER_ASSERT(reporter, NULL == addr2);
             }
         }
+    }
 }
@@ -429,22 +445,28 @@ DEF_GPUTEST(Surface, reporter, factory) {
 #if SK_SUPPORT_GPU
     TestGetTexture(reporter, kRaster_SurfaceType, NULL);
     if (NULL != factory) {
-        GrContext* context = factory->get(GrContextFactory::kNative_GLContextType);
+        for (int i= 0; i < GrContextFactory::kGLContextTypeCnt; ++i) {
+            GrContextFactory::GLContextType glCtxType = (GrContextFactory::GLContextType) i;
+            if (!GrContextFactory::IsRenderingGLContext(glCtxType)) {
+                continue;
+            }
+            GrContext* context = factory->get(glCtxType);
             if (NULL != context) {
                 TestSurfaceInCache(reporter, kGpu_SurfaceType, context);
                 TestSurfaceInCache(reporter, kGpuScratch_SurfaceType, context);
                 Test_crbug263329(reporter, kGpu_SurfaceType, context);
                 Test_crbug263329(reporter, kGpuScratch_SurfaceType, context);
                 TestSurfaceCopyOnWrite(reporter, kGpu_SurfaceType, context);
                 TestSurfaceCopyOnWrite(reporter, kGpuScratch_SurfaceType, context);
                 TestSurfaceWritableAfterSnapshotRelease(reporter, kGpu_SurfaceType, context);
                 TestSurfaceWritableAfterSnapshotRelease(reporter, kGpuScratch_SurfaceType, context);
                 TestSurfaceNoCanvas(reporter, kGpu_SurfaceType, context, SkSurface::kDiscard_ContentChangeMode);
                 TestSurfaceNoCanvas(reporter, kGpuScratch_SurfaceType, context, SkSurface::kDiscard_ContentChangeMode);
                 TestSurfaceNoCanvas(reporter, kGpu_SurfaceType, context, SkSurface::kRetain_ContentChangeMode);
                 TestSurfaceNoCanvas(reporter, kGpuScratch_SurfaceType, context, SkSurface::kRetain_ContentChangeMode);
                 TestGetTexture(reporter, kGpu_SurfaceType, context);
                 TestGetTexture(reporter, kGpuScratch_SurfaceType, context);
             }
+        }
     }
 #endif