Fix alpha textures in NV ES3 contexts on Windows.
Make unit tests iterate over all the rendering GL context types rather than using kNative.

Fix the extension printing when gStartupSpew is set.

R=jvanverth@google.com
Author: bsalomon@google.com

Review URL: https://codereview.chromium.org/398183002
Parent: e57452debd
Commit: e904c09a3a
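Condensed, the workaround added to GrGpuGL::uploadTexData() in the hunks below amounts to the following (a sketch only; vendor, standard, and version stand for the values Skia obtains via glContext().vendor(), glStandard(), and glVersion()):

    // glTexStorage always requires a sized internal format, so that case is
    // unchanged. Otherwise, force a sized format (GL_R8 rather than GL_RED)
    // on NVIDIA ES 3.0+ contexts, whose drivers reject the unsized form.
    bool useSizedFormat = useTexStorage;
    if (!useSizedFormat && kNVIDIA_GrGLVendor == vendor &&
        kGLES_GrGLStandard == standard && version >= GR_GL_VER(3, 0)) {
        useSizedFormat = true;
    }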
@@ -65,6 +65,8 @@ public:
         return fInterface->hasExtension(ext);
     }

+    const GrGLExtensions& extensions() const { return fInterface->fExtensions; }
+
     /**
      * Reset the information
      */
@@ -219,6 +219,9 @@ GrGLVendor GrGLGetVendorFromString(const char* vendorString) {
         if (0 == strcmp(vendorString, "Qualcomm")) {
             return kQualcomm_GrGLVendor;
         }
+        if (0 == strcmp(vendorString, "NVIDIA Corporation")) {
+            return kNVIDIA_GrGLVendor;
+        }
     }
     return kOther_GrGLVendor;
 }
@@ -34,6 +34,7 @@ enum GrGLVendor {
     kImagination_GrGLVendor,
     kIntel_GrGLVendor,
     kQualcomm_GrGLVendor,
+    kNVIDIA_GrGLVendor,

     kOther_GrGLVendor
 };
@@ -137,9 +137,7 @@ GrGpuGL::GrGpuGL(const GrGLContext& ctx, GrContext* context)
         GrPrintf("------ RENDERER %s\n", renderer);
         GrPrintf("------ VERSION %s\n", version);
         GrPrintf("------ EXTENSIONS\n");
-#if 0   // TODO: Reenable this after GrGLInterface's extensions can be accessed safely.
-        ctx.extensions().print();
-#endif
+        ctx.extensions().print();
         GrPrintf("\n");
         GrPrintf(this->glCaps().dump().c_str());
     }
@@ -574,13 +572,21 @@ bool GrGpuGL::uploadTexData(const GrGLTexture::Desc& desc,
     }

     GrGLenum internalFormat;
-    GrGLenum externalFormat;
-    GrGLenum externalType;
+    GrGLenum externalFormat = 0x0; // suppress warning
+    GrGLenum externalType = 0x0;   // suppress warning

     // glTexStorage requires sized internal formats on both desktop and ES. ES2 requires an unsized
     // format for glTexImage, unlike ES3 and desktop. However, we allow the driver to decide the
     // size of the internal format whenever possible and so only use a sized internal format when
     // using texture storage.
-    if (!this->configToGLFormats(dataConfig, useTexStorage, &internalFormat,
+    bool useSizedFormat = useTexStorage;
+    // At least some versions of the desktop ES3 drivers for NVIDIA won't accept GL_RED in
+    // glTexImage2D for the internal format but will accept GL_R8.
+    if (!useSizedFormat && kNVIDIA_GrGLVendor == this->glContext().vendor() &&
+        kGLES_GrGLStandard == this->glStandard() && this->glVersion() >= GR_GL_VER(3, 0)) {
+        useSizedFormat = true;
+    }
+    if (!this->configToGLFormats(dataConfig, useSizedFormat, &internalFormat,
                                  &externalFormat, &externalType)) {
         return false;
     }
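At the GL call level, the two paths differ only in the internalformat argument; a minimal sketch with placeholder width/height/pixel values (the commit itself routes these calls through GrGLInterface rather than raw GL):

    // Unsized internal format: rejected by the affected NVIDIA ES3 drivers
    // on Windows when uploading alpha/red texture data.
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, 16, 16, 0,
                 GL_RED, GL_UNSIGNED_BYTE, pixels);

    // Sized internal format: accepted; this is what useSizedFormat selects.
    glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, 16, 16, 0,
                 GL_RED, GL_UNSIGNED_BYTE, pixels);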
@@ -681,68 +681,83 @@ static PixelPtr get_surface_ptr(SkSurface* surface, bool useGpu) {

 static void TestDeferredCanvasSurface(skiatest::Reporter* reporter, GrContextFactory* factory) {
     SkImageInfo imageSpec = SkImageInfo::MakeN32Premul(10, 10);
-    SkSurface* surface;
     bool useGpu = NULL != factory;
+    int cnt;
 #if SK_SUPPORT_GPU
     if (useGpu) {
-        GrContext* context = factory->get(GrContextFactory::kNative_GLContextType);
-        if (NULL == context) {
-            return;
-        }
-
-        surface = SkSurface::NewRenderTarget(context, imageSpec);
+        cnt = GrContextFactory::kGLContextTypeCnt;
     } else {
-        surface = SkSurface::NewRaster(imageSpec);
+        cnt = 1;
     }
 #else
     SkASSERT(!useGpu);
-    surface = SkSurface::NewRaster(imageSpec);
+    cnt = 1;
 #endif
-    SkASSERT(NULL != surface);
-    SkAutoTUnref<SkSurface> aur(surface);
-    SkAutoTUnref<SkDeferredCanvas> canvas(SkDeferredCanvas::Create(surface));
-
-    SkImage* image1 = canvas->newImageSnapshot();
-    SkAutoTUnref<SkImage> aur_i1(image1);
-    PixelPtr pixels1 = get_surface_ptr(surface, useGpu);
-    // The following clear would normally trigger a copy on write, but
-    // it won't because rendering is deferred.
-    canvas->clear(SK_ColorBLACK);
-    // Obtaining a snapshot directly from the surface (as opposed to the
-    // SkDeferredCanvas) will not trigger a flush of deferred draw operations
-    // and will therefore return the same image as the previous snapshot.
-    SkImage* image2 = surface->newImageSnapshot();
-    SkAutoTUnref<SkImage> aur_i2(image2);
-    // Images identical because of deferral
-    REPORTER_ASSERT(reporter, image1->uniqueID() == image2->uniqueID());
-    // Now we obtain a snapshot via the deferred canvas, which triggers a flush.
-    // Because there is a pending clear, this will generate a different image.
-    SkImage* image3 = canvas->newImageSnapshot();
-    SkAutoTUnref<SkImage> aur_i3(image3);
-    REPORTER_ASSERT(reporter, image1->uniqueID() != image3->uniqueID());
-    // Verify that backing store is now a different buffer because of copy on
-    // write
-    PixelPtr pixels2 = get_surface_ptr(surface, useGpu);
-    REPORTER_ASSERT(reporter, pixels1 != pixels2);
-    // Verify copy-on write with a draw operation that gets deferred by
-    // the in order draw buffer.
-    SkPaint paint;
-    canvas->drawPaint(paint);
-    SkImage* image4 = canvas->newImageSnapshot();  // implicit flush
-    SkAutoTUnref<SkImage> aur_i4(image4);
-    REPORTER_ASSERT(reporter, image4->uniqueID() != image3->uniqueID());
-    PixelPtr pixels3 = get_surface_ptr(surface, useGpu);
-    REPORTER_ASSERT(reporter, pixels2 != pixels3);
-    // Verify that a direct canvas flush with a pending draw does not trigger
-    // a copy on write when the surface is not sharing its buffer with an
-    // SkImage.
-    canvas->clear(SK_ColorWHITE);
-    canvas->flush();
-    PixelPtr pixels4 = get_surface_ptr(surface, useGpu);
-    canvas->drawPaint(paint);
-    canvas->flush();
-    PixelPtr pixels5 = get_surface_ptr(surface, useGpu);
-    REPORTER_ASSERT(reporter, pixels4 == pixels5);
+    for (int i = 0; i < cnt; ++i) {
+        SkSurface* surface;
+#if SK_SUPPORT_GPU
+        if (useGpu) {
+            GrContextFactory::GLContextType glCtxType = (GrContextFactory::GLContextType) i;
+            if (!GrContextFactory::IsRenderingGLContext(glCtxType)) {
+                continue;
+            }
+            GrContext* context = factory->get(glCtxType);
+            if (NULL == context) {
+                return;
+            }
+            surface = SkSurface::NewRenderTarget(context, imageSpec);
+        } else
+#endif
+        {
+            surface = SkSurface::NewRaster(imageSpec);
+        }
+        SkASSERT(NULL != surface);
+        SkAutoTUnref<SkSurface> aur(surface);
+        SkAutoTUnref<SkDeferredCanvas> canvas(SkDeferredCanvas::Create(surface));
+
+        SkImage* image1 = canvas->newImageSnapshot();
+        SkAutoTUnref<SkImage> aur_i1(image1);
+        PixelPtr pixels1 = get_surface_ptr(surface, useGpu);
+        // The following clear would normally trigger a copy on write, but
+        // it won't because rendering is deferred.
+        canvas->clear(SK_ColorBLACK);
+        // Obtaining a snapshot directly from the surface (as opposed to the
+        // SkDeferredCanvas) will not trigger a flush of deferred draw operations
+        // and will therefore return the same image as the previous snapshot.
+        SkImage* image2 = surface->newImageSnapshot();
+        SkAutoTUnref<SkImage> aur_i2(image2);
+        // Images identical because of deferral
+        REPORTER_ASSERT(reporter, image1->uniqueID() == image2->uniqueID());
+        // Now we obtain a snapshot via the deferred canvas, which triggers a flush.
+        // Because there is a pending clear, this will generate a different image.
+        SkImage* image3 = canvas->newImageSnapshot();
+        SkAutoTUnref<SkImage> aur_i3(image3);
+        REPORTER_ASSERT(reporter, image1->uniqueID() != image3->uniqueID());
+        // Verify that backing store is now a different buffer because of copy on
+        // write
+        PixelPtr pixels2 = get_surface_ptr(surface, useGpu);
+        REPORTER_ASSERT(reporter, pixels1 != pixels2);
+        // Verify copy-on write with a draw operation that gets deferred by
+        // the in order draw buffer.
+        SkPaint paint;
+        canvas->drawPaint(paint);
+        SkImage* image4 = canvas->newImageSnapshot();  // implicit flush
+        SkAutoTUnref<SkImage> aur_i4(image4);
+        REPORTER_ASSERT(reporter, image4->uniqueID() != image3->uniqueID());
+        PixelPtr pixels3 = get_surface_ptr(surface, useGpu);
+        REPORTER_ASSERT(reporter, pixels2 != pixels3);
+        // Verify that a direct canvas flush with a pending draw does not trigger
+        // a copy on write when the surface is not sharing its buffer with an
+        // SkImage.
+        canvas->clear(SK_ColorWHITE);
+        canvas->flush();
+        PixelPtr pixels4 = get_surface_ptr(surface, useGpu);
+        canvas->drawPaint(paint);
+        canvas->flush();
+        PixelPtr pixels5 = get_surface_ptr(surface, useGpu);
+        REPORTER_ASSERT(reporter, pixels4 == pixels5);
+    }
 }

 static void TestDeferredCanvasSetSurface(skiatest::Reporter* reporter, GrContextFactory* factory) {
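The remaining test changes in this commit all apply the same transformation; the shared skeleton, assembled from the hunks below, is:

    // Iterate over every GL context type, skipping types that cannot render
    // (e.g. the null context) and types this machine cannot create.
    for (int i = 0; i < GrContextFactory::kGLContextTypeCnt; ++i) {
        GrContextFactory::GLContextType glCtxType = (GrContextFactory::GLContextType) i;
        if (!GrContextFactory::IsRenderingGLContext(glCtxType)) {
            continue;
        }
        GrContext* context = factory->get(glCtxType);
        if (NULL == context) {
            continue;  // this context type is not available here
        }
        // ... run the test body against this context ...
    }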
@@ -750,42 +765,57 @@ static void TestDeferredCanvasSetSurface(skiatest::Reporter* reporter, GrContextFactory* factory) {
-    SkSurface* surface;
-    SkSurface* alternateSurface;
     bool useGpu = NULL != factory;
+    int cnt;
 #if SK_SUPPORT_GPU
     if (useGpu) {
-        GrContext* context = factory->get(GrContextFactory::kNative_GLContextType);
-        if (NULL == context) {
-            return;
-        }
-        surface = SkSurface::NewRenderTarget(context, imageSpec);
-        alternateSurface = SkSurface::NewRenderTarget(context, imageSpec);
+        cnt = GrContextFactory::kGLContextTypeCnt;
     } else {
-        surface = SkSurface::NewRaster(imageSpec);
-        alternateSurface = SkSurface::NewRaster(imageSpec);
+        cnt = 1;
     }
 #else
     SkASSERT(!useGpu);
-    surface = SkSurface::NewRaster(imageSpec);
-    alternateSurface = SkSurface::NewRaster(imageSpec);
+    cnt = 1;
 #endif
-    SkASSERT(NULL != surface);
-    SkASSERT(NULL != alternateSurface);
-    SkAutoTUnref<SkSurface> aur1(surface);
-    SkAutoTUnref<SkSurface> aur2(alternateSurface);
-    PixelPtr pixels1 = get_surface_ptr(surface, useGpu);
-    PixelPtr pixels2 = get_surface_ptr(alternateSurface, useGpu);
-    SkAutoTUnref<SkDeferredCanvas> canvas(SkDeferredCanvas::Create(surface));
-    SkAutoTUnref<SkImage> image1(canvas->newImageSnapshot());
-    canvas->setSurface(alternateSurface);
-    SkAutoTUnref<SkImage> image2(canvas->newImageSnapshot());
-    REPORTER_ASSERT(reporter, image1->uniqueID() != image2->uniqueID());
-    // Verify that none of the above operations triggered a surface copy on write.
-    REPORTER_ASSERT(reporter, get_surface_ptr(surface, useGpu) == pixels1);
-    REPORTER_ASSERT(reporter, get_surface_ptr(alternateSurface, useGpu) == pixels2);
-    // Verify that a flushed draw command will trigger a copy on write on alternateSurface.
-    canvas->clear(SK_ColorWHITE);
-    canvas->flush();
-    REPORTER_ASSERT(reporter, get_surface_ptr(surface, useGpu) == pixels1);
-    REPORTER_ASSERT(reporter, get_surface_ptr(alternateSurface, useGpu) != pixels2);
+
+    for (int i = 0; i < cnt; ++i) {
+        SkSurface* surface;
+        SkSurface* alternateSurface;
+#if SK_SUPPORT_GPU
+        if (useGpu) {
+            GrContextFactory::GLContextType glCtxType = (GrContextFactory::GLContextType) i;
+            if (!GrContextFactory::IsRenderingGLContext(glCtxType)) {
+                continue;
+            }
+            GrContext* context = factory->get(glCtxType);
+            if (NULL == context) {
+                continue;
+            }
+            surface = SkSurface::NewRenderTarget(context, imageSpec);
+            alternateSurface = SkSurface::NewRenderTarget(context, imageSpec);
+        } else
+#endif
+        {
+            surface = SkSurface::NewRaster(imageSpec);
+            alternateSurface = SkSurface::NewRaster(imageSpec);
+        }
+        SkASSERT(NULL != surface);
+        SkASSERT(NULL != alternateSurface);
+        SkAutoTUnref<SkSurface> aur1(surface);
+        SkAutoTUnref<SkSurface> aur2(alternateSurface);
+        PixelPtr pixels1 = get_surface_ptr(surface, useGpu);
+        PixelPtr pixels2 = get_surface_ptr(alternateSurface, useGpu);
+        SkAutoTUnref<SkDeferredCanvas> canvas(SkDeferredCanvas::Create(surface));
+        SkAutoTUnref<SkImage> image1(canvas->newImageSnapshot());
+        canvas->setSurface(alternateSurface);
+        SkAutoTUnref<SkImage> image2(canvas->newImageSnapshot());
+        REPORTER_ASSERT(reporter, image1->uniqueID() != image2->uniqueID());
+        // Verify that none of the above operations triggered a surface copy on write.
+        REPORTER_ASSERT(reporter, get_surface_ptr(surface, useGpu) == pixels1);
+        REPORTER_ASSERT(reporter, get_surface_ptr(alternateSurface, useGpu) == pixels2);
+        // Verify that a flushed draw command will trigger a copy on write on alternateSurface.
+        canvas->clear(SK_ColorWHITE);
+        canvas->flush();
+        REPORTER_ASSERT(reporter, get_surface_ptr(surface, useGpu) == pixels1);
+        REPORTER_ASSERT(reporter, get_surface_ptr(alternateSurface, useGpu) != pixels2);
+    }
 }

 static void TestDeferredCanvasCreateCompatibleDevice(skiatest::Reporter* reporter) {
@@ -147,6 +147,10 @@ bool GrGpuGL::programUnitTest(int maxStages) {
     dummyDesc.fHeight = 22;
     SkAutoTUnref<GrTexture> dummyTexture2(this->createTexture(dummyDesc, NULL, 0));

+    if (!dummyTexture1 || !dummyTexture2) {
+        return false;
+    }
+
     static const int NUM_TESTS = 512;

     SkRandom random;
@@ -49,92 +49,100 @@ static void create_layers(skiatest::Reporter* reporter,
 // locking & unlocking textures).
 // TODO: need to add checks on VRAM usage!
 DEF_GPUTEST(GpuLayerCache, reporter, factory) {
-    GrContext* context = factory->get(GrContextFactory::kNative_GLContextType);
-    if (NULL == context) {
-        return;
-    }
-
-    SkPictureRecorder recorder;
-    recorder.beginRecording(1, 1);
-    SkAutoTUnref<const SkPicture> picture(recorder.endRecording());
-
-    GrLayerCache cache(context);
-
-    create_layers(reporter, &cache, *picture);
-
-    // Lock the layers making them all 512x512
-    GrTextureDesc desc;
-    desc.fWidth = 512;
-    desc.fHeight = 512;
-    desc.fConfig = kSkia8888_GrPixelConfig;
-
-    for (int i = 0; i < kNumLayers; ++i) {
-        GrCachedLayer* layer = cache.findLayer(picture, i);
-        REPORTER_ASSERT(reporter, NULL != layer);
-
-        bool foundInCache = cache.lock(layer, desc);
-        REPORTER_ASSERT(reporter, !foundInCache);
-        foundInCache = cache.lock(layer, desc);
-        REPORTER_ASSERT(reporter, foundInCache);
-
-        REPORTER_ASSERT(reporter, NULL != layer->texture());
-#if USE_ATLAS
-        // The first 4 layers should be in the atlas (and thus have non-empty
-        // rects)
-        if (i < 4) {
-            REPORTER_ASSERT(reporter, layer->isAtlased());
-        } else {
-#endif
-            REPORTER_ASSERT(reporter, !layer->isAtlased());
-#if USE_ATLAS
-        }
-#endif
-    }
-
-    // Unlock the textures
-    for (int i = 0; i < kNumLayers; ++i) {
-        GrCachedLayer* layer = cache.findLayer(picture, i);
-        REPORTER_ASSERT(reporter, NULL != layer);
-
-        cache.unlock(layer);
-    }
-
-    for (int i = 0; i < kNumLayers; ++i) {
-        GrCachedLayer* layer = cache.findLayer(picture, i);
-        REPORTER_ASSERT(reporter, NULL != layer);
-
-#if USE_ATLAS
-        // The first 4 layers should be in the atlas (and thus do not
-        // currently unlock). The final layer should be unlocked.
-        if (i < 4) {
-            REPORTER_ASSERT(reporter, NULL != layer->texture());
-            REPORTER_ASSERT(reporter, layer->isAtlased());
-        } else {
-#endif
-            REPORTER_ASSERT(reporter, NULL == layer->texture());
-            REPORTER_ASSERT(reporter, !layer->isAtlased());
-#if USE_ATLAS
-        }
-#endif
-    }
-
-    // Free them all SkGpuDevice-style. This will not free up the
-    // atlas' texture but will eliminate all the layers.
-    cache.purge(picture);
-
-    REPORTER_ASSERT(reporter, GetNumLayers::NumLayers(&cache) == 0);
-    // TODO: add VRAM/resource cache check here
-#if 0
-    // Re-create the layers
-    create_layers(reporter, &cache, picture);
-
-    // Free them again GrContext-style. This should free up everything.
-    cache.freeAll();
-
-    REPORTER_ASSERT(reporter, GetNumLayers::NumLayers(&cache) == 0);
-    // TODO: add VRAM/resource cache check here
-#endif
-}
+    for (int i = 0; i < GrContextFactory::kGLContextTypeCnt; ++i) {
+        GrContextFactory::GLContextType glCtxType = (GrContextFactory::GLContextType) i;
+
+        if (!GrContextFactory::IsRenderingGLContext(glCtxType)) {
+            continue;
+        }
+
+        GrContext* context = factory->get(glCtxType);
+
+        if (NULL == context) {
+            continue;
+        }
+
+        SkPictureRecorder recorder;
+        recorder.beginRecording(1, 1);
+        SkAutoTUnref<const SkPicture> picture(recorder.endRecording());
+
+        GrLayerCache cache(context);
+
+        create_layers(reporter, &cache, *picture);
+
+        // Lock the layers making them all 512x512
+        GrTextureDesc desc;
+        desc.fWidth = 512;
+        desc.fHeight = 512;
+        desc.fConfig = kSkia8888_GrPixelConfig;
+
+        for (int i = 0; i < kNumLayers; ++i) {
+            GrCachedLayer* layer = cache.findLayer(picture, i);
+            REPORTER_ASSERT(reporter, NULL != layer);
+
+            bool foundInCache = cache.lock(layer, desc);
+            REPORTER_ASSERT(reporter, !foundInCache);
+            foundInCache = cache.lock(layer, desc);
+            REPORTER_ASSERT(reporter, foundInCache);
+
+            REPORTER_ASSERT(reporter, NULL != layer->texture());
+#if USE_ATLAS
+            // The first 4 layers should be in the atlas (and thus have non-empty
+            // rects)
+            if (i < 4) {
+                REPORTER_ASSERT(reporter, layer->isAtlased());
+            } else {
+#endif
+                REPORTER_ASSERT(reporter, !layer->isAtlased());
+#if USE_ATLAS
+            }
+#endif
+        }
+
+        // Unlock the textures
+        for (int i = 0; i < kNumLayers; ++i) {
+            GrCachedLayer* layer = cache.findLayer(picture, i);
+            REPORTER_ASSERT(reporter, NULL != layer);
+
+            cache.unlock(layer);
+        }
+
+        for (int i = 0; i < kNumLayers; ++i) {
+            GrCachedLayer* layer = cache.findLayer(picture, i);
+            REPORTER_ASSERT(reporter, NULL != layer);
+
+#if USE_ATLAS
+            // The first 4 layers should be in the atlas (and thus do not
+            // currently unlock). The final layer should be unlocked.
+            if (i < 4) {
+                REPORTER_ASSERT(reporter, NULL != layer->texture());
+                REPORTER_ASSERT(reporter, layer->isAtlased());
+            } else {
+#endif
+                REPORTER_ASSERT(reporter, NULL == layer->texture());
+                REPORTER_ASSERT(reporter, !layer->isAtlased());
+#if USE_ATLAS
+            }
+#endif
+        }
+
+        // Free them all SkGpuDevice-style. This will not free up the
+        // atlas' texture but will eliminate all the layers.
+        cache.purge(picture);
+
+        REPORTER_ASSERT(reporter, GetNumLayers::NumLayers(&cache) == 0);
+        // TODO: add VRAM/resource cache check here
+#if 0
+        // Re-create the layers
+        create_layers(reporter, &cache, picture);
+
+        // Free them again GrContext-style. This should free up everything.
+        cache.freeAll();
+
+        REPORTER_ASSERT(reporter, GetNumLayers::NumLayers(&cache) == 0);
+        // TODO: add VRAM/resource cache check here
+#endif
+    }
+}

 #endif
@@ -16,17 +16,17 @@ DEF_GPUTEST(GrContextFactory, reporter, factory) {

     // Before we ask for a context, we expect the GL context to not be there.
     REPORTER_ASSERT(reporter,
-                    NULL == factory->getGLContext(GrContextFactory::kNative_GLContextType));
+                    NULL == factory->getGLContext(GrContextFactory::kNull_GLContextType));

     // After we ask for a context, we expect the GL context to be there.
-    factory->get(GrContextFactory::kNative_GLContextType);
+    factory->get(GrContextFactory::kNull_GLContextType);
     REPORTER_ASSERT(reporter,
-                    factory->getGLContext(GrContextFactory::kNative_GLContextType) != NULL);
+                    factory->getGLContext(GrContextFactory::kNull_GLContextType) != NULL);

     // If we did not ask for a context with the particular GL context, we would
     // expect the particular GL context to not be there.
     REPORTER_ASSERT(reporter,
-                    NULL == factory->getGLContext(GrContextFactory::kNull_GLContextType));
+                    NULL == factory->getGLContext(GrContextFactory::kDebug_GLContextType));
 }

 #endif
@@ -92,17 +92,29 @@ void rasterToGpu(skiatest::Reporter* reporter, GrContext* context) {
 }

 DEF_GPUTEST(ImageNewShader_GPU, reporter, factory) {
-    GrContext* context = factory->get(GrContextFactory::kNative_GLContextType);
+    for (int i = 0; i < GrContextFactory::kGLContextTypeCnt; ++i) {
+        GrContextFactory::GLContextType glCtxType = (GrContextFactory::GLContextType) i;

-    // GPU -> GPU
-    gpuToGpu(reporter, context);
+        if (!GrContextFactory::IsRenderingGLContext(glCtxType)) {
+            continue;
+        }

-    // GPU -> RASTER
-    gpuToRaster(reporter, context);
+        GrContext* context = factory->get(glCtxType);

+        if (NULL == context) {
+            continue;
+        }

-    // RASTER -> GPU
-    rasterToGpu(reporter, context);
+        // GPU -> GPU
+        gpuToGpu(reporter, context);
+
+        // GPU -> RASTER
+        gpuToRaster(reporter, context);
+
+        // RASTER -> GPU
+        rasterToGpu(reporter, context);
+    }
 }

 #endif
@@ -780,129 +780,140 @@ static void test_gpu_veto(skiatest::Reporter* reporter) {

 static void test_gpu_picture_optimization(skiatest::Reporter* reporter,
                                           GrContextFactory* factory) {
-    GrContext* context = factory->get(GrContextFactory::kNative_GLContextType);
-
-    static const int kWidth = 100;
-    static const int kHeight = 100;
-
-    SkAutoTUnref<SkPicture> pict;
-
-    // create a picture with the structure:
-    // 1)
-    //      SaveLayer
-    //      Restore
-    // 2)
-    //      SaveLayer
-    //          Translate
-    //          SaveLayer w/ bound
-    //          Restore
-    //      Restore
-    // 3)
-    //      SaveLayer w/ copyable paint
-    //      Restore
-    // 4)
-    //      SaveLayer w/ non-copyable paint
-    //      Restore
-    {
-        SkPictureRecorder recorder;
-
-        SkCanvas* c = recorder.beginRecording(kWidth, kHeight);
-        // 1)
-        c->saveLayer(NULL, NULL);
-        c->restore();
-
-        // 2)
-        c->saveLayer(NULL, NULL);
-        c->translate(kWidth/2, kHeight/2);
-        SkRect r = SkRect::MakeXYWH(0, 0, kWidth/2, kHeight/2);
-        c->saveLayer(&r, NULL);
-        c->restore();
-        c->restore();
-
-        // 3)
-        {
-            SkPaint p;
-            p.setColor(SK_ColorRED);
-            c->saveLayer(NULL, &p);
-            c->restore();
-        }
-        // 4)
-        // TODO: this case will need to be removed once the paints are immutable
-        {
-            SkPaint p;
-            SkAutoTUnref<SkColorFilter> cf(SkLumaColorFilter::Create());
-            p.setImageFilter(SkColorFilterImageFilter::Create(cf.get()))->unref();
-            c->saveLayer(NULL, &p);
-            c->restore();
-        }
-
-        pict.reset(recorder.endRecording());
-    }
-
-    // Now test out the SaveLayer extraction
-    {
-        SkImageInfo info = SkImageInfo::MakeN32Premul(kWidth, kHeight);
-
-        SkAutoTUnref<SkSurface> surface(SkSurface::NewScratchRenderTarget(context, info));
-
-        SkCanvas* canvas = surface->getCanvas();
-
-        canvas->EXPERIMENTAL_optimize(pict);
-
-        SkPicture::AccelData::Key key = GPUAccelData::ComputeAccelDataKey();
-
-        const SkPicture::AccelData* data = pict->EXPERIMENTAL_getAccelData(key);
-        REPORTER_ASSERT(reporter, NULL != data);
-
-        const GPUAccelData *gpuData = static_cast<const GPUAccelData*>(data);
-        REPORTER_ASSERT(reporter, 5 == gpuData->numSaveLayers());
-
-        const GPUAccelData::SaveLayerInfo& info0 = gpuData->saveLayerInfo(0);
-        // The parent/child layer appear in reverse order
-        const GPUAccelData::SaveLayerInfo& info1 = gpuData->saveLayerInfo(2);
-        const GPUAccelData::SaveLayerInfo& info2 = gpuData->saveLayerInfo(1);
-        const GPUAccelData::SaveLayerInfo& info3 = gpuData->saveLayerInfo(3);
-        // const GPUAccelData::SaveLayerInfo& info4 = gpuData->saveLayerInfo(4);
-
-        REPORTER_ASSERT(reporter, info0.fValid);
-        REPORTER_ASSERT(reporter, kWidth == info0.fSize.fWidth && kHeight == info0.fSize.fHeight);
-        REPORTER_ASSERT(reporter, info0.fCTM.isIdentity());
-        REPORTER_ASSERT(reporter, 0 == info0.fOffset.fX && 0 == info0.fOffset.fY);
-        REPORTER_ASSERT(reporter, NULL != info0.fPaint);
-        REPORTER_ASSERT(reporter, !info0.fIsNested && !info0.fHasNestedLayers);
-
-        REPORTER_ASSERT(reporter, info1.fValid);
-        REPORTER_ASSERT(reporter, kWidth == info1.fSize.fWidth && kHeight == info1.fSize.fHeight);
-        REPORTER_ASSERT(reporter, info1.fCTM.isIdentity());
-        REPORTER_ASSERT(reporter, 0 == info1.fOffset.fX && 0 == info1.fOffset.fY);
-        REPORTER_ASSERT(reporter, NULL != info1.fPaint);
-        REPORTER_ASSERT(reporter, !info1.fIsNested && info1.fHasNestedLayers); // has a nested SL
-
-        REPORTER_ASSERT(reporter, info2.fValid);
-        REPORTER_ASSERT(reporter, kWidth/2 == info2.fSize.fWidth &&
-                                  kHeight/2 == info2.fSize.fHeight); // bound reduces size
-        REPORTER_ASSERT(reporter, info2.fCTM.isIdentity()); // translated
-        REPORTER_ASSERT(reporter, kWidth/2 == info2.fOffset.fX &&
-                                  kHeight/2 == info2.fOffset.fY);
-        REPORTER_ASSERT(reporter, NULL != info1.fPaint);
-        REPORTER_ASSERT(reporter, info2.fIsNested && !info2.fHasNestedLayers); // is nested
-
-        REPORTER_ASSERT(reporter, info3.fValid);
-        REPORTER_ASSERT(reporter, kWidth == info3.fSize.fWidth && kHeight == info3.fSize.fHeight);
-        REPORTER_ASSERT(reporter, info3.fCTM.isIdentity());
-        REPORTER_ASSERT(reporter, 0 == info3.fOffset.fX && 0 == info3.fOffset.fY);
-        REPORTER_ASSERT(reporter, NULL != info3.fPaint);
-        REPORTER_ASSERT(reporter, !info3.fIsNested && !info3.fHasNestedLayers);
-
-#if 0 // needs more thought for GrGatherCanvas
-        REPORTER_ASSERT(reporter, !info4.fValid); // paint is/was uncopyable
-        REPORTER_ASSERT(reporter, kWidth == info4.fSize.fWidth && kHeight == info4.fSize.fHeight);
-        REPORTER_ASSERT(reporter, 0 == info4.fOffset.fX && 0 == info4.fOffset.fY);
-        REPORTER_ASSERT(reporter, info4.fCTM.isIdentity());
-        REPORTER_ASSERT(reporter, NULL == info4.fPaint); // paint is/was uncopyable
-        REPORTER_ASSERT(reporter, !info4.fIsNested && !info4.fHasNestedLayers);
-#endif
-    }
+    for (int i = 0; i < GrContextFactory::kGLContextTypeCnt; ++i) {
+        GrContextFactory::GLContextType glCtxType = (GrContextFactory::GLContextType) i;
+
+        if (!GrContextFactory::IsRenderingGLContext(glCtxType)) {
+            continue;
+        }
+
+        GrContext* context = factory->get(glCtxType);
+
+        if (NULL == context) {
+            continue;
+        }
+
+        static const int kWidth = 100;
+        static const int kHeight = 100;
+
+        SkAutoTUnref<SkPicture> pict;
+
+        // create a picture with the structure:
+        // 1)
+        //      SaveLayer
+        //      Restore
+        // 2)
+        //      SaveLayer
+        //          Translate
+        //          SaveLayer w/ bound
+        //          Restore
+        //      Restore
+        // 3)
+        //      SaveLayer w/ copyable paint
+        //      Restore
+        // 4)
+        //      SaveLayer w/ non-copyable paint
+        //      Restore
+        {
+            SkPictureRecorder recorder;
+
+            SkCanvas* c = recorder.beginRecording(kWidth, kHeight);
+            // 1)
+            c->saveLayer(NULL, NULL);
+            c->restore();
+
+            // 2)
+            c->saveLayer(NULL, NULL);
+            c->translate(kWidth/2, kHeight/2);
+            SkRect r = SkRect::MakeXYWH(0, 0, kWidth/2, kHeight/2);
+            c->saveLayer(&r, NULL);
+            c->restore();
+            c->restore();
+
+            // 3)
+            {
+                SkPaint p;
+                p.setColor(SK_ColorRED);
+                c->saveLayer(NULL, &p);
+                c->restore();
+            }
+            // 4)
+            // TODO: this case will need to be removed once the paints are immutable
+            {
+                SkPaint p;
+                SkAutoTUnref<SkColorFilter> cf(SkLumaColorFilter::Create());
+                p.setImageFilter(SkColorFilterImageFilter::Create(cf.get()))->unref();
+                c->saveLayer(NULL, &p);
+                c->restore();
+            }
+
+            pict.reset(recorder.endRecording());
+        }
+
+        // Now test out the SaveLayer extraction
+        {
+            SkImageInfo info = SkImageInfo::MakeN32Premul(kWidth, kHeight);
+
+            SkAutoTUnref<SkSurface> surface(SkSurface::NewScratchRenderTarget(context, info));
+
+            SkCanvas* canvas = surface->getCanvas();
+
+            canvas->EXPERIMENTAL_optimize(pict);
+
+            SkPicture::AccelData::Key key = GPUAccelData::ComputeAccelDataKey();
+
+            const SkPicture::AccelData* data = pict->EXPERIMENTAL_getAccelData(key);
+            REPORTER_ASSERT(reporter, NULL != data);
+
+            const GPUAccelData *gpuData = static_cast<const GPUAccelData*>(data);
+            REPORTER_ASSERT(reporter, 5 == gpuData->numSaveLayers());
+
+            const GPUAccelData::SaveLayerInfo& info0 = gpuData->saveLayerInfo(0);
+            // The parent/child layer appear in reverse order
+            const GPUAccelData::SaveLayerInfo& info1 = gpuData->saveLayerInfo(2);
+            const GPUAccelData::SaveLayerInfo& info2 = gpuData->saveLayerInfo(1);
+            const GPUAccelData::SaveLayerInfo& info3 = gpuData->saveLayerInfo(3);
+            // const GPUAccelData::SaveLayerInfo& info4 = gpuData->saveLayerInfo(4);
+
+            REPORTER_ASSERT(reporter, info0.fValid);
+            REPORTER_ASSERT(reporter, kWidth == info0.fSize.fWidth && kHeight == info0.fSize.fHeight);
+            REPORTER_ASSERT(reporter, info0.fCTM.isIdentity());
+            REPORTER_ASSERT(reporter, 0 == info0.fOffset.fX && 0 == info0.fOffset.fY);
+            REPORTER_ASSERT(reporter, NULL != info0.fPaint);
+            REPORTER_ASSERT(reporter, !info0.fIsNested && !info0.fHasNestedLayers);
+
+            REPORTER_ASSERT(reporter, info1.fValid);
+            REPORTER_ASSERT(reporter, kWidth == info1.fSize.fWidth && kHeight == info1.fSize.fHeight);
+            REPORTER_ASSERT(reporter, info1.fCTM.isIdentity());
+            REPORTER_ASSERT(reporter, 0 == info1.fOffset.fX && 0 == info1.fOffset.fY);
+            REPORTER_ASSERT(reporter, NULL != info1.fPaint);
+            REPORTER_ASSERT(reporter, !info1.fIsNested && info1.fHasNestedLayers); // has a nested SL
+
+            REPORTER_ASSERT(reporter, info2.fValid);
+            REPORTER_ASSERT(reporter, kWidth/2 == info2.fSize.fWidth &&
+                                      kHeight/2 == info2.fSize.fHeight); // bound reduces size
+            REPORTER_ASSERT(reporter, info2.fCTM.isIdentity()); // translated
+            REPORTER_ASSERT(reporter, kWidth/2 == info2.fOffset.fX &&
+                                      kHeight/2 == info2.fOffset.fY);
+            REPORTER_ASSERT(reporter, NULL != info1.fPaint);
+            REPORTER_ASSERT(reporter, info2.fIsNested && !info2.fHasNestedLayers); // is nested
+
+            REPORTER_ASSERT(reporter, info3.fValid);
+            REPORTER_ASSERT(reporter, kWidth == info3.fSize.fWidth && kHeight == info3.fSize.fHeight);
+            REPORTER_ASSERT(reporter, info3.fCTM.isIdentity());
+            REPORTER_ASSERT(reporter, 0 == info3.fOffset.fX && 0 == info3.fOffset.fY);
+            REPORTER_ASSERT(reporter, NULL != info3.fPaint);
+            REPORTER_ASSERT(reporter, !info3.fIsNested && !info3.fHasNestedLayers);
+
+#if 0 // needs more thought for GrGatherCanvas
+            REPORTER_ASSERT(reporter, !info4.fValid); // paint is/was uncopyable
+            REPORTER_ASSERT(reporter, kWidth == info4.fSize.fWidth && kHeight == info4.fSize.fHeight);
+            REPORTER_ASSERT(reporter, 0 == info4.fOffset.fX && 0 == info4.fOffset.fY);
+            REPORTER_ASSERT(reporter, info4.fCTM.isIdentity());
+            REPORTER_ASSERT(reporter, NULL == info4.fPaint); // paint is/was uncopyable
+            REPORTER_ASSERT(reporter, !info4.fIsNested && !info4.fHasNestedLayers);
+#endif
+        }
+    }
 }

@@ -170,37 +170,53 @@ static void test_canvaspeek(skiatest::Reporter* reporter,
     const SkColor color = SK_ColorRED;
     const SkPMColor pmcolor = SkPreMultiplyColor(color);

-    GrContext* context = NULL;
+    int cnt;
 #if SK_SUPPORT_GPU
-    context = factory->get(GrContextFactory::kNative_GLContextType);
+    cnt = GrContextFactory::kGLContextTypeCnt;
+#else
+    cnt = 1;
 #endif

-    for (size_t i = 0; i < SK_ARRAY_COUNT(gRec); ++i) {
-        SkImageInfo info, requestInfo;
-        size_t rowBytes;
+    for (int i = 0; i < cnt; ++i) {
+        GrContext* context = NULL;
+#if SK_SUPPORT_GPU
+        GrContextFactory::GLContextType glCtxType = (GrContextFactory::GLContextType) i;
+        if (!GrContextFactory::IsRenderingGLContext(glCtxType)) {
+            continue;
+        }
+        context = factory->get(glCtxType);

-        SkAutoTUnref<SkSurface> surface(createSurface(gRec[i].fType, context,
-                                                      &requestInfo));
-        surface->getCanvas()->clear(color);
+        if (NULL == context) {
+            continue;
+        }
+#endif
+        for (size_t i = 0; i < SK_ARRAY_COUNT(gRec); ++i) {
+            SkImageInfo info, requestInfo;
+            size_t rowBytes;

-        const void* addr = surface->getCanvas()->peekPixels(&info, &rowBytes);
-        bool success = (NULL != addr);
-        REPORTER_ASSERT(reporter, gRec[i].fPeekShouldSucceed == success);
+            SkAutoTUnref<SkSurface> surface(createSurface(gRec[i].fType, context,
+                                                          &requestInfo));
+            surface->getCanvas()->clear(color);

-        SkImageInfo info2;
-        size_t rb2;
-        const void* addr2 = surface->peekPixels(&info2, &rb2);
+            const void* addr = surface->getCanvas()->peekPixels(&info, &rowBytes);
+            bool success = (NULL != addr);
+            REPORTER_ASSERT(reporter, gRec[i].fPeekShouldSucceed == success);

-        if (success) {
-            REPORTER_ASSERT(reporter, requestInfo == info);
-            REPORTER_ASSERT(reporter, requestInfo.minRowBytes() <= rowBytes);
-            REPORTER_ASSERT(reporter, pmcolor == *(const SkPMColor*)addr);
+            SkImageInfo info2;
+            size_t rb2;
+            const void* addr2 = surface->peekPixels(&info2, &rb2);

-            REPORTER_ASSERT(reporter, addr2 == addr);
-            REPORTER_ASSERT(reporter, info2 == info);
-            REPORTER_ASSERT(reporter, rb2 == rowBytes);
-        } else {
-            REPORTER_ASSERT(reporter, NULL == addr2);
+            if (success) {
+                REPORTER_ASSERT(reporter, requestInfo == info);
+                REPORTER_ASSERT(reporter, requestInfo.minRowBytes() <= rowBytes);
+                REPORTER_ASSERT(reporter, pmcolor == *(const SkPMColor*)addr);
+
+                REPORTER_ASSERT(reporter, addr2 == addr);
+                REPORTER_ASSERT(reporter, info2 == info);
+                REPORTER_ASSERT(reporter, rb2 == rowBytes);
+            } else {
+                REPORTER_ASSERT(reporter, NULL == addr2);
+            }
         }
     }
 }
@@ -429,22 +445,28 @@ DEF_GPUTEST(Surface, reporter, factory) {
 #if SK_SUPPORT_GPU
     TestGetTexture(reporter, kRaster_SurfaceType, NULL);
     if (NULL != factory) {
-        GrContext* context = factory->get(GrContextFactory::kNative_GLContextType);
-        if (NULL != context) {
-            TestSurfaceInCache(reporter, kGpu_SurfaceType, context);
-            TestSurfaceInCache(reporter, kGpuScratch_SurfaceType, context);
-            Test_crbug263329(reporter, kGpu_SurfaceType, context);
-            Test_crbug263329(reporter, kGpuScratch_SurfaceType, context);
-            TestSurfaceCopyOnWrite(reporter, kGpu_SurfaceType, context);
-            TestSurfaceCopyOnWrite(reporter, kGpuScratch_SurfaceType, context);
-            TestSurfaceWritableAfterSnapshotRelease(reporter, kGpu_SurfaceType, context);
-            TestSurfaceWritableAfterSnapshotRelease(reporter, kGpuScratch_SurfaceType, context);
-            TestSurfaceNoCanvas(reporter, kGpu_SurfaceType, context, SkSurface::kDiscard_ContentChangeMode);
-            TestSurfaceNoCanvas(reporter, kGpuScratch_SurfaceType, context, SkSurface::kDiscard_ContentChangeMode);
-            TestSurfaceNoCanvas(reporter, kGpu_SurfaceType, context, SkSurface::kRetain_ContentChangeMode);
-            TestSurfaceNoCanvas(reporter, kGpuScratch_SurfaceType, context, SkSurface::kRetain_ContentChangeMode);
-            TestGetTexture(reporter, kGpu_SurfaceType, context);
-            TestGetTexture(reporter, kGpuScratch_SurfaceType, context);
+        for (int i = 0; i < GrContextFactory::kGLContextTypeCnt; ++i) {
+            GrContextFactory::GLContextType glCtxType = (GrContextFactory::GLContextType) i;
+            if (!GrContextFactory::IsRenderingGLContext(glCtxType)) {
+                continue;
+            }
+            GrContext* context = factory->get(glCtxType);
+            if (NULL != context) {
+                TestSurfaceInCache(reporter, kGpu_SurfaceType, context);
+                TestSurfaceInCache(reporter, kGpuScratch_SurfaceType, context);
+                Test_crbug263329(reporter, kGpu_SurfaceType, context);
+                Test_crbug263329(reporter, kGpuScratch_SurfaceType, context);
+                TestSurfaceCopyOnWrite(reporter, kGpu_SurfaceType, context);
+                TestSurfaceCopyOnWrite(reporter, kGpuScratch_SurfaceType, context);
+                TestSurfaceWritableAfterSnapshotRelease(reporter, kGpu_SurfaceType, context);
+                TestSurfaceWritableAfterSnapshotRelease(reporter, kGpuScratch_SurfaceType, context);
+                TestSurfaceNoCanvas(reporter, kGpu_SurfaceType, context, SkSurface::kDiscard_ContentChangeMode);
+                TestSurfaceNoCanvas(reporter, kGpuScratch_SurfaceType, context, SkSurface::kDiscard_ContentChangeMode);
+                TestSurfaceNoCanvas(reporter, kGpu_SurfaceType, context, SkSurface::kRetain_ContentChangeMode);
+                TestSurfaceNoCanvas(reporter, kGpuScratch_SurfaceType, context, SkSurface::kRetain_ContentChangeMode);
+                TestGetTexture(reporter, kGpu_SurfaceType, context);
+                TestGetTexture(reporter, kGpuScratch_SurfaceType, context);
+            }
         }
     }
 }
 #endif