Remove GrLayerHoister

This relies on https://codereview.chromium.org/1944013002/ (Add legacy flag to allow Skia to remove Ganesh layer hoister) landing first, so as not to break the DEPS roll.

GOLD_TRYBOT_URL= https://gold.skia.org/search2?unt=true&query=source_type%3Dgm&master=false&issue=1950523002

Review-Url: https://codereview.chromium.org/1950523002
Authored by robertphillips on 2016-07-13 13:27:16 -07:00; committed by Commit bot
parent e92badc3ff
commit dda54455a2
31 changed files with 17 additions and 3037 deletions
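
For orientation, here is a minimal, hypothetical sketch (not taken from the patch) of how a caller that previously requested saveLayer hoisting data records a picture once this lands; record_picture, the dimensions, and the drawColor() call are placeholders, and the API usage simply mirrors the bench/GM updates in the diffs below.

#include "SkBBHFactory.h"
#include "SkCanvas.h"
#include "SkPictureRecorder.h"

static sk_sp<SkPicture> record_picture(SkScalar w, SkScalar h) {
    SkRTreeFactory factory;   // optional bounding-box hierarchy, as in the benches below
    SkPictureRecorder recorder;
    // kComputeSaveLayerInfo_RecordFlag is gone (or legacy-guarded), so only the
    // remaining record flags are passed.
    uint32_t flags = SkPictureRecorder::kPlaybackDrawPicture_RecordFlag;
    SkCanvas* canvas = recorder.beginRecording(w, h, &factory, flags);
    canvas->drawColor(SK_ColorWHITE);   // placeholder for real draw calls
    return recorder.finishRecordingAsPicture();
}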

View File

@ -33,8 +33,7 @@ void RecordingBench::onDraw(int loops, SkCanvas*) {
const SkScalar w = fSrc->cullRect().width(),
h = fSrc->cullRect().height();
uint32_t flags = SkPictureRecorder::kComputeSaveLayerInfo_RecordFlag
| SkPictureRecorder::kPlaybackDrawPicture_RecordFlag;
uint32_t flags = SkPictureRecorder::kPlaybackDrawPicture_RecordFlag;
for (int i = 0; i < loops; i++) {
SkPictureRecorder recorder;
fSrc->playback(recorder.beginRecording(w, h, fUseBBH ? &factory : nullptr, flags));

View File

@ -705,11 +705,10 @@ public:
// The SKP we read off disk doesn't have a BBH. Re-record so it grows one.
SkRTreeFactory factory;
SkPictureRecorder recorder;
static const int kFlags = SkPictureRecorder::kComputeSaveLayerInfo_RecordFlag;
pic->playback(recorder.beginRecording(pic->cullRect().width(),
pic->cullRect().height(),
&factory,
fUseMPDs[fCurrentUseMPD] ? kFlags : 0));
0));
pic = recorder.finishRecordingAsPicture();
}
SkString name = SkOSPath::Basename(path.c_str());

View File

@ -59,8 +59,7 @@ static sk_sp<SkPicture> make_hex_plane_picture(SkColor fillColor) {
SkCanvas* canvas = recorder.beginRecording(SkIntToScalar(kPicWidth),
SkIntToScalar(kPicHeight),
&bbhFactory,
SkPictureRecorder::kComputeSaveLayerInfo_RecordFlag);
&bbhFactory);
SkScalar xPos, yPos = 0;
@ -108,8 +107,7 @@ static sk_sp<SkPicture> make_single_layer_hex_plane_picture() {
SkRTreeFactory bbhFactory;
static const SkScalar kBig = 10000.0f;
SkCanvas* canvas = recorder.beginRecording(kBig, kBig, &bbhFactory,
SkPictureRecorder::kComputeSaveLayerInfo_RecordFlag);
SkCanvas* canvas = recorder.beginRecording(kBig, kBig, &bbhFactory);
canvas->saveLayer(nullptr, nullptr);
@ -165,8 +163,7 @@ static sk_sp<SkPicture> make_tri_picture() {
SkCanvas* canvas = recorder.beginRecording(SkIntToScalar(kPicWidth),
SkIntToScalar(kPicHeight),
&bbhFactory,
SkPictureRecorder::kComputeSaveLayerInfo_RecordFlag);
&bbhFactory);
SkRect r = tri.getBounds();
r.outset(2.0f, 2.0f); // outset for stroke
canvas->clipRect(r);
@ -185,8 +182,7 @@ static sk_sp<SkPicture> make_sub_picture(const SkPicture* tri) {
SkCanvas* canvas = recorder.beginRecording(SkIntToScalar(kPicWidth),
SkIntToScalar(kPicHeight),
&bbhFactory,
SkPictureRecorder::kComputeSaveLayerInfo_RecordFlag);
&bbhFactory);
canvas->scale(1.0f/2.0f, 1.0f/2.0f);
@ -220,8 +216,7 @@ static sk_sp<SkPicture> make_sierpinski_picture() {
SkCanvas* canvas = recorder.beginRecording(SkIntToScalar(kPicWidth),
SkIntToScalar(kPicHeight),
&bbhFactory,
SkPictureRecorder::kComputeSaveLayerInfo_RecordFlag);
&bbhFactory);
static const int kNumLevels = 4;
for (int i = 0; i < kNumLevels; ++i) {
@ -362,8 +357,7 @@ static void create_content(SkMultiPictureDraw* mpd, PFContentMtd pfGen,
SkCanvas* pictureCanvas = recorder.beginRecording(SkIntToScalar(kPicWidth),
SkIntToScalar(kPicHeight),
&bbhFactory,
SkPictureRecorder::kComputeSaveLayerInfo_RecordFlag);
&bbhFactory);
(*pfGen)(pictureCanvas, pictures);

View File

@ -152,7 +152,6 @@
'<(skia_src_path)/core/SkImageCacherator.cpp',
'<(skia_src_path)/core/SkImageGenerator.cpp',
'<(skia_src_path)/core/SkImageGeneratorPriv.h',
'<(skia_src_path)/core/SkLayerInfo.h',
'<(skia_src_path)/core/SkLightingShader.h',
'<(skia_src_path)/core/SkLightingShader.cpp',
'<(skia_src_path)/core/SkLinearBitmapPipeline.cpp',

View File

@ -112,12 +112,6 @@
'<(skia_src_path)/gpu/GrImageIDTextureAdjuster.cpp',
'<(skia_src_path)/gpu/GrImageIDTextureAdjuster.h',
'<(skia_src_path)/gpu/GrInvariantOutput.cpp',
'<(skia_src_path)/gpu/GrLayerAtlas.cpp',
'<(skia_src_path)/gpu/GrLayerAtlas.h',
'<(skia_src_path)/gpu/GrLayerCache.cpp',
'<(skia_src_path)/gpu/GrLayerCache.h',
'<(skia_src_path)/gpu/GrLayerHoister.cpp',
'<(skia_src_path)/gpu/GrLayerHoister.h',
'<(skia_src_path)/gpu/GrMemoryPool.cpp',
'<(skia_src_path)/gpu/GrMemoryPool.h',
'<(skia_src_path)/gpu/GrMesh.h',
@ -154,8 +148,6 @@
'<(skia_src_path)/gpu/GrProcOptInfo.h',
'<(skia_src_path)/gpu/GrGpuResourceRef.cpp',
'<(skia_src_path)/gpu/GrQuad.h',
'<(skia_src_path)/gpu/GrRecordReplaceDraw.cpp',
'<(skia_src_path)/gpu/GrRecordReplaceDraw.h',
'<(skia_src_path)/gpu/GrRect.h',
'<(skia_src_path)/gpu/GrRectanizer.h',
'<(skia_src_path)/gpu/GrRectanizer_pow2.cpp',

View File

@ -310,19 +310,6 @@ protected:
virtual bool onAccessPixels(SkPixmap*) { return false; }
/**
* PRIVATE / EXPERIMENTAL -- do not call
* This entry point gives the backend an opportunity to take over the rendering
* of 'picture'. If optimization data is available (due to an earlier
* 'optimize' call) this entry point should make use of it and return true
* if all rendering has been done. If false is returned, SkCanvas will
* perform its own rendering pass. It is acceptable for the backend
* to perform some device-specific warm up tasks and then let SkCanvas
* perform the main rendering loop (by return false from here).
*/
virtual bool EXPERIMENTAL_drawPicture(SkCanvas*, const SkPicture*, const SkMatrix*,
const SkPaint*);
struct CreateInfo {
static SkPixelGeometry AdjustGeometry(const SkImageInfo&, TileUsage, SkPixelGeometry,
bool preserveLCDText);

View File

@ -31,9 +31,9 @@ public:
~SkPictureRecorder();
enum RecordFlags {
// This flag indicates that, if some BHH is being computed, saveLayer
// information should also be extracted at the same time.
#ifdef SK_SUPPORT_LEGACY_COMPUTESAVELAYER_FLAG
kComputeSaveLayerInfo_RecordFlag = 1 << 0,
#endif
// If you call drawPicture() or drawDrawable() on the recording canvas, this flag forces
// that object to playback its contents immediately rather than reffing the object.

View File

@ -30,7 +30,6 @@ class GrDrawContext;
class GrFragmentProcessor;
class GrGpu;
class GrIndexBuffer;
class GrLayerCache;
class GrOvalRenderer;
class GrPath;
class GrPipelineBuilder;
@ -346,7 +345,6 @@ public:
GrGpu* getGpu() { return fGpu; }
const GrGpu* getGpu() const { return fGpu; }
GrBatchFontCache* getBatchFontCache() { return fBatchFontCache; }
GrLayerCache* getLayerCache() { return fLayerCache.get(); }
GrTextBlobCache* getTextBlobCache() { return fTextBlobCache; }
bool abandoned() const;
GrResourceProvider* resourceProvider() { return fResourceProvider; }
@ -402,7 +400,6 @@ private:
SkAutoTUnref<GrContextThreadSafeProxy> fThreadSafeProxy;
GrBatchFontCache* fBatchFontCache;
SkAutoTDelete<GrLayerCache> fLayerCache;
SkAutoTDelete<GrTextBlobCache> fTextBlobCache;
// Set by OverbudgetCB() to request that GrContext flush before exiting a draw.

View File

@ -16,14 +16,12 @@ SkBigPicture::SkBigPicture(const SkRect& cull,
SkRecord* record,
SnapshotArray* drawablePicts,
SkBBoxHierarchy* bbh,
AccelData* accelData,
size_t approxBytesUsedBySubPictures)
: fCullRect(cull)
, fApproxBytesUsedBySubPictures(approxBytesUsedBySubPictures)
, fRecord(record) // Take ownership of caller's ref.
, fDrawablePicts(drawablePicts) // Take ownership.
, fBBH(bbh) // Take ownership of caller's ref.
, fAccelData(accelData) // Take ownership of caller's ref.
{}
void SkBigPicture::playback(SkCanvas* canvas, AbortCallback* callback) const {

View File

@ -20,9 +20,6 @@ class SkRecord;
// An implementation of SkPicture supporting an arbitrary number of drawing commands.
class SkBigPicture final : public SkPicture {
public:
// AccelData provides a base class for device-specific acceleration data.
class AccelData : public SkRefCnt { };
// An array of refcounted const SkPicture pointers.
class SnapshotArray : ::SkNoncopyable {
public:
@ -40,7 +37,6 @@ public:
SkRecord*, // We take ownership of the caller's ref.
SnapshotArray*, // We take exclusive ownership.
SkBBoxHierarchy*, // We take ownership of the caller's ref.
AccelData*, // We take ownership of the caller's ref.
size_t approxBytesUsedBySubPictures);
@ -60,7 +56,6 @@ public:
// Used by GrRecordReplaceDraw
const SkBBoxHierarchy* bbh() const { return fBBH; }
const SkRecord* record() const { return fRecord; }
const AccelData* accelData() const { return fAccelData; }
private:
struct Analysis {
@ -84,7 +79,6 @@ private:
SkAutoTUnref<const SkRecord> fRecord;
SkAutoTDelete<const SnapshotArray> fDrawablePicts;
SkAutoTUnref<const SkBBoxHierarchy> fBBH;
SkAutoTUnref<const AccelData> fAccelData;
};
#endif//SkBigPicture_DEFINED

View File

@ -3012,15 +3012,6 @@ void SkCanvas::onDrawPicture(const SkPicture* picture, const SkMatrix* matrix,
}
}
SkBaseDevice* device = this->getTopDevice();
if (device) {
// Canvas has to first give the device the opportunity to render
// the picture itself.
if (device->EXPERIMENTAL_drawPicture(this, picture, matrix, paint)) {
return; // the device has rendered the entire picture
}
}
SkAutoCanvasMatrixPaint acmp(this, matrix, paint, picture->cullRect());
picture->playback(this);
}

View File

@ -258,12 +258,6 @@ bool SkBaseDevice::onReadPixels(const SkImageInfo&, void*, size_t, int x, int y)
return false;
}
bool SkBaseDevice::EXPERIMENTAL_drawPicture(SkCanvas*, const SkPicture*, const SkMatrix*,
const SkPaint*) {
// The base class doesn't perform any accelerated picture rendering
return false;
}
bool SkBaseDevice::accessPixels(SkPixmap* pmap) {
SkPixmap tempStorage;
if (nullptr == pmap) {

View File

@ -1,87 +0,0 @@
/*
* Copyright 2014 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef SkLayerInfo_DEFINED
#define SkLayerInfo_DEFINED
#include "SkBigPicture.h"
#include "SkMatrix.h"
#include "SkPaint.h"
#include "SkTArray.h"
// This class stores information about the saveLayer/restore pairs found
// within an SkPicture. It is used by Ganesh to perform layer hoisting.
class SkLayerInfo : public SkBigPicture::AccelData {
public:
// Information about a given saveLayer/restore block in an SkPicture
class BlockInfo {
public:
BlockInfo() : fPicture(nullptr), fPaint(nullptr), fKey(nullptr), fKeySize(0) {}
~BlockInfo() {
SkSafeUnref(fPicture);
delete fPaint;
delete[] fKey;
}
// The picture owning the layer. If the owning picture is the top-most
// one (i.e., the picture for which this SkLayerInfo was created) then
// this pointer is nullptr. If it is a nested picture then the pointer
// is non-nullptr and owns a ref on the picture.
const SkPicture* fPicture;
// The device space bounds of this layer.
SkRect fBounds;
// If not-empty, the optional bounds parameter passed in to the saveLayer
// call.
SkRect fSrcBounds;
// The pre-matrix begins as the identity and accumulates the transforms
// of the containing SkPictures (if any). This matrix state has to be
// part of the initial matrix during replay so that it will be
// preserved across setMatrix calls.
SkMatrix fPreMat;
// The matrix state (in the leaf picture) in which this layer's draws
// must occur. It will/can be overridden by setMatrix calls in the
// layer itself. It does not include the translation needed to map the
// layer's top-left point to the origin (which must be part of the
// initial matrix).
SkMatrix fLocalMat;
// The paint to use on restore. Can be nullptr since it is optional.
const SkPaint* fPaint;
// The index of this saveLayer in the picture.
size_t fSaveLayerOpID;
// The index of the matching restore in the picture.
size_t fRestoreOpID;
// True if this saveLayer has at least one other saveLayer nested within it.
// False otherwise.
bool fHasNestedLayers;
// True if this saveLayer is nested within another. False otherwise.
bool fIsNested;
// The variable length key for this saveLayer block. It stores the
// thread of drawPicture and saveLayer operation indices that lead to this
// saveLayer (including its own op index). The BlockInfo owns this memory.
int* fKey;
int fKeySize; // # of ints
};
SkLayerInfo() {}
BlockInfo& addBlock() { return fBlocks.push_back(); }
int numBlocks() const { return fBlocks.count(); }
const BlockInfo& block(int index) const {
SkASSERT(index < fBlocks.count());
return fBlocks[index];
}
private:
SkTArray<BlockInfo, true> fBlocks;
typedef SkBigPicture::AccelData INHERITED;
};
#endif // SkLayerInfo_DEFINED

View File

@ -5,23 +5,12 @@
* found in the LICENSE file.
*/
// Need to include something before #if SK_SUPPORT_GPU so that the Android
// framework build, which gets its defines from SkTypes rather than a makefile,
// has the definition before checking it.
#include "SkCanvas.h"
#include "SkCanvasPriv.h"
#include "SkMultiPictureDraw.h"
#include "SkPicture.h"
#include "SkTaskGroup.h"
#if SK_SUPPORT_GPU
#include "GrContext.h"
#include "GrDrawContext.h"
#include "GrLayerHoister.h"
#include "GrRecordReplaceDraw.h"
#include "GrRenderTarget.h"
#endif
void SkMultiPictureDraw::DrawData::draw() {
fCanvas->drawPicture(fPicture, &fMatrix, fPaint);
}
@ -108,102 +97,14 @@ void SkMultiPictureDraw::draw(bool flush) {
return;
}
#if !defined(SK_IGNORE_GPU_LAYER_HOISTING) && SK_SUPPORT_GPU
GrContext* context = fGPUDrawData[0].fCanvas->getGrContext();
SkASSERT(context);
// Start by collecting all the layers that are going to be atlased and render
// them (if necessary). Hoisting the free floating layers is deferred until
// drawing the canvas that requires them.
SkTDArray<GrHoistedLayer> atlasedNeedRendering, atlasedRecycled;
GrLayerHoister::Begin(context);
for (int i = 0; i < count; ++i) {
const DrawData& data = fGPUDrawData[i];
// we only expect 1 context for all the canvases
SkASSERT(data.fCanvas->getGrContext() == context);
if (!data.fPaint &&
(kRGBA_8888_SkColorType == data.fCanvas->imageInfo().colorType() ||
kBGRA_8888_SkColorType == data.fCanvas->imageInfo().colorType())) {
SkRect clipBounds;
if (!data.fCanvas->getClipBounds(&clipBounds)) {
continue;
}
SkMatrix initialMatrix = data.fCanvas->getTotalMatrix();
initialMatrix.preConcat(data.fMatrix);
GrDrawContext* dc = data.fCanvas->internal_private_accessTopLayerDrawContext();
SkASSERT(dc);
// TODO: sorting the cacheable layers from smallest to largest
// would improve the packing and reduce the number of swaps
// TODO: another optimization would be to make a first pass to
// lock any required layer that is already in the atlas
GrLayerHoister::FindLayersToAtlas(context, data.fPicture, initialMatrix,
clipBounds,
&atlasedNeedRendering, &atlasedRecycled,
dc->numColorSamples());
}
}
GrLayerHoister::DrawLayersToAtlas(context, atlasedNeedRendering);
SkTDArray<GrHoistedLayer> needRendering, recycled;
#endif
for (int i = 0; i < count; ++i) {
const DrawData& data = fGPUDrawData[i];
SkCanvas* canvas = data.fCanvas;
const SkPicture* picture = data.fPicture;
#if !defined(SK_IGNORE_GPU_LAYER_HOISTING) && SK_SUPPORT_GPU
if (!data.fPaint) {
SkRect clipBounds;
if (!canvas->getClipBounds(&clipBounds)) {
continue;
}
SkAutoCanvasMatrixPaint acmp(canvas, &data.fMatrix, data.fPaint, picture->cullRect());
const SkMatrix initialMatrix = canvas->getTotalMatrix();
GrDrawContext* dc = data.fCanvas->internal_private_accessTopLayerDrawContext();
SkASSERT(dc);
// Find the layers required by this canvas. It will return atlased
// layers in the 'recycled' list since they have already been drawn.
GrLayerHoister::FindLayersToHoist(context, picture, initialMatrix,
clipBounds, &needRendering, &recycled,
dc->numColorSamples());
GrLayerHoister::DrawLayers(context, needRendering);
// Render the entire picture using new layers
GrRecordReplaceDraw(picture, canvas, context->getLayerCache(),
initialMatrix, nullptr);
GrLayerHoister::UnlockLayers(context, needRendering);
GrLayerHoister::UnlockLayers(context, recycled);
needRendering.rewind();
recycled.rewind();
} else
#endif
{
canvas->drawPicture(picture, &data.fMatrix, data.fPaint);
}
canvas->drawPicture(picture, &data.fMatrix, data.fPaint);
if (flush) {
canvas->flush();
}
}
#if !defined(SK_IGNORE_GPU_LAYER_HOISTING) && SK_SUPPORT_GPU
GrLayerHoister::UnlockLayers(context, atlasedNeedRendering);
GrLayerHoister::UnlockLayers(context, atlasedRecycled);
GrLayerHoister::End(context);
#endif
}
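
With hoisting gone, SkMultiPictureDraw::draw() simply forwards each entry to SkCanvas::drawPicture(), so caller-side usage is unchanged. A minimal sketch, with hypothetical canvases and picture:

#include "SkMultiPictureDraw.h"

void draw_all(SkCanvas* c0, SkCanvas* c1, const SkPicture* pic) {
    SkMultiPictureDraw mpd;
    mpd.add(c0, pic);          // matrix and paint default to nullptr
    mpd.add(c1, pic);
    mpd.draw(/*flush=*/true);  // each entry now ends up in SkCanvas::drawPicture()
}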

View File

@ -8,7 +8,6 @@
#include "SkBigPicture.h"
#include "SkData.h"
#include "SkDrawable.h"
#include "SkLayerInfo.h"
#include "SkPictureRecorder.h"
#include "SkPictureUtils.h"
#include "SkRecord.h"
@ -71,23 +70,13 @@ sk_sp<SkPicture> SkPictureRecorder::finishRecordingAsPicture(uint32_t finishFlag
}
}
SkAutoTUnref<SkLayerInfo> saveLayerData;
if (fBBH && (fFlags & kComputeSaveLayerInfo_RecordFlag)) {
saveLayerData.reset(new SkLayerInfo);
}
SkDrawableList* drawableList = fRecorder->getDrawableList();
SkBigPicture::SnapshotArray* pictList =
drawableList ? drawableList->newDrawableSnapshot() : nullptr;
if (fBBH.get()) {
SkAutoTMalloc<SkRect> bounds(fRecord->count());
if (saveLayerData) {
SkRecordComputeLayers(fCullRect, *fRecord, bounds, pictList, saveLayerData);
} else {
SkRecordFillBounds(fCullRect, *fRecord, bounds);
}
SkRecordFillBounds(fCullRect, *fRecord, bounds);
fBBH->insert(bounds, fRecord->count());
// Now that we've calculated content bounds, we can update fCullRect, often trimming it.
@ -103,7 +92,7 @@ sk_sp<SkPicture> SkPictureRecorder::finishRecordingAsPicture(uint32_t finishFlag
subPictureBytes += SkPictureUtils::ApproximateBytesUsed(pictList->begin()[i]);
}
return sk_make_sp<SkBigPicture>(fCullRect, fRecord.release(), pictList, fBBH.release(),
saveLayerData.release(), subPictureBytes);
subPictureBytes);
}
sk_sp<SkPicture> SkPictureRecorder::finishRecordingAsPictureWithCull(const SkRect& cullRect,
@ -148,8 +137,7 @@ sk_sp<SkDrawable> SkPictureRecorder::finishRecordingAsDrawable(uint32_t finishFl
}
sk_sp<SkDrawable> drawable =
sk_make_sp<SkRecordedDrawable>(fRecord, fBBH, fRecorder->detachDrawableList(), fCullRect,
SkToBool(fFlags & kComputeSaveLayerInfo_RecordFlag));
sk_make_sp<SkRecordedDrawable>(fRecord, fBBH, fRecorder->detachDrawableList(), fCullRect);
// release our refs now, so only the drawable will be the owner.
fRecord.reset(nullptr);

View File

@ -5,7 +5,6 @@
* found in the LICENSE file.
*/
#include "SkLayerInfo.h"
#include "SkRecordDraw.h"
#include "SkPatchUtils.h"
@ -605,203 +604,6 @@ private:
SkTDArray<int> fControlIndices;
};
// SkRecord visitor to gather saveLayer/restore information.
class CollectLayers : SkNoncopyable {
public:
CollectLayers(const SkRect& cullRect, const SkRecord& record, SkRect bounds[],
const SkBigPicture::SnapshotArray* pictList, SkLayerInfo* accelData)
: fSaveLayersInStack(0)
, fAccelData(accelData)
, fPictList(pictList)
, fFillBounds(cullRect, record, bounds)
{}
void cleanUp() {
// fFillBounds must perform its cleanUp first so that all the bounding
// boxes associated with unbalanced restores are updated (prior to
// fetching their bound in popSaveLayerInfo).
fFillBounds.cleanUp();
while (!fSaveLayerStack.isEmpty()) {
this->popSaveLayerInfo();
}
}
void setCurrentOp(int currentOp) { fFillBounds.setCurrentOp(currentOp); }
template <typename T> void operator()(const T& op) {
fFillBounds(op);
this->trackSaveLayers(op);
}
private:
struct SaveLayerInfo {
SaveLayerInfo() { }
SaveLayerInfo(int opIndex, bool isSaveLayer, const SkRect* bounds, const SkPaint* paint)
: fStartIndex(opIndex)
, fIsSaveLayer(isSaveLayer)
, fHasNestedSaveLayer(false)
, fBounds(bounds ? *bounds : SkRect::MakeEmpty())
, fPaint(paint) {
}
int fStartIndex;
bool fIsSaveLayer;
bool fHasNestedSaveLayer;
SkRect fBounds;
const SkPaint* fPaint;
};
template <typename T> void trackSaveLayers(const T& op) {
/* most ops aren't involved in saveLayers */
}
void trackSaveLayers(const Save& s) { this->pushSaveLayerInfo(false, nullptr, nullptr); }
void trackSaveLayers(const SaveLayer& sl) { this->pushSaveLayerInfo(true, sl.bounds, sl.paint); }
void trackSaveLayers(const Restore& r) { this->popSaveLayerInfo(); }
void trackSaveLayersForPicture(const SkPicture* picture, const SkPaint* paint) {
// For sub-pictures, we wrap their layer information within the parent
// picture's rendering hierarchy
const SkLayerInfo* childData = nullptr;
if (const SkBigPicture* bp = picture->asSkBigPicture()) {
childData = static_cast<const SkLayerInfo*>(bp->accelData());
}
if (!childData) {
// If the child layer hasn't been generated with saveLayer data we
// assume the worst (i.e., that it does contain layers which nest
// inside existing layers). Layers within sub-pictures that don't
// have saveLayer data cannot be hoisted.
// TODO: could the analysis data be use to fine tune this?
this->updateStackForSaveLayer();
return;
}
for (int i = 0; i < childData->numBlocks(); ++i) {
const SkLayerInfo::BlockInfo& src = childData->block(i);
FillBounds::Bounds newBound = fFillBounds.adjustAndMap(src.fBounds, paint);
if (newBound.isEmpty()) {
continue;
}
this->updateStackForSaveLayer();
SkLayerInfo::BlockInfo& dst = fAccelData->addBlock();
// If src.fPicture is nullptr the layer is in dp.picture; otherwise
// it belongs to a sub-picture.
dst.fPicture = src.fPicture ? src.fPicture : picture;
dst.fPicture->ref();
dst.fBounds = newBound;
dst.fSrcBounds = src.fSrcBounds;
dst.fLocalMat = src.fLocalMat;
dst.fPreMat = src.fPreMat;
dst.fPreMat.postConcat(fFillBounds.ctm());
if (src.fPaint) {
dst.fPaint = new SkPaint(*src.fPaint);
}
dst.fSaveLayerOpID = src.fSaveLayerOpID;
dst.fRestoreOpID = src.fRestoreOpID;
dst.fHasNestedLayers = src.fHasNestedLayers;
dst.fIsNested = fSaveLayersInStack > 0 || src.fIsNested;
// Store 'saveLayer ops from enclosing picture' + drawPict op + 'ops from sub-picture'
dst.fKeySize = fSaveLayerOpStack.count() + src.fKeySize + 1;
dst.fKey = new int[dst.fKeySize];
sk_careful_memcpy(dst.fKey, fSaveLayerOpStack.begin(),
fSaveLayerOpStack.count() * sizeof(int));
dst.fKey[fSaveLayerOpStack.count()] = fFillBounds.currentOp();
memcpy(&dst.fKey[fSaveLayerOpStack.count()+1], src.fKey, src.fKeySize * sizeof(int));
}
}
void trackSaveLayers(const DrawPicture& dp) {
this->trackSaveLayersForPicture(dp.picture, dp.paint);
}
void trackSaveLayers(const DrawDrawable& dp) {
SkASSERT(fPictList);
SkASSERT(dp.index >= 0 && dp.index < fPictList->count());
const SkPaint* paint = nullptr; // drawables don't get a side-car paint
this->trackSaveLayersForPicture(fPictList->begin()[dp.index], paint);
}
// Inform all the saveLayers already on the stack that they now have a
// nested saveLayer inside them
void updateStackForSaveLayer() {
for (int index = fSaveLayerStack.count() - 1; index >= 0; --index) {
if (fSaveLayerStack[index].fHasNestedSaveLayer) {
break;
}
fSaveLayerStack[index].fHasNestedSaveLayer = true;
if (fSaveLayerStack[index].fIsSaveLayer) {
break;
}
}
}
void pushSaveLayerInfo(bool isSaveLayer, const SkRect* bounds, const SkPaint* paint) {
if (isSaveLayer) {
this->updateStackForSaveLayer();
++fSaveLayersInStack;
fSaveLayerOpStack.push(fFillBounds.currentOp());
}
fSaveLayerStack.push(SaveLayerInfo(fFillBounds.currentOp(), isSaveLayer, bounds, paint));
}
void popSaveLayerInfo() {
if (fSaveLayerStack.count() <= 0) {
SkASSERT(false);
return;
}
SkASSERT(fSaveLayersInStack == fSaveLayerOpStack.count());
SaveLayerInfo sli;
fSaveLayerStack.pop(&sli);
if (!sli.fIsSaveLayer) {
return;
}
--fSaveLayersInStack;
SkLayerInfo::BlockInfo& block = fAccelData->addBlock();
SkASSERT(nullptr == block.fPicture); // This layer is in the top-most picture
block.fBounds = fFillBounds.getBounds(sli.fStartIndex);
block.fLocalMat = fFillBounds.ctm();
block.fPreMat = SkMatrix::I();
if (sli.fPaint) {
block.fPaint = new SkPaint(*sli.fPaint);
}
block.fSrcBounds = sli.fBounds;
block.fSaveLayerOpID = sli.fStartIndex;
block.fRestoreOpID = fFillBounds.currentOp();
block.fHasNestedLayers = sli.fHasNestedSaveLayer;
block.fIsNested = fSaveLayersInStack > 0;
block.fKeySize = fSaveLayerOpStack.count();
block.fKey = new int[block.fKeySize];
memcpy(block.fKey, fSaveLayerOpStack.begin(), block.fKeySize * sizeof(int));
fSaveLayerOpStack.pop();
}
// Used to collect saveLayer information for layer hoisting
int fSaveLayersInStack;
SkTDArray<SaveLayerInfo> fSaveLayerStack;
// The op code indices of all the currently active saveLayers
SkTDArray<int> fSaveLayerOpStack;
SkLayerInfo* fAccelData;
const SkBigPicture::SnapshotArray* fPictList;
SkRecords::FillBounds fFillBounds;
};
} // namespace SkRecords
void SkRecordFillBounds(const SkRect& cullRect, const SkRecord& record, SkRect bounds[]) {
@ -813,12 +615,3 @@ void SkRecordFillBounds(const SkRect& cullRect, const SkRecord& record, SkRect b
visitor.cleanUp();
}
void SkRecordComputeLayers(const SkRect& cullRect, const SkRecord& record, SkRect bounds[],
const SkBigPicture::SnapshotArray* pictList, SkLayerInfo* data) {
SkRecords::CollectLayers visitor(cullRect, record, bounds, pictList, data);
for (int curOp = 0; curOp < record.count(); curOp++) {
visitor.setCurrentOp(curOp);
record.visit(curOp, visitor);
}
visitor.cleanUp();
}

View File

@ -5,7 +5,6 @@
* found in the LICENSE file.
*/
#include "SkLayerInfo.h"
#include "SkMatrix.h"
#include "SkPictureData.h"
#include "SkPicturePlayback.h"
@ -33,15 +32,6 @@ SkPicture* SkRecordedDrawable::onNewPictureSnapshot() {
pictList = fDrawableList->newDrawableSnapshot();
}
SkAutoTUnref<SkLayerInfo> saveLayerData;
if (fBBH && fDoSaveLayerInfo) {
// TODO: can we avoid work by not allocating / filling these bounds?
SkAutoTMalloc<SkRect> scratchBounds(fRecord->count());
saveLayerData.reset(new SkLayerInfo);
SkRecordComputeLayers(fBounds, *fRecord, scratchBounds, pictList, saveLayerData);
}
size_t subPictureBytes = 0;
for (int i = 0; pictList && i < pictList->count(); i++) {
subPictureBytes += SkPictureUtils::ApproximateBytesUsed(pictList->begin()[i]);
@ -49,7 +39,7 @@ SkPicture* SkRecordedDrawable::onNewPictureSnapshot() {
// SkBigPicture will take ownership of a ref on both fRecord and fBBH.
// We're not willing to give up our ownership, so we must ref them for SkPicture.
return new SkBigPicture(fBounds, SkRef(fRecord.get()), pictList, SkSafeRef(fBBH.get()),
saveLayerData.release(), subPictureBytes);
subPictureBytes);
}
void SkRecordedDrawable::flatten(SkWriteBuffer& buffer) const {

View File

@ -13,12 +13,11 @@
class SkRecordedDrawable : public SkDrawable {
public:
SkRecordedDrawable(SkRecord* record, SkBBoxHierarchy* bbh, SkDrawableList* drawableList,
const SkRect& bounds, bool doSaveLayerInfo)
const SkRect& bounds)
: fRecord(SkRef(record))
, fBBH(SkSafeRef(bbh))
, fDrawableList(drawableList) // we take ownership
, fBounds(bounds)
, fDoSaveLayerInfo(doSaveLayerInfo)
{}
void flatten(SkWriteBuffer& buffer) const override;
@ -39,5 +38,4 @@ private:
SkAutoTUnref<SkBBoxHierarchy> fBBH;
SkAutoTDelete<SkDrawableList> fDrawableList;
const SkRect fBounds;
const bool fDoSaveLayerInfo;
};

View File

@ -9,7 +9,6 @@
#include "GrContextOptions.h"
#include "GrDrawingManager.h"
#include "GrDrawContext.h"
#include "GrLayerCache.h"
#include "GrResourceCache.h"
#include "GrResourceProvider.h"
#include "GrSoftwarePathRenderer.h"
@ -88,8 +87,6 @@ void GrContext::initCommon(const GrContextOptions& options) {
fResourceCache->setOverBudgetCallback(OverBudgetCB, this);
fResourceProvider = new GrResourceProvider(fGpu, fResourceCache, &fSingleOwner);
fLayerCache.reset(new GrLayerCache(this));
fDidTestPMConversions = false;
GrDrawTarget::Options dtOptions;
@ -152,7 +149,6 @@ void GrContext::abandonContext() {
fGpu->disconnect(GrGpu::DisconnectType::kAbandon);
fBatchFontCache->freeAll();
fLayerCache->freeAll();
fTextBlobCache->freeAll();
}
@ -171,7 +167,6 @@ void GrContext::releaseResourcesAndAbandonContext() {
fGpu->disconnect(GrGpu::DisconnectType::kCleanup);
fBatchFontCache->freeAll();
fLayerCache->freeAll();
fTextBlobCache->freeAll();
}
@ -186,7 +181,6 @@ void GrContext::freeGpuResources() {
this->flush();
fBatchFontCache->freeAll();
fLayerCache->freeAll();
fDrawingManager->freeGpuResources();

View File

@ -1,168 +0,0 @@
/*
* Copyright 2010 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "GrGpuResourcePriv.h"
#include "GrLayerAtlas.h"
#include "GrRectanizer.h"
#include "GrTextureProvider.h"
///////////////////////////////////////////////////////////////////////////////
GrLayerAtlas::Plot::Plot()
: fID(-1)
, fRects(nullptr) {
fOffset.set(0, 0);
}
GrLayerAtlas::Plot::~Plot() {
delete fRects;
}
void GrLayerAtlas::Plot::init(int id, int offX, int offY, int width, int height) {
fID = id;
fRects = GrRectanizer::Factory(width, height);
fOffset.set(offX * width, offY * height);
}
bool GrLayerAtlas::Plot::allocateRect(int width, int height, SkIPoint16* loc) {
if (!fRects->addRect(width, height, loc)) {
return false;
}
loc->fX += fOffset.fX;
loc->fY += fOffset.fY;
return true;
}
void GrLayerAtlas::Plot::reset() {
SkASSERT(fRects);
fRects->reset();
}
///////////////////////////////////////////////////////////////////////////////
GR_DECLARE_STATIC_UNIQUE_KEY(gLayerAtlasKey);
static const GrUniqueKey& get_layer_atlas_key() {
GR_DEFINE_STATIC_UNIQUE_KEY(gLayerAtlasKey);
return gLayerAtlasKey;
}
bool GrLayerAtlas::reattachBackingTexture() {
SkASSERT(!fTexture);
fTexture.reset(fTexProvider->findAndRefTextureByUniqueKey(get_layer_atlas_key()));
return fTexture != nullptr;
}
void GrLayerAtlas::createBackingTexture() {
SkASSERT(!fTexture);
GrSurfaceDesc desc;
desc.fFlags = fFlags;
desc.fWidth = fBackingTextureSize.width();
desc.fHeight = fBackingTextureSize.height();
desc.fConfig = fPixelConfig;
fTexture.reset(fTexProvider->createTexture(desc, SkBudgeted::kYes, nullptr, 0));
fTexture->resourcePriv().setUniqueKey(get_layer_atlas_key());
}
GrLayerAtlas::GrLayerAtlas(GrTextureProvider* texProvider, GrPixelConfig config,
GrSurfaceFlags flags,
const SkISize& backingTextureSize,
int numPlotsX, int numPlotsY) {
fTexProvider = texProvider;
fPixelConfig = config;
fFlags = flags;
fBackingTextureSize = backingTextureSize;
int textureWidth = fBackingTextureSize.width();
int textureHeight = fBackingTextureSize.height();
int plotWidth = textureWidth / numPlotsX;
int plotHeight = textureHeight / numPlotsY;
SkASSERT(plotWidth * numPlotsX == textureWidth);
SkASSERT(plotHeight * numPlotsY == textureHeight);
// We currently do not support compressed atlases...
SkASSERT(!GrPixelConfigIsCompressed(config));
// set up allocated plots
fPlotArray = new Plot[numPlotsX * numPlotsY];
Plot* currPlot = fPlotArray;
for (int y = numPlotsY-1; y >= 0; --y) {
for (int x = numPlotsX-1; x >= 0; --x) {
currPlot->init(y*numPlotsX+x, x, y, plotWidth, plotHeight);
// build LRU list
fPlotList.addToHead(currPlot);
++currPlot;
}
}
}
void GrLayerAtlas::resetPlots() {
PlotIter iter;
for (Plot* plot = iter.init(fPlotList, PlotIter::kHead_IterStart); plot; plot = iter.next()) {
plot->reset();
}
}
GrLayerAtlas::~GrLayerAtlas() {
delete[] fPlotArray;
}
void GrLayerAtlas::makeMRU(Plot* plot) {
if (fPlotList.head() == plot) {
return;
}
fPlotList.remove(plot);
fPlotList.addToHead(plot);
};
GrLayerAtlas::Plot* GrLayerAtlas::addToAtlas(ClientPlotUsage* usage,
int width, int height, SkIPoint16* loc) {
// Iterate through the plots currently being used by this client and see if we can find a hole.
// The last one was most recently added and probably most empty.
// We want to consolidate the uses from individual clients to the same plot(s) so that
// when a specific client goes away they are more likely to completely empty a plot.
for (int i = usage->numPlots()-1; i >= 0; --i) {
Plot* plot = usage->plot(i);
if (plot->allocateRect(width, height, loc)) {
this->makeMRU(plot);
return plot;
}
}
// before we get a new plot, make sure we have a backing texture
if (nullptr == fTexture) {
this->createBackingTexture();
if (nullptr == fTexture) {
return nullptr;
}
}
// Now look through all allocated plots for one we can share, in MRU order
// TODO: its seems like traversing from emptiest to fullest would make more sense
PlotList::Iter plotIter;
plotIter.init(fPlotList, PlotList::Iter::kHead_IterStart);
Plot* plot;
while ((plot = plotIter.get())) {
if (plot->allocateRect(width, height, loc)) {
this->makeMRU(plot);
// new plot for atlas, put at end of array
usage->appendPlot(plot);
return plot;
}
plotIter.next();
}
// If the above fails, then the current plot list has no room
return nullptr;
}

View File

@ -1,157 +0,0 @@
/*
* Copyright 2010 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef GrLayerAtlas_DEFINED
#define GrLayerAtlas_DEFINED
#include "GrTexture.h"
#include "SkPoint.h"
#include "SkTDArray.h"
#include "SkTInternalLList.h"
class GrLayerAtlas;
class GrTextureProvider;
class GrRectanizer;
// The backing GrTexture for a GrLayerAtlas is broken into a spatial grid of Plots. When
// the atlas needs space on the texture (i.e., in response to an addToAtlas call), it
// iterates through the plots in use by the requesting client looking for space and,
// if no space is found, opens up a new Plot for that client. The Plots keep track of
// subimage placement via their GrRectanizer.
//
// If all Plots are full, the replacement strategy is up to the client. The Plot::reset
// call will remove a Plot's knowledge of any allocated rects - freeing its space for reuse.
class GrLayerAtlas {
public:
class Plot {
SK_DECLARE_INTERNAL_LLIST_INTERFACE(Plot); // In an MRU llist
public:
// This returns a plot ID unique to each plot in the atlas. They are
// consecutive and start at 0.
int id() const { return fID; }
void reset();
private:
friend class GrLayerAtlas;
Plot();
~Plot(); // does not try to delete the fNext field
void init(int id, int offX, int offY, int width, int height);
bool allocateRect(int width, int height, SkIPoint16*);
int fID;
GrRectanizer* fRects;
SkIPoint16 fOffset; // the offset of the plot in the backing texture
};
// This class allows each client to independently track the Plots in
// which its data is stored.
// For example, multiple pictures may simultaneously store their layers in the
// layer atlas. When a picture goes away it can use the ClientPlotUsage to remove itself
// from those plots.
class ClientPlotUsage {
public:
ClientPlotUsage(int maxPlots)
SkDEBUGCODE(: fMaxPlots(maxPlots)) {
fPlots.setReserve(maxPlots);
}
bool isEmpty() const { return 0 == fPlots.count(); }
int numPlots() const { return fPlots.count(); }
Plot* plot(int index) { return fPlots[index]; }
void appendPlot(Plot* plot) {
SkASSERT(fPlots.count() <= fMaxPlots);
SkASSERT(!fPlots.contains(plot));
*fPlots.append() = plot;
}
// remove reference to 'plot'
void removePlot(const Plot* plot) {
int index = fPlots.find(const_cast<Plot*>(plot));
if (index >= 0) {
fPlots.remove(index);
}
}
#ifdef SK_DEBUG
bool contains(const Plot* plot) const {
return fPlots.contains(const_cast<Plot*>(plot));
}
#endif
private:
SkTDArray<Plot*> fPlots;
SkDEBUGCODE(int fMaxPlots;)
};
GrLayerAtlas(GrTextureProvider*, GrPixelConfig, GrSurfaceFlags flags,
const SkISize& backingTextureSize,
int numPlotsX, int numPlotsY);
~GrLayerAtlas();
// Requests a width x height block in the atlas. Upon success it returns
// the containing Plot and absolute location in the backing texture.
// nullptr is returned if there is no more space in the atlas.
Plot* addToAtlas(ClientPlotUsage*, int width, int height, SkIPoint16* loc);
GrTexture* getTextureOrNull() const {
return fTexture;
}
GrTexture* getTexture() const {
SkASSERT(fTexture);
return fTexture;
}
bool reattachBackingTexture();
void detachBackingTexture() {
fTexture.reset(nullptr);
}
void resetPlots();
enum IterOrder {
kLRUFirst_IterOrder,
kMRUFirst_IterOrder
};
typedef SkTInternalLList<Plot> PlotList;
typedef PlotList::Iter PlotIter;
Plot* iterInit(PlotIter* iter, IterOrder order) {
return iter->init(fPlotList, kLRUFirst_IterOrder == order
? PlotList::Iter::kTail_IterStart
: PlotList::Iter::kHead_IterStart);
}
private:
void createBackingTexture();
void makeMRU(Plot* plot);
GrTextureProvider* fTexProvider;
GrPixelConfig fPixelConfig;
GrSurfaceFlags fFlags;
SkAutoTUnref<GrTexture> fTexture;
SkISize fBackingTextureSize;
// allocated array of Plots
Plot* fPlotArray;
// LRU list of Plots (MRU at head - LRU at tail)
PlotList fPlotList;
};
#endif

View File

@ -1,557 +0,0 @@
/*
* Copyright 2014 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "GrLayerAtlas.h"
#include "GrContext.h"
#include "GrDrawContext.h"
#include "GrGpu.h"
#include "GrLayerCache.h"
#include "GrSurfacePriv.h"
#ifdef SK_DEBUG
void GrCachedLayer::validate(const GrTexture* backingTexture) const {
SkASSERT(SK_InvalidGenID != fKey.pictureID());
if (fTexture) {
// If the layer is in some texture then it must occupy some rectangle
SkASSERT(!fRect.isEmpty());
if (!this->isAtlased()) {
// If it isn't atlased then the rectangle should start at the origin
SkASSERT(0.0f == fRect.fLeft && 0.0f == fRect.fTop);
}
} else {
SkASSERT(fRect.isEmpty());
SkASSERT(nullptr == fPlot);
SkASSERT(!fLocked); // layers without a texture cannot be locked
SkASSERT(!fAtlased); // can't be atlased if it doesn't have a texture
}
if (fPlot) {
SkASSERT(fAtlased);
// If a layer has a plot (i.e., is atlased) then it must point to
// the backing texture. Additionally, its rect should be non-empty.
SkASSERT(fTexture && backingTexture == fTexture);
SkASSERT(!fRect.isEmpty());
}
if (fLocked) {
// If a layer is locked it must have a texture (though it need not be
// the atlas-backing texture) and occupy some space.
SkASSERT(fTexture);
SkASSERT(!fRect.isEmpty());
}
// Unfortunately there is a brief time where a layer can be locked
// but not used, so we can only check the "used implies locked"
// invariant.
if (fUses > 0) {
SkASSERT(fLocked);
} else {
SkASSERT(0 == fUses);
}
}
class GrAutoValidateLayer : ::SkNoncopyable {
public:
GrAutoValidateLayer(GrTexture* backingTexture, const GrCachedLayer* layer)
: fBackingTexture(backingTexture)
, fLayer(layer) {
if (fLayer) {
fLayer->validate(backingTexture);
}
}
~GrAutoValidateLayer() {
if (fLayer) {
fLayer->validate(fBackingTexture);
}
}
void setBackingTexture(GrTexture* backingTexture) {
SkASSERT(nullptr == fBackingTexture || fBackingTexture == backingTexture);
fBackingTexture = backingTexture;
}
private:
const GrTexture* fBackingTexture;
const GrCachedLayer* fLayer;
};
#endif
GrLayerCache::GrLayerCache(GrContext* context)
: fContext(context) {
memset(fPlotLocks, 0, sizeof(fPlotLocks));
}
GrLayerCache::~GrLayerCache() {
SkTDynamicHash<GrCachedLayer, GrCachedLayer::Key>::Iter iter(&fLayerHash);
for (; !iter.done(); ++iter) {
GrCachedLayer* layer = &(*iter);
SkASSERT(0 == layer->uses());
this->unlock(layer);
delete layer;
}
SkASSERT(0 == fPictureHash.count());
// The atlas only lets go of its texture when the atlas is deleted.
fAtlas.reset();
}
void GrLayerCache::initAtlas() {
SkASSERT(nullptr == fAtlas.get());
GR_STATIC_ASSERT(kNumPlotsX*kNumPlotsX == GrPictureInfo::kNumPlots);
SkISize textureSize = SkISize::Make(kAtlasTextureWidth, kAtlasTextureHeight);
fAtlas.reset(new GrLayerAtlas(fContext->textureProvider(), kSkia8888_GrPixelConfig,
kRenderTarget_GrSurfaceFlag, textureSize,
kNumPlotsX, kNumPlotsY));
}
void GrLayerCache::freeAll() {
SkTDynamicHash<GrCachedLayer, GrCachedLayer::Key>::Iter iter(&fLayerHash);
for (; !iter.done(); ++iter) {
GrCachedLayer* layer = &(*iter);
this->unlock(layer);
delete layer;
}
fLayerHash.rewind();
if (fAtlas) {
fAtlas->resetPlots();
fAtlas->detachBackingTexture();
}
}
GrCachedLayer* GrLayerCache::createLayer(uint32_t pictureID,
int start, int stop,
const SkIRect& srcIR,
const SkIRect& dstIR,
const SkMatrix& initialMat,
const int* key,
int keySize,
const SkPaint* paint) {
SkASSERT(pictureID != SK_InvalidGenID && start >= 0 && stop > 0);
GrCachedLayer* layer = new GrCachedLayer(pictureID, start, stop, srcIR, dstIR, initialMat, key,
keySize, paint);
fLayerHash.add(layer);
return layer;
}
GrCachedLayer* GrLayerCache::findLayer(uint32_t pictureID, const SkMatrix& initialMat,
const int* key, int keySize) {
SkASSERT(pictureID != SK_InvalidGenID);
return fLayerHash.find(GrCachedLayer::Key(pictureID, initialMat, key, keySize));
}
GrCachedLayer* GrLayerCache::findLayerOrCreate(uint32_t pictureID,
int start, int stop,
const SkIRect& srcIR,
const SkIRect& dstIR,
const SkMatrix& initialMat,
const int* key,
int keySize,
const SkPaint* paint) {
SkASSERT(pictureID != SK_InvalidGenID && start >= 0 && stop > 0);
GrCachedLayer* layer = fLayerHash.find(GrCachedLayer::Key(pictureID, initialMat, key, keySize));
if (nullptr == layer) {
layer = this->createLayer(pictureID, start, stop,
srcIR, dstIR, initialMat,
key, keySize, paint);
}
return layer;
}
bool GrLayerCache::tryToAtlas(GrCachedLayer* layer,
const GrSurfaceDesc& desc,
bool* needsRendering) {
SkDEBUGCODE(GrAutoValidateLayer avl(fAtlas ? fAtlas->getTextureOrNull() : nullptr, layer);)
SkASSERT(PlausiblyAtlasable(desc.fWidth, desc.fHeight));
SkASSERT(0 == desc.fSampleCnt);
if (layer->locked()) {
// This layer is already locked
SkASSERT(fAtlas);
SkASSERT(layer->isAtlased());
SkASSERT(layer->rect().width() == desc.fWidth);
SkASSERT(layer->rect().height() == desc.fHeight);
*needsRendering = false;
return true;
}
if (layer->isAtlased()) {
SkASSERT(fAtlas);
// Hooray it is still in the atlas - make sure it stays there
layer->setLocked(true);
this->incPlotLock(layer->plot()->id());
*needsRendering = false;
return true;
} else {
if (!fAtlas) {
this->initAtlas();
if (!fAtlas) {
return false;
}
}
// Not in the atlas - will it fit?
GrPictureInfo* pictInfo = fPictureHash.find(layer->pictureID());
if (nullptr == pictInfo) {
pictInfo = new GrPictureInfo(layer->pictureID());
fPictureHash.add(pictInfo);
}
SkIPoint16 loc;
for (int i = 0; i < 2; ++i) { // extra pass in case we fail to add but are able to purge
GrLayerAtlas::Plot* plot = fAtlas->addToAtlas(&pictInfo->fPlotUsage,
desc.fWidth, desc.fHeight,
&loc);
// addToAtlas can allocate the backing texture
SkDEBUGCODE(avl.setBackingTexture(fAtlas->getTexture()));
if (plot) {
#if !GR_CACHE_HOISTED_LAYERS
pictInfo->incPlotUsage(plot->id());
#endif
// The layer was successfully added to the atlas
const SkIRect bounds = SkIRect::MakeXYWH(loc.fX, loc.fY,
desc.fWidth, desc.fHeight);
layer->setTexture(fAtlas->getTexture(), bounds, true);
layer->setPlot(plot);
layer->setLocked(true);
this->incPlotLock(layer->plot()->id());
*needsRendering = true;
return true;
}
// The layer was rejected by the atlas (even though we know it is
// plausibly atlas-able). See if a plot can be purged and try again.
if (!this->purgePlots(true)) {
break; // We weren't able to purge any plots
}
}
if (pictInfo->fPlotUsage.isEmpty()) {
fPictureHash.remove(pictInfo->fPictureID);
delete pictInfo;
}
}
return false;
}
bool GrLayerCache::lock(GrCachedLayer* layer, const GrSurfaceDesc& desc, bool* needsRendering) {
if (layer->locked()) {
// This layer is already locked
*needsRendering = false;
return true;
}
// TODO: make the test for exact match depend on the image filters themselves
SkAutoTUnref<GrTexture> tex;
if (layer->fFilter) {
tex.reset(fContext->textureProvider()->createTexture(desc, SkBudgeted::kYes));
} else {
tex.reset(fContext->textureProvider()->createApproxTexture(desc));
}
if (!tex) {
return false;
}
layer->setTexture(tex, SkIRect::MakeWH(desc.fWidth, desc.fHeight), false);
layer->setLocked(true);
*needsRendering = true;
return true;
}
void GrLayerCache::unlock(GrCachedLayer* layer) {
SkDEBUGCODE(GrAutoValidateLayer avl(fAtlas ? fAtlas->getTextureOrNull() : nullptr, layer);)
if (nullptr == layer || !layer->locked()) {
// invalid or not locked
return;
}
if (layer->isAtlased()) {
const int plotID = layer->plot()->id();
this->decPlotLock(plotID);
// At this point we could aggressively clear out un-locked plots but
// by delaying we may be able to reuse some of the atlased layers later.
#if !GR_CACHE_HOISTED_LAYERS
// This testing code aggressively removes the atlased layers. This
// can be used to separate the performance contribution of less
// render target pingponging from that due to the re-use of cached layers
GrPictureInfo* pictInfo = fPictureHash.find(layer->pictureID());
SkASSERT(pictInfo);
pictInfo->decPlotUsage(plotID);
if (0 == pictInfo->plotUsage(plotID)) {
pictInfo->fPlotUsage.removePlot(layer->plot());
if (pictInfo->fPlotUsage.isEmpty()) {
fPictureHash.remove(pictInfo->fPictureID);
delete pictInfo;
}
}
layer->setPlot(nullptr);
layer->setTexture(nullptr, SkIRect::MakeEmpty(), false);
#endif
} else {
layer->setTexture(nullptr, SkIRect::MakeEmpty(), false);
}
layer->setLocked(false);
}
#ifdef SK_DEBUG
void GrLayerCache::validate() const {
int plotLocks[kNumPlotsX * kNumPlotsY];
memset(plotLocks, 0, sizeof(plotLocks));
SkTDynamicHash<GrCachedLayer, GrCachedLayer::Key>::ConstIter iter(&fLayerHash);
for (; !iter.done(); ++iter) {
const GrCachedLayer* layer = &(*iter);
layer->validate(fAtlas.get() ? fAtlas->getTextureOrNull() : nullptr);
const GrPictureInfo* pictInfo = fPictureHash.find(layer->pictureID());
if (!pictInfo) {
// If there is no picture info for this picture then all of its
// layers should be non-atlased.
SkASSERT(!layer->isAtlased());
}
if (layer->plot()) {
SkASSERT(pictInfo);
SkASSERT(pictInfo->fPictureID == layer->pictureID());
SkASSERT(pictInfo->fPlotUsage.contains(layer->plot()));
#if !GR_CACHE_HOISTED_LAYERS
SkASSERT(pictInfo->plotUsage(layer->plot()->id()) > 0);
#endif
if (layer->locked()) {
plotLocks[layer->plot()->id()]++;
}
}
}
for (int i = 0; i < kNumPlotsX*kNumPlotsY; ++i) {
SkASSERT(plotLocks[i] == fPlotLocks[i]);
}
}
class GrAutoValidateCache : ::SkNoncopyable {
public:
explicit GrAutoValidateCache(GrLayerCache* cache)
: fCache(cache) {
fCache->validate();
}
~GrAutoValidateCache() {
fCache->validate();
}
private:
GrLayerCache* fCache;
};
#endif
void GrLayerCache::purge(uint32_t pictureID) {
SkDEBUGCODE(GrAutoValidateCache avc(this);)
// We need to find all the layers associated with 'picture' and remove them.
SkTDArray<GrCachedLayer*> toBeRemoved;
SkTDynamicHash<GrCachedLayer, GrCachedLayer::Key>::Iter iter(&fLayerHash);
for (; !iter.done(); ++iter) {
if (pictureID == (*iter).pictureID()) {
*toBeRemoved.append() = &(*iter);
}
}
for (int i = 0; i < toBeRemoved.count(); ++i) {
SkASSERT(0 == toBeRemoved[i]->uses());
this->unlock(toBeRemoved[i]);
fLayerHash.remove(GrCachedLayer::GetKey(*toBeRemoved[i]));
delete toBeRemoved[i];
}
GrPictureInfo* pictInfo = fPictureHash.find(pictureID);
if (pictInfo) {
fPictureHash.remove(pictureID);
delete pictInfo;
}
}
bool GrLayerCache::purgePlots(bool justOne) {
SkDEBUGCODE(GrAutoValidateCache avc(this);)
SkASSERT(fAtlas);
bool anyPurged = false;
GrLayerAtlas::PlotIter iter;
GrLayerAtlas::Plot* plot;
for (plot = fAtlas->iterInit(&iter, GrLayerAtlas::kLRUFirst_IterOrder);
plot;
plot = iter.prev()) {
if (fPlotLocks[plot->id()] > 0) {
continue;
}
anyPurged = true;
this->purgePlot(plot);
if (justOne) {
break;
}
}
return anyPurged;
}
void GrLayerCache::purgePlot(GrLayerAtlas::Plot* plot) {
SkASSERT(0 == fPlotLocks[plot->id()]);
// We need to find all the layers in 'plot' and remove them.
SkTDArray<GrCachedLayer*> toBeRemoved;
SkTDynamicHash<GrCachedLayer, GrCachedLayer::Key>::Iter iter(&fLayerHash);
for (; !iter.done(); ++iter) {
if (plot == (*iter).plot()) {
*toBeRemoved.append() = &(*iter);
}
}
for (int i = 0; i < toBeRemoved.count(); ++i) {
SkASSERT(0 == toBeRemoved[i]->uses());
SkASSERT(!toBeRemoved[i]->locked());
uint32_t pictureIDToRemove = toBeRemoved[i]->pictureID();
// Aggressively remove layers and, if it becomes totally uncached, delete the picture info
fLayerHash.remove(GrCachedLayer::GetKey(*toBeRemoved[i]));
delete toBeRemoved[i];
GrPictureInfo* pictInfo = fPictureHash.find(pictureIDToRemove);
if (pictInfo) {
#if !GR_CACHE_HOISTED_LAYERS
SkASSERT(0 == pictInfo->plotUsage(plot->id()));
#endif
pictInfo->fPlotUsage.removePlot(plot);
if (pictInfo->fPlotUsage.isEmpty()) {
fPictureHash.remove(pictInfo->fPictureID);
delete pictInfo;
}
}
}
plot->reset();
}
#if !GR_CACHE_HOISTED_LAYERS
void GrLayerCache::purgeAll() {
if (!fAtlas) {
return;
}
this->purgePlots(false); // clear them all out
SkASSERT(0 == fPictureHash.count());
if (fAtlas->getTextureOrNull()) {
sk_sp<GrDrawContext> drawContext(
fContext->drawContext(sk_ref_sp(fAtlas->getTexture()->asRenderTarget())));
if (drawContext) {
drawContext->discard();
}
}
}
#endif
void GrLayerCache::begin() {
if (!fAtlas) {
return;
}
if (!fAtlas->reattachBackingTexture()) {
// We weren't able to re-attach. Clear out all the atlased layers.
this->purgePlots(false);
SkASSERT(0 == fPictureHash.count());
}
#ifdef SK_DEBUG
else {
// we've reattached - everything had better make sense
SkTDynamicHash<GrCachedLayer, GrCachedLayer::Key>::Iter iter(&fLayerHash);
for (; !iter.done(); ++iter) {
GrCachedLayer* layer = &(*iter);
if (layer->isAtlased()) {
SkASSERT(fAtlas->getTexture() == layer->texture());
}
}
}
#endif
}
void GrLayerCache::end() {
if (!fAtlas) {
return;
}
// Adding this call will clear out all the layers in the atlas
//this->purgePlots(false);
fAtlas->detachBackingTexture();
}
void GrLayerCache::processDeletedPictures() {
SkTArray<SkPicture::DeletionMessage> deletedPictures;
fPictDeletionInbox.poll(&deletedPictures);
for (int i = 0; i < deletedPictures.count(); i++) {
this->purge(deletedPictures[i].fUniqueID);
}
}
#ifdef SK_DEBUG
void GrLayerCache::writeLayersToDisk(const SkString& dirName) {
if (fAtlas) {
GrTexture* atlasTexture = fAtlas->getTextureOrNull();
if (nullptr != atlasTexture) {
SkString fileName(dirName);
fileName.append("\\atlas.png");
atlasTexture->surfacePriv().savePixels(fileName.c_str());
}
}
SkTDynamicHash<GrCachedLayer, GrCachedLayer::Key>::Iter iter(&fLayerHash);
for (; !iter.done(); ++iter) {
GrCachedLayer* layer = &(*iter);
if (layer->isAtlased() || !layer->texture()) {
continue;
}
SkString fileName(dirName);
fileName.appendf("\\%d", layer->fKey.pictureID());
for (int i = 0; i < layer->fKey.keySize(); ++i) {
fileName.appendf("-%d", layer->fKey.key()[i]);
}
fileName.appendf(".png");
layer->texture()->surfacePriv().savePixels(fileName.c_str());
}
}
#endif

View File

@ -1,432 +0,0 @@
/*
* Copyright 2014 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef GrLayerCache_DEFINED
#define GrLayerCache_DEFINED
#include "GrLayerAtlas.h"
#include "GrTexture.h"
#include "GrRect.h"
#include "SkChecksum.h"
#include "SkImageFilter.h"
#include "SkMessageBus.h"
#include "SkPaint.h"
#include "SkPicture.h"
#include "SkTDynamicHash.h"
// Set to 0 to disable caching of hoisted layers
#define GR_CACHE_HOISTED_LAYERS 0
// GrPictureInfo stores the atlas plots used by a single picture. A single
// plot may be used to store layers from multiple pictures.
struct GrPictureInfo {
public:
static const int kNumPlots = 4;
// for SkTDynamicHash - just use the pictureID as the hash key
static const uint32_t& GetKey(const GrPictureInfo& pictInfo) { return pictInfo.fPictureID; }
static uint32_t Hash(const uint32_t& key) { return SkChecksum::Mix(key); }
// GrPictureInfo proper
GrPictureInfo(uint32_t pictureID)
: fPictureID(pictureID)
, fPlotUsage(kNumPlots) {
#if !GR_CACHE_HOISTED_LAYERS
memset(fPlotUses, 0, sizeof(fPlotUses));
#endif
}
#if !GR_CACHE_HOISTED_LAYERS
void incPlotUsage(int plotID) {
SkASSERT(plotID < kNumPlots);
fPlotUses[plotID]++;
}
void decPlotUsage(int plotID) {
SkASSERT(plotID < kNumPlots);
SkASSERT(fPlotUses[plotID] > 0);
fPlotUses[plotID]--;
}
int plotUsage(int plotID) const {
SkASSERT(plotID < kNumPlots);
return fPlotUses[plotID];
}
#endif
const uint32_t fPictureID;
GrLayerAtlas::ClientPlotUsage fPlotUsage;
#if !GR_CACHE_HOISTED_LAYERS
private:
int fPlotUses[kNumPlots];
#endif
};
// GrCachedLayer encapsulates the caching information for a single saveLayer.
//
// Atlased layers get a ref to the backing GrTexture while non-atlased layers
// get a ref to the GrTexture in which they reside. In both cases 'fRect'
// contains the layer's extent in its texture.
// Atlased layers also get a pointer to the plot in which they reside.
// For non-atlased layers, the lock field just corresponds to locking in
// the resource cache. For atlased layers, it implements an additional level
// of locking to allow atlased layers to be reused multiple times.
struct GrCachedLayer {
public:
// For SkTDynamicHash
struct Key {
Key(uint32_t pictureID, const SkMatrix& initialMat,
const int* key, int keySize, bool copyKey = false)
: fKeySize(keySize)
, fFreeKey(copyKey) {
fIDMatrix.fPictureID = pictureID;
fIDMatrix.fInitialMat = initialMat;
fIDMatrix.fInitialMat.getType(); // force initialization of type so hashes match
if (copyKey) {
int* tempKey = new int[keySize];
memcpy(tempKey, key, keySize*sizeof(int));
fKey = tempKey;
} else {
fKey = key;
}
// The pictureID/matrix portion needs to be tightly packed.
GR_STATIC_ASSERT(sizeof(IDMatrix) == sizeof(uint32_t)+ // pictureID
9 * sizeof(SkScalar) + sizeof(uint32_t)); // matrix
}
~Key() {
if (fFreeKey) {
delete[] fKey;
}
}
bool operator==(const Key& other) const {
if (fKeySize != other.fKeySize) {
return false;
}
return fIDMatrix.fPictureID == other.fIDMatrix.fPictureID &&
fIDMatrix.fInitialMat.cheapEqualTo(other.fIDMatrix.fInitialMat) &&
!memcmp(fKey, other.fKey, fKeySize * sizeof(int));
}
uint32_t pictureID() const { return fIDMatrix.fPictureID; }
// TODO: remove these when GrCachedLayer & ReplacementInfo fuse
const int* key() const { SkASSERT(fFreeKey); return fKey; }
int keySize() const { SkASSERT(fFreeKey); return fKeySize; }
uint32_t hash() const {
uint32_t hash = SkChecksum::Murmur3(reinterpret_cast<const uint32_t*>(fKey),
fKeySize * sizeof(int));
return SkChecksum::Murmur3(reinterpret_cast<const uint32_t*>(&fIDMatrix),
sizeof(IDMatrix), hash);
}
private:
struct IDMatrix {
// ID of the picture of which this layer is a part
uint32_t fPictureID;
// The initial matrix passed into drawPicture
SkMatrix fInitialMat;
} fIDMatrix;
const int* fKey;
const int fKeySize;
bool fFreeKey;
};
static const Key& GetKey(const GrCachedLayer& layer) { return layer.fKey; }
static uint32_t Hash(const Key& key) { return key.hash(); }
// GrCachedLayer proper
GrCachedLayer(uint32_t pictureID,
int start,
int stop,
const SkIRect& srcIR,
const SkIRect& dstIR,
const SkMatrix& ctm,
const int* key,
int keySize,
const SkPaint* paint)
: fKey(pictureID, ctm, key, keySize, true)
, fStart(start)
, fStop(stop)
, fSrcIR(srcIR)
, fDstIR(dstIR)
, fOffset(SkIPoint::Make(0, 0))
, fPaint(paint ? new SkPaint(*paint) : nullptr)
, fFilter(nullptr)
, fTexture(nullptr)
, fAtlased(false)
, fRect(SkIRect::MakeEmpty())
, fPlot(nullptr)
, fUses(0)
, fLocked(false) {
SkASSERT(SK_InvalidGenID != pictureID);
if (fPaint) {
if (fPaint->getImageFilter()) {
fFilter = SkSafeRef(fPaint->getImageFilter());
fPaint->setImageFilter(nullptr);
}
}
}
~GrCachedLayer() {
if (!fAtlased) {
SkSafeUnref(fTexture);
}
SkSafeUnref(fFilter);
delete fPaint;
}
uint32_t pictureID() const { return fKey.pictureID(); }
// TODO: remove these when GrCachedLayer & ReplacementInfo fuse
const int* key() const { return fKey.key(); }
int keySize() const { return fKey.keySize(); }
int start() const { return fStart; }
// TODO: make bound debug only
const SkIRect& srcIR() const { return fSrcIR; }
const SkIRect& dstIR() const { return fDstIR; }
int stop() const { return fStop; }
void setTexture(GrTexture* texture, const SkIRect& rect, bool atlased) {
if (texture && !atlased) {
texture->ref(); // non-atlased textures carry a ref
}
if (fTexture && !fAtlased) {
fTexture->unref(); // non-atlased textures carry a ref
}
fTexture = texture;
fAtlased = atlased;
fRect = rect;
if (!fTexture) {
fLocked = false;
}
}
GrTexture* texture() { return fTexture; }
const SkPaint* paint() const { return fPaint; }
const SkImageFilter* filter() const { return fFilter; }
const SkIRect& rect() const { return fRect; }
void setOffset(const SkIPoint& offset) { fOffset = offset; }
const SkIPoint& offset() const { return fOffset; }
void setPlot(GrLayerAtlas::Plot* plot) {
SkASSERT(nullptr == plot || nullptr == fPlot);
fPlot = plot;
}
GrLayerAtlas::Plot* plot() { return fPlot; }
bool isAtlased() const { SkASSERT(fAtlased == SkToBool(fPlot)); return fAtlased; }
void setLocked(bool locked) { fLocked = locked; }
bool locked() const { return fLocked; }
SkDEBUGCODE(const GrLayerAtlas::Plot* plot() const { return fPlot; })
SkDEBUGCODE(void validate(const GrTexture* backingTexture) const;)
private:
const Key fKey;
// The "saveLayer" operation index of the cached layer
const int fStart;
// The final "restore" operation index of the cached layer
const int fStop;
// The layer's src rect (i.e., the portion of the source scene required
// for filtering).
const SkIRect fSrcIR;
// The layer's dest rect (i.e., where it will land in device space)
const SkIRect fDstIR;
// Offset sometimes required by image filters
SkIPoint fOffset;
// The paint used when dropping the layer down into the owning canvas.
// Can be nullptr. This class makes a copy for itself.
SkPaint* fPaint;
// The imagefilter that needs to be applied to the layer prior to it being
// composited with the rest of the scene.
const SkImageFilter* fFilter;
// fTexture is a ref on the atlasing texture for atlased layers and a
// ref on a GrTexture for non-atlased textures.
GrTexture* fTexture;
// true if this layer is in the atlas (and 'fTexture' doesn't carry a ref)
// and false if the layer is a free floater (and carries a ref).
bool fAtlased;
// For both atlased and non-atlased layers 'fRect' contains the bound of
// the layer in whichever texture it resides. It is empty when 'fTexture'
// is nullptr.
SkIRect fRect;
// For atlased layers, fPlot stores the atlas plot in which the layer rests.
// It is always nullptr for non-atlased layers.
GrLayerAtlas::Plot* fPlot;
// The number of actively hoisted layers using this cached image (i.e.,
// extant GrHoistedLayers pointing at this object). This object will
// be unlocked when the use count reaches 0.
int fUses;
// For non-atlased layers 'fLocked' should always match 'fTexture'.
// (i.e., if there is a texture it is locked).
// For atlased layers, 'fLocked' is true if the layer is in a plot and
// actively required for rendering. If the layer is in a plot but not
// actively required for rendering, then 'fLocked' is false. If the
// layer isn't in a plot then it can never be locked.
bool fLocked;
void addUse() { ++fUses; }
void removeUse() { SkASSERT(fUses > 0); --fUses; }
int uses() const { return fUses; }
friend class GrLayerCache; // for access to usage methods
friend class TestingAccess; // for testing
};
// The GrLayerCache caches pre-computed saveLayers for later rendering.
// Non-atlased layers are stored in their own GrTexture while the atlased
// layers share a single GrTexture.
// Unlike the GrFontCache, the GrLayerCache only has one atlas (for 8888).
// As such, the GrLayerCache roughly combines the functionality of the
// GrFontCache and GrTextStrike classes.
class GrLayerCache {
public:
GrLayerCache(GrContext*);
~GrLayerCache();
// As a cache, the GrLayerCache can be ordered to free up all its cached
// elements by the GrContext
void freeAll();
GrCachedLayer* findLayer(uint32_t pictureID, const SkMatrix& ctm,
const int* key, int keySize);
GrCachedLayer* findLayerOrCreate(uint32_t pictureID,
int start, int stop,
const SkIRect& srcIR,
const SkIRect& dstIR,
const SkMatrix& initialMat,
const int* key, int keySize,
const SkPaint* paint);
// Attempt to place 'layer' in the atlas. Return true on success; false on failure.
// When true is returned, 'needsRendering' will indicate if the layer must be (re)drawn.
// Additionally, the GPU resources will be locked.
bool tryToAtlas(GrCachedLayer* layer, const GrSurfaceDesc& desc, bool* needsRendering);
// Attempt to lock the GPU resources required for a layer. Return true on success;
// false on failure. When true is returned 'needsRendering' will indicate if the
// layer must be (re)drawn.
// Note that atlased layers should already have been locked and rendered so only
// free-floating layers will have 'needsRendering' set.
// Currently, this path always uses a new scratch texture for non-atlased layers
// and (thus) doesn't cache anything. This can yield a lot of re-rendering.
// TODO: allow rediscovery of free-floating layers that are still in the resource cache.
bool lock(GrCachedLayer* layer, const GrSurfaceDesc& desc, bool* needsRendering);
// addUse is just here to keep the API symmetric
void addUse(GrCachedLayer* layer) { layer->addUse(); }
void removeUse(GrCachedLayer* layer) {
layer->removeUse();
if (layer->uses() == 0) {
// If no one cares about the layer allow it to be recycled.
this->unlock(layer);
}
}
// Cleanup after any SkPicture deletions
void processDeletedPictures();
SkDEBUGCODE(void validate() const;)
#ifdef SK_DEBUG
void writeLayersToDisk(const SkString& dirName);
#endif
static bool PlausiblyAtlasable(int width, int height) {
return width <= kPlotWidth && height <= kPlotHeight;
}
void begin();
void end();
#if !GR_CACHE_HOISTED_LAYERS
void purgeAll();
#endif
private:
static const int kAtlasTextureWidth = 1024;
static const int kAtlasTextureHeight = 1024;
static const int kNumPlotsX = 2;
static const int kNumPlotsY = 2;
static const int kPlotWidth = kAtlasTextureWidth / kNumPlotsX;
static const int kPlotHeight = kAtlasTextureHeight / kNumPlotsY;
GrContext* fContext; // pointer back to owning context
SkAutoTDelete<GrLayerAtlas> fAtlas; // lazily allocated
// We cache this information here (rather than, say, on the owning picture)
// because we want to be able to clean it up as needed (e.g., if a picture
// is leaked and never cleans itself up we still want to be able to
// remove the GrPictureInfo once its layers are purged from all the atlas
// plots).
SkTDynamicHash<GrPictureInfo, uint32_t> fPictureHash;
SkTDynamicHash<GrCachedLayer, GrCachedLayer::Key> fLayerHash;
SkMessageBus<SkPicture::DeletionMessage>::Inbox fPictDeletionInbox;
// This implements a plot-centric locking mechanism (since the atlas
// backing texture is always locked). Each layer that is locked (i.e.,
// needed for the current rendering) in a plot increments the plot lock
// count for that plot. Similarly, once a rendering is complete all the
// layers used in it decrement the lock count for the used plots.
// Plots with a 0 lock count are open for recycling/purging.
int fPlotLocks[kNumPlotsX * kNumPlotsY];
// Inform the cache that layer's cached image is not currently required
void unlock(GrCachedLayer* layer);
void initAtlas();
GrCachedLayer* createLayer(uint32_t pictureID, int start, int stop,
const SkIRect& srcIR, const SkIRect& dstIR,
const SkMatrix& initialMat,
const int* key, int keySize,
const SkPaint* paint);
// Remove all the layers (and unlock any resources) associated with 'pictureID'
void purge(uint32_t pictureID);
void purgePlot(GrLayerAtlas::Plot* plot);
// Either purge all unlocked plots or just one. Return true if >= 1 plot
// was purged; false otherwise.
bool purgePlots(bool justOne);
void incPlotLock(int plotIdx) { ++fPlotLocks[plotIdx]; }
void decPlotLock(int plotIdx) {
SkASSERT(fPlotLocks[plotIdx] > 0);
--fPlotLocks[plotIdx];
}
// for testing
friend class TestingAccess;
int numLayers() const { return fLayerHash.count(); }
};
#endif
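Taken together, the comments above imply a small lifecycle for a cached layer. The following is an illustrative sketch only, not code from this change: the helper name lock_render_and_release is hypothetical, and it assumes a GrContext* context, a GrSurfaceDesc desc describing the layer's backing store, and a GrCachedLayer* layer already obtained from findLayerOrCreate. It compresses the tryToAtlas/lock plus addUse/removeUse sequence that GrLayerHoister drives in the files below.

// Illustrative sketch (hypothetical helper): drive the GrLayerCache locking
// protocol for a single layer.
static void lock_render_and_release(GrContext* context, GrCachedLayer* layer,
                                    const GrSurfaceDesc& desc) {
    GrLayerCache* layerCache = context->getLayerCache();

    bool needsRendering;
    // Small layers go into the shared atlas; everything else gets its own texture.
    bool locked = GrLayerCache::PlausiblyAtlasable(desc.fWidth, desc.fHeight)
            ? layerCache->tryToAtlas(layer, desc, &needsRendering)
            : layerCache->lock(layer, desc, &needsRendering);
    if (!locked) {
        return;  // GPU resources could not be secured for this layer
    }

    layerCache->addUse(layer);
    if (needsRendering) {
        // ... replay the layer's saveLayer/restore block into layer->texture() ...
    }
    // ... composite the cached texture into the destination ...
    layerCache->removeUse(layer);  // at zero uses the cache may recycle the layer
}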

View File

@ -1,402 +0,0 @@
/*
* Copyright 2014 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "SkBigPicture.h"
#include "SkCanvas.h"
#include "SkImageFilterCache.h"
#include "SkLayerInfo.h"
#include "SkRecordDraw.h"
#include "SkSpecialImage.h"
#include "SkSurface.h"
#include "GrLayerHoister.h"
#if !defined(SK_IGNORE_GPU_LAYER_HOISTING) && SK_SUPPORT_GPU
#include "GrContext.h"
#include "GrLayerCache.h"
#include "GrRecordReplaceDraw.h"
// Create the layer information for the hoisted layer and secure the
// required texture/render target resources.
static void prepare_for_hoisting(GrLayerCache* layerCache,
const SkPicture* topLevelPicture,
const SkMatrix& initialMat,
const SkLayerInfo::BlockInfo& info,
const SkIRect& srcIR,
const SkIRect& dstIR,
SkTDArray<GrHoistedLayer>* needRendering,
SkTDArray<GrHoistedLayer>* recycled,
bool attemptToAtlas,
int numSamples) {
const SkPicture* pict = info.fPicture ? info.fPicture : topLevelPicture;
GrCachedLayer* layer = layerCache->findLayerOrCreate(topLevelPicture->uniqueID(),
SkToInt(info.fSaveLayerOpID),
SkToInt(info.fRestoreOpID),
srcIR,
dstIR,
initialMat,
info.fKey,
info.fKeySize,
info.fPaint);
GrSurfaceDesc desc;
desc.fFlags = kRenderTarget_GrSurfaceFlag;
desc.fWidth = srcIR.width();
desc.fHeight = srcIR.height();
desc.fConfig = kSkia8888_GrPixelConfig;
desc.fSampleCnt = numSamples;
bool locked, needsRendering;
if (attemptToAtlas) {
locked = layerCache->tryToAtlas(layer, desc, &needsRendering);
} else {
locked = layerCache->lock(layer, desc, &needsRendering);
}
if (!locked) {
// GPU resources could not be secured for the hoisting of this layer
return;
}
if (attemptToAtlas) {
SkASSERT(layer->isAtlased());
}
GrHoistedLayer* hl;
if (needsRendering) {
if (!attemptToAtlas) {
SkASSERT(!layer->isAtlased());
}
hl = needRendering->append();
} else {
hl = recycled->append();
}
layerCache->addUse(layer);
hl->fLayer = layer;
hl->fPicture = pict;
hl->fLocalMat = info.fLocalMat;
hl->fInitialMat = initialMat;
hl->fPreMat = initialMat;
hl->fPreMat.preConcat(info.fPreMat);
}
// Compute the source rect and return false if it is empty.
static bool compute_source_rect(const SkLayerInfo::BlockInfo& info, const SkMatrix& initialMat,
const SkIRect& dstIR, SkIRect* srcIR) {
SkIRect clipBounds = dstIR;
SkMatrix totMat = initialMat;
totMat.preConcat(info.fPreMat);
totMat.preConcat(info.fLocalMat);
if (info.fPaint && info.fPaint->getImageFilter()) {
clipBounds = info.fPaint->getImageFilter()->filterBounds(clipBounds, totMat);
}
if (!info.fSrcBounds.isEmpty()) {
SkRect r;
totMat.mapRect(&r, info.fSrcBounds);
r.roundOut(srcIR);
if (!srcIR->intersect(clipBounds)) {
return false;
}
} else {
*srcIR = clipBounds;
}
return true;
}
// Atlased layers must be small enough to fit in the atlas, not have a
// paint with an image filter and be neither nested nor nesting.
// TODO: allow leaf nested layers to appear in the atlas.
void GrLayerHoister::FindLayersToAtlas(GrContext* context,
const SkPicture* topLevelPicture,
const SkMatrix& initialMat,
const SkRect& query,
SkTDArray<GrHoistedLayer>* atlased,
SkTDArray<GrHoistedLayer>* recycled,
int numSamples) {
if (0 != numSamples) {
// MSAA layers are currently never atlased
return;
}
GrLayerCache* layerCache = context->getLayerCache();
layerCache->processDeletedPictures();
const SkBigPicture::AccelData* topLevelData = nullptr;
if (const SkBigPicture* bp = topLevelPicture->asSkBigPicture()) {
topLevelData = bp->accelData();
}
if (!topLevelData) {
return;
}
const SkLayerInfo *topLevelGPUData = static_cast<const SkLayerInfo*>(topLevelData);
if (0 == topLevelGPUData->numBlocks()) {
return;
}
atlased->setReserve(atlased->count() + topLevelGPUData->numBlocks());
for (int i = 0; i < topLevelGPUData->numBlocks(); ++i) {
const SkLayerInfo::BlockInfo& info = topLevelGPUData->block(i);
// TODO: ignore perspective projected layers here?
bool disallowAtlasing = info.fHasNestedLayers || info.fIsNested ||
(info.fPaint && info.fPaint->getImageFilter());
if (disallowAtlasing) {
continue;
}
SkRect layerRect;
initialMat.mapRect(&layerRect, info.fBounds);
if (!layerRect.intersect(query)) {
continue;
}
const SkIRect dstIR = layerRect.roundOut();
SkIRect srcIR;
if (!compute_source_rect(info, initialMat, dstIR, &srcIR) ||
!GrLayerCache::PlausiblyAtlasable(srcIR.width(), srcIR.height())) {
continue;
}
prepare_for_hoisting(layerCache, topLevelPicture, initialMat,
info, srcIR, dstIR, atlased, recycled, true, 0);
}
}
void GrLayerHoister::FindLayersToHoist(GrContext* context,
const SkPicture* topLevelPicture,
const SkMatrix& initialMat,
const SkRect& query,
SkTDArray<GrHoistedLayer>* needRendering,
SkTDArray<GrHoistedLayer>* recycled,
int numSamples) {
GrLayerCache* layerCache = context->getLayerCache();
layerCache->processDeletedPictures();
const SkBigPicture::AccelData* topLevelData = nullptr;
if (const SkBigPicture* bp = topLevelPicture->asSkBigPicture()) {
topLevelData = bp->accelData();
}
if (!topLevelData) {
return;
}
const SkLayerInfo *topLevelGPUData = static_cast<const SkLayerInfo*>(topLevelData);
if (0 == topLevelGPUData->numBlocks()) {
return;
}
// Find and prepare for hoisting all the layers that intersect the query rect
for (int i = 0; i < topLevelGPUData->numBlocks(); ++i) {
const SkLayerInfo::BlockInfo& info = topLevelGPUData->block(i);
if (info.fIsNested) {
// Parent layers are currently hoisted while nested layers are not.
continue;
}
SkRect layerRect;
initialMat.mapRect(&layerRect, info.fBounds);
if (!layerRect.intersect(query)) {
continue;
}
const SkIRect dstIR = layerRect.roundOut();
SkIRect srcIR;
if (!compute_source_rect(info, initialMat, dstIR, &srcIR)) {
continue;
}
prepare_for_hoisting(layerCache, topLevelPicture, initialMat, info, srcIR, dstIR,
needRendering, recycled, false, numSamples);
}
}
void GrLayerHoister::DrawLayersToAtlas(GrContext* context,
const SkTDArray<GrHoistedLayer>& atlased) {
if (atlased.count() > 0) {
// All the atlased layers are rendered into the same GrTexture
SkSurfaceProps props(0, kUnknown_SkPixelGeometry);
sk_sp<SkSurface> surface(SkSurface::MakeRenderTargetDirect(
atlased[0].fLayer->texture()->asRenderTarget(), &props));
SkCanvas* atlasCanvas = surface->getCanvas();
for (int i = 0; i < atlased.count(); ++i) {
const GrCachedLayer* layer = atlased[i].fLayer;
const SkBigPicture* pict = atlased[i].fPicture->asSkBigPicture();
if (!pict) {
// TODO: can we assume / assert this?
continue;
}
const SkIPoint offset = SkIPoint::Make(layer->srcIR().fLeft, layer->srcIR().fTop);
SkDEBUGCODE(const SkPaint* layerPaint = layer->paint();)
SkASSERT(!layerPaint || !layerPaint->getImageFilter());
SkASSERT(!layer->filter());
atlasCanvas->save();
// Add a rect clip to make sure the rendering doesn't
// extend beyond the boundaries of the atlased sub-rect
const SkRect bound = SkRect::Make(layer->rect());
atlasCanvas->clipRect(bound);
atlasCanvas->clear(0);
// '-offset' maps the layer's top/left to the origin.
// Since this layer is atlased, the top/left corner needs
// to be offset to the correct location in the backing texture.
SkMatrix initialCTM;
initialCTM.setTranslate(SkIntToScalar(-offset.fX), SkIntToScalar(-offset.fY));
initialCTM.preTranslate(bound.fLeft, bound.fTop);
initialCTM.preConcat(atlased[i].fPreMat);
atlasCanvas->setMatrix(initialCTM);
atlasCanvas->concat(atlased[i].fLocalMat);
pict->partialPlayback(atlasCanvas, layer->start() + 1, layer->stop(), initialCTM);
atlasCanvas->restore();
}
atlasCanvas->flush();
}
}
void GrLayerHoister::FilterLayer(GrContext* context,
const SkSurfaceProps* props,
const GrHoistedLayer& info) {
GrCachedLayer* layer = info.fLayer;
SkASSERT(layer->filter());
static const int kDefaultCacheSize = 32 * 1024 * 1024;
const SkIPoint filterOffset = SkIPoint::Make(layer->srcIR().fLeft, layer->srcIR().fTop);
SkMatrix totMat(info.fPreMat);
totMat.preConcat(info.fLocalMat);
totMat.postTranslate(-SkIntToScalar(filterOffset.fX), -SkIntToScalar(filterOffset.fY));
SkASSERT(0 == layer->rect().fLeft && 0 == layer->rect().fTop);
const SkIRect& clipBounds = layer->rect();
// This cache is transient, and is freed (along with all its contained
// textures) when it goes out of scope.
SkAutoTUnref<SkImageFilterCache> cache(SkImageFilterCache::Create(kDefaultCacheSize));
SkImageFilter::Context filterContext(totMat, clipBounds, cache);
// TODO: should the layer hoister store stand-alone layers as SkSpecialImages internally?
SkASSERT(layer->rect().width() == layer->texture()->width() &&
layer->rect().height() == layer->texture()->height());
const SkIRect subset = SkIRect::MakeWH(layer->rect().width(), layer->rect().height());
sk_sp<SkSpecialImage> img(SkSpecialImage::MakeFromGpu(subset,
kNeedNewImageUniqueID_SpecialImage,
sk_ref_sp(layer->texture()),
props));
SkIPoint offset = SkIPoint::Make(0, 0);
sk_sp<SkSpecialImage> result(layer->filter()->filterImage(img.get(),
filterContext,
&offset));
if (!result) {
// Filtering failed. Press on with the unfiltered version.
return;
}
SkASSERT(result->isTextureBacked());
sk_sp<GrTexture> texture(result->asTextureRef(context));
layer->setTexture(texture.get(), result->subset(), false);
layer->setOffset(offset);
}
void GrLayerHoister::DrawLayers(GrContext* context, const SkTDArray<GrHoistedLayer>& layers) {
for (int i = 0; i < layers.count(); ++i) {
GrCachedLayer* layer = layers[i].fLayer;
const SkBigPicture* pict = layers[i].fPicture->asSkBigPicture();
if (!pict) {
// TODO: can we assume / assert this?
continue;
}
const SkIPoint offset = SkIPoint::Make(layer->srcIR().fLeft, layer->srcIR().fTop);
// Each non-atlased layer has its own GrTexture
SkSurfaceProps props(0, kUnknown_SkPixelGeometry);
auto surface(SkSurface::MakeRenderTargetDirect(
layer->texture()->asRenderTarget(), &props));
SkCanvas* layerCanvas = surface->getCanvas();
SkASSERT(0 == layer->rect().fLeft && 0 == layer->rect().fTop);
// Add a rect clip to make sure the rendering doesn't
// extend beyond the boundaries of the layer
const SkRect bound = SkRect::Make(layer->rect());
layerCanvas->clipRect(bound);
layerCanvas->clear(SK_ColorTRANSPARENT);
SkMatrix initialCTM;
initialCTM.setTranslate(SkIntToScalar(-offset.fX), SkIntToScalar(-offset.fY));
initialCTM.preConcat(layers[i].fPreMat);
layerCanvas->setMatrix(initialCTM);
layerCanvas->concat(layers[i].fLocalMat);
pict->partialPlayback(layerCanvas, layer->start()+1, layer->stop(), initialCTM);
layerCanvas->flush();
if (layer->filter()) {
FilterLayer(context, &surface->props(), layers[i]);
}
}
}
void GrLayerHoister::UnlockLayers(GrContext* context,
const SkTDArray<GrHoistedLayer>& layers) {
GrLayerCache* layerCache = context->getLayerCache();
for (int i = 0; i < layers.count(); ++i) {
layerCache->removeUse(layers[i].fLayer);
}
SkDEBUGCODE(layerCache->validate();)
}
void GrLayerHoister::Begin(GrContext* context) {
GrLayerCache* layerCache = context->getLayerCache();
layerCache->begin();
}
void GrLayerHoister::End(GrContext* context) {
GrLayerCache* layerCache = context->getLayerCache();
#if !GR_CACHE_HOISTED_LAYERS
// This code completely clears out the atlas. It is required when
// caching is disabled so the atlas doesn't fill up and force more
// free-floating layers.
layerCache->purgeAll();
#endif
layerCache->end();
}
#endif

View File

@ -1,130 +0,0 @@
/*
* Copyright 2014 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef GrLayerHoister_DEFINED
#define GrLayerHoister_DEFINED
#define SK_IGNORE_GPU_LAYER_HOISTING
#include "SkPicture.h"
#include "SkTDArray.h"
#if !defined(SK_IGNORE_GPU_LAYER_HOISTING) && SK_SUPPORT_GPU
struct GrCachedLayer;
class GrReplacements;
class SkGpuDevice;
struct SkRect;
class GrHoistedLayer {
public:
const SkPicture* fPicture; // the picture that actually contains the layer
// (not necessarily the top-most picture)
GrCachedLayer* fLayer;
SkMatrix fInitialMat;
SkMatrix fPreMat;
SkMatrix fLocalMat;
};
// This class collects the layer hoisting functionality in one place.
// For each picture rendering:
// FindLayersToHoist should be called once to collect the required layers
// DrawLayers should be called once to render them
// UnlockLayers should be called once to allow the texture resources to be recycled
class GrLayerHoister {
public:
/** Attempt to reattach layers that may have been atlased in the past
*/
static void Begin(GrContext* context);
/** Release cache resources
*/
static void End(GrContext* context);
/** Find the layers in 'topLevelPicture' that can be atlased. Note that the discovered
layers can be inside nested sub-pictures.
@param context Owner of the layer cache (the source of new layers)
@param topLevelPicture The top-level picture that is about to be rendered
@param initialMat The CTM of the canvas into which the layers will be drawn
@param query The rectangle that is about to be drawn.
@param atlasedNeedRendering Out parameter storing the layers that
should be hoisted to the atlas
@param recycled Out parameter storing layers that are atlased but do not need rendering
@param numSamples The number of MSAA samples required
*/
static void FindLayersToAtlas(GrContext* context,
const SkPicture* topLevelPicture,
const SkMatrix& initialMat,
const SkRect& query,
SkTDArray<GrHoistedLayer>* atlasedNeedRendering,
SkTDArray<GrHoistedLayer>* recycled,
int numSamples);
/** Find the layers in 'topLevelPicture' that need hoisting. Note that the discovered
layers can be inside nested sub-pictures.
@param context Owner of the layer cache (the source of new layers)
@param topLevelPicture The top-level picture that is about to be rendered
@param initialMat The CTM of the canvas into which the layers will be drawn
@param query The rectangle that is about to be drawn.
@param needRendering Out parameter storing the layers that need rendering.
This should never include atlased layers.
@param recycled Out parameter storing layers that need hoisting but not rendering
@param numSamples The number of MSAA samples required
*/
static void FindLayersToHoist(GrContext* context,
const SkPicture* topLevelPicture,
const SkMatrix& initialMat,
const SkRect& query,
SkTDArray<GrHoistedLayer>* needRendering,
SkTDArray<GrHoistedLayer>* recycled,
int numSamples);
/** Draw the specified layers into the atlas.
@param context Owner of the layer cache (and thus the layers)
@param layers The layers to be drawn into the atlas
*/
static void DrawLayersToAtlas(GrContext* context, const SkTDArray<GrHoistedLayer>& layers);
/** Draw the specified layers into their own individual textures.
@param context Owner of the layer cache (and thus the layers)
@param layers The layers to be drawn
*/
static void DrawLayers(GrContext* context, const SkTDArray<GrHoistedLayer>& layers);
/** Convert all the layers in 'layers' into replacement objects in 'replacements'.
@param layers The hoisted layers
@param replacements Replacement object that will be used for a replacement draw
*/
static void ConvertLayersToReplacements(const SkPicture* topLevelPicture,
const SkTDArray<GrHoistedLayer>& layers,
GrReplacements* replacements);
/** Unlock a group of layers in the layer cache.
@param context Owner of the layer cache (and thus the layers)
@param layers Unneeded layers in the atlas
*/
static void UnlockLayers(GrContext* context, const SkTDArray<GrHoistedLayer>& layers);
/** Forcibly remove all cached layers and release the atlas. Useful for debugging and timing.
This is only functional when GR_CACHE_HOISTED_LAYERS is set to 1 in GrLayerCache.h
@param context Owner of the layer cache (and thus the layers)
*/
static void PurgeCache(GrContext* context);
private:
/** Update the GrTexture in 'layer' with its filtered version
@param context Owner of the layer cache (and thus the layers)
@param props Surface properties
@param info Layer info for a layer needing filtering prior to being composited
*/
static void FilterLayer(GrContext* context, const SkSurfaceProps*, const GrHoistedLayer& info);
};
#endif
#endif
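The call order prescribed by the class comment is easiest to see end to end. Below is a condensed, illustrative sketch of that sequence; the helper name hoist_and_draw and its parameters are assumptions, and it presumes SK_IGNORE_GPU_LAYER_HOISTING is not defined. The real driver is the SkGpuDevice::EXPERIMENTAL_drawPicture implementation removed further down in this change.

// Condensed sketch (hypothetical helper) of the hoisting sequence.
static void hoist_and_draw(GrContext* context, const SkPicture* picture, SkCanvas* canvas,
                           const SkMatrix& initialMat, const SkRect& clipBounds,
                           int numSamples) {
    GrLayerHoister::Begin(context);

    SkTDArray<GrHoistedLayer> atlasedNeedRendering, atlasedRecycled;
    GrLayerHoister::FindLayersToAtlas(context, picture, initialMat, clipBounds,
                                      &atlasedNeedRendering, &atlasedRecycled, numSamples);
    GrLayerHoister::DrawLayersToAtlas(context, atlasedNeedRendering);

    SkTDArray<GrHoistedLayer> needRendering, recycled;
    GrLayerHoister::FindLayersToHoist(context, picture, initialMat, clipBounds,
                                      &needRendering, &recycled, numSamples);
    GrLayerHoister::DrawLayers(context, needRendering);

    // Replay the picture, substituting the hoisted layers for their saveLayer blocks.
    GrRecordReplaceDraw(picture, canvas, context->getLayerCache(), initialMat, nullptr);

    GrLayerHoister::UnlockLayers(context, needRendering);
    GrLayerHoister::UnlockLayers(context, recycled);
    GrLayerHoister::UnlockLayers(context, atlasedNeedRendering);
    GrLayerHoister::UnlockLayers(context, atlasedRecycled);
    GrLayerHoister::End(context);
}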

View File

@ -1,226 +0,0 @@
/*
* Copyright 2014 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "GrContext.h"
#include "GrLayerCache.h"
#include "GrRecordReplaceDraw.h"
#include "SkBigPicture.h"
#include "SkCanvasPriv.h"
#include "SkGr.h"
#include "SkImage.h"
#include "SkRecordDraw.h"
#include "SkRecords.h"
static inline void draw_replacement_bitmap(GrCachedLayer* layer, SkCanvas* canvas) {
// Some image filters can totally filter away a layer (e.g., an SkPictureImageFilter with
// no picture).
if (!layer->texture()) {
return;
}
SkBitmap bm;
GrWrapTextureInBitmap(layer->texture(),
!layer->isAtlased() ? layer->rect().width() : layer->texture()->width(),
!layer->isAtlased() ? layer->rect().height() : layer->texture()->height(),
false,
&bm);
canvas->save();
canvas->setMatrix(SkMatrix::I());
if (layer->isAtlased()) {
const SkRect src = SkRect::Make(layer->rect());
const SkRect dst = SkRect::Make(layer->srcIR());
SkASSERT(layer->offset().isZero());
canvas->drawBitmapRect(bm, src, dst, layer->paint(), SkCanvas::kStrict_SrcRectConstraint);
} else {
canvas->drawBitmap(bm,
SkIntToScalar(layer->srcIR().fLeft + layer->offset().fX),
SkIntToScalar(layer->srcIR().fTop + layer->offset().fY),
layer->paint());
}
canvas->restore();
}
// Used by GrRecordReplaceDraw. It intercepts nested drawPicture calls and
// also draws them with replaced layers.
class ReplaceDraw : public SkRecords::Draw {
public:
ReplaceDraw(SkCanvas* canvas, GrLayerCache* layerCache,
SkPicture const* const drawablePicts[], int drawableCount,
const SkPicture* topLevelPicture,
const SkBigPicture* picture,
const SkMatrix& initialMatrix,
SkPicture::AbortCallback* callback,
const int* opIndices, int numIndices)
: INHERITED(canvas, drawablePicts, nullptr, drawableCount)
, fCanvas(canvas)
, fLayerCache(layerCache)
, fTopLevelPicture(topLevelPicture)
, fPicture(picture)
, fInitialMatrix(initialMatrix)
, fCallback(callback)
, fIndex(0)
, fNumReplaced(0) {
fOpIndexStack.append(numIndices, opIndices);
}
int draw() {
const SkBBoxHierarchy* bbh = fPicture->bbh();
const SkRecord* record = fPicture->record();
if (nullptr == record) {
return 0;
}
fNumReplaced = 0;
fOps.rewind();
if (bbh) {
// Draw only ops that affect pixels in the canvas's current clip.
// The SkRecord and BBH were recorded in identity space. This canvas
// is not necessarily in that same space. getClipBounds() returns us
// this canvas' clip bounds transformed back into identity space, which
// lets us query the BBH.
SkRect query = { 0, 0, 0, 0 };
(void)fCanvas->getClipBounds(&query);
bbh->search(query, &fOps);
for (fIndex = 0; fIndex < fOps.count(); ++fIndex) {
if (fCallback && fCallback->abort()) {
return fNumReplaced;
}
record->visit(fOps[fIndex], *this);
}
} else {
for (fIndex = 0; fIndex < (int) record->count(); ++fIndex) {
if (fCallback && fCallback->abort()) {
return fNumReplaced;
}
record->visit(fIndex, *this);
}
}
return fNumReplaced;
}
// Same as Draw for all ops except DrawPicture and SaveLayer.
template <typename T> void operator()(const T& r) {
this->INHERITED::operator()(r);
}
void operator()(const SkRecords::DrawPicture& dp) {
int drawPictureOffset;
if (fOps.count()) {
drawPictureOffset = fOps[fIndex];
} else {
drawPictureOffset = fIndex;
}
fOpIndexStack.push(drawPictureOffset);
SkAutoCanvasMatrixPaint acmp(fCanvas, &dp.matrix, dp.paint, dp.picture->cullRect());
if (const SkBigPicture* bp = dp.picture->asSkBigPicture()) {
// Draw sub-pictures with the same replacement list but a different picture
ReplaceDraw draw(fCanvas, fLayerCache,
this->drawablePicts(), this->drawableCount(),
fTopLevelPicture, bp, fInitialMatrix, fCallback,
fOpIndexStack.begin(), fOpIndexStack.count());
fNumReplaced += draw.draw();
} else {
// TODO: can we assume / assert this doesn't happen?
dp.picture->playback(fCanvas, fCallback);
}
fOpIndexStack.pop();
}
void operator()(const SkRecords::SaveLayer& sl) {
// For a saveLayer command, check if it can be replaced by a drawBitmap
// call and, if so, draw it and then update the current op index accordingly.
int startOffset;
if (fOps.count()) {
startOffset = fOps[fIndex];
} else {
startOffset = fIndex;
}
fOpIndexStack.push(startOffset);
GrCachedLayer* layer = fLayerCache->findLayer(fTopLevelPicture->uniqueID(),
fInitialMatrix,
fOpIndexStack.begin(),
fOpIndexStack.count());
if (layer) {
fNumReplaced++;
draw_replacement_bitmap(layer, fCanvas);
if (fPicture->bbh()) {
while (fOps[fIndex] < layer->stop()) {
++fIndex;
}
SkASSERT(fOps[fIndex] == layer->stop());
} else {
fIndex = layer->stop();
}
fOpIndexStack.pop();
return;
}
// Layer hoisting failed for this saveLayer; fall through to the normal saveLayer handling
this->INHERITED::operator()(sl);
fOpIndexStack.pop();
}
private:
SkCanvas* fCanvas;
GrLayerCache* fLayerCache;
const SkPicture* fTopLevelPicture;
const SkBigPicture* fPicture;
const SkMatrix fInitialMatrix;
SkPicture::AbortCallback* fCallback;
SkTDArray<int> fOps;
int fIndex;
int fNumReplaced;
// The op code indices of all the enclosing drawPicture and saveLayer calls
SkTDArray<int> fOpIndexStack;
typedef Draw INHERITED;
};
int GrRecordReplaceDraw(const SkPicture* picture,
SkCanvas* canvas,
GrLayerCache* layerCache,
const SkMatrix& initialMatrix,
SkPicture::AbortCallback* callback) {
SkAutoCanvasRestore saveRestore(canvas, true /*save now, restore at exit*/);
if (const SkBigPicture* bp = picture->asSkBigPicture()) {
// TODO: drawablePicts?
ReplaceDraw draw(canvas, layerCache, nullptr, 0,
bp, bp,
initialMatrix, callback, nullptr, 0);
return draw.draw();
} else {
// TODO: can we assume / assert this doesn't happen?
picture->playback(canvas, callback);
return 0;
}
}

View File

@ -1,26 +0,0 @@
/*
* Copyright 2014 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef GrRecordReplaceDraw_DEFINED
#define GrRecordReplaceDraw_DEFINED
#include "SkPicture.h"
class GrLayerCache;
class SkCanvas;
class SkMatrix;
// Draw an SkPicture into an SkCanvas replacing saveLayer/restore blocks with
// drawBitmap calls. A convenience wrapper around SkRecords::Draw.
// It returns the number of saveLayer/restore blocks replaced with drawBitmap calls.
int GrRecordReplaceDraw(const SkPicture*,
SkCanvas*,
GrLayerCache* layerCache,
const SkMatrix& initialMatrix,
SkPicture::AbortCallback*);
#endif // GrRecordReplaceDraw_DEFINED
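As a usage sketch only (the helper name replay_with_replacements and its parameters are assumptions): once the hoister has populated the layer cache, replaying a picture through GrRecordReplaceDraw substitutes drawBitmap calls for the cached saveLayer/restore blocks and returns how many were substituted. The RecordReplaceDraw tests removed below exercise this same entry point with an SkRecorder canvas.

// Illustrative usage (hypothetical helper): replay 'picture' into 'canvas',
// replacing any saveLayer blocks that have cached layers.
static int replay_with_replacements(GrContext* context, const SkPicture* picture,
                                    SkCanvas* canvas) {
    return GrRecordReplaceDraw(picture, canvas, context->getLayerCache(),
                               canvas->getTotalMatrix(), nullptr /*callback*/);
}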

View File

@ -13,8 +13,6 @@
#include "GrGpu.h"
#include "GrGpuResourcePriv.h"
#include "GrImageIDTextureAdjuster.h"
#include "GrLayerHoister.h"
#include "GrRecordReplaceDraw.h"
#include "GrStyle.h"
#include "GrTracing.h"
#include "SkCanvasPriv.h"
@ -27,7 +25,6 @@
#include "SkImageCacherator.h"
#include "SkImageFilter.h"
#include "SkImageFilterCache.h"
#include "SkLayerInfo.h"
#include "SkMaskFilter.h"
#include "SkNinePatchIter.h"
#include "SkPathEffect.h"
@ -1837,79 +1834,6 @@ sk_sp<SkSurface> SkGpuDevice::makeSurface(const SkImageInfo& info, const SkSurfa
&props);
}
bool SkGpuDevice::EXPERIMENTAL_drawPicture(SkCanvas* mainCanvas, const SkPicture* mainPicture,
const SkMatrix* matrix, const SkPaint* paint) {
ASSERT_SINGLE_OWNER
#ifndef SK_IGNORE_GPU_LAYER_HOISTING
// TODO: should handle this natively
if (paint ||
(kRGBA_8888_SkColorType != mainCanvas->imageInfo().colorType() &&
kBGRA_8888_SkColorType != mainCanvas->imageInfo().colorType())) {
return false;
}
const SkBigPicture::AccelData* data = nullptr;
if (const SkBigPicture* bp = mainPicture->asSkBigPicture()) {
data = bp->accelData();
}
if (!data) {
return false;
}
const SkLayerInfo *gpuData = static_cast<const SkLayerInfo*>(data);
if (0 == gpuData->numBlocks()) {
return false;
}
SkTDArray<GrHoistedLayer> atlasedNeedRendering, atlasedRecycled;
SkIRect iBounds;
if (!mainCanvas->getClipDeviceBounds(&iBounds)) {
return false;
}
SkRect clipBounds = SkRect::Make(iBounds);
SkMatrix initialMatrix = mainCanvas->getTotalMatrix();
GrLayerHoister::Begin(fContext);
GrLayerHoister::FindLayersToAtlas(fContext, mainPicture,
initialMatrix,
clipBounds,
&atlasedNeedRendering, &atlasedRecycled,
fDrawContext->numColorSamples());
GrLayerHoister::DrawLayersToAtlas(fContext, atlasedNeedRendering);
SkTDArray<GrHoistedLayer> needRendering, recycled;
SkAutoCanvasMatrixPaint acmp(mainCanvas, matrix, paint, mainPicture->cullRect());
GrLayerHoister::FindLayersToHoist(fContext, mainPicture,
initialMatrix,
clipBounds,
&needRendering, &recycled,
fDrawContext->numColorSamples());
GrLayerHoister::DrawLayers(fContext, needRendering);
// Render the entire picture using new layers
GrRecordReplaceDraw(mainPicture, mainCanvas, fContext->getLayerCache(),
initialMatrix, nullptr);
GrLayerHoister::UnlockLayers(fContext, needRendering);
GrLayerHoister::UnlockLayers(fContext, recycled);
GrLayerHoister::UnlockLayers(fContext, atlasedNeedRendering);
GrLayerHoister::UnlockLayers(fContext, atlasedRecycled);
GrLayerHoister::End(fContext);
return true;
#else
return false;
#endif
}
SkImageFilterCache* SkGpuDevice::getImageFilterCache() {
ASSERT_SINGLE_OWNER
// We always return a transient cache, so it is freed after each

View File

@ -145,10 +145,6 @@ protected:
bool onWritePixels(const SkImageInfo&, const void*, size_t, int, int) override;
bool onShouldDisableLCD(const SkPaint&) const final;
/** PRIVATE / EXPERIMENTAL -- do not call */
virtual bool EXPERIMENTAL_drawPicture(SkCanvas* canvas, const SkPicture* picture,
const SkMatrix*, const SkPaint*) override;
private:
// We want these unreffed in DrawContext, RenderTarget, GrContext order.
SkAutoTUnref<GrContext> fContext;

View File

@ -5,6 +5,7 @@
* found in the LICENSE file.
*/
#include "SkBigPicture.h"
#include "SkBBoxHierarchy.h"
#include "SkBlurImageFilter.h"
#include "SkCanvas.h"
@ -16,7 +17,6 @@
#include "SkError.h"
#include "SkImageEncoder.h"
#include "SkImageGenerator.h"
#include "SkLayerInfo.h"
#include "SkMD5.h"
#include "SkPaint.h"
#include "SkPicture.h"
@ -319,209 +319,6 @@ static void test_gpu_veto(skiatest::Reporter* reporter) {
#endif // SK_SUPPORT_GPU
static void test_savelayer_extraction(skiatest::Reporter* reporter) {
static const int kWidth = 100;
static const int kHeight = 100;
// Create a complex paint that the bounding box computation code can't
// optimize away
SkScalar blueToRedMatrix[20] = { 0 };
blueToRedMatrix[2] = blueToRedMatrix[18] = SK_Scalar1;
sk_sp<SkColorFilter> blueToRed(SkColorFilter::MakeMatrixFilterRowMajor255(blueToRedMatrix));
sk_sp<SkImageFilter> filter(SkColorFilterImageFilter::Make(std::move(blueToRed), nullptr));
SkPaint complexPaint;
complexPaint.setImageFilter(std::move(filter));
sk_sp<SkPicture> pict, child;
SkRTreeFactory bbhFactory;
{
SkPictureRecorder recorder;
SkCanvas* c = recorder.beginRecording(SkIntToScalar(kWidth), SkIntToScalar(kHeight),
&bbhFactory,
SkPictureRecorder::kComputeSaveLayerInfo_RecordFlag);
c->saveLayer(nullptr, &complexPaint);
c->restore();
child = recorder.finishRecordingAsPicture();
}
// create a picture with the structure:
// 1)
// SaveLayer
// Restore
// 2)
// SaveLayer
// Translate
// SaveLayer w/ bound
// Restore
// Restore
// 3)
// SaveLayer w/ copyable paint
// Restore
// 4)
// SaveLayer
// DrawPicture (which has a SaveLayer/Restore pair)
// Restore
// 5)
// SaveLayer
// DrawPicture with Matrix & Paint (with SaveLayer/Restore pair)
// Restore
{
SkPictureRecorder recorder;
SkCanvas* c = recorder.beginRecording(SkIntToScalar(kWidth),
SkIntToScalar(kHeight),
&bbhFactory,
SkPictureRecorder::kComputeSaveLayerInfo_RecordFlag);
// 1)
c->saveLayer(nullptr, &complexPaint); // layer #0
c->restore();
// 2)
c->saveLayer(nullptr, nullptr); // layer #1
c->translate(kWidth / 2.0f, kHeight / 2.0f);
SkRect r = SkRect::MakeXYWH(0, 0, kWidth/2, kHeight/2);
c->saveLayer(&r, &complexPaint); // layer #2
c->restore();
c->restore();
// 3)
{
c->saveLayer(nullptr, &complexPaint); // layer #3
c->restore();
}
SkPaint layerPaint;
layerPaint.setColor(SK_ColorRED); // Non-alpha only to avoid SaveLayerDrawRestoreNooper
// 4)
{
c->saveLayer(nullptr, &layerPaint); // layer #4
c->drawPicture(child); // layer #5 inside picture
c->restore();
}
// 5
{
SkPaint picturePaint;
SkMatrix trans;
trans.setTranslate(10, 10);
c->saveLayer(nullptr, &layerPaint); // layer #6
c->drawPicture(child, &trans, &picturePaint); // layer #7 inside picture
c->restore();
}
pict = recorder.finishRecordingAsPicture();
}
// Now test out the SaveLayer extraction
if (!SkCanvas::Internal_Private_GetIgnoreSaveLayerBounds()) {
const SkBigPicture* bp = pict->asSkBigPicture();
REPORTER_ASSERT(reporter, bp);
const SkBigPicture::AccelData* data = bp->accelData();
REPORTER_ASSERT(reporter, data);
const SkLayerInfo *gpuData = static_cast<const SkLayerInfo*>(data);
REPORTER_ASSERT(reporter, 8 == gpuData->numBlocks());
const SkLayerInfo::BlockInfo& info0 = gpuData->block(0);
// The parent/child layers appear in reverse order
const SkLayerInfo::BlockInfo& info1 = gpuData->block(2);
const SkLayerInfo::BlockInfo& info2 = gpuData->block(1);
const SkLayerInfo::BlockInfo& info3 = gpuData->block(3);
// The parent/child layers appear in reverse order
const SkLayerInfo::BlockInfo& info4 = gpuData->block(5);
const SkLayerInfo::BlockInfo& info5 = gpuData->block(4);
// The parent/child layers appear in reverse order
const SkLayerInfo::BlockInfo& info6 = gpuData->block(7);
const SkLayerInfo::BlockInfo& info7 = gpuData->block(6);
REPORTER_ASSERT(reporter, nullptr == info0.fPicture);
REPORTER_ASSERT(reporter, kWidth == info0.fBounds.width() &&
kHeight == info0.fBounds.height());
REPORTER_ASSERT(reporter, info0.fLocalMat.isIdentity());
REPORTER_ASSERT(reporter, info0.fPreMat.isIdentity());
REPORTER_ASSERT(reporter, 0 == info0.fBounds.fLeft && 0 == info0.fBounds.fTop);
REPORTER_ASSERT(reporter, nullptr != info0.fPaint);
REPORTER_ASSERT(reporter, !info0.fIsNested && !info0.fHasNestedLayers);
REPORTER_ASSERT(reporter, nullptr == info1.fPicture);
REPORTER_ASSERT(reporter, kWidth/2.0 == info1.fBounds.width() &&
kHeight/2.0 == info1.fBounds.height());
REPORTER_ASSERT(reporter, info1.fLocalMat.isIdentity());
REPORTER_ASSERT(reporter, info1.fPreMat.isIdentity());
REPORTER_ASSERT(reporter, kWidth/2.0 == info1.fBounds.fLeft &&
kHeight/2.0 == info1.fBounds.fTop);
REPORTER_ASSERT(reporter, nullptr == info1.fPaint);
REPORTER_ASSERT(reporter, !info1.fIsNested &&
info1.fHasNestedLayers); // has a nested SL
REPORTER_ASSERT(reporter, nullptr == info2.fPicture);
REPORTER_ASSERT(reporter, kWidth / 2 == info2.fBounds.width() &&
kHeight / 2 == info2.fBounds.height()); // bound reduces size
REPORTER_ASSERT(reporter, !info2.fLocalMat.isIdentity());
REPORTER_ASSERT(reporter, info2.fPreMat.isIdentity());
REPORTER_ASSERT(reporter, kWidth / 2 == info2.fBounds.fLeft && // translated
kHeight / 2 == info2.fBounds.fTop);
REPORTER_ASSERT(reporter, nullptr != info2.fPaint);
REPORTER_ASSERT(reporter, info2.fIsNested && !info2.fHasNestedLayers); // is nested
REPORTER_ASSERT(reporter, nullptr == info3.fPicture);
REPORTER_ASSERT(reporter, kWidth == info3.fBounds.width() &&
kHeight == info3.fBounds.height());
REPORTER_ASSERT(reporter, info3.fLocalMat.isIdentity());
REPORTER_ASSERT(reporter, info3.fPreMat.isIdentity());
REPORTER_ASSERT(reporter, 0 == info3.fBounds.fLeft && 0 == info3.fBounds.fTop);
REPORTER_ASSERT(reporter, info3.fPaint);
REPORTER_ASSERT(reporter, !info3.fIsNested && !info3.fHasNestedLayers);
REPORTER_ASSERT(reporter, nullptr == info4.fPicture);
REPORTER_ASSERT(reporter, kWidth == info4.fBounds.width() &&
kHeight == info4.fBounds.height());
REPORTER_ASSERT(reporter, 0 == info4.fBounds.fLeft && 0 == info4.fBounds.fTop);
REPORTER_ASSERT(reporter, info4.fLocalMat.isIdentity());
REPORTER_ASSERT(reporter, info4.fPreMat.isIdentity());
REPORTER_ASSERT(reporter, info4.fPaint);
REPORTER_ASSERT(reporter, !info4.fIsNested &&
info4.fHasNestedLayers); // has a nested SL
REPORTER_ASSERT(reporter, child.get() == info5.fPicture); // in a child picture
REPORTER_ASSERT(reporter, kWidth == info5.fBounds.width() &&
kHeight == info5.fBounds.height());
REPORTER_ASSERT(reporter, 0 == info5.fBounds.fLeft && 0 == info5.fBounds.fTop);
REPORTER_ASSERT(reporter, info5.fLocalMat.isIdentity());
REPORTER_ASSERT(reporter, info5.fPreMat.isIdentity());
REPORTER_ASSERT(reporter, nullptr != info5.fPaint);
REPORTER_ASSERT(reporter, info5.fIsNested && !info5.fHasNestedLayers); // is nested
REPORTER_ASSERT(reporter, nullptr == info6.fPicture);
REPORTER_ASSERT(reporter, kWidth-10 == info6.fBounds.width() &&
kHeight-10 == info6.fBounds.height());
REPORTER_ASSERT(reporter, 10 == info6.fBounds.fLeft && 10 == info6.fBounds.fTop);
REPORTER_ASSERT(reporter, info6.fLocalMat.isIdentity());
REPORTER_ASSERT(reporter, info6.fPreMat.isIdentity());
REPORTER_ASSERT(reporter, info6.fPaint);
REPORTER_ASSERT(reporter, !info6.fIsNested &&
info6.fHasNestedLayers); // has a nested SL
REPORTER_ASSERT(reporter, child.get() == info7.fPicture); // in a child picture
REPORTER_ASSERT(reporter, kWidth == info7.fBounds.width() &&
kHeight == info7.fBounds.height());
REPORTER_ASSERT(reporter, 0 == info7.fBounds.fLeft && 0 == info7.fBounds.fTop);
REPORTER_ASSERT(reporter, info7.fLocalMat.isIdentity());
REPORTER_ASSERT(reporter, info7.fPreMat.isIdentity());
REPORTER_ASSERT(reporter, nullptr != info7.fPaint);
REPORTER_ASSERT(reporter, info7.fIsNested && !info7.fHasNestedLayers); // is nested
}
}
static void set_canvas_to_save_count_4(SkCanvas* canvas) {
canvas->restoreToCount(1);
canvas->save();
@ -1190,7 +987,6 @@ DEF_TEST(Picture, reporter) {
test_clip_expansion(reporter);
test_hierarchical(reporter);
test_gen_id(reporter);
test_savelayer_extraction(reporter);
test_cull_rect_reset(reporter);
}

View File

@ -1,159 +0,0 @@
/*
* Copyright 2014 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "Test.h"
#if SK_SUPPORT_GPU
#include "GrContext.h"
#include "GrLayerCache.h"
#include "GrRecordReplaceDraw.h"
#include "RecordTestUtils.h"
#include "SkBBHFactory.h"
#include "SkPictureRecorder.h"
#include "SkRecordDraw.h"
#include "SkRecorder.h"
#include "SkUtils.h"
static const int kWidth = 100;
static const int kHeight = 100;
class JustOneDraw : public SkPicture::AbortCallback {
public:
JustOneDraw() : fCalls(0) {}
bool abort() override { return fCalls++ > 0; }
private:
int fCalls;
};
// Make sure the abort callback works
DEF_TEST(RecordReplaceDraw_Abort, r) {
sk_sp<SkPicture> pic;
{
// Record two commands.
SkPictureRecorder recorder;
SkCanvas* canvas = recorder.beginRecording(SkIntToScalar(kWidth), SkIntToScalar(kHeight));
canvas->drawRect(SkRect::MakeWH(SkIntToScalar(kWidth), SkIntToScalar(kHeight)), SkPaint());
canvas->clipRect(SkRect::MakeWH(SkIntToScalar(kWidth), SkIntToScalar(kHeight)));
pic = recorder.finishRecordingAsPicture();
}
SkRecord rerecord;
SkRecorder canvas(&rerecord, kWidth, kHeight);
JustOneDraw callback;
GrRecordReplaceDraw(pic.get(), &canvas, nullptr, SkMatrix::I(), &callback);
switch (rerecord.count()) {
case 3:
assert_type<SkRecords::Save>(r, rerecord, 0);
assert_type<SkRecords::DrawRect>(r, rerecord, 1);
assert_type<SkRecords::Restore>(r, rerecord, 2);
break;
case 1:
assert_type<SkRecords::DrawRect>(r, rerecord, 0);
break;
default:
REPORTER_ASSERT(r, false);
}
}
// Make sure GrRecordReplaceDraw balances unbalanced saves
DEF_TEST(RecordReplaceDraw_Unbalanced, r) {
sk_sp<SkPicture> pic;
{
SkPictureRecorder recorder;
SkCanvas* canvas = recorder.beginRecording(SkIntToScalar(kWidth), SkIntToScalar(kHeight));
// We won't balance this, but GrRecordReplaceDraw will for us.
canvas->save();
canvas->scale(2, 2);
pic = recorder.finishRecordingAsPicture();
// We may have optimized everything away. If so, just return.
if (pic->approximateOpCount() == 0) {
return;
}
}
SkRecord rerecord;
SkRecorder canvas(&rerecord, kWidth, kHeight);
GrRecordReplaceDraw(pic.get(), &canvas, nullptr, SkMatrix::I(), nullptr/*callback*/);
// ensure rerecord is balanced (in this case by checking that the count is odd)
REPORTER_ASSERT(r, (rerecord.count() & 1) == 1);
}
// Test out the layer replacement functionality with and w/o a BBH
void test_replacements(skiatest::Reporter* r, GrContext* context, bool doReplace) {
sk_sp<SkPicture> pic;
{
SkPictureRecorder recorder;
SkCanvas* canvas = recorder.beginRecording(SkIntToScalar(kWidth), SkIntToScalar(kHeight));
SkPaint paint;
canvas->saveLayer(nullptr, &paint);
canvas->clear(SK_ColorRED);
canvas->restore();
canvas->drawRect(SkRect::MakeWH(SkIntToScalar(kWidth / 2), SkIntToScalar(kHeight / 2)),
SkPaint());
pic = recorder.finishRecordingAsPicture();
}
SkAutoTUnref<GrTexture> texture;
SkPaint paint;
GrLayerCache* layerCache = context->getLayerCache();
if (doReplace) {
int key[1] = { 0 };
GrCachedLayer* layer = layerCache->findLayerOrCreate(pic->uniqueID(), 0, 2,
SkIRect::MakeWH(kWidth, kHeight),
SkIRect::MakeWH(kWidth, kHeight),
SkMatrix::I(), key, 1, &paint);
GrSurfaceDesc desc;
desc.fConfig = kSkia8888_GrPixelConfig;
desc.fFlags = kRenderTarget_GrSurfaceFlag;
desc.fWidth = kWidth;
desc.fHeight = kHeight;
desc.fSampleCnt = 0;
// Giving the texture some initial data so the GPU (specifically Vulkan) does not complain
// when reading from an uninitialized texture.
SkAutoTMalloc<uint32_t> srcBuffer(kWidth*kHeight);
memset(srcBuffer.get(), 0, kWidth*kHeight*sizeof(uint32_t));
texture.reset(context->textureProvider()->createTexture(
desc, SkBudgeted::kNo, srcBuffer.get(), 0));
layer->setTexture(texture, SkIRect::MakeWH(kWidth, kHeight), false);
}
SkRecord rerecord;
SkRecorder canvas(&rerecord, kWidth, kHeight);
GrRecordReplaceDraw(pic.get(), &canvas, layerCache, SkMatrix::I(), nullptr/*callback*/);
int numLayers = count_instances_of_type<SkRecords::SaveLayer>(rerecord);
if (doReplace) {
REPORTER_ASSERT(r, 0 == numLayers);
} else {
REPORTER_ASSERT(r, 1 == numLayers);
}
}
DEF_GPUTEST_FOR_RENDERING_CONTEXTS(RecordReplaceDraw, r, ctxInfo) {
test_replacements(r, ctxInfo.grContext(), true);
test_replacements(r, ctxInfo.grContext(), false);
}
#endif