Reland "ccpr: Support caching of paths that span multiple tiles"

This is a reland of 6a3dc8be46

Original change's description:
> ccpr: Support caching of paths that span multiple tiles
> 
> Adds an accumulative "hit rect" for each cache entry that tracks the
> region of the path that has been drawn during its lifetime. Now, a
> path mask can be cached once the "hit rect" covers 50% of the path.
> This allows us to cache a path that spans multiple tiles.
> 
> To guard against unnecessarily caching gigantic path masks, we also
> require that 10% of the path be visible during the draw when it is
> cached.
> 
> Bug: skia:8462
> Change-Id: Iab2c277102b7a774eaa909c9663211694554c5a5
> Reviewed-on: https://skia-review.googlesource.com/c/180700
> Commit-Queue: Chris Dalton <csmartdalton@google.com>
> Reviewed-by: Robert Phillips <robertphillips@google.com>

Bug: skia:8462
Change-Id: Ia2b10430acd2dffac78b5abd432763ead79bc902
Reviewed-on: https://skia-review.googlesource.com/c/181983
Reviewed-by: Robert Phillips <robertphillips@google.com>
Commit-Queue: Chris Dalton <csmartdalton@google.com>
Author: Chris Dalton, 2019-01-07 17:45:36 -07:00 (committed by Skia Commit-Bot)
parent 555c972f5b
commit aaa77c169f
5 changed files with 144 additions and 84 deletions
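Before the per-file diffs, here is a condensed, illustrative sketch of the caching rule described above. The names are assumptions made for this sketch; the authoritative version is GrCCDrawPathsOp::SingleDraw::shouldCachePathMask in the first file below:

    #include <algorithm>
    #include <cstdint>
    #include "include/core/SkRect.h"

    // 'hitRect' is the accumulated region of the path seen over the cache
    // entry's lifetime (mask-local space); 'drawBounds' is the clipped
    // bounds of the current draw. All rects assumed to share one space.
    static int64_t area(const SkIRect& r) {
        return (int64_t)r.height() * r.width();
    }

    static bool shouldCacheWholeMask(const SkIRect& shapeBounds, const SkIRect& hitRect,
                                     const SkIRect& drawBounds, int hitCount,
                                     int maxRenderTargetSize) {
        if (hitCount <= 1) {
            return false;  // Wait for at least a second hit before caching.
        }
        if (std::max(shapeBounds.width(), shapeBounds.height()) > maxRenderTargetSize) {
            return false;  // Too big to render as a single mask.
        }
        int64_t shapeArea = area(shapeBounds);
        if (shapeArea < 100*100) {
            return true;   // Small paths are worth caching whole regardless.
        }
        // Cache once >= 50% of the path has been seen over the entry's
        // lifetime AND >= 10% of it is visible in this particular draw.
        return area(hitRect)*2 >= shapeArea && area(drawBounds)*10 >= shapeArea;
    }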


@@ -25,10 +25,6 @@ static bool has_coord_transforms(const GrPaint& paint) {
return false;
}
static int64_t area(const SkIRect& r) {
return sk_64_mul(r.height(), r.width());
}
std::unique_ptr<GrCCDrawPathsOp> GrCCDrawPathsOp::Make(
GrContext* context, const SkIRect& clipIBounds, const SkMatrix& m, const GrShape& shape,
GrPaint&& paint) {
@@ -91,36 +87,22 @@ std::unique_ptr<GrCCDrawPathsOp> GrCCDrawPathsOp::InternalMake(
conservativeDevBounds.roundOut(&shapeConservativeIBounds);
SkIRect maskDevIBounds;
Visibility maskVisibility;
if (clipIBounds.contains(shapeConservativeIBounds)) {
maskDevIBounds = shapeConservativeIBounds;
maskVisibility = Visibility::kComplete;
} else {
if (!maskDevIBounds.intersect(clipIBounds, shapeConservativeIBounds)) {
return nullptr;
}
int64_t unclippedArea = area(shapeConservativeIBounds);
int64_t clippedArea = area(maskDevIBounds);
maskVisibility = (clippedArea >= unclippedArea/2 || unclippedArea < 100*100)
? Visibility::kMostlyComplete // i.e., visible enough to justify rendering the
// whole thing if we think we can cache it.
: Visibility::kPartial;
if (!maskDevIBounds.intersect(clipIBounds, shapeConservativeIBounds)) {
return nullptr;
}
GrOpMemoryPool* pool = context->contextPriv().opMemoryPool();
return pool->allocate<GrCCDrawPathsOp>(m, shape, strokeDevWidth, shapeConservativeIBounds,
maskDevIBounds, maskVisibility, conservativeDevBounds,
std::move(paint));
maskDevIBounds, conservativeDevBounds, std::move(paint));
}
GrCCDrawPathsOp::GrCCDrawPathsOp(const SkMatrix& m, const GrShape& shape, float strokeDevWidth,
const SkIRect& shapeConservativeIBounds,
const SkIRect& maskDevIBounds, Visibility maskVisibility,
const SkRect& conservativeDevBounds, GrPaint&& paint)
const SkIRect& maskDevIBounds, const SkRect& conservativeDevBounds,
GrPaint&& paint)
: GrDrawOp(ClassID())
, fViewMatrixIfUsingLocalCoords(has_coord_transforms(paint) ? m : SkMatrix::I())
, fDraws(m, shape, strokeDevWidth, shapeConservativeIBounds, maskDevIBounds, maskVisibility,
, fDraws(m, shape, strokeDevWidth, shapeConservativeIBounds, maskDevIBounds,
paint.getColor4f())
, fProcessors(std::move(paint)) { // Paint must be moved after fetching its color above.
SkDEBUGCODE(fBaseInstance = -1);
@@ -139,14 +121,12 @@ GrCCDrawPathsOp::~GrCCDrawPathsOp() {
GrCCDrawPathsOp::SingleDraw::SingleDraw(const SkMatrix& m, const GrShape& shape,
float strokeDevWidth,
const SkIRect& shapeConservativeIBounds,
const SkIRect& maskDevIBounds, Visibility maskVisibility,
const SkPMColor4f& color)
const SkIRect& maskDevIBounds, const SkPMColor4f& color)
: fMatrix(m)
, fShape(shape)
, fStrokeDevWidth(strokeDevWidth)
, fShapeConservativeIBounds(shapeConservativeIBounds)
, fMaskDevIBounds(maskDevIBounds)
, fMaskVisibility(maskVisibility)
, fColor(color) {
#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
if (fShape.hasUnstyledKey()) {
@@ -239,8 +219,6 @@ void GrCCDrawPathsOp::accountForOwnPaths(GrCCPathCache* pathCache,
void GrCCDrawPathsOp::SingleDraw::accountForOwnPath(
GrCCPathCache* pathCache, GrOnFlushResourceProvider* onFlushRP,
GrCCPerFlushResourceSpecs* specs) {
using CreateIfAbsent = GrCCPathCache::CreateIfAbsent;
using MaskTransform = GrCCPathCache::MaskTransform;
using CoverageType = GrCCAtlas::CoverageType;
SkPath path;
@@ -249,9 +227,8 @@ void GrCCDrawPathsOp::SingleDraw::accountForOwnPath(
SkASSERT(!fCacheEntry);
if (pathCache) {
MaskTransform m(fMatrix, &fCachedMaskShift);
bool canStashPathMask = fMaskVisibility >= Visibility::kMostlyComplete;
fCacheEntry = pathCache->find(onFlushRP, fShape, m, CreateIfAbsent(canStashPathMask));
fCacheEntry =
pathCache->find(onFlushRP, fShape, fMaskDevIBounds, fMatrix, &fCachedMaskShift);
}
if (fCacheEntry) {
@@ -273,25 +250,55 @@ void GrCCDrawPathsOp::SingleDraw::accountForOwnPath(
return;
}
if (Visibility::kMostlyComplete == fMaskVisibility && fCacheEntry->hitCount() > 1) {
int shapeSize = SkTMax(fShapeConservativeIBounds.height(),
fShapeConservativeIBounds.width());
if (shapeSize <= onFlushRP->caps()->maxRenderTargetSize()) {
// We've seen this path before with a compatible matrix, and it's mostly
// visible. Just render the whole mask so we can try to cache it.
fMaskDevIBounds = fShapeConservativeIBounds;
fMaskVisibility = Visibility::kComplete;
}
if (this->shouldCachePathMask(onFlushRP->caps()->maxRenderTargetSize())) {
fDoCachePathMask = true;
// We don't cache partial masks; ensure the bounds include the entire path.
fMaskDevIBounds = fShapeConservativeIBounds;
}
}
// Plan on rendering this path in a new atlas.
int idx = (fShape.style().strokeRec().isFillStyle())
? GrCCPerFlushResourceSpecs::kFillIdx
: GrCCPerFlushResourceSpecs::kStrokeIdx;
++specs->fNumRenderedPaths[idx];
specs->fRenderedPathStats[idx].statPath(path);
specs->fRenderedAtlasSpecs.accountForSpace(fMaskDevIBounds.width(),
fMaskDevIBounds.height());
specs->fRenderedAtlasSpecs.accountForSpace(fMaskDevIBounds.width(), fMaskDevIBounds.height());
}
bool GrCCDrawPathsOp::SingleDraw::shouldCachePathMask(int maxRenderTargetSize) const {
SkASSERT(fCacheEntry);
SkASSERT(!fCacheEntry->cachedAtlas());
if (fCacheEntry->hitCount() <= 1) {
return false; // Don't cache a path mask until at least its second hit.
}
int shapeMaxDimension = SkTMax(fShapeConservativeIBounds.height(),
fShapeConservativeIBounds.width());
if (shapeMaxDimension > maxRenderTargetSize) {
return false; // This path isn't cachable.
}
int64_t shapeArea = sk_64_mul(fShapeConservativeIBounds.height(),
fShapeConservativeIBounds.width());
if (shapeArea < 100*100) {
// If a path is small enough, we might as well try to render and cache the entire thing, no
// matter how much of it is actually visible.
return true;
}
// The hitRect should already be contained within the shape's bounds, but we still intersect it
// because it's possible for edges very near pixel boundaries (e.g., 0.999999) to round out
// inconsistently, depending on the integer translation values and fp32 precision.
SkIRect hitRect = fCacheEntry->hitRect().makeOffset(fCachedMaskShift.x(), fCachedMaskShift.y());
hitRect.intersect(fShapeConservativeIBounds);
// Render and cache the entire path mask if we see enough of it to justify rendering all the
// pixels. Our criteria for "enough" is that we must have seen at least 50% of the path in the
// past, and in this particular draw we must see at least 10% of it.
int64_t hitArea = sk_64_mul(hitRect.height(), hitRect.width());
int64_t drawArea = sk_64_mul(fMaskDevIBounds.height(), fMaskDevIBounds.width());
return hitArea*2 >= shapeArea && drawArea*10 >= shapeArea;
}
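For concreteness, a worked example with invented numbers: a 400x400 path has shapeArea = 160,000. Drawn in left and right halves, the second draw has hitCount == 2, the joined hit rect spans the full bounds (hitArea*2 = 320,000 >= 160,000), and the draw itself covers 80,000 px^2 (drawArea*10 = 800,000 >= 160,000), so shouldCachePathMask() returns true and the whole mask gets rendered and cached.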
void GrCCDrawPathsOp::setupResources(
@@ -335,6 +342,12 @@ void GrCCDrawPathsOp::SingleDraw::setupResources(
== fCacheEntry->cachedAtlas()->coverageType());
SkASSERT(fCacheEntry->cachedAtlas()->getOnFlushProxy());
}
#if 0
// Simple color manipulation to visualize cached paths.
fColor = (GrCCAtlas::CoverageType::kA8_LiteralCoverage
== fCacheEntry->cachedAtlas()->coverageType())
? SkPMColor4f{0,0,.25,.25} : SkPMColor4f{0,.25,0,.25};
#endif
op->recordInstance(fCacheEntry->cachedAtlas()->getOnFlushProxy(),
resources->nextPathInstanceIdx());
// TODO4F: Preserve float colors
@@ -359,19 +372,10 @@ void GrCCDrawPathsOp::SingleDraw::setupResources(
resources->appendDrawPathInstance().set(devBounds, devBounds45, devToAtlasOffset,
fColor.toBytes_RGBA(), doEvenOddFill);
// If we have a spot in the path cache, try to make a note of where this mask is so we
// can reuse it in the future.
if (fCacheEntry) {
if (fDoCachePathMask) {
SkASSERT(fCacheEntry);
SkASSERT(!fCacheEntry->cachedAtlas());
if (Visibility::kComplete != fMaskVisibility || fCacheEntry->hitCount() <= 1) {
// Don't cache a path mask unless it's completely visible with a hit count > 1.
//
// NOTE: mostly-visible paths with a hit count > 1 should have been promoted to
// fully visible during accountForOwnPaths().
return;
}
SkASSERT(fShapeConservativeIBounds == fMaskDevIBounds);
fCacheEntry->setCoverageCountAtlas(onFlushRP, atlas, devToAtlasOffset, devBounds,
devBounds45, devIBounds, fCachedMaskShift);
}


@@ -73,15 +73,10 @@ private:
float strokeDevWidth,
const SkRect& conservativeDevBounds,
GrPaint&&);
enum class Visibility {
kPartial,
kMostlyComplete, // (i.e., can we cache the whole path mask if we think it will be reused?)
kComplete
};
GrCCDrawPathsOp(const SkMatrix&, const GrShape&, float strokeDevWidth,
const SkIRect& shapeConservativeIBounds, const SkIRect& maskDevIBounds,
Visibility maskVisibility, const SkRect& conservativeDevBounds, GrPaint&&);
const SkRect& conservativeDevBounds, GrPaint&&);
void recordInstance(GrTextureProxy* atlasProxy, int instanceIdx);
@@ -91,7 +86,7 @@ private:
public:
SingleDraw(const SkMatrix&, const GrShape&, float strokeDevWidth,
const SkIRect& shapeConservativeIBounds, const SkIRect& maskDevIBounds,
Visibility maskVisibility, const SkPMColor4f&);
const SkPMColor4f&);
// See the corresponding methods in GrCCDrawPathsOp.
RequiresDstTexture finalize(const GrCaps&, const GrAppliedClip*, GrProcessorSet*);
@@ -101,17 +96,19 @@ private:
DoCopiesToA8Coverage, GrCCDrawPathsOp*);
private:
bool shouldCachePathMask(int maxRenderTargetSize) const;
SkMatrix fMatrix;
GrShape fShape;
float fStrokeDevWidth;
const SkIRect fShapeConservativeIBounds;
SkIRect fMaskDevIBounds;
Visibility fMaskVisibility;
SkPMColor4f fColor;
GrCCPathCache::OnFlushEntryRef fCacheEntry;
SkIVector fCachedMaskShift;
bool fDoCopyToA8Coverage = false;
bool fDoCachePathMask = false;
SingleDraw* fNext = nullptr;


@@ -158,9 +158,9 @@ private:
}
GrCCPathCache::OnFlushEntryRef GrCCPathCache::find(GrOnFlushResourceProvider* onFlushRP,
const GrShape& shape, const MaskTransform& m,
CreateIfAbsent createIfAbsent) {
GrCCPathCache::OnFlushEntryRef GrCCPathCache::find(
GrOnFlushResourceProvider* onFlushRP, const GrShape& shape,
const SkIRect& clippedDrawBounds, const SkMatrix& viewMatrix, SkIVector* maskShift) {
if (!shape.hasUnstyledKey()) {
return OnFlushEntryRef();
}
@@ -174,6 +174,7 @@ GrCCPathCache::OnFlushEntryRef GrCCPathCache::find(GrOnFlushResourceProvider* on
fScratchKey->resetDataCountU32(writeKeyHelper.allocCountU32());
writeKeyHelper.write(shape, fScratchKey->data());
MaskTransform m(viewMatrix, maskShift);
GrCCPathCacheEntry* entry = nullptr;
if (HashNode* node = fHashTable.find(*fScratchKey)) {
entry = node->entry();
@@ -181,11 +182,12 @@ GrCCPathCache::OnFlushEntryRef GrCCPathCache::find(GrOnFlushResourceProvider* on
if (!fuzzy_equals(m, entry->fMaskTransform)) {
// The path was reused with an incompatible matrix.
if (CreateIfAbsent::kYes == createIfAbsent && entry->unique()) {
if (entry->unique()) {
// This entry is unique: recycle it instead of deleting and malloc-ing a new one.
SkASSERT(0 == entry->fOnFlushRefCnt); // Because we are unique.
entry->fMaskTransform = m;
entry->fHitCount = 0;
entry->fHitRect = SkIRect::MakeEmpty();
entry->releaseCachedAtlas(this);
} else {
this->evict(*fScratchKey);
@@ -195,9 +197,6 @@ GrCCPathCache::OnFlushEntryRef GrCCPathCache::find(GrOnFlushResourceProvider* on
}
if (!entry) {
if (CreateIfAbsent::kNo == createIfAbsent) {
return OnFlushEntryRef();
}
if (fHashTable.count() >= kMaxCacheCount) {
SkDEBUGCODE(HashNode* node = fHashTable.find(*fLRU.tail()->fCacheKey));
SkASSERT(node && node->entry() == fLRU.tail());
@@ -241,6 +240,7 @@ GrCCPathCache::OnFlushEntryRef GrCCPathCache::find(GrOnFlushResourceProvider* on
}
}
}
entry->fHitRect.join(clippedDrawBounds.makeOffset(-maskShift->x(), -maskShift->y()));
SkASSERT(!entry->fCachedAtlas || entry->fCachedAtlas->getOnFlushProxy());
return OnFlushEntryRef::OnFlushRef(entry);
}
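The fHitRect.join() above is what accumulates the hit rect: 'clippedDrawBounds' arrives in device space, while the hit rect lives in the mask's local space, so each draw's bounds are shifted by -maskShift before being joined. A minimal sketch with invented values:

    #include "include/core/SkRect.h"

    // The same path drawn twice; both lookups yield maskShift = (10, 0)
    // because the view matrices are fuzzy-equal up to that integer translation.
    static SkIRect accumulateHitRect() {
        SkIRect hitRect = SkIRect::MakeEmpty();

        SkIRect draw1 = SkIRect::MakeXYWH(10, 0, 50, 100);  // left half visible
        hitRect.join(draw1.makeOffset(-10, 0));             // hitRect = {0,0,50,100}

        SkIRect draw2 = SkIRect::MakeXYWH(60, 0, 50, 100);  // right half visible
        hitRect.join(draw2.makeOffset(-10, 0));             // hitRect = {0,0,100,100}

        return hitRect;  // Now covers the whole 100x100 mask in local space.
    }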


@@ -99,15 +99,15 @@ public:
GrCCPathCacheEntry* fEntry = nullptr;
};
enum class CreateIfAbsent : bool {
kNo = false,
kYes = true
};
// Finds an entry in the cache. Shapes are only given one entry, so any time they are accessed
// with a different MaskTransform, the old entry gets evicted.
OnFlushEntryRef find(GrOnFlushResourceProvider*, const GrShape&, const MaskTransform&,
CreateIfAbsent = CreateIfAbsent::kNo);
// Finds an entry in the cache that matches the given shape and transformation matrix.
// 'maskShift' is filled with an integer post-translate that the caller must apply when drawing
// the entry's mask to the device.
//
// NOTE: Shapes are only given one entry, so any time they are accessed with a new
// transformation, the old entry gets evicted.
OnFlushEntryRef find(GrOnFlushResourceProvider*, const GrShape&,
const SkIRect& clippedDrawBounds, const SkMatrix& viewMatrix,
SkIVector* maskShift);
void doPreFlushProcessing();
@@ -204,14 +204,17 @@ public:
const GrCCPathCache::Key& cacheKey() const { SkASSERT(fCacheKey); return *fCacheKey; }
// The number of times this specific entry (path + matrix combination) has been pulled from
// the path cache. As long as the caller does exactly one lookup per draw, this translates to
// the number of times the path has been drawn with a compatible matrix.
// The number of flushes during which this specific entry (path + matrix combination) has been
// pulled from the path cache. If a path is pulled from the cache more than once in a single
// flush, the hit count is only incremented once.
//
// If the entry did not previously exist and was created during
// GrCCPathCache::find(.., CreateIfAbsent::kYes), its hit count will be 1.
// If the entry did not previously exist, its hit count will be 1.
int hitCount() const { return fHitCount; }
// The accumulative region of the path that has been drawn during the lifetime of this cache
// entry (as defined by the 'clippedDrawBounds' parameter for GrCCPathCache::find).
const SkIRect& hitRect() const { return fHitRect; }
const GrCCCachedAtlas* cachedAtlas() const { return fCachedAtlas.get(); }
const SkIRect& devIBounds() const { return fDevIBounds; }
@@ -251,6 +254,7 @@ private:
sk_sp<GrCCPathCache::Key> fCacheKey;
GrStdSteadyClock::time_point fTimestamp;
int fHitCount = 0;
SkIRect fHitRect = SkIRect::MakeEmpty();
sk_sp<GrCCCachedAtlas> fCachedAtlas;
SkIVector fAtlasOffset;


@@ -679,6 +679,61 @@ class CCPR_cache_multiFlush : public CCPRCacheTest {
};
DEF_CCPR_TEST(CCPR_cache_multiFlush)
// Ensures a path drawn over multiple tiles gets cached.
class CCPR_cache_multiTileCache : public CCPRCacheTest {
void onRun(skiatest::Reporter* reporter, CCPRPathDrawer& ccpr,
const RecordLastMockAtlasIDs& atlasIDRecorder) override {
// Make sure a path drawn over 9 tiles gets cached (1 tile out of 9 is >10% visibility).
const SkMatrix m0 = SkMatrix::MakeScale(kCanvasSize*3, kCanvasSize*3);
const SkPath p0 = fPaths[0];
for (int i = 0; i < 9; ++i) {
static constexpr int kRowOrder[9] = {0,1,1,0,2,2,2,1,0};
static constexpr int kColumnOrder[9] = {0,0,1,1,0,1,2,2,2};
SkMatrix tileM = m0;
tileM.postTranslate(-kCanvasSize * kColumnOrder[i], -kCanvasSize * kRowOrder[i]);
ccpr.drawPath(p0, tileM);
ccpr.flush();
if (i < 5) {
REPORTER_ASSERT(reporter, 0 == atlasIDRecorder.lastCopyAtlasID());
REPORTER_ASSERT(reporter, 0 != atlasIDRecorder.lastRenderedAtlasID());
} else if (5 == i) {
REPORTER_ASSERT(reporter, 0 != atlasIDRecorder.lastCopyAtlasID());
REPORTER_ASSERT(reporter, 0 == atlasIDRecorder.lastRenderedAtlasID());
} else {
REPORTER_ASSERT(reporter, 0 == atlasIDRecorder.lastCopyAtlasID());
REPORTER_ASSERT(reporter, 0 == atlasIDRecorder.lastRenderedAtlasID());
}
}
// Now make sure paths don't get cached when visibility is <10% for every draw (12 tiles).
const SkMatrix m1 = SkMatrix::MakeScale(kCanvasSize*4, kCanvasSize*3);
const SkPath p1 = fPaths[1];
for (int row = 0; row < 3; ++row) {
for (int col = 0; col < 4; ++col) {
SkMatrix tileM = m1;
tileM.postTranslate(-kCanvasSize * col, -kCanvasSize * row);
ccpr.drawPath(p1, tileM);
ccpr.flush();
REPORTER_ASSERT(reporter, 0 == atlasIDRecorder.lastCopyAtlasID());
REPORTER_ASSERT(reporter, 0 != atlasIDRecorder.lastRenderedAtlasID());
}
}
// Double-check the cache is still intact.
ccpr.drawPath(p0, m0);
ccpr.flush();
REPORTER_ASSERT(reporter, 0 == atlasIDRecorder.lastCopyAtlasID());
REPORTER_ASSERT(reporter, 0 == atlasIDRecorder.lastRenderedAtlasID());
ccpr.drawPath(p1, m1);
ccpr.flush();
REPORTER_ASSERT(reporter, 0 == atlasIDRecorder.lastCopyAtlasID());
REPORTER_ASSERT(reporter, 0 != atlasIDRecorder.lastRenderedAtlasID());
}
};
DEF_CCPR_TEST(CCPR_cache_multiTileCache)
// This test exercises CCPR's cache capabilities by drawing many paths with two different
// transformation matrices. We then vary the matrices independently by whole and partial pixels,
// and verify the caching behaved as expected.