ccpr: Implement path mask caching

Implement caching as follows:

1) Instead of deleting the mainline ccpr atlas when finished, stash it
   away from flush to flush.

2) On subsequent flushes, check the stashed atlas to see if we can
   reuse any of its cacheable paths. Copy reusable paths into 8-bit
   literal coverage atlases and store them in the resource cache.

3) Recycle the stashed atlas texture for the remaining paths in the
   flush.
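
At a high level the flow looks like the sketch below. This is a simplified
illustration of the three steps above, not the actual Skia API; every type
and helper name in it is an invented stand-in.

    // Simplified sketch of the per-flush caching flow described above.
    struct Texture;
    struct Atlas;
    struct PathCache;

    void     copyReusablePathsTo8BitAtlases(PathCache*, Atlas* stashed);
    Texture* detachBackingTexture(Atlas*);    // Frees the atlas, keeps its texture.
    Atlas*   renderRemainingPaths(Texture*);  // May reuse the recycled texture.

    Atlas* gStashedAtlas = nullptr;  // (1) Kept alive from the previous flush.

    void onFlush(PathCache* cache) {
        Texture* recycled = nullptr;
        if (gStashedAtlas) {
            // (2) Copy still-cacheable paths from the stashed fp16 atlas into
            //     permanent 8-bit literal-coverage atlases in the resource cache.
            copyReusablePathsTo8BitAtlases(cache, gStashedAtlas);
            // (3) Recycle the stashed texture for this flush's remaining paths.
            recycled = detachBackingTexture(gStashedAtlas);
        }
        // (1) Stash the new mainline atlas instead of deleting it.
        gStashedAtlas = renderRemainingPaths(recycled);
    }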

Bug: skia:
Change-Id: I9b20fbea708646df1df3a5f9c044e2299706b989
Reviewed-on: https://skia-review.googlesource.com/134703
Commit-Queue: Chris Dalton <csmartdalton@google.com>
Reviewed-by: Robert Phillips <robertphillips@google.com>
Chris Dalton 2018-06-18 09:51:36 -06:00 committed by Skia Commit-Bot
parent ddc0c6006c
commit 4da70190aa
18 changed files with 1085 additions and 180 deletions

View File

@ -320,6 +320,8 @@ skia_gpu_sources = [
"$_src/gpu/ccpr/GrCCDrawPathsOp.h",
"$_src/gpu/ccpr/GrCCGeometry.cpp",
"$_src/gpu/ccpr/GrCCGeometry.h",
"$_src/gpu/ccpr/GrCCPathCache.cpp",
"$_src/gpu/ccpr/GrCCPathCache.h",
"$_src/gpu/ccpr/GrCCPathParser.cpp",
"$_src/gpu/ccpr/GrCCPathParser.h",
"$_src/gpu/ccpr/GrCCPathProcessor.cpp",

View File

@ -29,7 +29,7 @@ sk_sp<GrRenderTargetContext> GrOnFlushResourceProvider::makeRenderTargetContext(
sk_sp<GrSurfaceProxy> proxy =
proxyProvider->createProxy(tmpDesc, origin, SkBackingFit::kExact, SkBudgeted::kYes,
GrInternalSurfaceFlags::kNoPendingIO);
if (!proxy->asRenderTargetProxy()) {
if (!proxy || !proxy->asRenderTargetProxy()) {
return nullptr;
}

View File

@ -137,9 +137,39 @@ void GrCCAtlas::setUserBatchID(int id) {
fUserBatchID = id;
}
static uint32_t next_atlas_unique_id() {
static int32_t nextID;
return sk_atomic_inc(&nextID);
}
const GrUniqueKey& GrCCAtlas::getOrAssignUniqueKey(GrOnFlushResourceProvider* onFlushRP) {
static const GrUniqueKey::Domain kAtlasDomain = GrUniqueKey::GenerateDomain();
if (!fUniqueKey.isValid()) {
GrUniqueKey::Builder builder(&fUniqueKey, kAtlasDomain, 1, "CCPR Atlas");
builder[0] = next_atlas_unique_id();
builder.finish();
if (fTextureProxy->priv().isInstantiated()) {
onFlushRP->assignUniqueKeyToProxy(fUniqueKey, fTextureProxy.get());
}
}
return fUniqueKey;
}
sk_sp<GrCCAtlas::CachedAtlasInfo> GrCCAtlas::refOrMakeCachedAtlasInfo() {
if (!fCachedAtlasInfo) {
fCachedAtlasInfo = sk_make_sp<CachedAtlasInfo>();
}
return fCachedAtlasInfo;
}
sk_sp<GrRenderTargetContext> GrCCAtlas::makeRenderTargetContext(
GrOnFlushResourceProvider* onFlushRP) {
SkASSERT(!fTextureProxy->priv().isInstantiated()); // This method should only be called once.
// Caller should have cropped any paths to the destination render target instead of asking for
// an atlas larger than maxRenderTargetSize.
SkASSERT(SkTMax(fHeight, fWidth) <= fMaxTextureSize);
SkASSERT(fMaxTextureSize <= onFlushRP->caps()->maxRenderTargetSize());
sk_sp<GrRenderTargetContext> rtc =
@ -150,6 +180,10 @@ sk_sp<GrRenderTargetContext> GrCCAtlas::makeRenderTargetContext(
return nullptr;
}
if (fUniqueKey.isValid()) {
onFlushRP->assignUniqueKeyToProxy(fUniqueKey, fTextureProxy.get());
}
SkIRect clearRect = SkIRect::MakeSize(fDrawBounds);
rtc->clear(&clearRect, 0, GrRenderTargetContext::CanClearFullscreen::kYes);
return rtc;

View File

@ -9,6 +9,8 @@
#define GrCCAtlas_DEFINED
#include "GrAllocator.h"
#include "GrNonAtomicRef.h"
#include "GrResourceKey.h"
#include "GrTypes.h"
#include "GrTypesPriv.h"
#include "SkRefCnt.h"
@ -18,6 +20,7 @@ class GrOnFlushResourceProvider;
class GrRenderTargetContext;
class GrTextureProxy;
struct SkIPoint16;
struct SkIRect;
/**
* This class implements a dynamic size GrRectanizer that grows until it reaches the implementation-
@ -58,6 +61,22 @@ public:
void setUserBatchID(int id);
int getUserBatchID() const { return fUserBatchID; }
// Manages a unique resource cache key that gets assigned to the atlas texture. The unique key
// does not get assigned to the texture proxy until it is instantiated.
const GrUniqueKey& getOrAssignUniqueKey(GrOnFlushResourceProvider*);
const GrUniqueKey& uniqueKey() const { return fUniqueKey; }
// An object for simple bookkeeping on the atlas texture once it has a unique key. In practice,
// we use it to track the percentage of the original atlas pixels that could still ever
// potentially be reused (i.e., those which still represent an extant path). When the percentage
// of useful pixels drops below 50%, the entire texture is purged from the resource cache.
struct CachedAtlasInfo : public GrNonAtomicRef<CachedAtlasInfo> {
int fNumPathPixels = 0;
int fNumInvalidatedPathPixels = 0;
bool fIsPurgedFromResourceCache = false;
};
sk_sp<CachedAtlasInfo> refOrMakeCachedAtlasInfo();
// Instantiates our texture proxy for the atlas and returns a pre-cleared GrRenderTargetContext
// that the caller may use to render the content. After this call, it is no longer valid to call
// addRect(), setUserBatchID(), or this method again.
@ -74,6 +93,12 @@ private:
SkISize fDrawBounds = {0, 0};
int fUserBatchID;
// Not every atlas will have a unique key -- a mainline CCPR one won't if we don't stash any
// paths, and only the first atlas in the stack is eligible to be stashed.
GrUniqueKey fUniqueKey;
sk_sp<CachedAtlasInfo> fCachedAtlasInfo;
sk_sp<GrTextureProxy> fTextureProxy;
};
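
To make the "below 50%" rule above concrete, here is a standalone sketch of
the bookkeeping that happens when a cached path is evicted (plain C++; the
postPurgeMessage() helper is a stand-in for posting Skia's
GrUniqueKeyInvalidatedMessage, as GrCCPathCache.cpp does below).

    // Sketch of the CachedAtlasInfo invalidation bookkeeping.
    struct CachedAtlasInfo {
        int  fNumPathPixels = 0;            // Pixels of all paths placed in the atlas.
        int  fNumInvalidatedPathPixels = 0; // Pixels of paths since evicted.
        bool fIsPurgedFromResourceCache = false;
    };

    void postPurgeMessage();  // Stand-in: tells the resource cache to drop the texture.

    // Called when one cached path of size w x h is evicted.
    void onPathEvicted(CachedAtlasInfo* info, int w, int h) {
        info->fNumInvalidatedPathPixels += w * h;
        // Once at least half the useful pixels are gone, purge the whole texture.
        if (!info->fIsPurgedFromResourceCache &&
            info->fNumInvalidatedPathPixels >= info->fNumPathPixels / 2) {
            postPurgeMessage();
            info->fIsPurgedFromResourceCache = true;
        }
    }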

View File

@ -48,15 +48,15 @@ void GrCCClipPath::init(const SkPath& deviceSpacePath, const SkIRect& accessRect
fAccessRect = accessRect;
}
void GrCCClipPath::accountForOwnPath(GrCCPerFlushResourceSpecs* resourceSpecs) const {
void GrCCClipPath::accountForOwnPath(GrCCPerFlushResourceSpecs* specs) const {
SkASSERT(this->isInitialized());
++resourceSpecs->fNumClipPaths;
resourceSpecs->fParsingPathStats.statPath(fDeviceSpacePath);
++specs->fNumClipPaths;
specs->fRenderedPathStats.statPath(fDeviceSpacePath);
SkIRect ibounds;
if (ibounds.intersect(fAccessRect, fPathDevIBounds)) {
resourceSpecs->fAtlasSpecs.accountForSpace(ibounds.width(), ibounds.height());
specs->fRenderedAtlasSpecs.accountForSpace(ibounds.width(), ibounds.height());
}
}

View File

@ -7,7 +7,6 @@
#include "GrCCDrawPathsOp.h"
#include "GrGpuCommandBuffer.h"
#include "GrMemoryPool.h"
#include "GrOpFlushState.h"
#include "ccpr/GrCCPerFlushResources.h"
@ -24,23 +23,30 @@ static bool has_coord_transforms(const GrPaint& paint) {
}
std::unique_ptr<GrCCDrawPathsOp> GrCCDrawPathsOp::Make(GrContext*, const SkIRect& clipIBounds,
const SkMatrix& m, const SkPath& path,
const SkMatrix& m, const GrShape& shape,
const SkRect& devBounds, GrPaint&& paint) {
bool canStashPathMask = true;
SkIRect looseClippedIBounds;
devBounds.roundOut(&looseClippedIBounds); // GrCCPathParser might find slightly tighter bounds.
if (!looseClippedIBounds.intersect(clipIBounds)) {
return nullptr;
if (!clipIBounds.contains(looseClippedIBounds)) {
canStashPathMask = false;
if (!looseClippedIBounds.intersect(clipIBounds)) {
return nullptr;
}
}
return std::unique_ptr<GrCCDrawPathsOp>(
new GrCCDrawPathsOp(looseClippedIBounds, m, path, devBounds, std::move(paint)));
return std::unique_ptr<GrCCDrawPathsOp>(new GrCCDrawPathsOp(looseClippedIBounds, m, shape,
canStashPathMask, devBounds,
std::move(paint)));
}
GrCCDrawPathsOp::GrCCDrawPathsOp(const SkIRect& looseClippedIBounds, const SkMatrix& m,
const SkPath& path, const SkRect& devBounds, GrPaint&& paint)
const GrShape& shape, bool canStashPathMask,
const SkRect& devBounds, GrPaint&& paint)
: GrDrawOp(ClassID())
, fViewMatrixIfUsingLocalCoords(has_coord_transforms(paint) ? m : SkMatrix::I())
, fSRGBFlags(GrPipeline::SRGBFlagsFromPaint(paint))
, fDraws({looseClippedIBounds, m, path, paint.getColor(), nullptr})
, fDraws({looseClippedIBounds, m, shape, paint.getColor(), nullptr, nullptr, {0, 0},
canStashPathMask, nullptr})
, fProcessors(std::move(paint)) { // Paint must be moved after fetching its color above.
SkDEBUGCODE(fBaseInstance = -1);
// FIXME: intersect with clip bounds to (hopefully) improve batching.
@ -55,18 +61,24 @@ GrCCDrawPathsOp::~GrCCDrawPathsOp() {
}
}
GrCCDrawPathsOp::SingleDraw::~SingleDraw() {
if (fCacheEntry) {
// All currFlushAtlas references must be reset back to null before the flush is finished.
fCacheEntry->setCurrFlushAtlas(nullptr);
}
}
GrDrawOp::RequiresDstTexture GrCCDrawPathsOp::finalize(const GrCaps& caps,
const GrAppliedClip* clip,
GrPixelConfigIsClamped dstIsClamped) {
// There should only be one single path draw in this Op right now.
SkASSERT(1 == fNumDraws);
SkASSERT(1 == fNumDraws); // There should only be one single path draw in this Op right now.
GrProcessorSet::Analysis analysis =
fProcessors.finalize(fDraws.head().fColor, GrProcessorAnalysisCoverage::kSingleChannel,
clip, false, caps, dstIsClamped, &fDraws.head().fColor);
return analysis.requiresDstTexture() ? RequiresDstTexture::kYes : RequiresDstTexture::kNo;
return RequiresDstTexture(analysis.requiresDstTexture());
}
bool GrCCDrawPathsOp::onCombineIfPossible(GrOp* op, const GrCaps& caps) {
bool GrCCDrawPathsOp::onCombineIfPossible(GrOp* op, const GrCaps&) {
GrCCDrawPathsOp* that = op->cast<GrCCDrawPathsOp>();
SkASSERT(fOwningPerOpListPaths);
SkASSERT(fNumDraws);
@ -93,50 +105,158 @@ void GrCCDrawPathsOp::wasRecorded(GrCCPerOpListPaths* owningPerOpListPaths) {
fOwningPerOpListPaths = owningPerOpListPaths;
}
void GrCCDrawPathsOp::accountForOwnPaths(GrCCPerFlushResourceSpecs* resourceSpecs) const {
for (const GrCCDrawPathsOp::SingleDraw& draw : fDraws) {
++resourceSpecs->fNumRenderedPaths;
resourceSpecs->fParsingPathStats.statPath(draw.fPath);
resourceSpecs->fAtlasSpecs.accountForSpace(draw.fLooseClippedIBounds.width(),
void GrCCDrawPathsOp::accountForOwnPaths(GrCCPathCache* pathCache,
GrOnFlushResourceProvider* onFlushRP,
const GrUniqueKey& stashedAtlasKey,
GrCCPerFlushResourceSpecs* specs) {
using CreateIfAbsent = GrCCPathCache::CreateIfAbsent;
using MaskTransform = GrCCPathCache::MaskTransform;
for (SingleDraw& draw : fDraws) {
SkASSERT(!draw.fCacheEntry);
SkPath path;
draw.fShape.asPath(&path);
MaskTransform m(draw.fMatrix, &draw.fCachedMaskShift);
draw.fCacheEntry = pathCache->find(draw.fShape, m, CreateIfAbsent(draw.fCanStashPathMask));
if (auto cacheEntry = draw.fCacheEntry.get()) {
SkASSERT(!cacheEntry->currFlushAtlas()); // Shouldn't be set until setupResources().
if (cacheEntry->atlasKey().isValid()) {
// Does the path already exist in a cached atlas?
if (cacheEntry->hasCachedAtlas() &&
(draw.fCachedAtlasProxy = onFlushRP->findOrCreateProxyByUniqueKey(
cacheEntry->atlasKey(),
GrCCAtlas::kTextureOrigin))) {
++specs->fNumCachedPaths;
continue;
}
// Does the path exist in the atlas that we stashed away from last flush? If so we
// can copy it into a new 8-bit atlas and keep it in the resource cache.
if (stashedAtlasKey.isValid() && stashedAtlasKey == cacheEntry->atlasKey()) {
SkASSERT(!cacheEntry->hasCachedAtlas());
++specs->fNumCopiedPaths;
specs->fCopyPathStats.statPath(path);
specs->fCopyAtlasSpecs.accountForSpace(cacheEntry->width(),
cacheEntry->height());
continue;
}
// Whatever atlas the path used to reside in, it no longer exists.
cacheEntry->resetAtlasKeyAndInfo();
}
if (!draw.fCanStashPathMask) {
// No point in keeping this cache entry around anymore if we aren't going to try and
// stash the rendered path mask after flush.
draw.fCacheEntry = nullptr;
pathCache->evict(cacheEntry);
}
}
++specs->fNumRenderedPaths;
specs->fRenderedPathStats.statPath(path);
specs->fRenderedAtlasSpecs.accountForSpace(draw.fLooseClippedIBounds.width(),
draw.fLooseClippedIBounds.height());
}
}
void GrCCDrawPathsOp::setupResources(GrCCPerFlushResources* resources,
GrOnFlushResourceProvider* onFlushRP) {
const GrCCAtlas* currentAtlas = nullptr;
void GrCCDrawPathsOp::setupResources(GrOnFlushResourceProvider* onFlushRP,
GrCCPerFlushResources* resources, DoCopiesToCache doCopies) {
using DoEvenOddFill = GrCCPathProcessor::DoEvenOddFill;
SkASSERT(fNumDraws > 0);
SkASSERT(-1 == fBaseInstance);
fBaseInstance = resources->nextPathInstanceIdx();
for (const SingleDraw& draw : fDraws) {
// renderPathInAtlas gives us two tight bounding boxes: one in device space, as well as a
// second one rotated an additional 45 degrees. The path vertex shader uses these two
// bounding boxes to generate an octagon that circumscribes the path.
for (SingleDraw& draw : fDraws) {
SkPath path;
draw.fShape.asPath(&path);
auto doEvenOddFill = DoEvenOddFill(SkPath::kEvenOdd_FillType == path.getFillType());
SkASSERT(SkPath::kEvenOdd_FillType == path.getFillType() ||
SkPath::kWinding_FillType == path.getFillType());
if (auto cacheEntry = draw.fCacheEntry.get()) {
// Does the path already exist in a cached atlas texture?
if (auto proxy = draw.fCachedAtlasProxy.get()) {
SkASSERT(!cacheEntry->currFlushAtlas());
this->recordInstance(proxy, resources->nextPathInstanceIdx());
resources->appendDrawPathInstance().set(*cacheEntry, draw.fCachedMaskShift,
draw.fColor);
continue;
}
// Have we already encountered this path during the flush? (i.e. was the same SkPath
// drawn more than once during the same flush, with a compatible matrix?)
if (auto atlas = cacheEntry->currFlushAtlas()) {
this->recordInstance(atlas->textureProxy(), resources->nextPathInstanceIdx());
resources->appendDrawPathInstance().set(
*cacheEntry, draw.fCachedMaskShift, draw.fColor,
cacheEntry->hasCachedAtlas() ? DoEvenOddFill::kNo : doEvenOddFill);
continue;
}
// If the cache entry still has a valid atlas key at this point, it means the path
// exists in the atlas that we stashed away from last flush. Copy it into a permanent
// 8-bit atlas in the resource cache.
if (DoCopiesToCache::kYes == doCopies && cacheEntry->atlasKey().isValid()) {
SkIVector newOffset;
GrCCAtlas* atlas =
resources->copyPathToCachedAtlas(*cacheEntry, doEvenOddFill, &newOffset);
cacheEntry->updateToCachedAtlas(atlas->getOrAssignUniqueKey(onFlushRP),
newOffset, atlas->refOrMakeCachedAtlasInfo());
this->recordInstance(atlas->textureProxy(), resources->nextPathInstanceIdx());
resources->appendDrawPathInstance().set(*cacheEntry, draw.fCachedMaskShift,
draw.fColor);
// Remember this atlas in case we encounter the path again during the same flush.
cacheEntry->setCurrFlushAtlas(atlas);
continue;
}
}
// Render the raw path into a coverage count atlas. renderPathInAtlas() gives us two tight
// bounding boxes: One in device space, as well as a second one rotated an additional 45
// degrees. The path vertex shader uses these two bounding boxes to generate an octagon that
// circumscribes the path.
SkASSERT(!draw.fCachedAtlasProxy);
SkRect devBounds, devBounds45;
SkIRect devIBounds;
SkIVector devToAtlasOffset;
const GrCCAtlas* atlas = resources->renderPathInAtlas(draw.fLooseClippedIBounds,
draw.fMatrix, draw.fPath, &devBounds,
&devBounds45, &devToAtlasOffset);
if (!atlas) {
SkDEBUGCODE(++fNumSkippedInstances);
if (auto atlas = resources->renderPathInAtlas(draw.fLooseClippedIBounds, draw.fMatrix, path,
&devBounds, &devBounds45, &devIBounds,
&devToAtlasOffset)) {
this->recordInstance(atlas->textureProxy(), resources->nextPathInstanceIdx());
resources->appendDrawPathInstance().set(devBounds, devBounds45, devToAtlasOffset,
draw.fColor, doEvenOddFill);
if (draw.fCacheEntry && draw.fCanStashPathMask &&
resources->nextAtlasToStash() == atlas) {
const GrUniqueKey& atlasKey =
resources->nextAtlasToStash()->getOrAssignUniqueKey(onFlushRP);
draw.fCacheEntry->initAsStashedAtlas(atlasKey, devToAtlasOffset, devBounds,
devBounds45, devIBounds,
draw.fCachedMaskShift);
// Remember this atlas in case we encounter the path again during the same flush.
draw.fCacheEntry->setCurrFlushAtlas(atlas);
}
continue;
}
if (currentAtlas != atlas) {
if (currentAtlas) {
this->recordInstanceRange(currentAtlas->textureProxy(),
resources->nextPathInstanceIdx());
}
currentAtlas = atlas;
}
resources->appendDrawPathInstance().set(draw.fPath.getFillType(), devBounds, devBounds45,
devToAtlasOffset, draw.fColor);
}
SkASSERT(resources->nextPathInstanceIdx() == fBaseInstance + fNumDraws - fNumSkippedInstances);
if (currentAtlas) {
this->recordInstanceRange(currentAtlas->textureProxy(), resources->nextPathInstanceIdx());
if (!fInstanceRanges.empty()) {
fInstanceRanges.back().fEndInstanceIdx = resources->nextPathInstanceIdx();
}
}
inline void GrCCDrawPathsOp::recordInstance(const GrTextureProxy* atlasProxy, int instanceIdx) {
if (fInstanceRanges.empty()) {
fInstanceRanges.push_back({atlasProxy, instanceIdx});
return;
}
if (fInstanceRanges.back().fAtlasProxy != atlasProxy) {
fInstanceRanges.back().fEndInstanceIdx = instanceIdx;
fInstanceRanges.push_back({atlasProxy, instanceIdx});
return;
}
}
@ -148,8 +268,6 @@ void GrCCDrawPathsOp::onExecute(GrOpFlushState* flushState) {
return; // Setup failed.
}
SkASSERT(fBaseInstance >= 0); // Make sure setupResources has been called.
GrPipeline::InitArgs initArgs;
initArgs.fFlags = fSRGBFlags;
initArgs.fProxy = flushState->drawOpArgs().fProxy;
@ -159,18 +277,16 @@ void GrCCDrawPathsOp::onExecute(GrOpFlushState* flushState) {
GrPipeline pipeline(initArgs, std::move(fProcessors), flushState->detachAppliedClip());
int baseInstance = fBaseInstance;
SkASSERT(baseInstance >= 0); // Make sure setupResources() has been called.
for (const InstanceRange& range : fInstanceRanges) {
SkASSERT(range.fEndInstanceIdx > baseInstance);
GrCCPathProcessor pathProc(flushState->resourceProvider(), sk_ref_sp(range.fAtlasProxy),
fViewMatrixIfUsingLocalCoords);
pathProc.drawPaths(flushState, pipeline, resources->indexBuffer(),
resources->vertexBuffer(), resources->instanceBuffer(),
baseInstance, range.fEndInstanceIdx, this->bounds());
pathProc.drawPaths(flushState, pipeline, *resources, baseInstance, range.fEndInstanceIdx,
this->bounds());
baseInstance = range.fEndInstanceIdx;
}
SkASSERT(baseInstance == fBaseInstance + fNumDraws - fNumSkippedInstances);
}
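
Taken together, accountForOwnPaths() and setupResources() implement a
per-draw decision tree. The sketch below summarizes it with invented
stand-in types; it is not the actual Skia code.

    // Sketch of how each draw is resolved, cheapest option first.
    enum class DrawPlan {
        kDrawFromCachedAtlas,     // Path already lives in a permanent 8-bit atlas.
        kDrawFromThisFlushAtlas,  // Same path was already placed earlier this flush.
        kCopyFromStashThenDraw,   // Path lives in last flush's stashed fp16 atlas.
        kRenderFromScratch,       // Full coverage-count render into a new atlas.
    };

    struct CacheEntry {
        bool fHasCachedAtlas;   // In a permanent 8-bit atlas?
        bool fSeenThisFlush;    // Already rendered/copied during this flush?
        bool fMatchesStashKey;  // In the atlas stashed from last flush?
    };

    DrawPlan planDraw(const CacheEntry* entry) {
        if (entry) {
            if (entry->fHasCachedAtlas)  { return DrawPlan::kDrawFromCachedAtlas; }
            if (entry->fSeenThisFlush)   { return DrawPlan::kDrawFromThisFlushAtlas; }
            if (entry->fMatchesStashKey) { return DrawPlan::kCopyFromStashThenDraw; }
        }
        return DrawPlan::kRenderFromScratch;
    }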

View File

@ -8,13 +8,16 @@
#ifndef GrCCDrawPathsOp_DEFINED
#define GrCCDrawPathsOp_DEFINED
#include "GrShape.h"
#include "SkTInternalLList.h"
#include "ccpr/GrCCPathProcessor.h"
#include "ccpr/GrCCSTLList.h"
#include "ops/GrDrawOp.h"
struct GrCCPerFlushResourceSpecs;
class GrCCAtlas;
class GrOnFlushResourceProvider;
class GrCCPathCache;
class GrCCPathCacheEntry;
class GrCCPerFlushResources;
class GrCCPerOpListPaths;
@ -27,53 +30,70 @@ public:
SK_DECLARE_INTERNAL_LLIST_INTERFACE(GrCCDrawPathsOp);
static std::unique_ptr<GrCCDrawPathsOp> Make(GrContext*, const SkIRect& clipIBounds,
const SkMatrix&, const SkPath&,
const SkMatrix&, const GrShape&,
const SkRect& devBounds, GrPaint&&);
~GrCCDrawPathsOp() override;
const char* name() const override { return "GrCCDrawOp"; }
const char* name() const override { return "GrCCDrawPathsOp"; }
FixedFunctionFlags fixedFunctionFlags() const override { return FixedFunctionFlags::kNone; }
RequiresDstTexture finalize(const GrCaps&, const GrAppliedClip*,
GrPixelConfigIsClamped) override;
bool onCombineIfPossible(GrOp* other, const GrCaps& caps) override;
void visitProxies(const VisitProxyFunc& func) const override {
fProcessors.visitProxies(func);
}
bool onCombineIfPossible(GrOp*, const GrCaps&) override;
void visitProxies(const VisitProxyFunc& fn) const override { fProcessors.visitProxies(fn); }
void onPrepare(GrOpFlushState*) override {}
void wasRecorded(GrCCPerOpListPaths* owningPerOpListPaths);
void accountForOwnPaths(GrCCPerFlushResourceSpecs*) const;
void setupResources(GrCCPerFlushResources*, GrOnFlushResourceProvider*);
SkDEBUGCODE(int numSkippedInstances_debugOnly() const { return fNumSkippedInstances; })
// Makes decisions about how to draw each path (cached, copied, rendered, etc.), and
// increments/fills out the corresponding GrCCPerFlushResourceSpecs. 'stashedAtlasKey', if
// valid, references the mainline coverage count atlas from the previous flush. Paths found in
// this atlas will be copied to more permanent atlases in the resource cache.
void accountForOwnPaths(GrCCPathCache*, GrOnFlushResourceProvider*,
const GrUniqueKey& stashedAtlasKey, GrCCPerFlushResourceSpecs*);
// Allows the caller to decide whether to copy paths out of the stashed atlas and into the
// resource cache, or to just re-render the paths from scratch. If there aren't many copies or
// the copies would only fill a small atlas, it's probably best to just re-render.
enum class DoCopiesToCache : bool {
kNo = false,
kYes = true
};
// Allocates the GPU resources indicated by accountForOwnPaths(), in preparation for drawing. If
// DoCopiesToCache is kNo, the paths slated for copy will instead be re-rendered from scratch.
//
// NOTE: If using DoCopiesToCache::kNo, it is the caller's responsibility to call
// convertCopiesToRenders() on the GrCCPerFlushResourceSpecs.
void setupResources(GrOnFlushResourceProvider*, GrCCPerFlushResources*, DoCopiesToCache);
void onExecute(GrOpFlushState*) override;
private:
friend class GrOpMemoryPool;
GrCCDrawPathsOp(const SkIRect& looseClippedIBounds, const SkMatrix&, const SkPath&,
const SkRect& devBounds, GrPaint&&);
GrCCDrawPathsOp(const SkIRect& clippedDevIBounds, const SkMatrix&, const GrShape&,
bool canStashPathMask, const SkRect& devBounds, GrPaint&&);
struct InstanceRange {
const GrTextureProxy* fAtlasProxy;
int fEndInstanceIdx;
};
void recordInstanceRange(const GrTextureProxy* atlasProxy, int endInstanceIdx) {
SkASSERT(endInstanceIdx > fBaseInstance);
SkASSERT(fInstanceRanges.empty() ||
endInstanceIdx > fInstanceRanges.back().fEndInstanceIdx);
fInstanceRanges.push_back() = {atlasProxy, endInstanceIdx};
}
void recordInstance(const GrTextureProxy* atlasProxy, int instanceIdx);
const SkMatrix fViewMatrixIfUsingLocalCoords;
const uint32_t fSRGBFlags;
struct SingleDraw {
~SingleDraw();
SkIRect fLooseClippedIBounds;
SkMatrix fMatrix;
SkPath fPath;
GrShape fShape;
GrColor fColor;
sk_sp<GrCCPathCacheEntry> fCacheEntry;
sk_sp<GrTextureProxy> fCachedAtlasProxy;
SkIVector fCachedMaskShift;
// If we render the path, can we stash its atlas and copy to the resource cache next flush?
bool fCanStashPathMask;
SingleDraw* fNext;
};
@ -83,9 +103,13 @@ private:
GrCCPerOpListPaths* fOwningPerOpListPaths = nullptr;
GrProcessorSet fProcessors;
int fBaseInstance;
SkSTArray<1, InstanceRange, true> fInstanceRanges;
SkDEBUGCODE(int fNumSkippedInstances = 0);
struct InstanceRange {
const GrTextureProxy* fAtlasProxy;
int fEndInstanceIdx;
};
SkSTArray<2, InstanceRange, true> fInstanceRanges;
int fBaseInstance SkDEBUGCODE(= -1);
};
#endif
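
For reference, the DoCopiesToCache decision reduces to a simple volume
heuristic. The sketch below mirrors the thresholds chosen in
GrCoverageCountingPathRenderer::preFlush() later in this change; the
numbers are tuning constants, not part of the API.

    // Sketch of the copy-vs-re-render heuristic.
    enum class DoCopiesToCache : bool { kNo = false, kYes = true };

    DoCopiesToCache shouldCopy(int numCopiedPaths, int approxCopyAtlasPixels) {
        // Copying pays off only when enough paths (or pixels) are being reused;
        // otherwise it's cheaper to just re-render them this flush.
        return DoCopiesToCache(numCopiedPaths > 100 ||
                               approxCopyAtlasPixels > 512 * 256);
    }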

View File

@ -0,0 +1,176 @@
/*
* Copyright 2018 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "GrCCPathCache.h"
#include "GrShape.h"
#include "SkNx.h"
#include "ccpr/GrCCPathParser.h"
// The maximum number of cache entries we allow in our own cache.
static constexpr int kMaxCacheCount = 1 << 16;
GrCCPathCache::MaskTransform::MaskTransform(const SkMatrix& m, SkIVector* shift)
: fMatrix2x2{m.getScaleX(), m.getSkewX(), m.getSkewY(), m.getScaleY()} {
SkASSERT(!m.hasPerspective());
Sk2f translate = Sk2f(m.getTranslateX(), m.getTranslateY());
Sk2f floor = translate.floor();
(translate - floor).store(fSubpixelTranslate);
shift->set((int)floor[0], (int)floor[1]);
SkASSERT((float)shift->fX == floor[0]);
SkASSERT((float)shift->fY == floor[1]);
}
inline static bool fuzzy_equals(const GrCCPathCache::MaskTransform& a,
const GrCCPathCache::MaskTransform& b) {
return (Sk4f::Load(a.fMatrix2x2) == Sk4f::Load(b.fMatrix2x2)).allTrue() &&
((Sk2f::Load(a.fSubpixelTranslate) -
Sk2f::Load(b.fSubpixelTranslate)).abs() < 1.f/256).allTrue();
}
inline GrCCPathCache::HashNode::HashNode(GrCCPathCache* cache, const MaskTransform& m,
const GrShape& shape) {
SkASSERT(shape.hasUnstyledKey());
int keyLength = 1 + shape.unstyledKeySize();
void* mem = ::operator new (sizeof(GrCCPathCacheEntry) + keyLength * sizeof(uint32_t));
fEntry = new (mem) GrCCPathCacheEntry(cache, m);
// The shape key is a variable-length footer to the entry allocation.
uint32_t* keyData = (uint32_t*)((char*)mem + sizeof(GrCCPathCacheEntry));
keyData[0] = keyLength - 1;
shape.writeUnstyledKey(&keyData[1]);
}
inline bool operator==(const GrCCPathCache::HashKey& key1, const GrCCPathCache::HashKey& key2) {
return key1.fData[0] == key2.fData[0] &&
!memcmp(&key1.fData[1], &key2.fData[1], key1.fData[0] * sizeof(uint32_t));
}
inline GrCCPathCache::HashKey GrCCPathCache::HashNode::GetKey(const GrCCPathCacheEntry* entry) {
// The shape key is a variable-length footer to the entry allocation.
return HashKey{(const uint32_t*)((const char*)entry + sizeof(GrCCPathCacheEntry))};
}
inline uint32_t GrCCPathCache::HashNode::Hash(HashKey key) {
return GrResourceKeyHash(&key.fData[1], key.fData[0]);
}
GrCCPathCache::HashNode::~HashNode() {
if (!fEntry) {
return;
}
// Finalize our eviction from the path cache.
SkASSERT(fEntry->fCacheWeakPtr);
fEntry->fCacheWeakPtr->fLRU.remove(fEntry);
fEntry->fCacheWeakPtr = nullptr;
if (GrCCAtlas::CachedAtlasInfo* info = fEntry->fCachedAtlasInfo.get()) {
// Mark our own pixels invalid in the cached atlas texture now that we have been evicted.
info->fNumInvalidatedPathPixels += fEntry->height() * fEntry->width();
if (!info->fIsPurgedFromResourceCache &&
info->fNumInvalidatedPathPixels >= info->fNumPathPixels / 2) {
// Too many invalidated pixels: purge the atlas texture from the resource cache.
SkMessageBus<GrUniqueKeyInvalidatedMessage>::Post(
GrUniqueKeyInvalidatedMessage(fEntry->fAtlasKey));
info->fIsPurgedFromResourceCache = true;
}
}
fEntry->unref();
}
GrCCPathCache::HashNode& GrCCPathCache::HashNode::operator=(HashNode&& node) {
this->~HashNode();
return *new (this) HashNode(std::move(node));
}
sk_sp<GrCCPathCacheEntry> GrCCPathCache::find(const GrShape& shape, const MaskTransform& m,
CreateIfAbsent createIfAbsent) {
if (!shape.hasUnstyledKey()) {
return nullptr;
}
int keyLength = 1 + shape.unstyledKeySize();
SkAutoSTMalloc<GrShape::kMaxKeyFromDataVerbCnt * 4, uint32_t> keyData(keyLength);
keyData[0] = keyLength - 1;
shape.writeUnstyledKey(&keyData[1]);
GrCCPathCacheEntry* entry = nullptr;
if (HashNode* node = fHashTable.find({keyData.get()})) {
entry = node->entry();
SkASSERT(this == entry->fCacheWeakPtr);
if (!fuzzy_equals(m, entry->fMaskTransform)) {
this->evict(entry); // The path was reused with an incompatible matrix.
entry = nullptr;
}
}
if (!entry) {
if (CreateIfAbsent::kNo == createIfAbsent) {
return nullptr;
}
if (fHashTable.count() >= kMaxCacheCount) {
this->evict(fLRU.tail()); // We've exceeded our limit.
}
entry = fHashTable.set(HashNode(this, m, shape))->entry();
SkASSERT(fHashTable.count() <= kMaxCacheCount);
} else {
fLRU.remove(entry); // Will be re-added at head.
}
fLRU.addToHead(entry);
return sk_ref_sp(entry);
}
void GrCCPathCache::evict(const GrCCPathCacheEntry* entry) {
SkASSERT(entry);
SkASSERT(this == entry->fCacheWeakPtr);
SkASSERT(fLRU.isInList(entry));
SkASSERT(fHashTable.find(HashNode::GetKey(entry))->entry() == entry);
fHashTable.remove(HashNode::GetKey(entry)); // ~HashNode() handles the rest.
}
void GrCCPathCacheEntry::initAsStashedAtlas(const GrUniqueKey& atlasKey,
const SkIVector& atlasOffset, const SkRect& devBounds,
const SkRect& devBounds45, const SkIRect& devIBounds,
const SkIVector& maskShift) {
SkASSERT(atlasKey.isValid());
SkASSERT(!fCurrFlushAtlas); // Otherwise we should reuse the atlas from last time.
fAtlasKey = atlasKey;
fAtlasOffset = atlasOffset + maskShift;
SkASSERT(!fCachedAtlasInfo); // Otherwise they should have reused the cached atlas instead.
float dx = (float)maskShift.fX, dy = (float)maskShift.fY;
fDevBounds = devBounds.makeOffset(-dx, -dy);
fDevBounds45 = GrCCPathProcessor::MakeOffset45(devBounds45, -dx, -dy);
fDevIBounds = devIBounds.makeOffset(-maskShift.fX, -maskShift.fY);
}
void GrCCPathCacheEntry::updateToCachedAtlas(const GrUniqueKey& atlasKey,
const SkIVector& newAtlasOffset,
sk_sp<GrCCAtlas::CachedAtlasInfo> info) {
SkASSERT(atlasKey.isValid());
SkASSERT(!fCurrFlushAtlas); // Otherwise we should reuse the atlas from last time.
fAtlasKey = atlasKey;
fAtlasOffset = newAtlasOffset;
SkASSERT(!fCachedAtlasInfo); // Otherwise we need to invalidate our pixels in the old info.
fCachedAtlasInfo = std::move(info);
fCachedAtlasInfo->fNumPathPixels += this->height() * this->width();
}
void GrCCPathCacheEntry::onChange() {
// Our corresponding path was modified or deleted. Evict ourselves.
if (fCacheWeakPtr) {
fCacheWeakPtr->evict(this);
}
}
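
The matrix-compatibility test used by find() is worth spelling out: two
transforms may share a cached mask only if their 2x2 parts match exactly
and their subpixel translates agree to within 1/256 of a pixel. A scalar
sketch of fuzzy_equals() (the real code above uses SkNx vector types):

    #include <cmath>

    struct MaskTransform {
        float fMatrix2x2[4];         // scaleX, skewX, skewY, scaleY
        float fSubpixelTranslate[2]; // Fractional part of the translate.
    };

    bool fuzzyEquals(const MaskTransform& a, const MaskTransform& b) {
        for (int i = 0; i < 4; ++i) {
            if (a.fMatrix2x2[i] != b.fMatrix2x2[i]) {
                return false;  // The 2x2 part must match exactly.
            }
        }
        for (int i = 0; i < 2; ++i) {
            if (std::fabs(a.fSubpixelTranslate[i] - b.fSubpixelTranslate[i]) >= 1.f/256) {
                return false;  // Subpixel translates must agree to within 1/256 px.
            }
        }
        return true;
    }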

View File

@ -0,0 +1,176 @@
/*
* Copyright 2018 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef GrCCPathCache_DEFINED
#define GrCCPathCache_DEFINED
#include "SkExchange.h"
#include "SkTHash.h"
#include "SkTInternalLList.h"
#include "ccpr/GrCCAtlas.h"
#include "ccpr/GrCCPathProcessor.h"
class GrCCPathCacheEntry;
class GrShape;
/**
* This class implements an LRU cache that maps from GrShape to GrCCPathCacheEntry objects. Shapes
* are only given one entry in the cache, so any time they are accessed with a different matrix, the
* old entry gets evicted.
*/
class GrCCPathCache {
public:
#ifdef SK_DEBUG
~GrCCPathCache() {
// Ensure the hash table and LRU list are still coherent.
fHashTable.reset();
SkASSERT(fLRU.isEmpty());
}
#endif
// Stores the components of a transformation that affect a path mask (i.e. everything but
// integer translation). During construction, any integer portions of the matrix's translate are
// shaved off and returned to the caller. The caller is responsible for those integer shifts.
struct MaskTransform {
MaskTransform(const SkMatrix& m, SkIVector* shift);
float fMatrix2x2[4];
float fSubpixelTranslate[2];
};
enum class CreateIfAbsent : bool {
kNo = false,
kYes = true
};
// Finds an entry in the cache. Shapes are only given one entry, so any time they are accessed
// with a different MaskTransform, the old entry gets evicted.
sk_sp<GrCCPathCacheEntry> find(const GrShape&, const MaskTransform&,
CreateIfAbsent = CreateIfAbsent::kNo);
void evict(const GrCCPathCacheEntry*);
private:
// Wrapper around a raw GrShape key that has a specialized operator==. Used by the hash table.
struct HashKey {
const uint32_t* fData;
};
friend bool operator==(const HashKey&, const HashKey&);
// This is a special ref ptr for GrCCPathCacheEntry, used by the hash table. It can only be
// moved, which guarantees the hash table holds exactly one reference for each entry. When a
// HashNode goes out of scope, it therefore means the entry has been evicted from the cache.
class HashNode : SkNoncopyable {
public:
static HashKey GetKey(const HashNode& node) { return GetKey(node.fEntry); }
static HashKey GetKey(const GrCCPathCacheEntry*);
static uint32_t Hash(HashKey);
HashNode() = default;
HashNode(GrCCPathCache*, const MaskTransform&, const GrShape&);
HashNode(HashNode&& node) { fEntry = skstd::exchange(node.fEntry, nullptr); }
~HashNode(); // Called when fEntry (if not null) has been evicted from the cache.
HashNode& operator=(HashNode&&);
GrCCPathCacheEntry* entry() const { return fEntry; }
private:
GrCCPathCacheEntry* fEntry = nullptr;
// The GrShape's unstyled key is stored as a variable-length footer to the 'fEntry'
// allocation. GetKey provides access to it.
};
SkTHashTable<HashNode, HashKey> fHashTable;
SkTInternalLList<GrCCPathCacheEntry> fLRU;
};
/**
* This class stores all the data necessary to draw a specific path from its corresponding cached
* atlas.
*/
class GrCCPathCacheEntry : public SkPathRef::GenIDChangeListener {
public:
SK_DECLARE_INTERNAL_LLIST_INTERFACE(GrCCPathCacheEntry);
// Does this entry reference a permanent, 8-bit atlas that resides in the resource cache?
// (i.e. not a temporarily-stashed, fp16 coverage count atlas.)
bool hasCachedAtlas() const { return SkToBool(fCachedAtlasInfo); }
const SkIRect& devIBounds() const { return fDevIBounds; }
int width() const { return fDevIBounds.width(); }
int height() const { return fDevIBounds.height(); }
// Called once our path has been rendered into the mainline CCPR (fp16, coverage count) atlas.
// The caller will stash this atlas texture away after drawing, and during the next flush,
// recover it and attempt to copy any paths that got reused into permanent 8-bit atlases.
void initAsStashedAtlas(const GrUniqueKey& atlasKey, const SkIVector& atlasOffset,
const SkRect& devBounds, const SkRect& devBounds45,
const SkIRect& devIBounds, const SkIVector& maskShift);
// Called once our path mask has been copied into a permanent, 8-bit atlas. This method points
// the entry at the new atlas and updates the CachedAtlasInfo data.
void updateToCachedAtlas(const GrUniqueKey& atlasKey, const SkIVector& newAtlasOffset,
sk_sp<GrCCAtlas::CachedAtlasInfo>);
const GrUniqueKey& atlasKey() const { return fAtlasKey; }
void resetAtlasKeyAndInfo() {
fAtlasKey.reset();
fCachedAtlasInfo.reset();
}
// This is a utility for the caller to detect when a path gets drawn more than once during the
// same flush, with compatible matrices. Before adding a path to an atlas, the caller may check
// here to see if they have already placed the path previously during the same flush. The caller
// is required to reset all currFlushAtlas references back to null before any subsequent flush.
void setCurrFlushAtlas(const GrCCAtlas* currFlushAtlas) {
// This should not get called more than once in a single flush. Once fCurrFlushAtlas is
// non-null, it can only be set back to null (once the flush is over).
SkASSERT(!fCurrFlushAtlas || !currFlushAtlas);
fCurrFlushAtlas = currFlushAtlas;
}
const GrCCAtlas* currFlushAtlas() const { return fCurrFlushAtlas; }
private:
using MaskTransform = GrCCPathCache::MaskTransform;
GrCCPathCacheEntry(GrCCPathCache* cache, const MaskTransform& m)
: fCacheWeakPtr(cache), fMaskTransform(m) {}
// Called when our corresponding path is modified or deleted.
void onChange() override;
GrCCPathCache* fCacheWeakPtr; // Gets manually reset to null by the path cache upon eviction.
const MaskTransform fMaskTransform;
GrUniqueKey fAtlasKey;
SkIVector fAtlasOffset;
// If null, then we are referencing a "stashed" atlas (see initAsStashedAtlas()).
sk_sp<GrCCAtlas::CachedAtlasInfo> fCachedAtlasInfo;
SkRect fDevBounds;
SkRect fDevBounds45;
SkIRect fDevIBounds;
// This field is for when a path gets drawn more than once during the same flush.
const GrCCAtlas* fCurrFlushAtlas = nullptr;
friend class GrCCPathCache;
friend void GrCCPathProcessor::Instance::set(const GrCCPathCacheEntry&, const SkIVector&,
uint32_t, DoEvenOddFill); // To access data.
};
inline void GrCCPathProcessor::Instance::set(const GrCCPathCacheEntry& entry,
const SkIVector& shift, GrColor color,
DoEvenOddFill doEvenOddFill) {
float dx = (float)shift.fX, dy = (float)shift.fY;
this->set(entry.fDevBounds.makeOffset(dx, dy), MakeOffset45(entry.fDevBounds45, dx, dy),
entry.fAtlasOffset - shift, color, doEvenOddFill);
}
#endif
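
The MaskTransform constructor's handling of translation can be summarized
as a decomposition: the integer part becomes a device-space shift that the
caller applies at draw time, and only the fractional part participates in
cache matching. A minimal standalone sketch:

    #include <cmath>

    // Split a translate into an integer shift plus a subpixel remainder.
    void decomposeTranslate(float tx, float ty,
                            int* shiftX, int* shiftY,    // Integer part.
                            float* subX, float* subY) {  // Fractional part, in [0, 1).
        float fx = std::floor(tx), fy = std::floor(ty);
        *shiftX = (int)fx;
        *shiftY = (int)fy;
        *subX = tx - fx;
        *subY = ty - fy;
    }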

View File

@ -10,6 +10,7 @@
#include "GrGpuCommandBuffer.h"
#include "GrOnFlushResourceProvider.h"
#include "GrTexture.h"
#include "ccpr/GrCCPerFlushResources.h"
#include "glsl/GrGLSLFragmentShaderBuilder.h"
#include "glsl/GrGLSLGeometryProcessor.h"
#include "glsl/GrGLSLProgramBuilder.h"
@ -131,9 +132,8 @@ GrGLSLPrimitiveProcessor* GrCCPathProcessor::createGLSLInstance(const GrShaderCa
}
void GrCCPathProcessor::drawPaths(GrOpFlushState* flushState, const GrPipeline& pipeline,
const GrBuffer* indexBuffer, const GrBuffer* vertexBuffer,
GrBuffer* instanceBuffer, int baseInstance, int endInstance,
const SkRect& bounds) const {
const GrCCPerFlushResources& resources, int baseInstance,
int endInstance, const SkRect& bounds) const {
const GrCaps& caps = flushState->caps();
GrPrimitiveType primitiveType = caps.usePrimitiveRestart()
? GrPrimitiveType::kTriangleStrip
@ -144,9 +144,10 @@ void GrCCPathProcessor::drawPaths(GrOpFlushState* flushState, const GrPipeline&
GrMesh mesh(primitiveType);
auto enablePrimitiveRestart = GrPrimitiveRestart(flushState->caps().usePrimitiveRestart());
mesh.setIndexedInstanced(indexBuffer, numIndicesPerInstance, instanceBuffer,
endInstance - baseInstance, baseInstance, enablePrimitiveRestart);
mesh.setVertexData(vertexBuffer);
mesh.setIndexedInstanced(resources.indexBuffer(), numIndicesPerInstance,
resources.instanceBuffer(), endInstance - baseInstance, baseInstance,
enablePrimitiveRestart);
mesh.setVertexData(resources.vertexBuffer());
flushState->rtCommandBuffer()->draw(pipeline, *this, &mesh, nullptr, 1, bounds);
}

View File

@ -13,6 +13,8 @@
#include "SkPath.h"
#include <array>
class GrCCPathCacheEntry;
class GrCCPerFlushResources;
class GrOnFlushResourceProvider;
class GrOpFlushState;
class GrPipeline;
@ -37,15 +39,29 @@ public:
};
static constexpr int kNumInstanceAttribs = 1 + (int)InstanceAttribs::kColor;
// Helper to offset the 45-degree bounding box returned by GrCCPathParser::parsePath().
static SkRect MakeOffset45(const SkRect& devBounds45, float dx, float dy) {
// devBounds45 is in "| 1 -1 | * devCoords" space.
// | 1 1 |
return devBounds45.makeOffset(dx - dy, dx + dy);
}
enum class DoEvenOddFill : bool {
kNo = false,
kYes = true
};
struct Instance {
SkRect fDevBounds; // "right < left" indicates even-odd fill type.
SkRect fDevBounds45; // Bounding box in "| 1 -1 | * devCoords" space.
// | 1 1 |
SkIVector fDevToAtlasOffset; // Translation from device space to location in atlas.
uint32_t fColor;
GrColor fColor;
void set(SkPath::FillType, const SkRect& devBounds, const SkRect& devBounds45,
const SkIVector& devToAtlasOffset, uint32_t color);
void set(const SkRect& devBounds, const SkRect& devBounds45,
const SkIVector& devToAtlasOffset, GrColor, DoEvenOddFill = DoEvenOddFill::kNo);
void set(const GrCCPathCacheEntry&, const SkIVector& shift, GrColor,
DoEvenOddFill = DoEvenOddFill::kNo);
};
GR_STATIC_ASSERT(4 * 11 == sizeof(Instance));
@ -75,9 +91,8 @@ public:
void getGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override {}
GrGLSLPrimitiveProcessor* createGLSLInstance(const GrShaderCaps&) const override;
void drawPaths(GrOpFlushState*, const GrPipeline&, const GrBuffer* indexBuffer,
const GrBuffer* vertexBuffer, GrBuffer* instanceBuffer, int baseInstance,
int endInstance, const SkRect& bounds) const;
void drawPaths(GrOpFlushState*, const GrPipeline&, const GrCCPerFlushResources&,
int baseInstance, int endInstance, const SkRect& bounds) const;
private:
const TextureSampler fAtlasAccess;
@ -86,14 +101,13 @@ private:
typedef GrGeometryProcessor INHERITED;
};
inline void GrCCPathProcessor::Instance::set(SkPath::FillType fillType, const SkRect& devBounds,
const SkRect& devBounds45,
const SkIVector& devToAtlasOffset, uint32_t color) {
if (SkPath::kEvenOdd_FillType == fillType) {
inline void GrCCPathProcessor::Instance::set(const SkRect& devBounds, const SkRect& devBounds45,
const SkIVector& devToAtlasOffset, GrColor color,
DoEvenOddFill doEvenOddFill) {
if (DoEvenOddFill::kYes == doEvenOddFill) {
// "right < left" indicates even-odd fill type.
fDevBounds.setLTRB(devBounds.fRight, devBounds.fTop, devBounds.fLeft, devBounds.fBottom);
} else {
SkASSERT(SkPath::kWinding_FillType == fillType);
fDevBounds = devBounds;
}
fDevBounds45 = devBounds45;
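
Note how the fill rule is smuggled into the instance data without an extra
attribute: storing the device bounds with left and right swapped (so that
fRight < fLeft) marks an instance as even-odd. A standalone sketch of the
encode/decode pair (plain structs, not Skia types; it assumes winding
paths always have non-negative width):

    struct Rect { float fLeft, fTop, fRight, fBottom; };

    Rect encodeBounds(const Rect& devBounds, bool evenOdd) {
        if (evenOdd) {
            // Swap left/right; a "negative width" flags the even-odd fill rule.
            return {devBounds.fRight, devBounds.fTop, devBounds.fLeft, devBounds.fBottom};
        }
        return devBounds;
    }

    // What the shader effectively checks on the other end.
    bool decodeIsEvenOdd(const Rect& bounds) { return bounds.fRight < bounds.fLeft; }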

View File

@ -13,14 +13,83 @@
#include "GrSurfaceContextPriv.h"
#include "GrRenderTargetContext.h"
#include "SkMakeUnique.h"
#include "ccpr/GrCCPathCache.h"
using CoverageCountBatchID = GrCCPathParser::CoverageCountBatchID;
using PathInstance = GrCCPathProcessor::Instance;
namespace {
// Base class for an Op that renders a CCPR atlas.
class AtlasOp : public GrDrawOp {
public:
FixedFunctionFlags fixedFunctionFlags() const override { return FixedFunctionFlags::kNone; }
RequiresDstTexture finalize(const GrCaps&, const GrAppliedClip*,
GrPixelConfigIsClamped) override { return RequiresDstTexture::kNo; }
bool onCombineIfPossible(GrOp* other, const GrCaps&) override {
SK_ABORT("Only expected one Op per CCPR atlas.");
return true;
}
void onPrepare(GrOpFlushState*) override {}
protected:
AtlasOp(uint32_t classID, sk_sp<const GrCCPerFlushResources> resources,
const SkISize& drawBounds)
: GrDrawOp(classID)
, fResources(std::move(resources)) {
this->setBounds(SkRect::MakeIWH(drawBounds.width(), drawBounds.height()),
GrOp::HasAABloat::kNo, GrOp::IsZeroArea::kNo);
}
const sk_sp<const GrCCPerFlushResources> fResources;
};
// Copies paths from a stashed coverage count atlas into an 8-bit literal-coverage atlas.
class CopyAtlasOp : public AtlasOp {
public:
DEFINE_OP_CLASS_ID
static std::unique_ptr<GrDrawOp> Make(GrContext* context,
sk_sp<const GrCCPerFlushResources> resources,
sk_sp<GrTextureProxy> copyProxy, int baseInstance,
int endInstance, const SkISize& drawBounds) {
return std::unique_ptr<GrDrawOp>(new CopyAtlasOp(std::move(resources), std::move(copyProxy),
baseInstance, endInstance, drawBounds));
}
const char* name() const override { return "CopyAtlasOp (CCPR)"; }
void visitProxies(const VisitProxyFunc& fn) const override { fn(fStashedAtlasProxy.get()); }
void onExecute(GrOpFlushState* flushState) override {
SkASSERT(fStashedAtlasProxy);
GrPipeline pipeline(flushState->proxy(), GrPipeline::ScissorState::kDisabled,
SkBlendMode::kSrc);
GrCCPathProcessor pathProc(flushState->resourceProvider(), std::move(fStashedAtlasProxy));
pathProc.drawPaths(flushState, pipeline, *fResources, fBaseInstance, fEndInstance,
this->bounds());
// Ensure we released the stashed atlas proxy. This allows its underlying texture to be
// reused as the current flush's mainline CCPR atlas if needed.
SkASSERT(!fStashedAtlasProxy);
}
private:
friend class ::GrOpMemoryPool; // for ctor
CopyAtlasOp(sk_sp<const GrCCPerFlushResources> resources, sk_sp<GrTextureProxy> copyProxy,
int baseInstance, int endInstance, const SkISize& drawBounds)
: AtlasOp(ClassID(), std::move(resources), drawBounds)
, fStashedAtlasProxy(copyProxy)
, fBaseInstance(baseInstance)
, fEndInstance(endInstance) {
}
sk_sp<GrTextureProxy> fStashedAtlasProxy;
const int fBaseInstance;
const int fEndInstance;
};
// Renders coverage counts to a CCPR atlas using the resources' pre-filled GrCCPathParser.
class RenderAtlasOp : public GrDrawOp {
class RenderAtlasOp : public AtlasOp {
public:
DEFINE_OP_CLASS_ID
@ -33,14 +102,6 @@ public:
// GrDrawOp interface.
const char* name() const override { return "RenderAtlasOp (CCPR)"; }
FixedFunctionFlags fixedFunctionFlags() const override { return FixedFunctionFlags::kNone; }
RequiresDstTexture finalize(const GrCaps&, const GrAppliedClip*,
GrPixelConfigIsClamped) override { return RequiresDstTexture::kNo; }
bool onCombineIfPossible(GrOp* other, const GrCaps&) override {
SK_ABORT("Only expected one Op per CCPR atlas.");
return true;
}
void onPrepare(GrOpFlushState*) override {}
void onExecute(GrOpFlushState* flushState) override {
fResources->pathParser().drawCoverageCount(flushState, fBatchID, fDrawBounds);
@ -51,29 +112,35 @@ private:
RenderAtlasOp(sk_sp<const GrCCPerFlushResources> resources, CoverageCountBatchID batchID,
const SkISize& drawBounds)
: GrDrawOp(ClassID())
, fResources(std::move(resources))
: AtlasOp(ClassID(), std::move(resources), drawBounds)
, fBatchID(batchID)
, fDrawBounds(SkIRect::MakeWH(drawBounds.width(), drawBounds.height())) {
this->setBounds(SkRect::MakeIWH(fDrawBounds.width(), fDrawBounds.height()),
GrOp::HasAABloat::kNo, GrOp::IsZeroArea::kNo);
}
const sk_sp<const GrCCPerFlushResources> fResources;
const CoverageCountBatchID fBatchID;
const SkIRect fDrawBounds;
};
}
static int inst_buffer_count(const GrCCPerFlushResourceSpecs& specs) {
return specs.fNumCachedPaths +
specs.fNumCopiedPaths*2 + // 1 copy + 1 draw.
specs.fNumRenderedPaths;
}
GrCCPerFlushResources::GrCCPerFlushResources(GrOnFlushResourceProvider* onFlushRP,
const GrCCPerFlushResourceSpecs& specs)
: fPathParser(specs.fNumRenderedPaths + specs.fNumClipPaths, specs.fParsingPathStats)
, fAtlasStack(kAlpha_half_GrPixelConfig, specs.fAtlasSpecs, onFlushRP->caps())
: fPathParser(specs.fNumRenderedPaths + specs.fNumClipPaths, specs.fRenderedPathStats)
, fCopyAtlasStack(kAlpha_8_GrPixelConfig, specs.fCopyAtlasSpecs, onFlushRP->caps())
, fRenderedAtlasStack(kAlpha_half_GrPixelConfig, specs.fRenderedAtlasSpecs,
onFlushRP->caps())
, fIndexBuffer(GrCCPathProcessor::FindIndexBuffer(onFlushRP))
, fVertexBuffer(GrCCPathProcessor::FindVertexBuffer(onFlushRP))
, fInstanceBuffer(onFlushRP->makeBuffer(kVertex_GrBufferType,
specs.fNumRenderedPaths * sizeof(PathInstance))) {
inst_buffer_count(specs) * sizeof(PathInstance)))
, fNextCopyInstanceIdx(0)
, fNextPathInstanceIdx(specs.fNumCopiedPaths) {
if (!fIndexBuffer) {
SkDebugf("WARNING: failed to allocate CCPR index buffer. No paths will be drawn.\n");
return;
@ -88,26 +155,43 @@ GrCCPerFlushResources::GrCCPerFlushResources(GrOnFlushResourceProvider* onFlushR
}
fPathInstanceData = static_cast<PathInstance*>(fInstanceBuffer->map());
SkASSERT(fPathInstanceData);
SkDEBUGCODE(fEndPathInstance = specs.fNumRenderedPaths);
SkDEBUGCODE(fEndCopyInstance = specs.fNumCopiedPaths);
SkDEBUGCODE(fEndPathInstance = inst_buffer_count(specs));
}
GrCCAtlas* GrCCPerFlushResources::copyPathToCachedAtlas(const GrCCPathCacheEntry& entry,
GrCCPathProcessor::DoEvenOddFill evenOdd,
SkIVector* newAtlasOffset) {
SkASSERT(this->isMapped());
SkASSERT(fNextCopyInstanceIdx < fEndCopyInstance);
SkASSERT(!entry.hasCachedAtlas()); // Unexpected, but not necessarily a problem.
if (GrCCAtlas* retiredAtlas = fCopyAtlasStack.addRect(entry.devIBounds(), newAtlasOffset)) {
// We did not fit in the previous copy atlas and it was retired. We will render the copies
// up until fNextCopyInstanceIdx into the retired atlas during finalize().
retiredAtlas->setUserBatchID(fNextCopyInstanceIdx);
}
fPathInstanceData[fNextCopyInstanceIdx++].set(entry, *newAtlasOffset, GrColor_WHITE, evenOdd);
return &fCopyAtlasStack.current();
}
const GrCCAtlas* GrCCPerFlushResources::renderPathInAtlas(const SkIRect& clipIBounds,
const SkMatrix& m, const SkPath& path,
SkRect* devBounds, SkRect* devBounds45,
SkIRect* devIBounds,
SkIVector* devToAtlasOffset) {
SkASSERT(this->isMapped());
SkASSERT(fNextPathInstanceIdx < fEndPathInstance);
fPathParser.parsePath(m, path, devBounds, devBounds45);
devBounds->roundOut(devIBounds);
SkIRect devIBounds;
devBounds->roundOut(&devIBounds);
if (!this->placeParsedPathInAtlas(clipIBounds, devIBounds, devToAtlasOffset)) {
if (!this->placeParsedPathInAtlas(clipIBounds, *devIBounds, devToAtlasOffset)) {
SkDEBUGCODE(--fEndPathInstance);
return nullptr; // Path was degenerate or clipped away.
}
return &fAtlasStack.current();
return &fRenderedAtlasStack.current();
}
const GrCCAtlas* GrCCPerFlushResources::renderDeviceSpacePathInAtlas(
@ -118,7 +202,7 @@ const GrCCAtlas* GrCCPerFlushResources::renderDeviceSpacePathInAtlas(
if (!this->placeParsedPathInAtlas(clipIBounds, devPathIBounds, devToAtlasOffset)) {
return nullptr;
}
return &fAtlasStack.current();
return &fRenderedAtlasStack.current();
}
bool GrCCPerFlushResources::placeParsedPathInAtlas(const SkIRect& clipIBounds,
@ -137,7 +221,8 @@ bool GrCCPerFlushResources::placeParsedPathInAtlas(const SkIRect& clipIBounds,
return false;
}
if (GrCCAtlas* retiredAtlas = fAtlasStack.addRect(clippedPathIBounds, devToAtlasOffset)) {
if (GrCCAtlas* retiredAtlas =
fRenderedAtlasStack.addRect(clippedPathIBounds, devToAtlasOffset)) {
// We did not fit in the previous coverage count atlas and it was retired. Close the path
// parser's current batch (which does not yet include the path we just parsed). We will
// render this batch into the retired atlas during finalize().
@ -149,16 +234,21 @@ bool GrCCPerFlushResources::placeParsedPathInAtlas(const SkIRect& clipIBounds,
}
bool GrCCPerFlushResources::finalize(GrOnFlushResourceProvider* onFlushRP,
sk_sp<GrTextureProxy> stashedAtlasProxy,
SkTArray<sk_sp<GrRenderTargetContext>>* out) {
SkASSERT(this->isMapped());
SkASSERT(fNextPathInstanceIdx == fEndPathInstance);
// No assert for fEndCopyInstance because the caller may have detected and skipped duplicates.
fInstanceBuffer->unmap();
fPathInstanceData = nullptr;
if (!fAtlasStack.empty()) {
if (!fCopyAtlasStack.empty()) {
fCopyAtlasStack.current().setUserBatchID(fNextCopyInstanceIdx);
}
if (!fRenderedAtlasStack.empty()) {
CoverageCountBatchID batchID = fPathParser.closeCurrentBatch();
fAtlasStack.current().setUserBatchID(batchID);
fRenderedAtlasStack.current().setUserBatchID(batchID);
}
// Build the GPU buffers to render path coverage counts. (This must not happen until after the
@ -168,8 +258,30 @@ bool GrCCPerFlushResources::finalize(GrOnFlushResourceProvider* onFlushRP,
return false;
}
// Render the atlas(es).
for (GrCCAtlasStack::Iter atlas(fAtlasStack); atlas.next();) {
// Draw the copies from the stashed atlas into 8-bit cached atlas(es).
int baseCopyInstance = 0;
for (GrCCAtlasStack::Iter atlas(fCopyAtlasStack); atlas.next();) {
int endCopyInstance = atlas->getUserBatchID();
if (endCopyInstance <= baseCopyInstance) {
SkASSERT(endCopyInstance == baseCopyInstance);
continue;
}
if (auto rtc = atlas->makeRenderTargetContext(onFlushRP)) {
GrContext* ctx = rtc->surfPriv().getContext();
auto op = CopyAtlasOp::Make(ctx, sk_ref_sp(this), stashedAtlasProxy, baseCopyInstance,
endCopyInstance, atlas->drawBounds());
rtc->addDrawOp(GrNoClip(), std::move(op));
out->push_back(std::move(rtc));
}
baseCopyInstance = endCopyInstance;
}
// Release the stashed atlas before creating new one(s). This allows us to recycle the same
// underlying texture with the upcoming rendered atlases.
stashedAtlasProxy = nullptr;
// Render the coverage count atlas(es).
for (GrCCAtlasStack::Iter atlas(fRenderedAtlasStack); atlas.next();) {
if (auto rtc = atlas->makeRenderTargetContext(onFlushRP)) {
auto op = RenderAtlasOp::Make(rtc->surfPriv().getContext(), sk_ref_sp(this),
atlas->getUserBatchID(), atlas->drawBounds());
@ -180,3 +292,22 @@ bool GrCCPerFlushResources::finalize(GrOnFlushResourceProvider* onFlushRP,
return true;
}
void GrCCPerFlushResourceSpecs::convertCopiesToRenders() {
fNumRenderedPaths += fNumCopiedPaths;
fNumCopiedPaths = 0;
fRenderedAtlasSpecs.fApproxNumPixels += fCopyAtlasSpecs.fApproxNumPixels;
fRenderedAtlasSpecs.fMinWidth =
SkTMax(fRenderedAtlasSpecs.fMinWidth, fCopyAtlasSpecs.fMinWidth);
fRenderedAtlasSpecs.fMinHeight =
SkTMax(fRenderedAtlasSpecs.fMinHeight, fCopyAtlasSpecs.fMinHeight);
fCopyAtlasSpecs = GrCCAtlas::Specs();
fRenderedPathStats.fMaxPointsPerPath =
SkTMax(fRenderedPathStats.fMaxPointsPerPath, fCopyPathStats.fMaxPointsPerPath);
fRenderedPathStats.fNumTotalSkPoints += fCopyPathStats.fNumTotalSkPoints;
fRenderedPathStats.fNumTotalSkVerbs += fCopyPathStats.fNumTotalSkVerbs;
fRenderedPathStats.fNumTotalConicWeights += fCopyPathStats.fNumTotalConicWeights;
fCopyPathStats = GrCCPathParser::PathStats();
}
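
The instance-buffer arithmetic above follows from the buffer layout: copy
instances occupy the front of the buffer (fNextCopyInstanceIdx starts at 0)
and draw instances fill in behind them (fNextPathInstanceIdx starts at
fNumCopiedPaths). A sketch of the sizing logic:

    // Buffer layout:
    //   [0 .. numCopiedPaths)              copy instances (stash -> 8-bit atlas)
    //   [numCopiedPaths .. instanceCount)  draw instances (cached + copied + rendered)
    int instanceCount(int numCachedPaths, int numCopiedPaths, int numRenderedPaths) {
        return numCachedPaths +
               numCopiedPaths * 2 +  // Each copied path: 1 copy + 1 final draw.
               numRenderedPaths;
    }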

View File

@ -13,17 +13,29 @@
#include "ccpr/GrCCPathParser.h"
#include "ccpr/GrCCPathProcessor.h"
class GrCCPathCacheEntry;
class GrOnFlushResourceProvider;
/**
* This struct encapsulates the minimum and desired requirements for the GPU resources required by
* CCPR in a given flush.
*/
struct GrCCPerFlushResourceSpecs {
int fNumCachedPaths = 0;
int fNumCopiedPaths = 0;
GrCCPathParser::PathStats fCopyPathStats;
GrCCAtlas::Specs fCopyAtlasSpecs;
int fNumRenderedPaths = 0;
int fNumClipPaths = 0;
GrCCPathParser::PathStats fParsingPathStats;
GrCCAtlas::Specs fAtlasSpecs;
GrCCPathParser::PathStats fRenderedPathStats;
GrCCAtlas::Specs fRenderedAtlasSpecs;
bool isEmpty() const { return 0 == fNumRenderedPaths + fNumClipPaths; }
bool isEmpty() const {
return 0 == fNumCachedPaths + fNumCopiedPaths + fNumRenderedPaths + fNumClipPaths;
}
void convertCopiesToRenders();
};
/**
@ -37,9 +49,17 @@ public:
bool isMapped() const { return SkToBool(fPathInstanceData); }
// Renders a path into a temporary atlas. See GrCCPathParser for a description of the arguments.
// Copies a path out of the previous flush's stashed mainline coverage count atlas, and into
// a cached, 8-bit, literal-coverage atlas. The actual source texture to copy from will be
// provided at the time finalize() is called.
GrCCAtlas* copyPathToCachedAtlas(const GrCCPathCacheEntry&, GrCCPathProcessor::DoEvenOddFill,
SkIVector* newAtlasOffset);
// These two methods render a path into a temporary coverage count atlas. See GrCCPathParser for
// a description of the arguments. The returned atlases are "const" to prevent the caller from
// assigning a unique key.
const GrCCAtlas* renderPathInAtlas(const SkIRect& clipIBounds, const SkMatrix&, const SkPath&,
SkRect* devBounds, SkRect* devBounds45,
SkRect* devBounds, SkRect* devBounds45, SkIRect* devIBounds,
SkIVector* devToAtlasOffset);
const GrCCAtlas* renderDeviceSpacePathInAtlas(const SkIRect& clipIBounds, const SkPath& devPath,
const SkIRect& devPathIBounds,
@ -58,8 +78,11 @@ public:
return fPathInstanceData[fNextPathInstanceIdx++];
}
// Finishes off the GPU buffers and renders the atlas(es).
bool finalize(GrOnFlushResourceProvider*, SkTArray<sk_sp<GrRenderTargetContext>>* out);
// Finishes off the GPU buffers and renders the atlas(es). 'stashedAtlasProxy', if provided, is
// the mainline coverage count atlas from the previous flush. It will be used as the source
// texture for any copies set up by copyPathToCachedAtlas().
bool finalize(GrOnFlushResourceProvider*, sk_sp<GrTextureProxy> stashedAtlasProxy,
SkTArray<sk_sp<GrRenderTargetContext>>* out);
// Accessors used by draw calls, once the resources have been finalized.
const GrCCPathParser& pathParser() const { SkASSERT(!this->isMapped()); return fPathParser; }
@ -67,19 +90,38 @@ public:
const GrBuffer* vertexBuffer() const { SkASSERT(!this->isMapped()); return fVertexBuffer.get();}
GrBuffer* instanceBuffer() const { SkASSERT(!this->isMapped()); return fInstanceBuffer.get(); }
// Returns the mainline coverage count atlas that the client may stash for next flush, if any.
// The caller is responsible for calling getOrAssignUniqueKey() on this atlas if they wish to
// actually stash it in order to copy paths into cached atlases.
GrCCAtlas* nextAtlasToStash() {
return fRenderedAtlasStack.empty() ? nullptr : &fRenderedAtlasStack.front();
}
// Returns true if the client has called getOrAssignUniqueKey() on our nextAtlasToStash().
bool hasStashedAtlas() const {
return !fRenderedAtlasStack.empty() && fRenderedAtlasStack.front().uniqueKey().isValid();
}
const GrUniqueKey& stashedAtlasKey() const {
SkASSERT(this->hasStashedAtlas());
return fRenderedAtlasStack.front().uniqueKey();
}
private:
bool placeParsedPathInAtlas(const SkIRect& clipIBounds, const SkIRect& pathIBounds,
SkIVector* devToAtlasOffset);
GrCCPathParser fPathParser;
GrCCAtlasStack fAtlasStack;
GrCCAtlasStack fCopyAtlasStack;
GrCCAtlasStack fRenderedAtlasStack;
const sk_sp<const GrBuffer> fIndexBuffer;
const sk_sp<const GrBuffer> fVertexBuffer;
const sk_sp<GrBuffer> fInstanceBuffer;
GrCCPathProcessor::Instance* fPathInstanceData = nullptr;
int fNextPathInstanceIdx = 0;
int fNextCopyInstanceIdx;
SkDEBUGCODE(int fEndCopyInstance);
int fNextPathInstanceIdx;
SkDEBUGCODE(int fEndPathInstance);
};

View File

@ -45,14 +45,16 @@ public:
fTail = !nextTail ? newRightHead : nextTail;
}
struct Iter {
template<typename U> struct Iter {
bool operator!=(const Iter& that) { return fCurr != that.fCurr; }
const T& operator*() { return *fCurr; }
U& operator*() { return *fCurr; }
void operator++() { fCurr = fCurr->fNext; }
const T* fCurr;
U* fCurr;
};
Iter begin() const { return Iter{&fHead}; }
Iter end() const { return Iter{nullptr}; }
Iter<const T> begin() const { return Iter<const T>{&fHead}; }
Iter<const T> end() const { return Iter<const T>{nullptr}; }
Iter<T> begin() { return Iter<T>{&fHead}; }
Iter<T> end() { return Iter<T>{nullptr}; }
private:
T fHead;
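
Templating Iter on the element type lets a single iterator definition serve
both const and mutable range-for loops. A minimal standalone list showing
the same pattern (not the Skia class itself):

    struct Node { int fValue; Node* fNext = nullptr; };

    class List {
    public:
        template <typename U> struct Iter {
            bool operator!=(const Iter& that) const { return fCurr != that.fCurr; }
            U& operator*() const { return *fCurr; }
            void operator++() { fCurr = fCurr->fNext; }
            U* fCurr;
        };
        Iter<const Node> begin() const { return {fHead}; }
        Iter<const Node> end()   const { return {nullptr}; }
        Iter<Node>       begin()       { return {fHead}; }
        Iter<Node>       end()         { return {nullptr}; }
    private:
        Node* fHead = nullptr;
    };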

View File

@ -50,6 +50,8 @@ bool GrCoverageCountingPathRenderer::IsSupported(const GrCaps& caps) {
caps.instanceAttribSupport() && GrCaps::kNone_MapFlags != caps.mapBufferFlags() &&
caps.isConfigTexturable(kAlpha_half_GrPixelConfig) &&
caps.isConfigRenderable(kAlpha_half_GrPixelConfig) &&
caps.isConfigTexturable(kAlpha_8_GrPixelConfig) &&
caps.isConfigRenderable(kAlpha_8_GrPixelConfig) &&
!caps.blacklistCoverageCounting();
}
@ -112,24 +114,22 @@ bool GrCoverageCountingPathRenderer::onDrawPath(const DrawPathArgs& args) {
GrRenderTargetContext* rtc = args.fRenderTargetContext;
args.fClip->getConservativeBounds(rtc->width(), rtc->height(), &clipIBounds, nullptr);
SkPath path;
args.fShape->asPath(&path);
SkRect devBounds;
args.fViewMatrix->mapRect(&devBounds, path.getBounds());
args.fViewMatrix->mapRect(&devBounds, args.fShape->bounds());
std::unique_ptr<GrCCDrawPathsOp> op;
if (SkTMax(devBounds.height(), devBounds.width()) > kPathCropThreshold) {
// The path is too large. Crop it or analytic AA can run out of fp32 precision.
SkPath croppedPath;
path.transform(*args.fViewMatrix, &croppedPath);
args.fShape->asPath(&croppedPath);
croppedPath.transform(*args.fViewMatrix, &croppedPath);
crop_path(croppedPath, clipIBounds, &croppedPath);
// FIXME: This breaks local coords: http://skbug.com/8003
op = GrCCDrawPathsOp::Make(args.fContext, clipIBounds, SkMatrix::I(), croppedPath,
op = GrCCDrawPathsOp::Make(args.fContext, clipIBounds, SkMatrix::I(), GrShape(croppedPath),
croppedPath.getBounds(), std::move(args.fPaint));
} else {
op = GrCCDrawPathsOp::Make(args.fContext, clipIBounds, *args.fViewMatrix, path, devBounds,
std::move(args.fPaint));
op = GrCCDrawPathsOp::Make(args.fContext, clipIBounds, *args.fViewMatrix, *args.fShape,
devBounds, std::move(args.fPaint));
}
this->recordOp(std::move(op), args);
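
The crop above exists because analytic AA coverage math runs in fp32, whose absolute precision shrinks as coordinates grow. A standalone demonstration (not Skia code) of how quickly sub-pixel precision erodes at large device coordinates:

#include <cmath>
#include <cstdio>

// Prints the spacing between adjacent representable floats ("ulp") at a few
// device-space magnitudes; by 2^20 px it is already 1/8 px, far too coarse
// for antialiased coverage.
int main() {
    for (float x : {256.f, 4096.f, 65536.f, 1048576.f}) {
        std::printf("ulp at %8.0f px = %g px\n", x, std::nextafterf(x, 2 * x) - x);
    }
    return 0;
}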
@ -179,18 +179,37 @@ std::unique_ptr<GrFragmentProcessor> GrCoverageCountingPathRenderer::makeClipPro
void GrCoverageCountingPathRenderer::preFlush(GrOnFlushResourceProvider* onFlushRP,
const uint32_t* opListIDs, int numOpListIDs,
SkTArray<sk_sp<GrRenderTargetContext>>* out) {
using DoCopiesToCache = GrCCDrawPathsOp::DoCopiesToCache;
SkASSERT(!fFlushing);
SkASSERT(fFlushingPaths.empty());
SkDEBUGCODE(fFlushing = true);
// Dig up the stashed atlas from the previous flush (if any) so we can attempt to copy any
// reusable paths out of it and into the resource cache. We also need to clear its unique key.
sk_sp<GrTextureProxy> stashedAtlasProxy;
if (fStashedAtlasKey.isValid()) {
stashedAtlasProxy = onFlushRP->findOrCreateProxyByUniqueKey(fStashedAtlasKey,
GrCCAtlas::kTextureOrigin);
if (stashedAtlasProxy) {
// Instantiate the proxy so we can clear the underlying texture's unique key.
onFlushRP->instatiateProxy(stashedAtlasProxy.get());
onFlushRP->removeUniqueKeyFromProxy(fStashedAtlasKey, stashedAtlasProxy.get());
} else {
fStashedAtlasKey.reset(); // Indicate there is no stashed atlas to copy from.
}
}
if (fPendingPaths.empty()) {
fStashedAtlasKey.reset();
return; // Nothing to draw.
}
GrCCPerFlushResourceSpecs resourceSpecs;
GrCCPerFlushResourceSpecs specs;
int maxPreferredRTSize = onFlushRP->caps()->maxPreferredRenderTargetSize();
resourceSpecs.fAtlasSpecs.fMaxPreferredTextureSize = maxPreferredRTSize;
resourceSpecs.fAtlasSpecs.fMinTextureSize = SkTMin(1024, maxPreferredRTSize);
specs.fCopyAtlasSpecs.fMaxPreferredTextureSize = SkTMin(2048, maxPreferredRTSize);
SkASSERT(0 == specs.fCopyAtlasSpecs.fMinTextureSize);
specs.fRenderedAtlasSpecs.fMaxPreferredTextureSize = maxPreferredRTSize;
specs.fRenderedAtlasSpecs.fMinTextureSize = SkTMin(1024, maxPreferredRTSize);
// Move the per-opList paths that are about to be flushed from fPendingPaths to fFlushingPaths,
// and count them up so we can preallocate buffers.
@ -204,40 +223,49 @@ void GrCoverageCountingPathRenderer::preFlush(GrOnFlushResourceProvider* onFlush
fFlushingPaths.push_back(std::move(iter->second));
fPendingPaths.erase(iter);
for (const GrCCDrawPathsOp* op : fFlushingPaths.back()->fDrawOps) {
op->accountForOwnPaths(&resourceSpecs);
for (GrCCDrawPathsOp* op : fFlushingPaths.back()->fDrawOps) {
op->accountForOwnPaths(&fPathCache, onFlushRP, fStashedAtlasKey, &specs);
}
for (const auto& clipsIter : fFlushingPaths.back()->fClipPaths) {
clipsIter.second.accountForOwnPath(&resourceSpecs);
clipsIter.second.accountForOwnPath(&specs);
}
}
fStashedAtlasKey.reset();
if (resourceSpecs.isEmpty()) {
if (specs.isEmpty()) {
return; // Nothing to draw.
}
auto resources = sk_make_sp<GrCCPerFlushResources>(onFlushRP, resourceSpecs);
// Determine if there are enough reusable paths from last flush for it to be worth our time to
// copy them to cached atlas(es).
DoCopiesToCache doCopies = DoCopiesToCache(specs.fNumCopiedPaths > 100 ||
specs.fCopyAtlasSpecs.fApproxNumPixels > 512 * 256);
if (specs.fNumCopiedPaths && DoCopiesToCache::kNo == doCopies) {
specs.convertCopiesToRenders();
SkASSERT(!specs.fNumCopiedPaths);
}
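// The thresholds above gate the copy pass on roughly an atlas's worth of
// reusable content: more than 100 paths, or more than 512*256 = 131,072
// approx. pixels. Below that, convertCopiesToRenders() re-renders the
// would-be copies into the mainline coverage count atlas instead.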
auto resources = sk_make_sp<GrCCPerFlushResources>(onFlushRP, specs);
if (!resources->isMapped()) {
return; // Some allocation failed.
}
// Layout atlas(es) and parse paths.
SkDEBUGCODE(int numSkippedPaths = 0);
// Layout the atlas(es) and parse paths.
for (const auto& flushingPaths : fFlushingPaths) {
for (GrCCDrawPathsOp* op : flushingPaths->fDrawOps) {
op->setupResources(resources.get(), onFlushRP);
SkDEBUGCODE(numSkippedPaths += op->numSkippedInstances_debugOnly());
op->setupResources(onFlushRP, resources.get(), doCopies);
}
for (auto& clipsIter : flushingPaths->fClipPaths) {
clipsIter.second.renderPathInAtlas(resources.get(), onFlushRP);
}
}
SkASSERT(resources->nextPathInstanceIdx() == resourceSpecs.fNumRenderedPaths - numSkippedPaths);
// Allocate the atlases and create instance buffers to draw them.
if (!resources->finalize(onFlushRP, out)) {
// Allocate resources and then render the atlas(es).
if (!resources->finalize(onFlushRP, std::move(stashedAtlasProxy), out)) {
return;
}
// Verify the stashed atlas got released so its texture could be recycled.
SkASSERT(!stashedAtlasProxy);
// Commit flushing paths to the resources once they are successfully completed.
for (auto& flushingPaths : fFlushingPaths) {
@ -249,14 +277,24 @@ void GrCoverageCountingPathRenderer::preFlush(GrOnFlushResourceProvider* onFlush
void GrCoverageCountingPathRenderer::postFlush(GrDeferredUploadToken, const uint32_t* opListIDs,
int numOpListIDs) {
SkASSERT(fFlushing);
SkASSERT(!fStashedAtlasKey.isValid()); // Should have been cleared in preFlush().
// In DDL mode these aren't guaranteed to be deleted so we must clear out the perFlush
// resources manually.
for (auto& flushingPaths : fFlushingPaths) {
flushingPaths->fFlushResources = nullptr;
if (!fFlushingPaths.empty()) {
// Note the stashed atlas's key for next flush, if any.
auto resources = fFlushingPaths.front()->fFlushResources.get();
if (resources && resources->hasStashedAtlas()) {
fStashedAtlasKey = resources->stashedAtlasKey();
}
// In DDL mode these aren't guaranteed to be deleted so we must clear out the perFlush
// resources manually.
for (auto& flushingPaths : fFlushingPaths) {
flushingPaths->fFlushResources = nullptr;
}
// We wait to erase these until after flush, once Ops and FPs are done accessing their data.
fFlushingPaths.reset();
}
// We wait to erase these until after flush, once Ops and FPs are done accessing their data.
fFlushingPaths.reset();
SkDEBUGCODE(fFlushing = false);
}
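
Condensed, preFlush and postFlush implement a two-flush handshake: flush N stashes its rendered atlas under a unique key, and flush N+1 copies the reusable masks out of it before recycling the same texture. A standalone toy model of that lifecycle (hypothetical types, not Skia code):

#include <cstdio>
#include <memory>

struct Texture { int id; };

class Stash {
public:
    void put(std::unique_ptr<Texture> t) { fStashed = std::move(t); }
    std::unique_ptr<Texture> take() { return std::move(fStashed); }
private:
    std::unique_ptr<Texture> fStashed;
};

void flush(Stash& stash, int flushIdx) {
    // "preFlush": dig up last flush's atlas and copy reusable paths out of it.
    std::unique_ptr<Texture> recycled = stash.take();
    if (recycled) {
        std::printf("flush %d: copying cached paths out of texture %d\n", flushIdx,
                    recycled->id);
    }
    // Render the remaining paths, reusing the old texture when one exists.
    std::unique_ptr<Texture> atlas = recycled
            ? std::move(recycled)
            : std::make_unique<Texture>(Texture{flushIdx});
    std::printf("flush %d: rendering into texture %d\n", flushIdx, atlas->id);
    // "postFlush": stash the freshly rendered atlas for the next flush.
    stash.put(std::move(atlas));
}

int main() {
    Stash stash;
    for (int i = 0; i < 3; ++i) {
        flush(stash, i);
    }
    return 0;
}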

View File

@ -11,6 +11,7 @@
#include "GrCCPerOpListPaths.h"
#include "GrPathRenderer.h"
#include "GrRenderTargetOpList.h"
#include "ccpr/GrCCPathCache.h"
#include "ccpr/GrCCPerFlushResources.h"
#include <map>
@ -70,6 +71,8 @@ public:
SkTArray<sk_sp<GrRenderTargetContext>>* out) override;
void postFlush(GrDeferredUploadToken, const uint32_t* opListIDs, int numOpListIDs) override;
const GrUniqueKey& testingOnly_getStashedAtlasKey() const;
private:
GrCoverageCountingPathRenderer(bool drawCachablePaths)
: fDrawCachablePaths(drawCachablePaths) {}
@ -85,6 +88,10 @@ private:
// fFlushingPaths holds the GrCCPerOpListPaths objects that are currently being flushed.
// (It will only contain elements when fFlushing is true.)
SkSTArray<4, sk_sp<GrCCPerOpListPaths>> fFlushingPaths;
GrCCPathCache fPathCache;
GrUniqueKey fStashedAtlasKey;
SkDEBUGCODE(bool fFlushing = false);
const bool fDrawCachablePaths;

View File

@ -17,9 +17,11 @@
#include "GrRenderTargetContext.h"
#include "GrRenderTargetContextPriv.h"
#include "GrShape.h"
#include "GrTexture.h"
#include "SkMatrix.h"
#include "SkPathPriv.h"
#include "SkRect.h"
#include "sk_tool_utils.h"
#include "ccpr/GrCoverageCountingPathRenderer.h"
#include "mock/GrMockTypes.h"
#include <cmath>
@ -51,6 +53,11 @@ private:
const SkPath fPath;
};
enum class MarkVolatile : bool {
kNo = false,
kYes = true
};
class CCPRPathDrawer {
public:
CCPRPathDrawer(GrContext* ctx, skiatest::Reporter* reporter)
@ -68,22 +75,26 @@ public:
}
}
GrContext* ctx() const { return fCtx; }
GrCoverageCountingPathRenderer* ccpr() const { return fCCPR; }
bool valid() const { return fCCPR && fRTC; }
void clear() const { fRTC->clear(nullptr, 0, GrRenderTargetContext::CanClearFullscreen::kYes); }
void abandonGrContext() { fCtx = nullptr; fCCPR = nullptr; fRTC = nullptr; }
void drawPath(SkPath path, GrColor4f color = GrColor4f(0, 1, 0, 1)) const {
void drawPath(SkPath path, const SkMatrix& matrix = SkMatrix::I(),
MarkVolatile markVolatile = MarkVolatile::kYes) const {
SkASSERT(this->valid());
GrPaint paint;
paint.setColor4f(color);
paint.setColor4f(GrColor4f(0, 1, 0, 1));
GrNoClip noClip;
SkIRect clipBounds = SkIRect::MakeWH(kCanvasSize, kCanvasSize);
SkMatrix matrix = SkMatrix::I();
path.setIsVolatile(true);
if (MarkVolatile::kYes == markVolatile) {
path.setIsVolatile(true);
}
GrShape shape(path);
fCCPR->drawPath({fCtx, std::move(paint), &GrUserStencilSettings::kUnused, fRTC.get(),
@ -106,9 +117,9 @@ public:
}
private:
GrContext* fCtx;
GrCoverageCountingPathRenderer* fCCPR;
sk_sp<GrRenderTargetContext> fRTC;
GrContext* fCtx;
GrCoverageCountingPathRenderer* fCCPR;
sk_sp<GrRenderTargetContext> fRTC;
};
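
With the updated signature, a test can control both the view matrix and whether the path is marked volatile (a volatile path never produces a stable cache key, so only MarkVolatile::kNo draws can exercise the cache). A hypothetical call site:

// Hypothetical usage; 'ccpr' is a CCPRPathDrawer from this file.
SkPath star = sk_tool_utils::make_star(SkRect::MakeIWH(20, 20), 7, 3);
ccpr.drawPath(star, SkMatrix::MakeTrans(5, 5), MarkVolatile::kNo);  // cacheable
ccpr.drawPath(star);  // defaults: identity matrix, marked volatile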
class CCPRTest {
@ -120,6 +131,9 @@ public:
mockOptions.fConfigOptions[kAlpha_half_GrPixelConfig].fRenderability =
GrMockOptions::ConfigOptions::Renderability::kNonMSAA;
mockOptions.fConfigOptions[kAlpha_half_GrPixelConfig].fTexturable = true;
mockOptions.fConfigOptions[kAlpha_8_GrPixelConfig].fRenderability =
GrMockOptions::ConfigOptions::Renderability::kNonMSAA;
mockOptions.fConfigOptions[kAlpha_8_GrPixelConfig].fTexturable = true;
mockOptions.fGeometryShaderSupport = true;
mockOptions.fIntegerSupport = true;
mockOptions.fFlatInterpolationSupport = true;
@ -155,8 +169,8 @@ protected:
virtual void customizeMockOptions(GrMockOptions*) {}
virtual void onRun(skiatest::Reporter* reporter, CCPRPathDrawer& ccpr) = 0;
sk_sp<GrContext> fMockContext;
SkPath fPath;
sk_sp<GrContext> fMockContext;
SkPath fPath;
};
#define DEF_CCPR_TEST(name) \
@ -263,6 +277,102 @@ class GrCCPRTest_parseEmptyPath : public CCPRTest {
};
DEF_CCPR_TEST(GrCCPRTest_parseEmptyPath)
// This test exercises CCPR's cache capabilities by drawing many paths with two different
// transformation matrices. We then vary the matrices independently by whole and partial pixels,
// and verify the caching behaved as expected.
class GrCCPRTest_cache : public CCPRTest {
void onRun(skiatest::Reporter* reporter, CCPRPathDrawer& ccpr) override {
static constexpr int kPathSize = 20;
SkRandom rand;
SkPath paths[200];
int primes[11] = {2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31};
for (size_t i = 0; i < SK_ARRAY_COUNT(paths); ++i) {
int numPts = rand.nextRangeU(GrShape::kMaxKeyFromDataVerbCnt + 1,
GrShape::kMaxKeyFromDataVerbCnt * 2);
paths[i] = sk_tool_utils::make_star(SkRect::MakeIWH(kPathSize, kPathSize), numPts,
primes[rand.nextU() % SK_ARRAY_COUNT(primes)]);
}
SkMatrix matrices[2] = {
SkMatrix::MakeTrans(5, 5),
SkMatrix::MakeTrans(kCanvasSize - kPathSize - 5, kCanvasSize - kPathSize - 5)
};
int firstAtlasID = -1;
for (int flushIdx = 0; flushIdx < 10; ++flushIdx) {
// Draw all the paths and flush.
for (size_t i = 0; i < SK_ARRAY_COUNT(paths); ++i) {
ccpr.drawPath(paths[i], matrices[i % 2], MarkVolatile::kNo);
}
ccpr.flush();
// Figure out the mock backend ID of the atlas texture stashed away by CCPR.
GrMockTextureInfo stashedAtlasInfo;
stashedAtlasInfo.fID = -1;
const GrUniqueKey& stashedAtlasKey = ccpr.ccpr()->testingOnly_getStashedAtlasKey();
if (stashedAtlasKey.isValid()) {
GrResourceProvider* rp = ccpr.ctx()->contextPriv().resourceProvider();
sk_sp<GrSurface> stashedAtlas = rp->findByUniqueKey<GrSurface>(stashedAtlasKey);
REPORTER_ASSERT(reporter, stashedAtlas);
if (stashedAtlas) {
const auto& backendTexture = stashedAtlas->asTexture()->getBackendTexture();
backendTexture.getMockTextureInfo(&stashedAtlasInfo);
}
}
if (0 == flushIdx) {
// First flush: just note the ID of the stashed atlas and continue.
REPORTER_ASSERT(reporter, stashedAtlasKey.isValid());
firstAtlasID = stashedAtlasInfo.fID;
continue;
}
switch (flushIdx % 3) {
case 1:
// This draw should have gotten 100% cache hits; we only did integer translates
// last time (or none if it was the first flush). Therefore, no atlas should
// have been stashed away.
REPORTER_ASSERT(reporter, !stashedAtlasKey.isValid());
// Invalidate even path masks.
matrices[0].preTranslate(1.6f, 1.4f);
break;
case 2:
// Even path masks were invalidated last iteration by a subpixel translate. They
// should have been re-rendered this time and stashed away in the CCPR atlas.
REPORTER_ASSERT(reporter, stashedAtlasKey.isValid());
// 'firstAtlasID' should be kept as a scratch texture in the resource cache.
REPORTER_ASSERT(reporter, stashedAtlasInfo.fID == firstAtlasID);
// Invalidate odd path masks.
matrices[1].preTranslate(-1.4f, -1.6f);
break;
case 0:
// Odd path masks were invalidated last iteration by a subpixel translate. They
// should have been re-rendered this time and stashed away in the CCPR atlas.
REPORTER_ASSERT(reporter, stashedAtlasKey.isValid());
// 'firstAtlasID' is the same texture that got stashed away last time (assuming
// no assertion failures). So if it also got stashed this time, it means we
// first copied the even paths out of it, then recycled the exact same texture
// to render the odd paths. This is the expected behavior.
REPORTER_ASSERT(reporter, stashedAtlasInfo.fID == firstAtlasID);
// Integer translates: all path masks stay valid.
matrices[0].preTranslate(-1, -1);
matrices[1].preTranslate(1, 1);
break;
}
}
}
};
DEF_CCPR_TEST(GrCCPRTest_cache)
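
The whole/partial-pixel dance above works because a cached mask remains valid under integer device-space translation, while any fractional component changes per-pixel coverage. A sketch of that reuse test (hypothetical helper; not the actual GrCCPathCache logic):

#include <cmath>

// Hypothetical helper: a cached mask is reusable only when the new matrix
// differs from the cached one by an integer device-space translation.
static bool can_reuse_cached_mask(const SkMatrix& cached, const SkMatrix& draw) {
    if (cached.getScaleX() != draw.getScaleX() ||
        cached.getSkewX()  != draw.getSkewX()  ||
        cached.getSkewY()  != draw.getSkewY()  ||
        cached.getScaleY() != draw.getScaleY()) {
        return false;  // The mask's shape itself would differ.
    }
    float dx = draw.getTranslateX() - cached.getTranslateX();
    float dy = draw.getTranslateY() - cached.getTranslateY();
    // Integer translates only shift the mask within the atlas; any subpixel
    // part alters coverage and forces a re-render, as the test expects.
    return dx == std::floor(dx) && dy == std::floor(dy);
}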
class CCPRRenderingTest {
public:
void run(skiatest::Reporter* reporter, GrContext* ctx) const {

View File

@ -25,6 +25,7 @@
#include "SkMathPriv.h"
#include "SkString.h"
#include "SkTo.h"
#include "ccpr/GrCoverageCountingPathRenderer.h"
#include "ops/GrMeshDrawOp.h"
#include "text/GrGlyphCache.h"
#include "text/GrTextBlobCache.h"
@ -301,6 +302,12 @@ GrPixelConfig GrBackendRenderTarget::testingOnly_getPixelConfig() const {
//////////////////////////////////////////////////////////////////////////////
const GrUniqueKey& GrCoverageCountingPathRenderer::testingOnly_getStashedAtlasKey() const {
return fStashedAtlasKey;
}
//////////////////////////////////////////////////////////////////////////////
#define DRAW_OP_TEST_EXTERN(Op) \
extern std::unique_ptr<GrDrawOp> Op##__Test(GrPaint&&, SkRandom*, GrContext*, GrFSAAType)
#define DRAW_OP_TEST_ENTRY(Op) Op##__Test