Extract a GrCCPathParser class that is decoupled from atlas ops

Allows coverage counts ultimately to be drawn either to an atlas or directly
to the framebuffer.

Bug: skia:
Change-Id: I6cc07fce562c223381b89586d19ae98298bafe4d
Reviewed-on: https://skia-review.googlesource.com/96083
Commit-Queue: Chris Dalton <csmartdalton@google.com>
Reviewed-by: Greg Daniel <egdaniel@google.com>

Parent: a583b813b9
Commit: 9ca27849d8
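In rough outline, the new flow looks like the sketch below (names are taken from the diff that
follows; error handling and the rest of the on-flush plumbing are omitted):

    // One GrCCPathParser is shared across the whole flush; each GrCCAtlas only
    // remembers which batch of parsed paths must be drawn into it.
    sk_sp<GrCCPathParser> parser = sk_make_sp<GrCCPathParser>(maxTotalPaths, maxPathPoints,
                                                              numSkPoints, numSkVerbs);

    // For each path: parse it, decide where it goes, then commit it to the current batch.
    parser->parsePath(viewMatrix, path, &devBounds, &devBounds45);
    parser->saveParsedPath(scissorMode, clippedDevIBounds, atlasOffsetX, atlasOffsetY);

    // When an atlas fills up (or at the end of the flush), close the batch and hand its
    // ID to that atlas.
    atlas->setCoverageCountBatchID(parser->closeCurrentBatch());

    // Build the shared GPU buffers once, then let each atlas draw its own batch of
    // coverage counts. Because the parser is no longer tied to an atlas op, the same
    // drawCoverageCount() call could ultimately target the framebuffer directly.
    parser->finalize(onFlushRP);
    sk_sp<GrRenderTargetContext> rtc = atlas->finalize(onFlushRP, parser);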
@@ -300,8 +300,6 @@ skia_gpu_sources = [
  "$_src/gpu/ccpr/GrCCAtlas.h",
  "$_src/gpu/ccpr/GrCCClipProcessor.cpp",
  "$_src/gpu/ccpr/GrCCClipProcessor.h",
  "$_src/gpu/ccpr/GrCCCoverageOp.cpp",
  "$_src/gpu/ccpr/GrCCCoverageOp.h",
  "$_src/gpu/ccpr/GrCCCoverageProcessor.cpp",
  "$_src/gpu/ccpr/GrCCCoverageProcessor_GSImpl.cpp",
  "$_src/gpu/ccpr/GrCCCoverageProcessor_VSImpl.cpp",

@@ -310,6 +308,8 @@ skia_gpu_sources = [
  "$_src/gpu/ccpr/GrCCCubicShader.h",
  "$_src/gpu/ccpr/GrCCGeometry.cpp",
  "$_src/gpu/ccpr/GrCCGeometry.h",
  "$_src/gpu/ccpr/GrCCPathParser.cpp",
  "$_src/gpu/ccpr/GrCCPathParser.h",
  "$_src/gpu/ccpr/GrCCPathProcessor.cpp",
  "$_src/gpu/ccpr/GrCCPathProcessor.h",
  "$_src/gpu/ccpr/GrCCQuadraticShader.cpp",
@@ -15,6 +15,7 @@
#include "SkMakeUnique.h"
#include "SkMathPriv.h"
#include "ccpr/GrCCCoverageProcessor.h"
#include "ccpr/GrCCPathParser.h"
#include "ops/GrDrawOp.h"

class GrCCAtlas::Node {

@@ -41,6 +42,40 @@ private:
    GrRectanizerSkyline fRectanizer;
};

class GrCCAtlas::DrawCoverageCountOp : public GrDrawOp {
public:
    DEFINE_OP_CLASS_ID

    DrawCoverageCountOp(sk_sp<const GrCCPathParser> parser, CoverageCountBatchID batchID,
                        const SkISize& drawBounds)
            : INHERITED(ClassID())
            , fParser(std::move(parser))
            , fBatchID(batchID)
            , fDrawBounds(drawBounds) {
        this->setBounds(SkRect::MakeIWH(fDrawBounds.width(), fDrawBounds.height()),
                        GrOp::HasAABloat::kNo, GrOp::IsZeroArea::kNo);
    }

    // GrDrawOp interface.
    const char* name() const override { return "GrCCAtlas::DrawCoverageCountOp"; }
    FixedFunctionFlags fixedFunctionFlags() const override { return FixedFunctionFlags::kNone; }
    RequiresDstTexture finalize(const GrCaps&, const GrAppliedClip*,
                                GrPixelConfigIsClamped) override { return RequiresDstTexture::kNo; }
    bool onCombineIfPossible(GrOp* other, const GrCaps& caps) override { return false; }
    void onPrepare(GrOpFlushState*) override {}
    void onExecute(GrOpFlushState* flushState) override {
        fParser->drawCoverageCount(flushState, fBatchID,
                                   SkIRect::MakeWH(fDrawBounds.width(), fDrawBounds.height()));
    }

private:
    const sk_sp<const GrCCPathParser> fParser;
    const CoverageCountBatchID fBatchID;
    const SkISize fDrawBounds;

    typedef GrDrawOp INHERITED;
};

GrCCAtlas::GrCCAtlas(const GrCaps& caps, int minWidth, int minHeight)
        : fMaxAtlasSize(caps.maxRenderTargetSize()), fDrawBounds{0, 0} {
    SkASSERT(fMaxAtlasSize <= caps.maxTextureSize());

@@ -55,7 +90,8 @@ GrCCAtlas::GrCCAtlas(const GrCaps& caps, int minWidth, int minHeight)
GrCCAtlas::~GrCCAtlas() {}

bool GrCCAtlas::addRect(int w, int h, SkIPoint16* loc) {
    // This can't be called anymore once finalize() has been called.
    // This can't be called anymore once setCoverageCountBatchID() has been called.
    SkASSERT(!fCoverageCountBatchID);
    SkASSERT(!fTextureProxy);

    if (!this->internalPlaceRect(w, h, loc)) {

@@ -96,8 +132,9 @@ bool GrCCAtlas::internalPlaceRect(int w, int h, SkIPoint16* loc) {
    return true;
}

sk_sp<GrRenderTargetContext> GrCCAtlas::finalize(
        GrOnFlushResourceProvider* onFlushRP, std::unique_ptr<GrDrawOp> atlasOp) {
sk_sp<GrRenderTargetContext> GrCCAtlas::finalize(GrOnFlushResourceProvider* onFlushRP,
                                                 sk_sp<const GrCCPathParser> parser) {
    SkASSERT(fCoverageCountBatchID);
    SkASSERT(!fTextureProxy);

    GrSurfaceDesc desc;

@@ -113,7 +150,10 @@ sk_sp<GrRenderTargetContext> GrCCAtlas::finalize(
    SkIRect clearRect = SkIRect::MakeSize(fDrawBounds);
    rtc->clear(&clearRect, 0, GrRenderTargetContext::CanClearFullscreen::kYes);
    rtc->addDrawOp(GrNoClip(), std::move(atlasOp));

    auto op = skstd::make_unique<DrawCoverageCountOp>(std::move(parser), fCoverageCountBatchID,
                                                      fDrawBounds);
    rtc->addDrawOp(GrNoClip(), std::move(op));

    fTextureProxy = sk_ref_sp(rtc->asTextureProxy());
    return rtc;
@@ -12,6 +12,7 @@
#include "SkSize.h"

class GrCaps;
class GrCCPathParser;
class GrDrawOp;
class GrOnFlushResourceProvider;
class GrRenderTargetContext;

@@ -27,19 +28,28 @@ class GrCCAtlas {
public:
    static constexpr int kMinSize = 1024;

    using CoverageCountBatchID = int;

    GrCCAtlas(const GrCaps&, int minWidth, int minHeight);
    ~GrCCAtlas();

    bool addRect(int devWidth, int devHeight, SkIPoint16* loc);
    const SkISize& drawBounds() { return fDrawBounds; }

    sk_sp<GrRenderTargetContext> SK_WARN_UNUSED_RESULT
    finalize(GrOnFlushResourceProvider*, std::unique_ptr<GrDrawOp> atlasOp);
    void setCoverageCountBatchID(CoverageCountBatchID batchID) {
        SkASSERT(!fCoverageCountBatchID);
        SkASSERT(!fTextureProxy);
        fCoverageCountBatchID = batchID;
    }

    sk_sp<GrRenderTargetContext> SK_WARN_UNUSED_RESULT finalize(GrOnFlushResourceProvider*,
                                                                sk_sp<const GrCCPathParser>);

    GrTextureProxy* textureProxy() const { return fTextureProxy.get(); }

private:
    class Node;
    class DrawCoverageCountOp;

    bool internalPlaceRect(int w, int h, SkIPoint16* loc);

@@ -50,6 +60,7 @@ private:
    SkISize fDrawBounds;
    std::unique_ptr<Node> fTopNode;

    CoverageCountBatchID fCoverageCountBatchID SkDEBUGCODE(= 0);
    sk_sp<GrTextureProxy> fTextureProxy;
};
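The debug asserts in the new header enforce a strict ordering on the atlas: rects may only be
added until a batch ID is assigned, and finalize() may only run after that. A minimal sketch of
the intended call sequence (variable names are illustrative, not from the source):

    GrCCAtlas atlas(caps, minWidth, minHeight);

    SkIPoint16 loc;
    if (atlas.addRect(devWidth, devHeight, &loc)) {
        // record 'loc' as this path's atlas offset
    }

    // After this, addRect() asserts; after finalize(), both assert.
    atlas.setCoverageCountBatchID(batchID);
    sk_sp<GrRenderTargetContext> rtc = atlas.finalize(onFlushRP, std::move(parser));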
@@ -1,175 +0,0 @@
/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrCCCoverageOp_DEFINED
#define GrCCCoverageOp_DEFINED

#include "GrMesh.h"
#include "SkRect.h"
#include "SkRefCnt.h"
#include "ccpr/GrCCCoverageProcessor.h"
#include "ccpr/GrCCGeometry.h"
#include "ops/GrDrawOp.h"

class GrCCCoverageOp;
class GrOnFlushResourceProvider;
class SkMatrix;
class SkPath;

/**
 * This class produces GrCCCoverageOps that render coverage count masks and atlases. A path is
 * added to the current op in two steps:
 *
 *   1) parsePath(ScissorMode, viewMatrix, path, &devBounds, &devBounds45);
 *
 *   <client decides where to put the mask within an atlas, if wanted>
 *
 *   2) saveParsedPath(offsetX, offsetY, clipBounds);
 *
 * The client can flush the currently saved paths to a GrCCCoverageOp by calling emitOp, and
 * retrieve all emitted ops after calling finalize().
 */
class GrCCCoverageOpsBuilder {
public:
    // Indicates whether a path should enforce a scissor clip when rendering its mask. (Specified
    // as an int because these values get used directly as indices into arrays.)
    enum class ScissorMode : int { kNonScissored = 0, kScissored = 1 };
    static constexpr int kNumScissorModes = 2;

    GrCCCoverageOpsBuilder(int maxTotalPaths, int maxPathPoints, int numSkPoints, int numSkVerbs)
            : fPathsInfo(maxTotalPaths)
            , fLocalDevPtsBuffer(maxPathPoints + 1)  // Overallocate by one point to accomodate for
                                                     // overflow with Sk4f. (See parsePath.)
            , fGeometry(numSkPoints, numSkVerbs)
            , fTallies{PrimitiveTallies(), PrimitiveTallies()}
            , fScissorBatches(maxTotalPaths) {}

    ~GrCCCoverageOpsBuilder() {
        // Enforce the contract that the client always calls saveParsedPath or discardParsedPath.
        SkASSERT(!fParsingPath);
    }

    // Parses an SkPath into a temporary staging area. The path will not yet be included in the next
    // Op unless there is a matching call to saveParsedPath. The user must complement this with a
    // following call to either saveParsedPath or discardParsedPath.
    //
    // Returns two tight bounding boxes: device space and "45 degree" (| 1 -1 | * devCoords) space.
    //                                                                 | 1  1 |
    void parsePath(const SkMatrix&, const SkPath&, SkRect* devBounds, SkRect* devBounds45);

    // Parses a device-space SkPath into a temporary staging area. The path will not yet be included
    // in the next Op unless there is a matching call to saveParsedPath. The user must complement
    // this with a following call to either saveParsedPath or discardParsedPath.
    void parseDeviceSpacePath(const SkPath&);

    // Commits the currently-parsed path from staging to the next Op, and specifies whether the mask
    // should be rendered with a scissor clip in effect. Accepts an optional post-device-space
    // translate for placement in an atlas.
    void saveParsedPath(ScissorMode, const SkIRect& clippedDevIBounds, int16_t atlasOffsetX,
                        int16_t atlasOffsetY);
    void discardParsedPath();

    // Flushes all currently-saved paths internally to a GrCCCoverageOp.
    //
    // NOTE: if there is a parsed path in the staging area, it will not be included. But the client
    // may still call saveParsedPath to include it in a future Op.
    void emitOp(SkISize drawBounds);

    // Builds GPU buffers and returns the list of GrCCCoverageOps as specified by calls to emitOp.
    bool finalize(GrOnFlushResourceProvider*, SkTArray<std::unique_ptr<GrCCCoverageOp>>*);

private:
    using PrimitiveTallies = GrCCGeometry::PrimitiveTallies;

    // Every kBeginPath verb has a corresponding PathInfo entry.
    struct PathInfo {
        ScissorMode fScissorMode;
        int16_t fAtlasOffsetX, fAtlasOffsetY;
        std::unique_ptr<GrCCCoverageOp> fTerminatingOp;
    };

    // Every PathInfo with a mode of kScissored has a corresponding ScissorBatch.
    struct ScissorBatch {
        PrimitiveTallies fInstanceCounts;
        SkIRect fScissor;
    };

    void parsePath(const SkPath&, const SkPoint* deviceSpacePts);
    void endContourIfNeeded(bool insideContour);

    // Staging area for the path being parsed.
    SkDEBUGCODE(int fParsingPath = false);
    int fCurrPathPointsIdx;
    int fCurrPathVerbsIdx;
    PrimitiveTallies fCurrPathTallies;

    SkSTArray<32, PathInfo, true> fPathsInfo;

    const SkAutoSTArray<32, SkPoint> fLocalDevPtsBuffer;
    GrCCGeometry fGeometry;

    PrimitiveTallies fTallies[kNumScissorModes];
    SkTArray<ScissorBatch, true> fScissorBatches;

    std::unique_ptr<GrCCCoverageOp> fTerminatingOp;

    friend class GrCCCoverageOp;  // For ScissorBatch.
};

/**
 * This Op renders coverage count masks and atlases. Create it using GrCCCoverageOpsBuilder.
 */
class GrCCCoverageOp : public GrDrawOp {
public:
    DEFINE_OP_CLASS_ID

    // GrDrawOp interface.
    const char* name() const override { return "GrCCCoverageOp"; }
    FixedFunctionFlags fixedFunctionFlags() const override { return FixedFunctionFlags::kNone; }
    RequiresDstTexture finalize(const GrCaps&, const GrAppliedClip*,
                                GrPixelConfigIsClamped) override {
        return RequiresDstTexture::kNo;
    }
    bool onCombineIfPossible(GrOp* other, const GrCaps& caps) override { return false; }
    void onPrepare(GrOpFlushState*) override {}
    void onExecute(GrOpFlushState*) override;

private:
    static constexpr int kNumScissorModes = GrCCCoverageOpsBuilder::kNumScissorModes;
    using PrimitiveTallies = GrCCGeometry::PrimitiveTallies;
    using ScissorBatch = GrCCCoverageOpsBuilder::ScissorBatch;

    GrCCCoverageOp(SkTArray<ScissorBatch, true>&& scissorBatches, const SkISize& drawBounds)
            : INHERITED(ClassID())
            , fScissorBatches(std::move(scissorBatches))
            , fDrawBounds(drawBounds) {
        this->setBounds(SkRect::MakeIWH(fDrawBounds.width(), fDrawBounds.height()),
                        GrOp::HasAABloat::kNo, GrOp::IsZeroArea::kNo);
    }

    void setInstanceBuffer(sk_sp<GrBuffer> instanceBuffer,
                           const PrimitiveTallies baseInstances[kNumScissorModes],
                           const PrimitiveTallies endInstances[kNumScissorModes]);

    void drawMaskPrimitives(GrOpFlushState*, const GrPipeline&, GrCCCoverageProcessor::RenderPass,
                            int PrimitiveTallies::*instanceType) const;

    sk_sp<GrBuffer> fInstanceBuffer;
    PrimitiveTallies fBaseInstances[kNumScissorModes];
    PrimitiveTallies fInstanceCounts[kNumScissorModes];
    const SkTArray<ScissorBatch, true> fScissorBatches;
    const SkISize fDrawBounds;

    mutable SkTArray<GrMesh> fMeshesScratchBuffer;
    mutable SkTArray<GrPipeline::DynamicState, true> fDynamicStatesScratchBuffer;

    friend class GrCCCoverageOpsBuilder;

    typedef GrDrawOp INHERITED;
};

#endif
@@ -43,6 +43,7 @@ public:

    void operator+=(const PrimitiveTallies&);
    PrimitiveTallies operator-(const PrimitiveTallies&) const;
    bool operator==(const PrimitiveTallies&);
};

GrCCGeometry(int numSkPoints = 0, int numSkVerbs = 0)

@@ -138,4 +139,8 @@ inline GrCCGeometry::PrimitiveTallies::operator-(const PrimitiveTallies& b) const
            fCubics - b.fCubics};
}

inline bool GrCCGeometry::PrimitiveTallies::operator==(const PrimitiveTallies& b) {
    return fTriangles == b.fTriangles && fQuadratics == b.fQuadratics && fCubics == b.fCubics;
}

#endif
@@ -5,7 +5,7 @@
 * found in the LICENSE file.
 */

#include "GrCCCoverageOp.h"
#include "GrCCPathParser.h"

#include "GrCaps.h"
#include "GrGpuCommandBuffer.h"

@@ -20,8 +20,23 @@
using TriangleInstance = GrCCCoverageProcessor::TriangleInstance;
using CubicInstance = GrCCCoverageProcessor::CubicInstance;

void GrCCCoverageOpsBuilder::parsePath(const SkMatrix& m, const SkPath& path, SkRect* devBounds,
                                       SkRect* devBounds45) {
GrCCPathParser::GrCCPathParser(int maxTotalPaths, int maxPathPoints, int numSkPoints,
                               int numSkVerbs)
        : fLocalDevPtsBuffer(maxPathPoints + 1)  // Overallocate by one point to accomodate for
                                                 // overflow with Sk4f. (See parsePath.)
        , fGeometry(numSkPoints, numSkVerbs)
        , fPathsInfo(maxTotalPaths)
        , fScissorSubBatches(maxTotalPaths)
        , fTotalPrimitiveCounts{PrimitiveTallies(), PrimitiveTallies()} {
    // Batches decide what to draw by looking where the previous one ended. Define initial batches
    // that "end" at the beginning of the data. These will not be drawn, but will only be be read by
    // the first actual batch.
    fScissorSubBatches.push_back() = {PrimitiveTallies(), SkIRect::MakeEmpty()};
    fCoverageCountBatches.push_back() = {PrimitiveTallies(), fScissorSubBatches.count()};
}

void GrCCPathParser::parsePath(const SkMatrix& m, const SkPath& path, SkRect* devBounds,
                               SkRect* devBounds45) {
    const SkPoint* pts = SkPathPriv::PointData(path);
    int numPts = path.countPoints();
    SkASSERT(numPts + 1 <= fLocalDevPtsBuffer.count());

@@ -77,18 +92,19 @@ void GrCCCoverageOpsBuilder::parsePath(const SkMatrix& m, const SkPath& path, Sk
    this->parsePath(path, fLocalDevPtsBuffer.get());
}

void GrCCCoverageOpsBuilder::parseDeviceSpacePath(const SkPath& deviceSpacePath) {
void GrCCPathParser::parseDeviceSpacePath(const SkPath& deviceSpacePath) {
    this->parsePath(deviceSpacePath, SkPathPriv::PointData(deviceSpacePath));
}

void GrCCCoverageOpsBuilder::parsePath(const SkPath& path, const SkPoint* deviceSpacePts) {
    SkASSERT(!fParsingPath);
void GrCCPathParser::parsePath(const SkPath& path, const SkPoint* deviceSpacePts) {
    SkASSERT(!fInstanceBuffer);  // Can't call after finalize().
    SkASSERT(!fParsingPath);  // Call saveParsedPath() or discardParsedPath() for the last one first.
    SkDEBUGCODE(fParsingPath = true);
    SkASSERT(path.isEmpty() || deviceSpacePts);

    fCurrPathPointsIdx = fGeometry.points().count();
    fCurrPathVerbsIdx = fGeometry.verbs().count();
    fCurrPathTallies = PrimitiveTallies();
    fCurrPathPrimitiveCounts = PrimitiveTallies();

    fGeometry.beginPath();

@@ -134,45 +150,46 @@ void GrCCCoverageOpsBuilder::parsePath(const SkPath& path, const SkPoint* device
    this->endContourIfNeeded(insideContour);
}

void GrCCCoverageOpsBuilder::endContourIfNeeded(bool insideContour) {
void GrCCPathParser::endContourIfNeeded(bool insideContour) {
    if (insideContour) {
        fCurrPathTallies += fGeometry.endContour();
        fCurrPathPrimitiveCounts += fGeometry.endContour();
    }
}

void GrCCCoverageOpsBuilder::saveParsedPath(ScissorMode scissorMode,
                                            const SkIRect& clippedDevIBounds, int16_t atlasOffsetX,
                                            int16_t atlasOffsetY) {
void GrCCPathParser::saveParsedPath(ScissorMode scissorMode, const SkIRect& clippedDevIBounds,
                                    int16_t atlasOffsetX, int16_t atlasOffsetY) {
    SkASSERT(fParsingPath);

    fPathsInfo.push_back() = {scissorMode, atlasOffsetX, atlasOffsetY, std::move(fTerminatingOp)};

    fTallies[(int)scissorMode] += fCurrPathTallies;
    fPathsInfo.push_back() = {scissorMode, atlasOffsetX, atlasOffsetY};
    fTotalPrimitiveCounts[(int)scissorMode] += fCurrPathPrimitiveCounts;

    if (ScissorMode::kScissored == scissorMode) {
        fScissorBatches.push_back() = {fCurrPathTallies,
                                       clippedDevIBounds.makeOffset(atlasOffsetX, atlasOffsetY)};
        fScissorSubBatches.push_back() = {fTotalPrimitiveCounts[(int)ScissorMode::kScissored],
                                          clippedDevIBounds.makeOffset(atlasOffsetX, atlasOffsetY)};
    }

    SkDEBUGCODE(fParsingPath = false);
}

void GrCCCoverageOpsBuilder::discardParsedPath() {
void GrCCPathParser::discardParsedPath() {
    SkASSERT(fParsingPath);

    // The code will still work whether or not the below assertion is true. It is just unlikely that
    // the caller would want this, and probably indicative of of a mistake. (Why emit an
    // intermediate Op (to switch to a new atlas?), just to then throw the path away?)
    SkASSERT(!fTerminatingOp);

    fGeometry.resize_back(fCurrPathPointsIdx, fCurrPathVerbsIdx);
    SkDEBUGCODE(fParsingPath = false);
}

void GrCCCoverageOpsBuilder::emitOp(SkISize drawBounds) {
    SkASSERT(!fTerminatingOp);
    fTerminatingOp.reset(new GrCCCoverageOp(std::move(fScissorBatches), drawBounds));
    SkASSERT(fScissorBatches.empty());
GrCCPathParser::CoverageCountBatchID GrCCPathParser::closeCurrentBatch() {
    SkASSERT(!fInstanceBuffer);
    SkASSERT(!fCoverageCountBatches.empty());

    int maxMeshes = 1 + fScissorSubBatches.count() -
                    fCoverageCountBatches.back().fEndScissorSubBatchIdx;
    fMaxMeshesPerDraw = SkTMax(fMaxMeshesPerDraw, maxMeshes);

    fCoverageCountBatches.push_back() = {
        fTotalPrimitiveCounts[(int)ScissorMode::kNonScissored],
        fScissorSubBatches.count()
    };
    return fCoverageCountBatches.count() - 1;
}

// Emits a contour's triangle fan.
@@ -196,8 +213,8 @@ static TriangleInstance* emit_recursive_fan(const SkTArray<SkPoint, true>& pts,
        return out;
    }

    const int32_t oneThirdCount = indexCount / 3;
    const int32_t twoThirdsCount = (2 * indexCount) / 3;
    int32_t oneThirdCount = indexCount / 3;
    int32_t twoThirdsCount = (2 * indexCount) / 3;
    out++->set(pts[indices[firstIndex]], pts[indices[firstIndex + oneThirdCount]],
               pts[indices[firstIndex + twoThirdsCount]], atlasOffset);

@@ -215,49 +232,50 @@ static TriangleInstance* emit_recursive_fan(const SkTArray<SkPoint, true>& pts,
    return out;
}

bool GrCCCoverageOpsBuilder::finalize(GrOnFlushResourceProvider* onFlushRP,
                                      SkTArray<std::unique_ptr<GrCCCoverageOp>>* ops) {
    SkASSERT(!fParsingPath);
bool GrCCPathParser::finalize(GrOnFlushResourceProvider* onFlushRP) {
    SkASSERT(!fParsingPath);  // Call saveParsedPath() or discardParsedPath().
    SkASSERT(fCoverageCountBatches.back().fEndNonScissorIndices ==  // Call closeCurrentBatch().
             fTotalPrimitiveCounts[(int)ScissorMode::kNonScissored]);
    SkASSERT(fCoverageCountBatches.back().fEndScissorSubBatchIdx == fScissorSubBatches.count());

    // Here we build a single instance buffer to share with every draw call from every CoverageOP we
    // plan to produce.
    // Here we build a single instance buffer to share with every internal batch.
    //
    // CoverageOps process 4 different types of primitives (triangles, quadratics, serpentines,
    // loops), and each primitive type is further divided into instances that require a scissor and
    // those that don't. This leaves us with 8 independent instance arrays to build for the GPU.
    // CCPR processs 3 different types of primitives: triangles, quadratics, cubics. Each primitive
    // type is further divided into instances that require a scissor and those that don't. This
    // leaves us with 3*2 = 6 independent instance arrays to build for the GPU.
    //
    // Rather than placing each instance array in its own GPU buffer, we allocate a single
    // Rather than place each instance array in its own GPU buffer, we allocate a single
    // megabuffer and lay them all out side-by-side. We can offset the "baseInstance" parameter in
    // our draw calls to direct the GPU to the applicable elements within a given array.
    //
    // We already know how big to make each of the 8 arrays from fTallies[kNumScissorModes], so
    // layout is straightforward.
    PrimitiveTallies baseInstances[kNumScissorModes];
    // We already know how big to make each of the 6 arrays from fTotalPrimitiveCounts, so layout is
    // straightforward. Start with triangles and quadratics. They both view the instance buffer as
    // an array of TriangleInstance[], so we can begin at zero and lay them out one after the other.
    fBaseInstances[0].fTriangles = 0;
    fBaseInstances[1].fTriangles = fBaseInstances[0].fTriangles +
                                   fTotalPrimitiveCounts[0].fTriangles;
    fBaseInstances[0].fQuadratics = fBaseInstances[1].fTriangles +
                                    fTotalPrimitiveCounts[1].fTriangles;
    fBaseInstances[1].fQuadratics = fBaseInstances[0].fQuadratics +
                                    fTotalPrimitiveCounts[0].fQuadratics;
    int triEndIdx = fBaseInstances[1].fQuadratics + fTotalPrimitiveCounts[1].fQuadratics;

    // Start with triangles and quadratics. They both view the instance buffer as an array of
    // TriangleInstance[], so we can just start at zero and lay them out one after the other.
    baseInstances[0].fTriangles = 0;
    baseInstances[1].fTriangles = baseInstances[0].fTriangles + fTallies[0].fTriangles;
    baseInstances[0].fQuadratics = baseInstances[1].fTriangles + fTallies[1].fTriangles;
    baseInstances[1].fQuadratics = baseInstances[0].fQuadratics + fTallies[0].fQuadratics;
    int triEndIdx = baseInstances[1].fQuadratics + fTallies[1].fQuadratics;

    // Cubics (loops and serpentines) view the same instance buffer as an array of CubicInstance[].
    // So, reinterpreting the instance data as CubicInstance[], we start them on the first index
    // that will not overwrite previous TriangleInstance data.
    // Cubics view the same instance buffer as an array of CubicInstance[]. So, reinterpreting the
    // instance data as CubicInstance[], we start them on the first index that will not overwrite
    // previous TriangleInstance data.
    int cubicBaseIdx =
            GR_CT_DIV_ROUND_UP(triEndIdx * sizeof(TriangleInstance), sizeof(CubicInstance));
    baseInstances[0].fCubics = cubicBaseIdx;
    baseInstances[1].fCubics = baseInstances[0].fCubics + fTallies[0].fCubics;
    int cubicEndIdx = baseInstances[1].fCubics + fTallies[1].fCubics;
    fBaseInstances[0].fCubics = cubicBaseIdx;
    fBaseInstances[1].fCubics = fBaseInstances[0].fCubics + fTotalPrimitiveCounts[0].fCubics;
    int cubicEndIdx = fBaseInstances[1].fCubics + fTotalPrimitiveCounts[1].fCubics;

    sk_sp<GrBuffer> instanceBuffer =
            onFlushRP->makeBuffer(kVertex_GrBufferType, cubicEndIdx * sizeof(CubicInstance));
    if (!instanceBuffer) {
    fInstanceBuffer = onFlushRP->makeBuffer(kVertex_GrBufferType,
                                            cubicEndIdx * sizeof(CubicInstance));
    if (!fInstanceBuffer) {
        return false;
    }

    TriangleInstance* triangleInstanceData = static_cast<TriangleInstance*>(instanceBuffer->map());
    TriangleInstance* triangleInstanceData = static_cast<TriangleInstance*>(fInstanceBuffer->map());
    CubicInstance* cubicInstanceData = reinterpret_cast<CubicInstance*>(triangleInstanceData);
    SkASSERT(cubicInstanceData);
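As a concrete, purely hypothetical example of the layout arithmetic above: say the non-scissored
tallies are {10 triangles, 20 quadratics, 6 cubics} and the scissored tallies are {4, 8, 2}. Then,
viewing the buffer as TriangleInstance[]:

    fBaseInstances[0].fTriangles  = 0
    fBaseInstances[1].fTriangles  = 0 + 10  = 10
    fBaseInstances[0].fQuadratics = 10 + 4  = 14
    fBaseInstances[1].fQuadratics = 14 + 20 = 34
    triEndIdx                     = 34 + 8  = 42

The cubics then start at the first CubicInstance index whose byte offset does not fall below
42 * sizeof(TriangleInstance), i.e. cubicBaseIdx = GR_CT_DIV_ROUND_UP(42 * sizeof(TriangleInstance),
sizeof(CubicInstance)), with fBaseInstances[0].fCubics = cubicBaseIdx,
fBaseInstances[1].fCubics = cubicBaseIdx + 6, and the buffer sized for
cubicEndIdx = cubicBaseIdx + 8 CubicInstances.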
@@ -265,16 +283,10 @@ bool GrCCCoverageOpsBuilder::finalize(GrOnFlushResourceProvider* onFlushRP,
    float atlasOffsetX = 0.0, atlasOffsetY = 0.0;
    Sk2f atlasOffset;
    int ptsIdx = -1;
    PrimitiveTallies instanceIndices[2] = {baseInstances[0], baseInstances[1]};
    PrimitiveTallies instanceIndices[2] = {fBaseInstances[0], fBaseInstances[1]};
    PrimitiveTallies* currIndices = nullptr;
    SkSTArray<256, int32_t, true> currFan;

#ifdef SK_DEBUG
    int numScissoredPaths = 0;
    int numScissorBatches = 0;
    PrimitiveTallies initialBaseInstances[] = {baseInstances[0], baseInstances[1]};
#endif

    const SkTArray<SkPoint, true>& pts = fGeometry.points();

    // Expand the ccpr verbs into GPU instance buffers.

@@ -286,18 +298,6 @@ bool GrCCCoverageOpsBuilder::finalize(GrOnFlushResourceProvider* onFlushRP,
                atlasOffsetX = static_cast<float>(currPathInfo->fAtlasOffsetX);
                atlasOffsetY = static_cast<float>(currPathInfo->fAtlasOffsetY);
                atlasOffset = {atlasOffsetX, atlasOffsetY};
#ifdef SK_DEBUG
                if (ScissorMode::kScissored == currPathInfo->fScissorMode) {
                    ++numScissoredPaths;
                }
#endif
                if (auto op = std::move(currPathInfo->fTerminatingOp)) {
                    op->setInstanceBuffer(instanceBuffer, baseInstances, instanceIndices);
                    baseInstances[0] = instanceIndices[0];
                    baseInstances[1] = instanceIndices[1];
                    SkDEBUGCODE(numScissorBatches += op->fScissorBatches.count());
                    ops->push_back(std::move(op));
                }
                ++currPathInfo;
                continue;

@@ -345,37 +345,25 @@ bool GrCCCoverageOpsBuilder::finalize(GrOnFlushResourceProvider* onFlushRP,
        }
    }

    instanceBuffer->unmap();

    if (auto op = std::move(fTerminatingOp)) {
        op->setInstanceBuffer(std::move(instanceBuffer), baseInstances, instanceIndices);
        SkDEBUGCODE(numScissorBatches += op->fScissorBatches.count());
        ops->push_back(std::move(op));
    }
    fInstanceBuffer->unmap();

    SkASSERT(currPathInfo == fPathsInfo.end());
    SkASSERT(ptsIdx == pts.count() - 1);
    SkASSERT(numScissoredPaths == numScissorBatches);
    SkASSERT(instanceIndices[0].fTriangles == initialBaseInstances[1].fTriangles);
    SkASSERT(instanceIndices[1].fTriangles == initialBaseInstances[0].fQuadratics);
    SkASSERT(instanceIndices[0].fQuadratics == initialBaseInstances[1].fQuadratics);
    SkASSERT(instanceIndices[0].fTriangles == fBaseInstances[1].fTriangles);
    SkASSERT(instanceIndices[1].fTriangles == fBaseInstances[0].fQuadratics);
    SkASSERT(instanceIndices[0].fQuadratics == fBaseInstances[1].fQuadratics);
    SkASSERT(instanceIndices[1].fQuadratics == triEndIdx);
    SkASSERT(instanceIndices[0].fCubics == initialBaseInstances[1].fCubics);
    SkASSERT(instanceIndices[0].fCubics == fBaseInstances[1].fCubics);
    SkASSERT(instanceIndices[1].fCubics == cubicEndIdx);

    fMeshesScratchBuffer.reserve(fMaxMeshesPerDraw);
    fDynamicStatesScratchBuffer.reserve(fMaxMeshesPerDraw);

    return true;
}

void GrCCCoverageOp::setInstanceBuffer(sk_sp<GrBuffer> instanceBuffer,
                                       const PrimitiveTallies baseInstances[kNumScissorModes],
                                       const PrimitiveTallies endInstances[kNumScissorModes]) {
    fInstanceBuffer = std::move(instanceBuffer);
    fBaseInstances[0] = baseInstances[0];
    fBaseInstances[1] = baseInstances[1];
    fInstanceCounts[0] = endInstances[0] - baseInstances[0];
    fInstanceCounts[1] = endInstances[1] - baseInstances[1];
}

void GrCCCoverageOp::onExecute(GrOpFlushState* flushState) {
void GrCCPathParser::drawCoverageCount(GrOpFlushState* flushState, CoverageCountBatchID batchID,
                                       const SkIRect& drawBounds) const {
    using RenderPass = GrCCCoverageProcessor::RenderPass;

    SkASSERT(fInstanceBuffer);
@@ -383,76 +371,84 @@ void GrCCCoverageOp::onExecute(GrOpFlushState* flushState) {
    GrPipeline pipeline(flushState->drawOpArgs().fProxy, GrPipeline::ScissorState::kEnabled,
                        SkBlendMode::kPlus);

    fMeshesScratchBuffer.reserve(1 + fScissorBatches.count());
    fDynamicStatesScratchBuffer.reserve(1 + fScissorBatches.count());

    // Triangles.
    this->drawMaskPrimitives(flushState, pipeline, RenderPass::kTriangleHulls,
                             &PrimitiveTallies::fTriangles);
    this->drawMaskPrimitives(flushState, pipeline, RenderPass::kTriangleEdges,
                             &PrimitiveTallies::fTriangles);  // Might get skipped.
    this->drawMaskPrimitives(flushState, pipeline, RenderPass::kTriangleCorners,
                             &PrimitiveTallies::fTriangles);
    this->drawRenderPass(flushState, pipeline, batchID, RenderPass::kTriangleHulls,
                         &PrimitiveTallies::fTriangles, drawBounds);
    this->drawRenderPass(flushState, pipeline, batchID, RenderPass::kTriangleEdges,
                         &PrimitiveTallies::fTriangles, drawBounds);  // Might get skipped.
    this->drawRenderPass(flushState, pipeline, batchID, RenderPass::kTriangleCorners,
                         &PrimitiveTallies::fTriangles, drawBounds);

    // Quadratics.
    this->drawMaskPrimitives(flushState, pipeline, RenderPass::kQuadraticHulls,
                             &PrimitiveTallies::fQuadratics);
    this->drawMaskPrimitives(flushState, pipeline, RenderPass::kQuadraticCorners,
                             &PrimitiveTallies::fQuadratics);
    this->drawRenderPass(flushState, pipeline, batchID, RenderPass::kQuadraticHulls,
                         &PrimitiveTallies::fQuadratics, drawBounds);
    this->drawRenderPass(flushState, pipeline, batchID, RenderPass::kQuadraticCorners,
                         &PrimitiveTallies::fQuadratics, drawBounds);

    // Cubics.
    this->drawMaskPrimitives(flushState, pipeline, RenderPass::kCubicHulls,
                             &PrimitiveTallies::fCubics);
    this->drawMaskPrimitives(flushState, pipeline, RenderPass::kCubicCorners,
                             &PrimitiveTallies::fCubics);
    this->drawRenderPass(flushState, pipeline, batchID, RenderPass::kCubicHulls,
                         &PrimitiveTallies::fCubics, drawBounds);
    this->drawRenderPass(flushState, pipeline, batchID, RenderPass::kCubicCorners,
                         &PrimitiveTallies::fCubics, drawBounds);
}

void GrCCCoverageOp::drawMaskPrimitives(GrOpFlushState* flushState, const GrPipeline& pipeline,
                                        GrCCCoverageProcessor::RenderPass renderPass,
                                        int PrimitiveTallies::*instanceType) const {
    using ScissorMode = GrCCCoverageOpsBuilder::ScissorMode;
void GrCCPathParser::drawRenderPass(GrOpFlushState* flushState, const GrPipeline& pipeline,
                                    CoverageCountBatchID batchID,
                                    GrCCCoverageProcessor::RenderPass renderPass,
                                    int PrimitiveTallies::*instanceType,
                                    const SkIRect& drawBounds) const {
    SkASSERT(pipeline.getScissorState().enabled());

    if (!GrCCCoverageProcessor::DoesRenderPass(renderPass, *flushState->caps().shaderCaps())) {
        return;
    }

    fMeshesScratchBuffer.reset();
    fDynamicStatesScratchBuffer.reset();
    // Don't call reset(), as that also resets the reserve count.
    fMeshesScratchBuffer.pop_back_n(fMeshesScratchBuffer.count());
    fDynamicStatesScratchBuffer.pop_back_n(fDynamicStatesScratchBuffer.count());

    GrCCCoverageProcessor proc(flushState->resourceProvider(), renderPass,
                               *flushState->caps().shaderCaps());

    if (int instanceCount = fInstanceCounts[(int)ScissorMode::kNonScissored].*instanceType) {
    SkASSERT(batchID > 0);
    SkASSERT(batchID < fCoverageCountBatches.count());
    const CoverageCountBatch& previousBatch = fCoverageCountBatches[batchID - 1];
    const CoverageCountBatch& batch = fCoverageCountBatches[batchID];

    if (int instanceCount = batch.fEndNonScissorIndices.*instanceType -
                            previousBatch.fEndNonScissorIndices.*instanceType) {
        SkASSERT(instanceCount > 0);
        int baseInstance = fBaseInstances[(int)ScissorMode::kNonScissored].*instanceType;
        int baseInstance = fBaseInstances[(int)ScissorMode::kNonScissored].*instanceType +
                           previousBatch.fEndNonScissorIndices.*instanceType;
        proc.appendMesh(fInstanceBuffer.get(), instanceCount, baseInstance, &fMeshesScratchBuffer);
        fDynamicStatesScratchBuffer.push_back().fScissorRect.setXYWH(0, 0, fDrawBounds.width(),
                                                                     fDrawBounds.height());
        fDynamicStatesScratchBuffer.push_back().fScissorRect.setXYWH(0, 0, drawBounds.width(),
                                                                     drawBounds.height());
    }

    if (fInstanceCounts[(int)ScissorMode::kScissored].*instanceType) {
        int baseInstance = fBaseInstances[(int)ScissorMode::kScissored].*instanceType;
        for (const ScissorBatch& batch : fScissorBatches) {
            SkASSERT(this->bounds().contains(batch.fScissor));
            const int instanceCount = batch.fInstanceCounts.*instanceType;
            if (!instanceCount) {
                continue;
            }
            SkASSERT(instanceCount > 0);
            proc.appendMesh(fInstanceBuffer.get(), instanceCount, baseInstance,
                            &fMeshesScratchBuffer);
            fDynamicStatesScratchBuffer.push_back().fScissorRect = batch.fScissor;
            baseInstance += instanceCount;
    SkASSERT(previousBatch.fEndScissorSubBatchIdx > 0);
    SkASSERT(batch.fEndScissorSubBatchIdx <= fScissorSubBatches.count());
    int baseScissorInstance = fBaseInstances[(int)ScissorMode::kScissored].*instanceType;
    for (int i = previousBatch.fEndScissorSubBatchIdx; i < batch.fEndScissorSubBatchIdx; ++i) {
        const ScissorSubBatch& previousSubBatch = fScissorSubBatches[i - 1];
        const ScissorSubBatch& scissorSubBatch = fScissorSubBatches[i];
        int startIndex = previousSubBatch.fEndPrimitiveIndices.*instanceType;
        int instanceCount = scissorSubBatch.fEndPrimitiveIndices.*instanceType - startIndex;
        if (!instanceCount) {
            continue;
        }
        SkASSERT(instanceCount > 0);
        proc.appendMesh(fInstanceBuffer.get(), instanceCount,
                        baseScissorInstance + startIndex, &fMeshesScratchBuffer);
        fDynamicStatesScratchBuffer.push_back().fScissorRect = scissorSubBatch.fScissor;
    }

    SkASSERT(fMeshesScratchBuffer.count() == fDynamicStatesScratchBuffer.count());
    SkASSERT(fMeshesScratchBuffer.count() <= fMaxMeshesPerDraw);

    if (!fMeshesScratchBuffer.empty()) {
        SkASSERT(flushState->rtCommandBuffer());
        flushState->rtCommandBuffer()->draw(pipeline, proc, fMeshesScratchBuffer.begin(),
                                            fDynamicStatesScratchBuffer.begin(),
                                            fMeshesScratchBuffer.count(), this->bounds());
                                            fMeshesScratchBuffer.count(), SkRect::Make(drawBounds));
    }
}
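The batch bookkeeping is all "end indices"; a batch's start is simply the end of the batch before
it (which is why the constructor seeds a sentinel batch that ends at zero). As an illustrative
example with made-up numbers: if batch 1 ends with 7 non-scissored triangles and batch 2 ends with
12, then drawing batch 2's kTriangleHulls pass uses

    instanceCount = 12 - 7 = 5
    baseInstance  = fBaseInstances[kNonScissored].fTriangles + 7

and the scissored sub-batches between previousBatch.fEndScissorSubBatchIdx and
batch.fEndScissorSubBatchIdx are appended the same way, each with its own scissor rect.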
src/gpu/ccpr/GrCCPathParser.h (new file, 124 lines)

@@ -0,0 +1,124 @@
/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrCCPathParser_DEFINED
#define GrCCPathParser_DEFINED

#include "GrMesh.h"
#include "GrNonAtomicRef.h"
#include "SkRect.h"
#include "SkRefCnt.h"
#include "ccpr/GrCCCoverageProcessor.h"
#include "ccpr/GrCCGeometry.h"
#include "ops/GrDrawOp.h"

class GrOnFlushResourceProvider;
class SkMatrix;
class SkPath;

/**
 * This class parses SkPaths into CCPR primitives in GPU buffers, then issues calls to draw their
 * coverage counts.
 */
class GrCCPathParser : public GrNonAtomicRef<GrCCPathParser> {
public:
    // Indicates whether a path should enforce a scissor clip when rendering its mask. (Specified
    // as an int because these values get used directly as indices into arrays.)
    enum class ScissorMode : int { kNonScissored = 0, kScissored = 1 };
    static constexpr int kNumScissorModes = 2;

    GrCCPathParser(int maxTotalPaths, int maxPathPoints, int numSkPoints, int numSkVerbs);

    ~GrCCPathParser() {
        // Enforce the contract that the client always calls saveParsedPath or discardParsedPath.
        SkASSERT(!fParsingPath);
    }

    using CoverageCountBatchID = int;

    // Parses an SkPath into a temporary staging area. The path will not be included in the current
    // batch until there is a matching call to saveParsedPath. The user must complement this with a
    // following call to either saveParsedPath or discardParsedPath.
    //
    // Returns two tight bounding boxes: device space and "45 degree" (| 1 -1 | * devCoords) space.
    //                                                                 | 1  1 |
    void parsePath(const SkMatrix&, const SkPath&, SkRect* devBounds, SkRect* devBounds45);

    // Parses a device-space SkPath into a temporary staging area. The path will not be included in
    // the current batch until there is a matching call to saveParsedPath. The user must complement
    // this with a following call to either saveParsedPath or discardParsedPath.
    void parseDeviceSpacePath(const SkPath&);

    // Commits the currently-parsed path from staging to the current batch, and specifies whether
    // the mask should be rendered with a scissor in effect. Accepts an optional post-device-space
    // translate for placement in an atlas.
    void saveParsedPath(ScissorMode, const SkIRect& clippedDevIBounds, int16_t atlasOffsetX,
                        int16_t atlasOffsetY);
    void discardParsedPath();

    // Compiles the outstanding saved paths into a batch, and returns an ID that can be used to draw
    // their coverage counts in the future.
    CoverageCountBatchID closeCurrentBatch();

    // Builds internal GPU buffers and prepares for calls to drawCoverageCount. Caller must close
    // the current batch before calling this method, and cannot parse new paths afer.
    bool finalize(GrOnFlushResourceProvider*);

    // Called after finalize. Draws the given batch of parsed paths.
    void drawCoverageCount(GrOpFlushState*, CoverageCountBatchID, const SkIRect& drawBounds) const;

private:
    using PrimitiveTallies = GrCCGeometry::PrimitiveTallies;

    // Every kBeginPath verb has a corresponding PathInfo entry.
    struct PathInfo {
        ScissorMode fScissorMode;
        int16_t fAtlasOffsetX, fAtlasOffsetY;
    };

    // Defines a batch of CCPR primitives. Start indices are deduced by looking at the previous
    // CoverageCountBatch in the list.
    struct CoverageCountBatch {
        PrimitiveTallies fEndNonScissorIndices;
        int fEndScissorSubBatchIdx;
    };

    // Defines a sub-batch from CoverageCountBatch that will be drawn with the given scissor rect.
    // Start indices are deduced by looking at the previous ScissorSubBatch in the list.
    struct ScissorSubBatch {
        PrimitiveTallies fEndPrimitiveIndices;
        SkIRect fScissor;
    };

    void parsePath(const SkPath&, const SkPoint* deviceSpacePts);
    void endContourIfNeeded(bool insideContour);

    void drawRenderPass(GrOpFlushState*, const GrPipeline&, CoverageCountBatchID,
                        GrCCCoverageProcessor::RenderPass, int PrimitiveTallies::*instanceType,
                        const SkIRect& drawBounds) const;

    // Staging area for the path being parsed.
    SkDEBUGCODE(int fParsingPath = false);
    const SkAutoSTArray<32, SkPoint> fLocalDevPtsBuffer;
    int fCurrPathPointsIdx;
    int fCurrPathVerbsIdx;
    PrimitiveTallies fCurrPathPrimitiveCounts;

    GrCCGeometry fGeometry;
    SkSTArray<32, PathInfo, true> fPathsInfo;
    SkSTArray<32, CoverageCountBatch, true> fCoverageCountBatches;
    SkSTArray<32, ScissorSubBatch, true> fScissorSubBatches;
    PrimitiveTallies fTotalPrimitiveCounts[kNumScissorModes];
    int fMaxMeshesPerDraw = 0;

    sk_sp<GrBuffer> fInstanceBuffer;
    PrimitiveTallies fBaseInstances[kNumScissorModes];
    mutable SkSTArray<32, GrMesh> fMeshesScratchBuffer;
    mutable SkSTArray<32, GrPipeline::DynamicState> fDynamicStatesScratchBuffer;
};

#endif
@@ -277,6 +277,7 @@ void GrCoverageCountingPathRenderer::preFlush(GrOnFlushResourceProvider* onFlush
    SkASSERT(!fPerFlushIndexBuffer);
    SkASSERT(!fPerFlushVertexBuffer);
    SkASSERT(!fPerFlushInstanceBuffer);
    SkASSERT(!fPerFlushPathParser);
    SkASSERT(fPerFlushAtlases.empty());
    SkDEBUGCODE(fFlushing = true);

@@ -347,7 +348,8 @@ void GrCoverageCountingPathRenderer::preFlush(GrOnFlushResourceProvider* onFlush
    SkASSERT(pathInstanceData);
    int pathInstanceIdx = 0;

    GrCCCoverageOpsBuilder atlasOpsBuilder(maxTotalPaths, maxPathPoints, numSkPoints, numSkVerbs);
    fPerFlushPathParser = sk_make_sp<GrCCPathParser>(maxTotalPaths, maxPathPoints, numSkPoints,
                                                     numSkVerbs);
    SkDEBUGCODE(int skippedTotalPaths = 0);

    // Allocate atlas(es) and fill out GPU instance buffers.

@@ -362,14 +364,13 @@ void GrCoverageCountingPathRenderer::preFlush(GrOnFlushResourceProvider* onFlush
        drawOpsIter.init(rtPendingPaths.fDrawOps,
                         SkTInternalLList<DrawPathsOp>::Iter::kHead_IterStart);
        while (DrawPathsOp* op = drawOpsIter.get()) {
            pathInstanceIdx = op->setupResources(onFlushRP, &atlasOpsBuilder, pathInstanceData,
                                                 pathInstanceIdx);
            pathInstanceIdx = op->setupResources(onFlushRP, pathInstanceData, pathInstanceIdx);
            drawOpsIter.next();
            SkDEBUGCODE(skippedTotalPaths += op->numSkippedInstances_debugOnly());
        }

        for (auto& clipsIter : rtPendingPaths.fClipPaths) {
            clipsIter.second.placePathInAtlas(this, onFlushRP, &atlasOpsBuilder);
            clipsIter.second.placePathInAtlas(this, onFlushRP, fPerFlushPathParser.get());
        }
    }

@@ -378,36 +379,30 @@ void GrCoverageCountingPathRenderer::preFlush(GrOnFlushResourceProvider* onFlush
    SkASSERT(pathInstanceIdx == maxTotalPaths - skippedTotalPaths - numClipPaths);

    if (!fPerFlushAtlases.empty()) {
        atlasOpsBuilder.emitOp(fPerFlushAtlases.back().drawBounds());
        auto coverageCountBatchID = fPerFlushPathParser->closeCurrentBatch();
        fPerFlushAtlases.back().setCoverageCountBatchID(coverageCountBatchID);
    }

    SkSTArray<4, std::unique_ptr<GrCCCoverageOp>> atlasOps(fPerFlushAtlases.count());
    if (!atlasOpsBuilder.finalize(onFlushRP, &atlasOps)) {
        SkDebugf("WARNING: failed to allocate ccpr atlas buffers. No paths will be drawn.\n");
    if (!fPerFlushPathParser->finalize(onFlushRP)) {
        SkDebugf("WARNING: failed to allocate GPU buffers for CCPR. No paths will be drawn.\n");
        return;
    }
    SkASSERT(atlasOps.count() == fPerFlushAtlases.count());

    // Draw the coverage ops into their respective atlases.
    // Draw the atlas(es).
    GrTAllocator<GrCCAtlas>::Iter atlasIter(&fPerFlushAtlases);
    for (std::unique_ptr<GrCCCoverageOp>& atlasOp : atlasOps) {
        SkAssertResult(atlasIter.next());
        GrCCAtlas* atlas = atlasIter.get();
        SkASSERT(atlasOp->bounds() ==
                 SkRect::MakeIWH(atlas->drawBounds().width(), atlas->drawBounds().height()));
        if (auto rtc = atlas->finalize(onFlushRP, std::move(atlasOp))) {
    while (atlasIter.next()) {
        if (auto rtc = atlasIter.get()->finalize(onFlushRP, fPerFlushPathParser)) {
            results->push_back(std::move(rtc));
        }
    }
    SkASSERT(!atlasIter.next());

    fPerFlushResourcesAreValid = true;
}

int CCPR::DrawPathsOp::setupResources(GrOnFlushResourceProvider* onFlushRP,
                                      GrCCCoverageOpsBuilder* atlasOpsBuilder,
                                      GrCCPathProcessor::Instance* pathInstanceData,
                                      int pathInstanceIdx) {
    GrCCPathParser* parser = fCCPR->fPerFlushPathParser.get();
    const GrCCAtlas* currentAtlas = nullptr;
    SkASSERT(fInstanceCount > 0);
    SkASSERT(-1 == fBaseInstance);

@@ -418,14 +413,14 @@ int CCPR::DrawPathsOp::setupResources(GrOnFlushResourceProvider* onFlushRP,
        // one rotated an additional 45 degrees. The path vertex shader uses these two bounding
        // boxes to generate an octagon that circumscribes the path.
        SkRect devBounds, devBounds45;
        atlasOpsBuilder->parsePath(draw->fMatrix, draw->fPath, &devBounds, &devBounds45);
        parser->parsePath(draw->fMatrix, draw->fPath, &devBounds, &devBounds45);

        SkIRect devIBounds;
        devBounds.roundOut(&devIBounds);

        int16_t offsetX, offsetY;
        GrCCAtlas* atlas = fCCPR->placeParsedPathInAtlas(onFlushRP, draw->fClipIBounds, devIBounds,
                                                         &offsetX, &offsetY, atlasOpsBuilder);
                                                         &offsetX, &offsetY);
        if (!atlas) {
            SkDEBUGCODE(++fNumSkippedInstances);
            continue;
@@ -457,12 +452,12 @@ int CCPR::DrawPathsOp::setupResources(GrOnFlushResourceProvider* onFlushRP,

void CCPR::ClipPath::placePathInAtlas(GrCoverageCountingPathRenderer* ccpr,
                                      GrOnFlushResourceProvider* onFlushRP,
                                      GrCCCoverageOpsBuilder* atlasOpsBuilder) {
                                      GrCCPathParser* parser) {
    SkASSERT(!this->isUninitialized());
    SkASSERT(!fHasAtlas);
    atlasOpsBuilder->parseDeviceSpacePath(fDeviceSpacePath);
    parser->parseDeviceSpacePath(fDeviceSpacePath);
    fAtlas = ccpr->placeParsedPathInAtlas(onFlushRP, fAccessRect, fPathDevIBounds, &fAtlasOffsetX,
                                          &fAtlasOffsetY, atlasOpsBuilder);
                                          &fAtlasOffsetY);
    SkDEBUGCODE(fHasAtlas = true);
}

@@ -471,9 +466,8 @@ GrCCAtlas* GrCoverageCountingPathRenderer::placeParsedPathInAtlas(
        const SkIRect& clipIBounds,
        const SkIRect& pathIBounds,
        int16_t* atlasOffsetX,
        int16_t* atlasOffsetY,
        GrCCCoverageOpsBuilder* atlasOpsBuilder) {
    using ScissorMode = GrCCCoverageOpsBuilder::ScissorMode;
        int16_t* atlasOffsetY) {
    using ScissorMode = GrCCPathParser::ScissorMode;

    ScissorMode scissorMode;
    SkIRect clippedPathIBounds;

@@ -483,23 +477,25 @@ GrCCAtlas* GrCoverageCountingPathRenderer::placeParsedPathInAtlas(
    } else if (clippedPathIBounds.intersect(clipIBounds, pathIBounds)) {
        scissorMode = ScissorMode::kScissored;
    } else {
        atlasOpsBuilder->discardParsedPath();
        fPerFlushPathParser->discardParsedPath();
        return nullptr;
    }

    SkIPoint16 atlasLocation;
    const int h = clippedPathIBounds.height(), w = clippedPathIBounds.width();
    int h = clippedPathIBounds.height(), w = clippedPathIBounds.width();
    if (fPerFlushAtlases.empty() || !fPerFlushAtlases.back().addRect(w, h, &atlasLocation)) {
        if (!fPerFlushAtlases.empty()) {
            // The atlas is out of room and can't grow any bigger.
            atlasOpsBuilder->emitOp(fPerFlushAtlases.back().drawBounds());
            auto coverageCountBatchID = fPerFlushPathParser->closeCurrentBatch();
            fPerFlushAtlases.back().setCoverageCountBatchID(coverageCountBatchID);
        }
        fPerFlushAtlases.emplace_back(*onFlushRP->caps(), w, h).addRect(w, h, &atlasLocation);
    }

    *atlasOffsetX = atlasLocation.x() - static_cast<int16_t>(clippedPathIBounds.left());
    *atlasOffsetY = atlasLocation.y() - static_cast<int16_t>(clippedPathIBounds.top());
    atlasOpsBuilder->saveParsedPath(scissorMode, clippedPathIBounds, *atlasOffsetX, *atlasOffsetY);
    fPerFlushPathParser->saveParsedPath(scissorMode, clippedPathIBounds, *atlasOffsetX,
                                        *atlasOffsetY);

    return &fPerFlushAtlases.back();
}

@@ -553,6 +549,7 @@ void GrCoverageCountingPathRenderer::postFlush(GrDeferredUploadToken, const uint
                                               int numOpListIDs) {
    SkASSERT(fFlushing);
    fPerFlushAtlases.reset();
    fPerFlushPathParser.reset();
    fPerFlushInstanceBuffer.reset();
    fPerFlushVertexBuffer.reset();
    fPerFlushIndexBuffer.reset();
@@ -14,7 +14,7 @@
#include "GrPathRenderer.h"
#include "SkTInternalLList.h"
#include "ccpr/GrCCAtlas.h"
#include "ccpr/GrCCCoverageOp.h"
#include "ccpr/GrCCPathParser.h"
#include "ccpr/GrCCPathProcessor.h"
#include "ops/GrDrawOp.h"

@@ -77,7 +77,7 @@ public:
    void onPrepare(GrOpFlushState*) override {}
    void onExecute(GrOpFlushState*) override;

    int setupResources(GrOnFlushResourceProvider*, GrCCCoverageOpsBuilder*,
    int setupResources(GrOnFlushResourceProvider*,
                       GrCCPathProcessor::Instance* pathInstanceData, int pathInstanceIdx);

private:

@@ -156,7 +156,7 @@ public:
        return fPathDevIBounds;
    }
    void placePathInAtlas(GrCoverageCountingPathRenderer*, GrOnFlushResourceProvider*,
                          GrCCCoverageOpsBuilder*);
                          GrCCPathParser*);

    const SkVector& atlasScale() const {
        SkASSERT(fHasAtlasTransform);

@@ -201,7 +201,7 @@ private:
    GrCCAtlas* placeParsedPathInAtlas(GrOnFlushResourceProvider*, const SkIRect& accessRect,
                                      const SkIRect& pathIBounds, int16_t* atlasOffsetX,
                                      int16_t* atlasOffsetY, GrCCCoverageOpsBuilder*);
                                      int16_t* atlasOffsetY);

    struct RTPendingPaths {
        ~RTPendingPaths() {

@@ -221,6 +221,7 @@ private:
    sk_sp<const GrBuffer> fPerFlushIndexBuffer;
    sk_sp<const GrBuffer> fPerFlushVertexBuffer;
    sk_sp<GrBuffer> fPerFlushInstanceBuffer;
    sk_sp<GrCCPathParser> fPerFlushPathParser;
    GrSTAllocator<4, GrCCAtlas> fPerFlushAtlases;
    bool fPerFlushResourcesAreValid;
    SkDEBUGCODE(bool fFlushing = false);