Reland "Purge ccpr"

This is a reland of 7bf6bc0d06

Original change's description:
> Purge ccpr
>
> Now that the clip atlas has been successfully migrated to
> tessellation, we don't need this code anymore!
>
> Change-Id: Ic97f50cff7c4ee59f4476f8410f0b30a32df4e90
> Reviewed-on: https://skia-review.googlesource.com/c/skia/+/419857
> Reviewed-by: Robert Phillips <robertphillips@google.com>
> Reviewed-by: Brian Osman <brianosman@google.com>
> Commit-Queue: Chris Dalton <csmartdalton@google.com>

Change-Id: If0be86902e7cc4755eba91a89be1ec1a6a4b54b2
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/419720
Reviewed-by: Brian Osman <brianosman@google.com>
Commit-Queue: Brian Osman <brianosman@google.com>
Commit-Queue: Chris Dalton <csmartdalton@google.com>
Chris Dalton 2021-06-19 11:50:17 -06:00 committed by Skia Commit-Bot
parent 2421b9901b
commit 685e09b31a
49 changed files with 33 additions and 1480 deletions

View File

@@ -909,11 +909,6 @@ optional("gpu") {
sources =
skia_gpu_sources + skia_sksl_gpu_sources + skia_gpu_processor_outputs
if (!skia_enable_ccpr) {
sources -= skia_ccpr_sources
sources += [ "src/gpu/ccpr/GrCoverageCountingPathRenderer_none.cpp" ]
}
if (skia_enable_nga) {
sources += skia_nga_sources
}

View File

@@ -163,7 +163,6 @@ echo "Compiling bitcode"
${GN_GPU} \
\
skia_enable_skshaper=true \
skia_enable_ccpr=false \
skia_enable_nga=false \
skia_enable_pdf=false"

View File

@@ -110,7 +110,6 @@ echo "Compiling bitcode"
skia_gl_standard=\"webgl\" \
skia_enable_tools=false \
skia_enable_skshaper=false \
skia_enable_ccpr=false \
skia_enable_nga=false \
skia_enable_fontmgr_custom_directory=false \
skia_enable_fontmgr_custom_embedded=true \

View File

@@ -45,23 +45,11 @@ private:
SkISize onISize() override { return SkISize::Make(fStarSize * 2, fStarSize * 2); }
void modifyGrContextOptions(GrContextOptions* ctxOptions) override {
ctxOptions->fGpuPathRenderers = GpuPathRenderers::kCoverageCounting;
ctxOptions->fAllowPathMaskCaching = true;
}
DrawResult onDraw(GrRecordingContext* rContext, GrSurfaceDrawContext* rtc, SkCanvas* canvas,
SkString* errorMsg) override {
if (rtc->numSamples() > 1) {
errorMsg->set("ccpr is currently only used for coverage AA");
return DrawResult::kSkip;
}
auto* ccpr = rContext->priv().drawingManager()->getCoverageCountingPathRenderer();
if (!ccpr) {
errorMsg->set("ccpr only");
return DrawResult::kSkip;
}
auto dContext = GrAsDirectContext(rContext);
if (!dContext) {
*errorMsg = "Requires a direct context.";

View File

@@ -582,23 +582,6 @@ skia_gl_gpu_sources = [
"$_src/gpu/gl/builders/GrGLShaderStringBuilder.h",
]
skia_ccpr_sources = [
# coverage counting path renderer
"$_src/gpu/ccpr/GrCCAtlas.cpp",
"$_src/gpu/ccpr/GrCCAtlas.h",
"$_src/gpu/ccpr/GrCCClipPath.cpp",
"$_src/gpu/ccpr/GrCCClipPath.h",
"$_src/gpu/ccpr/GrCCClipProcessor.cpp",
"$_src/gpu/ccpr/GrCCClipProcessor.h",
"$_src/gpu/ccpr/GrCCPerFlushResources.cpp",
"$_src/gpu/ccpr/GrCCPerFlushResources.h",
"$_src/gpu/ccpr/GrCCPerOpsTaskPaths.h",
"$_src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp",
"$_src/gpu/ccpr/GrCoverageCountingPathRenderer.h",
]
skia_gpu_sources += skia_ccpr_sources
skia_null_gpu_sources = [ "$_src/gpu/gl/GrGLMakeNativeInterface_none.cpp" ]
skia_nga_sources = [

View File

@@ -13,7 +13,6 @@ declare_args() {
skia_compile_processors = false
skia_enable_api_available_macro = true
skia_enable_android_utils = is_skia_dev_build
skia_enable_ccpr = true
skia_enable_nga = false
skia_enable_oga = true
skia_enable_discrete_gpu = true

View File

@@ -426,7 +426,6 @@ pathops_tests_sources = [
]
oga_tests_sources = [
"$_tests/GrCCPRTest.cpp",
"$_tests/GrClipStackTest.cpp",
"$_tests/PathRendererCacheTests.cpp",
]

View File

@@ -20,7 +20,6 @@ class SkDeferredDisplayListPriv;
#include <map>
class GrRenderTask;
class GrRenderTargetProxy;
struct GrCCPerOpsTaskPaths;
#else
using GrRenderTargetProxy = SkRefCnt;
#endif
@@ -97,14 +96,6 @@ private:
const SkSurfaceCharacterization fCharacterization;
#if SK_SUPPORT_GPU
// This needs to match the same type in GrCoverageCountingPathRenderer.h
using PendingPathsMap = std::map<uint32_t, sk_sp<GrCCPerOpsTaskPaths>>;
// When programs are stored in 'fProgramData' their memory is actually allocated in
// 'fArenas.fRecordTimeAllocator'. In that case that arena must be freed before
// 'fPendingPaths' which relies on uniquely holding the atlas proxies used by the
// GrCCClipPaths.
PendingPathsMap fPendingPaths; // This is the path data from CCPR.
// These are ordered such that the destructor cleans op tasks up first (which may refer back
// to the arena and memory pool in their destructors).
GrRecordingContext::OwnedArenas fArenas;
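
The lifetime comment above relies on a general C++ rule worth spelling out: non-static data members are destroyed in reverse declaration order, so a member declared later may still safely use earlier members from its own destructor. A minimal standalone sketch (hypothetical types, not Skia code):

    #include <cstdio>

    struct Arenas   { ~Arenas()   { std::printf("arenas freed last\n"); } };
    struct OpsTasks { ~OpsTasks() { std::printf("op tasks cleaned up first\n"); } };

    struct DDL {
        Arenas   fArenas;    // declared first  -> destroyed second
        OpsTasks fOpsTasks;  // declared second -> destroyed first; may still
                             // reference fArenas while being destroyed
    };

    int main() { DDL ddl; }  // prints the OpsTasks line, then the Arenas line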

View File

@@ -259,7 +259,7 @@ func (b *taskBuilder) nanobenchFlags(doUpload bool) {
"--dmsaaStatsDump")
// Don't collect stats on the skps generated from vector content. We want these to actually
// trigger dmsaa.
match = append(match, "~svg", "~chalkboard", "~motionmark", "~ccpr")
match = append(match, "~svg", "~chalkboard", "~motionmark")
}
// We do not need or want to benchmark the decodes of incomplete images.

View File

@@ -22820,7 +22820,7 @@
"skia/infra/bots/run_recipe.py",
"${ISOLATED_OUTDIR}",
"perf",
"{\"$kitchen\":{\"devshell\":true,\"git_auth\":true},\"buildbucket_build_id\":\"<(BUILDBUCKET_BUILD_ID)\",\"buildername\":\"Perf-Ubuntu18-Clang-Golo-GPU-QuadroP400-x86_64-Release-All-DMSAAStats\",\"do_upload\":\"true\",\"nanobench_flags\":\"[\\\"nanobench\\\",\\\"--pre_log\\\",\\\"--gpuStatsDump\\\",\\\"true\\\",\\\"--scales\\\",\\\"1.0\\\",\\\"1.1\\\",\\\"--nocpu\\\",\\\"--config\\\",\\\"gl\\\",\\\"glsrgb\\\",\\\"glnarrow\\\",\\\"glmsaa8\\\",\\\"--sourceType\\\",\\\"skp\\\",\\\"--clip\\\",\\\"0,0,1600,16384\\\",\\\"--GPUbenchTileW\\\",\\\"1600\\\",\\\"--GPUbenchTileH\\\",\\\"512\\\",\\\"--samples\\\",\\\"1\\\",\\\"--loops\\\",\\\"1\\\",\\\"--config\\\",\\\"gldmsaa\\\",\\\"--dmsaaStatsDump\\\",\\\"--match\\\",\\\"~svg\\\",\\\"~chalkboard\\\",\\\"~motionmark\\\",\\\"~ccpr\\\",\\\"~inc0.gif\\\",\\\"~inc1.gif\\\",\\\"~incInterlaced.gif\\\",\\\"~inc0.jpg\\\",\\\"~incGray.jpg\\\",\\\"~inc0.wbmp\\\",\\\"~inc1.wbmp\\\",\\\"~inc0.webp\\\",\\\"~inc1.webp\\\",\\\"~inc0.ico\\\",\\\"~inc1.ico\\\",\\\"~inc0.png\\\",\\\"~inc1.png\\\",\\\"~inc2.png\\\",\\\"~inc12.png\\\",\\\"~inc13.png\\\",\\\"~inc14.png\\\",\\\"~inc0.webp\\\",\\\"~inc1.webp\\\",\\\"--key\\\",\\\"arch\\\",\\\"x86_64\\\",\\\"compiler\\\",\\\"Clang\\\",\\\"cpu_or_gpu\\\",\\\"GPU\\\",\\\"cpu_or_gpu_value\\\",\\\"QuadroP400\\\",\\\"extra_config\\\",\\\"DMSAAStats\\\",\\\"model\\\",\\\"Golo\\\",\\\"os\\\",\\\"Ubuntu18\\\"]\",\"nanobench_properties\":\"{\\\"gitHash\\\":\\\"<(REVISION)\\\",\\\"issue\\\":\\\"<(ISSUE)\\\",\\\"patch_storage\\\":\\\"<(PATCH_STORAGE)\\\",\\\"patchset\\\":\\\"<(PATCHSET)\\\",\\\"swarming_bot_id\\\":\\\"${SWARMING_BOT_ID}\\\",\\\"swarming_task_id\\\":\\\"${SWARMING_TASK_ID}\\\"}\",\"patch_issue\":\"<(ISSUE_INT)\",\"patch_ref\":\"<(PATCH_REF)\",\"patch_repo\":\"<(PATCH_REPO)\",\"patch_set\":\"<(PATCHSET_INT)\",\"patch_storage\":\"<(PATCH_STORAGE)\",\"repository\":\"<(REPO)\",\"resources\":\"true\",\"revision\":\"<(REVISION)\",\"skps\":\"true\",\"svgs\":\"true\",\"swarm_out_dir\":\"perf\",\"task_id\":\"<(TASK_ID)\"}",
"{\"$kitchen\":{\"devshell\":true,\"git_auth\":true},\"buildbucket_build_id\":\"<(BUILDBUCKET_BUILD_ID)\",\"buildername\":\"Perf-Ubuntu18-Clang-Golo-GPU-QuadroP400-x86_64-Release-All-DMSAAStats\",\"do_upload\":\"true\",\"nanobench_flags\":\"[\\\"nanobench\\\",\\\"--pre_log\\\",\\\"--gpuStatsDump\\\",\\\"true\\\",\\\"--scales\\\",\\\"1.0\\\",\\\"1.1\\\",\\\"--nocpu\\\",\\\"--config\\\",\\\"gl\\\",\\\"glsrgb\\\",\\\"glnarrow\\\",\\\"glmsaa8\\\",\\\"--sourceType\\\",\\\"skp\\\",\\\"--clip\\\",\\\"0,0,1600,16384\\\",\\\"--GPUbenchTileW\\\",\\\"1600\\\",\\\"--GPUbenchTileH\\\",\\\"512\\\",\\\"--samples\\\",\\\"1\\\",\\\"--loops\\\",\\\"1\\\",\\\"--config\\\",\\\"gldmsaa\\\",\\\"--dmsaaStatsDump\\\",\\\"--match\\\",\\\"~svg\\\",\\\"~chalkboard\\\",\\\"~motionmark\\\",\\\"~inc0.gif\\\",\\\"~inc1.gif\\\",\\\"~incInterlaced.gif\\\",\\\"~inc0.jpg\\\",\\\"~incGray.jpg\\\",\\\"~inc0.wbmp\\\",\\\"~inc1.wbmp\\\",\\\"~inc0.webp\\\",\\\"~inc1.webp\\\",\\\"~inc0.ico\\\",\\\"~inc1.ico\\\",\\\"~inc0.png\\\",\\\"~inc1.png\\\",\\\"~inc2.png\\\",\\\"~inc12.png\\\",\\\"~inc13.png\\\",\\\"~inc14.png\\\",\\\"~inc0.webp\\\",\\\"~inc1.webp\\\",\\\"--key\\\",\\\"arch\\\",\\\"x86_64\\\",\\\"compiler\\\",\\\"Clang\\\",\\\"cpu_or_gpu\\\",\\\"GPU\\\",\\\"cpu_or_gpu_value\\\",\\\"QuadroP400\\\",\\\"extra_config\\\",\\\"DMSAAStats\\\",\\\"model\\\",\\\"Golo\\\",\\\"os\\\",\\\"Ubuntu18\\\"]\",\"nanobench_properties\":\"{\\\"gitHash\\\":\\\"<(REVISION)\\\",\\\"issue\\\":\\\"<(ISSUE)\\\",\\\"patch_storage\\\":\\\"<(PATCH_STORAGE)\\\",\\\"patchset\\\":\\\"<(PATCHSET)\\\",\\\"swarming_bot_id\\\":\\\"${SWARMING_BOT_ID}\\\",\\\"swarming_task_id\\\":\\\"${SWARMING_TASK_ID}\\\"}\",\"patch_issue\":\"<(ISSUE_INT)\",\"patch_ref\":\"<(PATCH_REF)\",\"patch_repo\":\"<(PATCH_REPO)\",\"patch_set\":\"<(PATCHSET_INT)\",\"patch_storage\":\"<(PATCH_STORAGE)\",\"repository\":\"<(REPO)\",\"resources\":\"true\",\"revision\":\"<(REVISION)\",\"skps\":\"true\",\"svgs\":\"true\",\"swarm_out_dir\":\"perf\",\"task_id\":\"<(TASK_ID)\"}",
"skia"
],
"dependencies": [

View File

@@ -103,13 +103,13 @@ if [[ $@ == *no_skottie* ]]; then
SKOTTIE_BINDINGS=""
fi
GN_VIEWER="skia_use_expat=false skia_enable_ccpr=false skia_enable_nga=false"
GN_VIEWER="skia_use_expat=false skia_enable_nga=false"
VIEWER_BINDINGS=""
VIEWER_LIB=""
if [[ $@ == *viewer* ]]; then
echo "Including viewer"
GN_VIEWER="skia_use_expat=true skia_enable_ccpr=true skia_enable_nga=false"
GN_VIEWER="skia_use_expat=true skia_enable_nga=false"
VIEWER_BINDINGS="$BASE_DIR/viewer_bindings.cpp"
VIEWER_LIB="$BUILD_DIR/libviewer_wasm.a"
IS_OFFICIAL_BUILD="false"

View File

@@ -128,7 +128,6 @@ echo "Compiling bitcode"
${GN_GPU} \
${GN_FONT} \
skia_use_expat=true \
skia_enable_ccpr=true \
skia_enable_nga=false \
skia_enable_svg=true \
skia_enable_skshaper=true \

View File

@@ -223,7 +223,6 @@ BASE_SRCS_ALL = struct(
"src/utils/win/**/*",
# Exclude multiple definitions.
"src/gpu/ccpr/GrCoverageCountingPathRenderer_none.cpp",
"src/gpu/gl/GrGLMakeNativeInterface_none.cpp",
"src/pdf/SkDocument_PDF_None.cpp", # We use src/pdf/SkPDFDocument.cpp.

View File

@@ -36,7 +36,7 @@ static const char* verb_type_name(VerbType verbType) {
/**
* This sample visualizes simple strokes.
*/
class CCPRGeometryView : public Sample {
class StrokeVerbView : public Sample {
void onOnceBeforeDraw() override { this->updatePath(); }
void onDrawContent(SkCanvas*) override;
@@ -64,7 +64,7 @@ class CCPRGeometryView : public Sample {
SkPath fPath;
};
void CCPRGeometryView::onDrawContent(SkCanvas* canvas) {
void StrokeVerbView::onDrawContent(SkCanvas* canvas) {
canvas->clear(SK_ColorBLACK);
SkPaint outlinePaint;
@@ -104,7 +104,7 @@ void CCPRGeometryView::onDrawContent(SkCanvas* canvas) {
canvas->drawString(caption, 10, 30, font, captionPaint);
}
void CCPRGeometryView::updatePath() {
void StrokeVerbView::updatePath() {
fPath.reset();
fPath.moveTo(fPoints[0]);
switch (fVerbType) {
@@ -125,7 +125,7 @@ void CCPRGeometryView::updatePath() {
}
}
class CCPRGeometryView::Click : public Sample::Click {
class StrokeVerbView::Click : public Sample::Click {
public:
Click(int ptIdx) : fPtIdx(ptIdx) {}
@@ -143,7 +143,7 @@ private:
int fPtIdx;
};
Sample::Click* CCPRGeometryView::onFindClickHandler(SkScalar x, SkScalar y, skui::ModifierKey) {
Sample::Click* StrokeVerbView::onFindClickHandler(SkScalar x, SkScalar y, skui::ModifierKey) {
for (int i = 0; i < 4; ++i) {
if (VerbType::kCubics != fVerbType && 2 == i) {
continue;
@@ -155,14 +155,14 @@ Sample::Click* CCPRGeometryView::onFindClickHandler(SkScalar x, SkScalar y, skui
return new Click(-1);
}
bool CCPRGeometryView::onClick(Sample::Click* click) {
bool StrokeVerbView::onClick(Sample::Click* click) {
Click* myClick = (Click*)click;
myClick->doClick(fPoints);
this->updateAndInval();
return true;
}
bool CCPRGeometryView::onChar(SkUnichar unichar) {
bool StrokeVerbView::onChar(SkUnichar unichar) {
if (unichar >= '1' && unichar <= '4') {
fVerbType = VerbType(unichar - '1');
this->updateAndInval();
@@ -218,6 +218,6 @@ bool CCPRGeometryView::onChar(SkUnichar unichar) {
return false;
}
DEF_SAMPLE(return new CCPRGeometryView;)
DEF_SAMPLE(return new StrokeVerbView;)
#endif // SK_SUPPORT_GPU

View File

@@ -16,7 +16,6 @@ class SkSurfaceCharacterization;
#if SK_SUPPORT_GPU
#include "src/gpu/GrDirectContextPriv.h"
#include "src/gpu/GrRenderTask.h"
#include "src/gpu/ccpr/GrCCPerOpsTaskPaths.h"
#endif
SkDeferredDisplayList::SkDeferredDisplayList(const SkSurfaceCharacterization& characterization,

View File

@@ -1104,7 +1104,7 @@ void GrClipStack::SaveRecord::replaceWithElement(RawElement&& toAdd, RawElement:
// of the draws, with extra head room for more complex clips encountered in the wild.
//
// The mask stack increment size was chosen to be smaller since only 0.2% of the evaluated draw call
// set ever used a mask (which includes stencil masks), or up to 0.3% when CCPR is disabled.
// set ever used a mask (which includes stencil masks), or up to 0.3% when the atlas is disabled.
static constexpr int kElementStackIncrement = 8;
static constexpr int kSaveStackIncrement = 8;
static constexpr int kMaskStackIncrement = 4;
@@ -1453,7 +1453,7 @@ GrClip::Effect GrClipStack::apply(GrRecordingContext* context, GrSurfaceDrawCont
}
if (clipFP) {
// This will include all analytic FPs, all CCPR atlas FPs, and a SW mask FP.
// This will include all analytic FPs, all atlas FPs, and a SW mask FP.
out->addCoverageFP(std::move(clipFP));
}

View File

@@ -132,10 +132,9 @@ GrClip::Effect GrClipStackClip::apply(GrRecordingContext* context,
// We disable MSAA when stencil isn't supported.
SkASSERT(surfaceDrawContext->asRenderTargetProxy()->canUseStencil(*context->priv().caps()));
}
auto* ccpr = context->priv().drawingManager()->getCoverageCountingPathRenderer();
GrReducedClip reducedClip(*fStack, devBounds, context->priv().caps(), maxWindowRectangles,
maxAnalyticElements, ccpr ? maxAnalyticElements : 0);
maxAnalyticElements);
if (InitialState::kAllOut == reducedClip.initialState() &&
reducedClip.maskElements().isEmpty()) {
return Effect::kClippedOut;
@@ -164,7 +163,7 @@ GrClip::Effect GrClipStackClip::apply(GrRecordingContext* context,
// can cause a flush or otherwise change which opstask our draw is going into.
uint32_t opsTaskID = surfaceDrawContext->getOpsTask()->uniqueID();
auto [success, clipFPs] = reducedClip.finishAndDetachAnalyticElements(context, *fMatrixProvider,
ccpr, opsTaskID);
opsTaskID);
if (success) {
out->addCoverageFP(std::move(clipFPs));
effect = Effect::kClipped;

View File

@@ -21,7 +21,6 @@
#include "src/gpu/GrResourceProvider.h"
#include "src/gpu/GrShaderUtils.h"
#include "src/gpu/GrSurfaceContext.h"
#include "src/gpu/ccpr/GrCoverageCountingPathRenderer.h"
#include "src/gpu/effects/GrSkSLFP.h"
#include "src/gpu/gl/GrGLGpu.h"
#include "src/gpu/mock/GrMockGpu.h"

View File

@@ -42,7 +42,6 @@
#include "src/gpu/GrTransferFromRenderTask.h"
#include "src/gpu/GrWaitRenderTask.h"
#include "src/gpu/GrWritePixelsRenderTask.h"
#include "src/gpu/ccpr/GrCoverageCountingPathRenderer.h"
#include "src/gpu/text/GrSDFTControl.h"
#include "src/image/SkSurface_Gpu.h"
@@ -601,14 +600,6 @@ void GrDrawingManager::moveRenderTasksToDDL(SkDeferredDisplayList* ddl) {
fContext->priv().detachProgramData(&ddl->fProgramData);
#if GR_OGA
if (fPathRendererChain) {
if (auto ccpr = fPathRendererChain->getCoverageCountingPathRenderer()) {
ddl->fPendingPaths = ccpr->detachPendingPaths();
}
}
#endif
SkDEBUGCODE(this->validate());
}
@@ -643,14 +634,6 @@ void GrDrawingManager::createDDLTask(sk_sp<const SkDeferredDisplayList> ddl,
// The lazy proxy that references it (in the DDL opsTasks) will then steal its GrTexture.
ddl->fLazyProxyData->fReplayDest = newDest.get();
#if GR_OGA
if (ddl->fPendingPaths.size()) {
GrCoverageCountingPathRenderer* ccpr = this->getCoverageCountingPathRenderer();
ccpr->mergePendingPaths(ddl->fPendingPaths);
}
#endif
// Add a task to handle drawing and lifetime management of the DDL.
SkDEBUGCODE(auto ddlTask =) this->appendTask(sk_make_sp<GrDDLTask>(this,
std::move(newDest),
@@ -940,14 +923,6 @@ GrPathRenderer* GrDrawingManager::getSoftwarePathRenderer() {
return fSoftwarePathRenderer.get();
}
GrCoverageCountingPathRenderer* GrDrawingManager::getCoverageCountingPathRenderer() {
if (!fPathRendererChain) {
fPathRendererChain = std::make_unique<GrPathRendererChain>(fContext,
fOptionsForPathRendererChain);
}
return fPathRendererChain->getCoverageCountingPathRenderer();
}
GrTessellationPathRenderer* GrDrawingManager::getTessellationPathRenderer() {
if (!fPathRendererChain) {
fPathRendererChain = std::make_unique<GrPathRendererChain>(fContext,

View File

@@ -27,7 +27,6 @@
#define GR_PATH_RENDERER_SPEW 0
class GrArenas;
class GrCoverageCountingPathRenderer;
class GrGpuBuffer;
class GrOnFlushCallbackObject;
class GrOpFlushState;
@@ -114,10 +113,6 @@ public:
GrPathRenderer* getSoftwarePathRenderer();
// Returns a direct pointer to the coverage counting path renderer, or null if it is not
// supported or not turned on.
GrCoverageCountingPathRenderer* getCoverageCountingPathRenderer();
// Returns a direct pointer to the tessellation path renderer, or null if it is not supported
// or not turned on.
GrTessellationPathRenderer* getTessellationPathRenderer();

View File

@@ -276,9 +276,8 @@ private:
// the shared state once and then issue draws for each mesh.
struct Draw {
~Draw();
// The geometry processor is always forced to be in an arena allocation or appears on
// the stack (for CCPR). In either case this object does not need to manage its
// lifetime.
// The geometry processor is always forced to be in an arena allocation. This object does
// not need to manage its lifetime.
const GrGeometryProcessor* fGeometryProcessor = nullptr;
// Must have GrGeometryProcessor::numTextureSamplers() entries. Can be null if no samplers.
const GrSurfaceProxy* const* fGeomProcProxies = nullptr;

View File

@@ -15,7 +15,6 @@
#include "src/gpu/GrGpu.h"
#include "src/gpu/GrRecordingContextPriv.h"
#include "src/gpu/GrShaderCaps.h"
#include "src/gpu/ccpr/GrCoverageCountingPathRenderer.h"
#include "src/gpu/geometry/GrStyledShape.h"
#include "src/gpu/ops/GrAAConvexPathRenderer.h"
#include "src/gpu/ops/GrAAHairLinePathRenderer.h"
@@ -34,14 +33,6 @@ GrPathRendererChain::GrPathRendererChain(GrRecordingContext* context, const Opti
if (options.fGpuPathRenderers & GpuPathRenderers::kAAConvex) {
fChain.push_back(sk_make_sp<GrAAConvexPathRenderer>());
}
if (options.fGpuPathRenderers & GpuPathRenderers::kCoverageCounting) {
fCoverageCountingPathRenderer = GrCoverageCountingPathRenderer::CreateIfSupported(context);
if (fCoverageCountingPathRenderer) {
// Don't add to the chain. This is only for clips.
// TODO: Remove from here.
context->priv().addOnFlushCallbackObject(fCoverageCountingPathRenderer.get());
}
}
if (options.fGpuPathRenderers & GpuPathRenderers::kAAHairline) {
fChain.push_back(sk_make_sp<GrAAHairLinePathRenderer>());
}

View File

@@ -15,7 +15,6 @@
#include "include/private/SkNoncopyable.h"
#include "include/private/SkTArray.h"
class GrCoverageCountingPathRenderer;
class GrTessellationPathRenderer;
/**
@@ -48,12 +47,6 @@ public:
DrawType drawType,
GrPathRenderer::StencilSupport* stencilSupport);
/** Returns a direct pointer to the coverage counting path renderer, or null if it is not in the
chain. */
GrCoverageCountingPathRenderer* getCoverageCountingPathRenderer() {
return fCoverageCountingPathRenderer.get();
}
/** Returns a direct pointer to the tessellation path renderer, or null if it is not in the
chain. */
GrTessellationPathRenderer* getTessellationPathRenderer() {
@@ -65,7 +58,6 @@ private:
kPreAllocCount = 8,
};
SkSTArray<kPreAllocCount, sk_sp<GrPathRenderer>> fChain;
std::unique_ptr<GrCoverageCountingPathRenderer> fCoverageCountingPathRenderer;
GrTessellationPathRenderer* fTessellationPathRenderer = nullptr;
};

View File

@@ -58,8 +58,6 @@ public:
kGrAlphaThresholdFragmentProcessor_ClassID,
kGrBicubicEffect_ClassID,
kGrBitmapTextGeoProc_ClassID,
kGrCCClipProcessor_ClassID,
kGrCCPathProcessor_ClassID,
kGrCircleBlurFragmentProcessor_ClassID,
kGrCircleEffect_ClassID,
kGrClampedGradientEffect_ClassID,

View File

@@ -20,7 +20,6 @@
#include "src/gpu/GrStyle.h"
#include "src/gpu/GrSurfaceDrawContext.h"
#include "src/gpu/GrUserStencilSettings.h"
#include "src/gpu/ccpr/GrCoverageCountingPathRenderer.h"
#include "src/gpu/effects/GrConvexPolyEffect.h"
#include "src/gpu/effects/GrRRectEffect.h"
#include "src/gpu/effects/generated/GrAARectEffect.h"
@@ -36,15 +35,12 @@
* take a rect in case the caller knows a bound on what is to be drawn through this clip.
*/
GrReducedClip::GrReducedClip(const SkClipStack& stack, const SkRect& queryBounds,
const GrCaps* caps, int maxWindowRectangles, int maxAnalyticElements,
int maxCCPRClipPaths)
const GrCaps* caps, int maxWindowRectangles, int maxAnalyticElements)
: fCaps(caps)
, fMaxWindowRectangles(maxWindowRectangles)
, fMaxAnalyticElements(maxAnalyticElements)
, fMaxCCPRClipPaths(maxCCPRClipPaths) {
, fMaxAnalyticElements(maxAnalyticElements) {
SkASSERT(!queryBounds.isEmpty());
SkASSERT(fMaxWindowRectangles <= GrWindowRectangles::kMaxWindows);
SkASSERT(fMaxCCPRClipPaths <= fMaxAnalyticElements);
if (stack.isWideOpen()) {
fInitialState = InitialState::kAllIn;
@@ -696,19 +692,6 @@ GrReducedClip::ClipResult GrReducedClip::addAnalyticPath(const SkPath& deviceSpa
return ClipResult::kClipped;
}
if (fCCPRClipPaths.count() < fMaxCCPRClipPaths && GrAA::kYes == aa) {
const SkRect& bounds = deviceSpacePath.getBounds();
if (bounds.height() * bounds.width() <= GrCoverageCountingPathRenderer::kMaxClipPathArea) {
// Set aside CCPR paths for later. We will create their clip FPs once we know the ID of
// the opsTask they will operate in.
SkPath& ccprClipPath = fCCPRClipPaths.push_back(deviceSpacePath);
if (Invert::kYes == invert) {
ccprClipPath.toggleInverseFillType();
}
return ClipResult::kClipped;
}
}
return ClipResult::kNotClipped;
}
@@ -721,7 +704,6 @@ void GrReducedClip::makeEmpty() {
fInitialState = InitialState::kAllOut;
fAnalyticFP = nullptr;
fNumAnalyticElements = 0;
fCCPRClipPaths.reset();
}
////////////////////////////////////////////////////////////////////////////////
@@ -899,29 +881,12 @@ bool GrReducedClip::drawStencilClipMask(GrRecordingContext* context,
}
int GrReducedClip::numAnalyticElements() const {
return fCCPRClipPaths.size() + fNumAnalyticElements;
return fNumAnalyticElements;
}
GrFPResult GrReducedClip::finishAndDetachAnalyticElements(GrRecordingContext* context,
const SkMatrixProvider& matrixProvider,
GrCoverageCountingPathRenderer* ccpr,
uint32_t opsTaskID) {
// Combine the analytic FP with any CCPR clip processors.
std::unique_ptr<GrFragmentProcessor> clipFP = std::move(fAnalyticFP);
fNumAnalyticElements = 0;
for (const SkPath& ccprClipPath : fCCPRClipPaths) {
SkASSERT(ccpr);
SkASSERT(fHasScissor);
bool success;
std::tie(success, clipFP) = ccpr->makeClipProcessor(std::move(clipFP), opsTaskID,
ccprClipPath, fScissor, *fCaps);
if (!success) {
return GrFPFailure(nullptr);
}
}
fCCPRClipPaths.reset();
// Create the shader.
std::unique_ptr<GrFragmentProcessor> shaderFP;
if (fShader != nullptr) {
@@ -935,5 +900,5 @@ GrFPResult GrReducedClip::finishAndDetachAnalyticElements(GrRecordingContext* co
}
// Compose the clip and shader FPs.
return GrFPSuccess(GrFragmentProcessor::Compose(std::move(shaderFP), std::move(clipFP)));
return GrFPSuccess(GrFragmentProcessor::Compose(std::move(shaderFP), std::move(fAnalyticFP)));
}

View File

@@ -13,7 +13,6 @@
#include "src/gpu/GrFragmentProcessor.h"
#include "src/gpu/GrWindowRectangles.h"
class GrCoverageCountingPathRenderer;
class GrRecordingContext;
class GrSurfaceDrawContext;
@@ -27,8 +26,7 @@ public:
using ElementList = SkTLList<SkClipStack::Element, 16>;
GrReducedClip(const SkClipStack&, const SkRect& queryBounds, const GrCaps* caps,
int maxWindowRectangles = 0, int maxAnalyticElements = 0,
int maxCCPRClipPaths = 0);
int maxWindowRectangles = 0, int maxAnalyticElements = 0);
enum class InitialState : bool {
kAllIn,
@@ -105,8 +103,7 @@ public:
* may cause flushes or otherwise change which opsTask the actual draw is going into.
*/
GrFPResult finishAndDetachAnalyticElements(GrRecordingContext*, const SkMatrixProvider&
matrixProvider, GrCoverageCountingPathRenderer*,
uint32_t opsTaskID);
matrixProvider, uint32_t opsTaskID);
private:
void walkStack(const SkClipStack&, const SkRect& queryBounds);
@@ -142,7 +139,6 @@ private:
const GrCaps* fCaps;
const int fMaxWindowRectangles;
const int fMaxAnalyticElements;
const int fMaxCCPRClipPaths;
InitialState fInitialState;
SkIRect fScissor;
@@ -156,7 +152,6 @@ private:
bool fMaskRequiresAA = false;
std::unique_ptr<GrFragmentProcessor> fAnalyticFP;
int fNumAnalyticElements = 0;
SkSTArray<4, SkPath> fCCPRClipPaths; // Converted to FPs once we have an opsTask ID for CCPR.
// Will be the combination of all kShader elements or null if there's no clip shader.
// Does not count against the analytic FP limit.
sk_sp<SkShader> fShader;

View File

@@ -141,8 +141,8 @@ protected:
// The basic knowledge version is used for DDL where we know the type of proxy we are going to
// use, but we don't have access to the GPU yet to instantiate it.
//
// The minimal knowledge version is used for CCPR where we are generating an atlas but we do not
// know the final size until flush time.
// The minimal knowledge version is used for when we are generating an atlas but we do not know
// the final size until we have finished adding to it.
GrRenderTargetProxy(LazyInstantiateCallback&&,
const GrBackendFormat&,
SkISize,

View File

@@ -26,7 +26,6 @@
class GrBackendSemaphore;
class GrClip;
class GrColorSpaceXform;
class GrCoverageCountingPathRenderer;
class GrDrawOp;
class GrDstProxyView;
class GrHardClip;

View File

@@ -128,8 +128,8 @@ protected:
// The basic knowledge version is used for DDL where we know the type of proxy we are going to
// use, but we don't have access to the GPU yet to instantiate it.
//
// The minimal knowledge version is used for CCPR where we are generating an atlas but we do not
// know the final size until flush time.
// The minimal knowledge version is used for when we are generating an atlas but we do not know
// the final size until we have finished adding to it.
GrTextureProxy(LazyInstantiateCallback&&,
const GrBackendFormat&,
SkISize,

View File

@@ -1,40 +0,0 @@
/*
* Copyright 2017 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "src/gpu/ccpr/GrCCAtlas.h"
#include "include/private/SkTPin.h"
#include "src/core/SkIPoint16.h"
#include "src/gpu/GrOnFlushResourceProvider.h"
static SkISize choose_initial_atlas_size(const GrCCAtlas::Specs& specs) {
// Begin with the first pow2 dimensions whose area is theoretically large enough to contain the
// pending paths, favoring height over width if necessary.
int log2area = SkNextLog2(std::max(specs.fApproxNumPixels, 1));
int height = 1 << ((log2area + 1) / 2);
int width = 1 << (log2area / 2);
width = SkTPin(width, specs.fMinTextureSize, specs.fMaxPreferredTextureSize);
height = SkTPin(height, specs.fMinTextureSize, specs.fMaxPreferredTextureSize);
return SkISize::Make(width, height);
}
static int choose_max_atlas_size(const GrCCAtlas::Specs& specs, const GrCaps& caps) {
return (std::max(specs.fMinHeight, specs.fMinWidth) <= specs.fMaxPreferredTextureSize) ?
specs.fMaxPreferredTextureSize : caps.maxRenderTargetSize();
}
GrCCAtlas::GrCCAtlas(const Specs& specs, const GrCaps& caps)
: GrDynamicAtlas(GrColorType::kAlpha_8, InternalMultisample::kYes,
choose_initial_atlas_size(specs), choose_max_atlas_size(specs, caps),
caps) {
SkASSERT(specs.fMaxPreferredTextureSize > 0);
}
GrCCAtlas::~GrCCAtlas() {
}
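
To make the deleted sizing heuristic concrete, here is a minimal standalone sketch. It assumes SkNextLog2(n) returns the smallest k with 2^k >= n (a local nextLog2 stands in for it) and omits the SkTPin clamping to the min/max texture sizes:

    #include <algorithm>
    #include <cstdio>

    static int nextLog2(int n) {  // stand-in for SkNextLog2 (assumed ceil-log2)
        int log2 = 0;
        while ((1 << log2) < n) { ++log2; }
        return log2;
    }

    int main() {
        int approxNumPixels = 100000;  // hypothetical Specs::fApproxNumPixels
        int log2area = nextLog2(std::max(approxNumPixels, 1));
        int height = 1 << ((log2area + 1) / 2);  // odd exponent: the extra
        int width  = 1 << (log2area / 2);        // factor of 2 goes to height
        std::printf("%d px -> %dx%d (area %d)\n",
                    approxNumPixels, width, height, width * height);
        // Prints: 100000 px -> 256x512 (area 131072)
    }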

View File

@@ -1,54 +0,0 @@
/*
* Copyright 2017 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef GrCCAtlas_DEFINED
#define GrCCAtlas_DEFINED
#include "src/gpu/GrDynamicAtlas.h"
#include "src/gpu/GrTBlockList.h"
class GrCCCachedAtlas;
/**
* GrDynamicAtlas with CCPR caching capabilities.
*/
class GrCCAtlas : public GrDynamicAtlas {
public:
// This struct encapsulates the minimum and desired requirements for an atlas, as well as an
// approximate number of pixels to help select a good initial size.
struct Specs {
int fMaxPreferredTextureSize = 0;
int fMinTextureSize = 0;
int fMinWidth = 0; // If there are 100 20x10 paths, this should be 20.
int fMinHeight = 0; // If there are 100 20x10 paths, this should be 10.
int fApproxNumPixels = 0;
// Add space for a rect in the desired atlas specs.
void accountForSpace(int width, int height);
};
static sk_sp<GrTextureProxy> MakeLazyAtlasProxy(LazyInstantiateAtlasCallback&& callback,
const GrCaps& caps,
GrSurfaceProxy::UseAllocator useAllocator) {
return GrDynamicAtlas::MakeLazyAtlasProxy(std::move(callback),
GrColorType::kAlpha_8,
InternalMultisample::kYes,
caps,
useAllocator);
}
GrCCAtlas(const Specs&, const GrCaps&);
~GrCCAtlas() override;
};
inline void GrCCAtlas::Specs::accountForSpace(int width, int height) {
fMinWidth = std::max(width, fMinWidth);
fMinHeight = std::max(height, fMinHeight);
fApproxNumPixels += (width + kPadding) * (height + kPadding);
}
#endif
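
For intuition about the accumulation above, with hypothetical numbers (and assuming GrDynamicAtlas::kPadding is one texel): after 100 calls to accountForSpace(20, 10), the specs hold fMinWidth == 20, fMinHeight == 10, and fApproxNumPixels == 100 * (20 + 1) * (10 + 1) == 23,100, matching the 20x10 example in the field comments above.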

View File

@@ -1,48 +0,0 @@
/*
* Copyright 2018 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "src/gpu/ccpr/GrCCClipPath.h"
#include "src/gpu/GrOnFlushResourceProvider.h"
#include "src/gpu/GrProxyProvider.h"
#include "src/gpu/GrRenderTarget.h"
#include "src/gpu/GrTexture.h"
#include "src/gpu/ccpr/GrCCPerFlushResources.h"
GrCCClipPath::GrCCClipPath(const SkPath& deviceSpacePath, const SkIRect& accessRect,
const GrCaps& caps)
: fDeviceSpacePath(deviceSpacePath)
, fPathDevIBounds(fDeviceSpacePath.getBounds().roundOut())
, fAccessRect(accessRect)
, fAtlasLazyProxy(GrCCAtlas::MakeLazyAtlasProxy(
[](GrResourceProvider*, const GrCCAtlas::LazyAtlasDesc&) {
// GrCCClipPaths get instantiated explicitly after the atlas is laid out. If
// this callback gets invoked, it means the atlas proxy itself failed to
// instantiate.
return GrSurfaceProxy::LazyCallbackResult();
}, caps, GrSurfaceProxy::UseAllocator::kYes)) {
SkASSERT(!deviceSpacePath.isEmpty());
SkASSERT(SkIRect::Intersects(fAccessRect, fPathDevIBounds));
}
void GrCCClipPath::accountForOwnPath(GrCCAtlas::Specs* specs) const {
SkIRect ibounds;
if (ibounds.intersect(fAccessRect, fPathDevIBounds)) {
specs->accountForSpace(ibounds.width(), ibounds.height());
}
}
std::unique_ptr<GrCCAtlas> GrCCClipPath::renderPathInAtlas(GrCCPerFlushResources* resources,
GrOnFlushResourceProvider* onFlushRP) {
SkASSERT(!fHasAtlas);
auto retiredAtlas = resources->renderDeviceSpacePathInAtlas(
onFlushRP, fAccessRect, fDeviceSpacePath, fPathDevIBounds,
GrFillRuleForSkPath(fDeviceSpacePath), &fDevToAtlasOffset);
SkDEBUGCODE(fHasAtlas = true);
return retiredAtlas;
}

View File

@@ -1,66 +0,0 @@
/*
* Copyright 2018 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef GrCCClipPath_DEFINED
#define GrCCClipPath_DEFINED
#include "include/core/SkPath.h"
#include "src/gpu/GrSurfaceProxyPriv.h"
#include "src/gpu/GrTextureProxy.h"
#include "src/gpu/ccpr/GrCCAtlas.h"
struct GrCCPerFlushResourceSpecs;
class GrCCPerFlushResources;
class GrOnFlushResourceProvider;
class GrProxyProvider;
/**
* These are keyed by SkPath generation ID, and store which device-space paths are accessed and
* where by clip FPs in a given opsTask. A single GrCCClipPath can be referenced by multiple FPs. At
* flush time their coverage count masks are packed into atlas(es) alongside normal DrawPathOps.
*/
class GrCCClipPath : public SkRefCnt {
public:
GrCCClipPath(const SkPath& deviceSpacePath, const SkIRect&, const GrCaps&);
GrCCClipPath(const GrCCClipPath&) = delete;
void addAccess(const SkIRect& accessRect) { fAccessRect.join(accessRect); }
GrTextureProxy* atlasLazyProxy() const { return fAtlasLazyProxy.get(); }
const SkPath& deviceSpacePath() const { return fDeviceSpacePath; }
const SkIRect& pathDevIBounds() const { return fPathDevIBounds; }
void accountForOwnPath(GrCCAtlas::Specs*) const;
// Allocates our clip path in an atlas and records the offset.
//
// If the return value is non-null, it means the given path did not fit in the then-current
// atlas, so it was retired and a new one was added to the stack. The return value is the
// newly-retired atlas. (*NOT* the atlas this path will reside in.) The caller must call
// assignAtlasTexture on all prior GrCCClipPaths that will use the retired atlas.
std::unique_ptr<GrCCAtlas> renderPathInAtlas(GrCCPerFlushResources*,
GrOnFlushResourceProvider*);
const SkIVector& atlasTranslate() const {
SkASSERT(fHasAtlas);
return fDevToAtlasOffset;
}
void assignAtlasTexture(sk_sp<GrTexture> atlasTexture) {
fAtlasLazyProxy->priv().assign(std::move(atlasTexture));
}
private:
SkPath fDeviceSpacePath;
SkIRect fPathDevIBounds;
SkIRect fAccessRect;
sk_sp<GrTextureProxy> fAtlasLazyProxy;
SkIVector fDevToAtlasOffset; // Translation from device space to location in atlas.
SkDEBUGCODE(bool fHasAtlas = false;)
};
#endif

View File

@@ -1,124 +0,0 @@
/*
* Copyright 2017 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "src/gpu/ccpr/GrCCClipProcessor.h"
#include "src/gpu/ccpr/GrCCClipPath.h"
#include "src/gpu/effects/GrTextureEffect.h"
#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
static GrSurfaceProxyView make_view(const GrCaps& caps, GrSurfaceProxy* proxy) {
GrSwizzle swizzle = caps.getReadSwizzle(proxy->backendFormat(), GrColorType::kAlpha_8);
return { sk_ref_sp(proxy), GrCCAtlas::kTextureOrigin, swizzle };
}
GrCCClipProcessor::GrCCClipProcessor(std::unique_ptr<GrFragmentProcessor> inputFP,
const GrCaps& caps,
sk_sp<const GrCCClipPath> clipPath,
MustCheckBounds mustCheckBounds)
: INHERITED(kGrCCClipProcessor_ClassID, kCompatibleWithCoverageAsAlpha_OptimizationFlag)
, fClipPath(std::move(clipPath))
, fMustCheckBounds(MustCheckBounds::kYes == mustCheckBounds) {
auto view = make_view(caps, fClipPath->atlasLazyProxy());
auto texEffect = GrTextureEffect::Make(std::move(view), kUnknown_SkAlphaType);
this->registerChild(std::move(texEffect), SkSL::SampleUsage::Explicit());
this->registerChild(std::move(inputFP));
}
GrCCClipProcessor::GrCCClipProcessor(const GrCCClipProcessor& that)
: INHERITED(kGrCCClipProcessor_ClassID, that.optimizationFlags())
, fClipPath(that.fClipPath)
, fMustCheckBounds(that.fMustCheckBounds) {
this->cloneAndRegisterAllChildProcessors(that);
}
std::unique_ptr<GrFragmentProcessor> GrCCClipProcessor::clone() const {
return std::unique_ptr<GrFragmentProcessor>(new GrCCClipProcessor(*this));
}
void GrCCClipProcessor::onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder* b) const {
const SkPath& clipPath = fClipPath->deviceSpacePath();
uint32_t key = ((clipPath.isInverseFillType()) ? 1 : 0);
key = (key << 1) | ((fMustCheckBounds) ? 1 : 0);
b->add32(key);
}
bool GrCCClipProcessor::onIsEqual(const GrFragmentProcessor& fp) const {
const GrCCClipProcessor& that = fp.cast<GrCCClipProcessor>();
return that.fClipPath->deviceSpacePath().getGenerationID() ==
fClipPath->deviceSpacePath().getGenerationID() &&
that.fClipPath->deviceSpacePath().getFillType() ==
fClipPath->deviceSpacePath().getFillType() &&
that.fMustCheckBounds == fMustCheckBounds;
}
class GrCCClipProcessor::Impl : public GrGLSLFragmentProcessor {
public:
void emitCode(EmitArgs& args) override {
const GrCCClipProcessor& proc = args.fFp.cast<GrCCClipProcessor>();
GrGLSLUniformHandler* uniHandler = args.fUniformHandler;
GrGLSLFPFragmentBuilder* f = args.fFragBuilder;
f->codeAppend("half coverage;");
if (proc.fMustCheckBounds) {
const char* pathIBounds;
fPathIBoundsUniform = uniHandler->addUniform(&proc, kFragment_GrShaderFlag,
kFloat4_GrSLType, "path_ibounds",
&pathIBounds);
f->codeAppendf("if (all(greaterThan(float4(sk_FragCoord.xy, %s.RB), "
"float4(%s.LT, sk_FragCoord.xy)))) {",
pathIBounds, pathIBounds);
}
const char* atlasTranslate;
fAtlasTranslateUniform = uniHandler->addUniform(&proc, kFragment_GrShaderFlag,
kFloat2_GrSLType, "atlas_translate",
&atlasTranslate);
SkString coord;
coord.printf("sk_FragCoord.xy + %s.xy", atlasTranslate);
constexpr int kTexEffectFPIndex = 0;
SkString sample = this->invokeChild(kTexEffectFPIndex, args, coord.c_str());
f->codeAppendf("coverage = %s.a;", sample.c_str());
if (proc.fMustCheckBounds) {
f->codeAppend("} else {");
f->codeAppend( "coverage = 0;");
f->codeAppend("}");
}
if (proc.fClipPath->deviceSpacePath().isInverseFillType()) {
f->codeAppend("coverage = 1 - coverage;");
}
constexpr int kInputFPIndex = 1;
SkString inputColor = this->invokeChild(kInputFPIndex, args);
f->codeAppendf("return %s * coverage;", inputColor.c_str());
}
void onSetData(const GrGLSLProgramDataManager& pdman,
const GrFragmentProcessor& fp) override {
const GrCCClipProcessor& proc = fp.cast<GrCCClipProcessor>();
if (proc.fMustCheckBounds) {
const SkRect pathIBounds = SkRect::Make(proc.fClipPath->pathDevIBounds());
pdman.set4f(fPathIBoundsUniform, pathIBounds.left(), pathIBounds.top(),
pathIBounds.right(), pathIBounds.bottom());
}
const SkIVector& trans = proc.fClipPath->atlasTranslate();
pdman.set2f(fAtlasTranslateUniform, trans.x(), trans.y());
}
private:
UniformHandle fPathIBoundsUniform;
UniformHandle fAtlasTranslateUniform;
};
std::unique_ptr<GrGLSLFragmentProcessor> GrCCClipProcessor::onMakeProgramImpl() const {
return std::make_unique<Impl>();
}

View File

@@ -1,42 +0,0 @@
/*
* Copyright 2017 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef GrCCClipProcessor_DEFINED
#define GrCCClipProcessor_DEFINED
#include "src/gpu/GrFragmentProcessor.h"
class GrCCClipPath;
class GrCCClipProcessor : public GrFragmentProcessor {
public:
enum class MustCheckBounds : bool {
kNo = false,
kYes = true
};
GrCCClipProcessor(std::unique_ptr<GrFragmentProcessor>, const GrCaps&,
sk_sp<const GrCCClipPath>, MustCheckBounds);
const char* name() const override { return "GrCCClipProcessor"; }
std::unique_ptr<GrFragmentProcessor> clone() const override;
void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
bool onIsEqual(const GrFragmentProcessor&) const override;
std::unique_ptr<GrGLSLFragmentProcessor> onMakeProgramImpl() const override;
private:
explicit GrCCClipProcessor(const GrCCClipProcessor&);
const sk_sp<const GrCCClipPath> fClipPath;
const bool fMustCheckBounds;
class Impl;
using INHERITED = GrFragmentProcessor;
};
#endif

View File

@@ -1,186 +0,0 @@
/*
* Copyright 2018 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "src/gpu/ccpr/GrCCPerFlushResources.h"
#include "include/gpu/GrRecordingContext.h"
#include "src/core/SkIPoint16.h"
#include "src/gpu/GrFixedClip.h"
#include "src/gpu/GrMemoryPool.h"
#include "src/gpu/GrOnFlushResourceProvider.h"
#include "src/gpu/GrRecordingContextPriv.h"
#include "src/gpu/GrSurfaceDrawContext.h"
#include "src/gpu/geometry/GrStyledShape.h"
#include "src/gpu/ops/GrFillRectOp.h"
GrCCPerFlushResources::GrCCPerFlushResources(GrOnFlushResourceProvider* onFlushRP,
const GrCCAtlas::Specs& specs)
: fAtlasSpecs(specs) {
}
std::unique_ptr<GrCCAtlas> GrCCPerFlushResources::renderDeviceSpacePathInAtlas(
GrOnFlushResourceProvider* onFlushRP, const SkIRect& clipIBounds, const SkPath& devPath,
const SkIRect& devPathIBounds, GrFillRule fillRule, SkIVector* devToAtlasOffset) {
SkASSERT(!devPath.isEmpty());
GrScissorTest enableScissorInAtlas;
SkIRect clippedPathIBounds;
if (clipIBounds.contains(devPathIBounds)) {
clippedPathIBounds = devPathIBounds;
enableScissorInAtlas = GrScissorTest::kDisabled;
} else {
SkAssertResult(clippedPathIBounds.intersect(clipIBounds, devPathIBounds));
enableScissorInAtlas = GrScissorTest::kEnabled;
}
auto retiredAtlas = this->placeRenderedPathInAtlas(onFlushRP, clippedPathIBounds,
enableScissorInAtlas, devToAtlasOffset);
SkMatrix atlasMatrix = SkMatrix::Translate(devToAtlasOffset->fX, devToAtlasOffset->fY);
this->enqueueRenderedPath(devPath, fillRule, clippedPathIBounds, atlasMatrix,
enableScissorInAtlas, *devToAtlasOffset);
return retiredAtlas;
}
std::unique_ptr<GrCCAtlas> GrCCPerFlushResources::placeRenderedPathInAtlas(
GrOnFlushResourceProvider* onFlushRP, const SkIRect& clippedPathIBounds,
GrScissorTest scissorTest, SkIVector* devToAtlasOffset) {
std::unique_ptr<GrCCAtlas> retiredAtlas;
SkIPoint16 location;
if (!fAtlas ||
!fAtlas->addRect(clippedPathIBounds.width(), clippedPathIBounds.height(), &location)) {
// The retired atlas is out of room and can't grow any bigger.
if (fAtlas) {
this->flushRenderedPaths(onFlushRP);
retiredAtlas = std::move(fAtlas);
}
fAtlas = std::make_unique<GrCCAtlas>(fAtlasSpecs, *onFlushRP->caps());
SkASSERT(clippedPathIBounds.width() <= fAtlasSpecs.fMinWidth);
SkASSERT(clippedPathIBounds.height() <= fAtlasSpecs.fMinHeight);
SkAssertResult(fAtlas->addRect(clippedPathIBounds.width(), clippedPathIBounds.height(),
&location));
}
devToAtlasOffset->set(location.x() - clippedPathIBounds.left(),
location.y() - clippedPathIBounds.top());
return retiredAtlas;
}
void GrCCPerFlushResources::enqueueRenderedPath(const SkPath& path, GrFillRule fillRule,
const SkIRect& clippedDevIBounds,
const SkMatrix& pathToAtlasMatrix,
GrScissorTest enableScissorInAtlas,
SkIVector devToAtlasOffset) {
SkPath* atlasPath;
if (enableScissorInAtlas == GrScissorTest::kDisabled) {
atlasPath = &fAtlasPaths[(int)fillRule].fUberPath;
} else {
auto& [scissoredPath, scissor] = fAtlasPaths[(int)fillRule].fScissoredPaths.push_back();
scissor = clippedDevIBounds.makeOffset(devToAtlasOffset);
atlasPath = &scissoredPath;
}
auto origin = clippedDevIBounds.topLeft() + devToAtlasOffset;
atlasPath->moveTo(origin.fX, origin.fY); // Implicit moveTo(0,0).
atlasPath->addPath(path, pathToAtlasMatrix);
}
static void draw_stencil_to_coverage(GrOnFlushResourceProvider* onFlushRP,
GrSurfaceDrawContext* surfaceDrawContext, SkRect&& rect) {
auto aaType = GrAAType::kMSAA;
auto fillRectFlags = GrSimpleMeshDrawOpHelper::InputFlags::kNone;
// This will be the final op in the surfaceDrawContext. So if Ganesh is planning to discard the
// stencil values anyway, then we might not actually need to reset the stencil values back to 0.
bool mustResetStencil = !onFlushRP->caps()->discardStencilValuesAfterRenderPass();
const GrUserStencilSettings* stencil;
if (mustResetStencil) {
constexpr static GrUserStencilSettings kTestAndResetStencil(
GrUserStencilSettings::StaticInit<
0x0000,
GrUserStencilTest::kNotEqual,
0xffff,
GrUserStencilOp::kZero,
GrUserStencilOp::kKeep,
0xffff>());
// Outset the cover rect in case there are T-junctions in the path bounds.
rect.outset(1, 1);
stencil = &kTestAndResetStencil;
} else {
constexpr static GrUserStencilSettings kTestStencil(
GrUserStencilSettings::StaticInit<
0x0000,
GrUserStencilTest::kNotEqual,
0xffff,
GrUserStencilOp::kKeep,
GrUserStencilOp::kKeep,
0xffff>());
stencil = &kTestStencil;
}
GrPaint paint;
paint.setColor4f(SK_PMColor4fWHITE);
GrQuad coverQuad(rect);
DrawQuad drawQuad{coverQuad, coverQuad, GrQuadAAFlags::kAll};
auto coverOp = GrFillRectOp::Make(surfaceDrawContext->recordingContext(), std::move(paint),
aaType, &drawQuad, stencil, fillRectFlags);
surfaceDrawContext->addDrawOp(nullptr, std::move(coverOp));
}
void GrCCPerFlushResources::flushRenderedPaths(GrOnFlushResourceProvider* onFlushRP) {
SkASSERT(fAtlas);
auto surfaceDrawContext = fAtlas->instantiate(onFlushRP);
if (!surfaceDrawContext) {
for (int i = 0; i < (int)SK_ARRAY_COUNT(fAtlasPaths); ++i) {
fAtlasPaths[i].fUberPath.reset();
fAtlasPaths[i].fScissoredPaths.reset();
}
return;
}
for (int i = 0; i < (int)SK_ARRAY_COUNT(fAtlasPaths); ++i) {
SkPathFillType fillType = (i == (int)GrFillRule::kNonzero) ? SkPathFillType::kWinding
: SkPathFillType::kEvenOdd;
SkPath& uberPath = fAtlasPaths[i].fUberPath;
if (!uberPath.isEmpty()) {
uberPath.setIsVolatile(true);
uberPath.setFillType(fillType);
surfaceDrawContext->stencilPath(nullptr, GrAA::kYes, SkMatrix::I(), uberPath);
uberPath.reset();
}
for (auto& [scissoredPath, scissor] : fAtlasPaths[i].fScissoredPaths) {
GrFixedClip fixedClip(
surfaceDrawContext->asRenderTargetProxy()->backingStoreDimensions(), scissor);
scissoredPath.setIsVolatile(true);
scissoredPath.setFillType(fillType);
surfaceDrawContext->stencilPath(&fixedClip, GrAA::kYes, SkMatrix::I(), scissoredPath);
}
fAtlasPaths[i].fScissoredPaths.reset();
}
draw_stencil_to_coverage(onFlushRP, surfaceDrawContext.get(),
SkRect::MakeSize(SkSize::Make(fAtlas->drawBounds())));
if (surfaceDrawContext->asSurfaceProxy()->requiresManualMSAAResolve()) {
onFlushRP->addTextureResolveTask(sk_ref_sp(surfaceDrawContext->asTextureProxy()),
GrSurfaceProxy::ResolveFlags::kMSAA);
}
}
std::unique_ptr<GrCCAtlas> GrCCPerFlushResources::finalize(GrOnFlushResourceProvider* onFlushRP) {
if (fAtlas) {
this->flushRenderedPaths(onFlushRP);
}
#ifdef SK_DEBUG
// These paths should have been rendered and reset to empty by this point.
for (size_t i = 0; i < SK_ARRAY_COUNT(fAtlasPaths); ++i) {
SkASSERT(fAtlasPaths[i].fUberPath.isEmpty());
SkASSERT(fAtlasPaths[i].fScissoredPaths.empty());
}
#endif
return std::move(fAtlas);
}

View File

@@ -1,69 +0,0 @@
/*
* Copyright 2018 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef GrCCPerFlushResources_DEFINED
#define GrCCPerFlushResources_DEFINED
#include "src/gpu/GrNonAtomicRef.h"
#include "src/gpu/ccpr/GrCCAtlas.h"
class GrOnFlushResourceProvider;
/**
* This class wraps all the GPU resources that CCPR builds at flush time. It is allocated in CCPR's
* preFlush() method, and referenced by all the GrCCPerOpsTaskPaths objects that are being flushed.
* It is deleted in postFlush() once all the flushing GrCCPerOpsTaskPaths objects are deleted.
*/
class GrCCPerFlushResources : public GrNonAtomicRef<GrCCPerFlushResources> {
public:
GrCCPerFlushResources(GrOnFlushResourceProvider*, const GrCCAtlas::Specs&);
// Renders a path into an atlas.
//
// If the return value is non-null, it means the given path did not fit in the then-current
// atlas, so it was retired and a new one was added to the stack. The return value is the
// newly-retired atlas. (*NOT* the atlas the path was just drawn into.) The caller must call
// assignAtlasTexture on all GrCCClipPaths that will use the retired atlas.
std::unique_ptr<GrCCAtlas> renderDeviceSpacePathInAtlas(
GrOnFlushResourceProvider*, const SkIRect& clipIBounds, const SkPath& devPath,
const SkIRect& devPathIBounds, GrFillRule fillRule, SkIVector* devToAtlasOffset);
// Finishes off the GPU buffers and renders the atlas(es).
std::unique_ptr<GrCCAtlas> finalize(GrOnFlushResourceProvider*);
private:
// If the return value is non-null, it means the given path did not fit in the then-current
// atlas, so it was retired and a new one was added to the stack. The return value is the
// newly-retired atlas. (*NOT* the atlas the path was just drawn into.) The caller must call
// assignAtlasTexture on all GrCCClipPaths that will use the retired atlas.
std::unique_ptr<GrCCAtlas> placeRenderedPathInAtlas(
GrOnFlushResourceProvider*, const SkIRect& clippedPathIBounds, GrScissorTest,
SkIVector* devToAtlasOffset);
// Enqueues the given path to be rendered during the next call to flushRenderedPaths().
void enqueueRenderedPath(const SkPath&, GrFillRule, const SkIRect& clippedDevIBounds,
const SkMatrix& pathToAtlasMatrix, GrScissorTest enableScissorInAtlas,
SkIVector devToAtlasOffset);
// Renders all enqueued paths into the given atlas and clears our path queue.
void flushRenderedPaths(GrOnFlushResourceProvider*);
const GrCCAtlas::Specs fAtlasSpecs;
// Paths to be rendered in the atlas we are currently building.
struct AtlasPaths {
SkPath fUberPath; // Contains all contours from all non-scissored paths.
SkSTArray<32, std::tuple<SkPath, SkIRect>> fScissoredPaths;
};
static_assert((int)GrFillRule::kNonzero == 0);
static_assert((int)GrFillRule::kEvenOdd == 1);
AtlasPaths fAtlasPaths[2]; // One for "nonzero" fill rule and one for "even-odd".
std::unique_ptr<GrCCAtlas> fAtlas;
};
#endif

View File

@@ -1,28 +0,0 @@
/*
* Copyright 2018 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef GrCCPerOpsTaskPaths_DEFINED
#define GrCCPerOpsTaskPaths_DEFINED
#include "include/core/SkRefCnt.h"
#include "src/core/SkArenaAlloc.h"
#include "src/core/SkTInternalLList.h"
#include "src/gpu/ccpr/GrCCClipPath.h"
#include <map>
class GrCCPerFlushResources;
/**
* Tracks all the CCPR paths in a given opsTask that will be drawn when it flushes.
*/
// DDL TODO: given the usage pattern in DDL mode, this could probably be non-atomic refcounting.
struct GrCCPerOpsTaskPaths : public SkRefCnt {
std::map<uint32_t, sk_sp<GrCCClipPath>> fClipPaths;
};
#endif

View File

@@ -1,203 +0,0 @@
/*
* Copyright 2017 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "src/gpu/ccpr/GrCoverageCountingPathRenderer.h"
#include <memory>
#include "src/gpu/GrCaps.h"
#include "src/gpu/GrProxyProvider.h"
#include "src/gpu/GrSurfaceDrawContext.h"
#include "src/gpu/ccpr/GrCCClipProcessor.h"
bool GrCoverageCountingPathRenderer::IsSupported(const GrRecordingContext* ctx) {
if (ctx->backend() != GrBackendApi::kMock) {
// The atlas isn't ready for primetime. Disable it everywhere except for testing.
return false;
}
const GrCaps& caps = *ctx->priv().caps();
const GrShaderCaps& shaderCaps = *caps.shaderCaps();
GrBackendFormat defaultA8Format = caps.getDefaultBackendFormat(GrColorType::kAlpha_8,
GrRenderable::kYes);
if (caps.driverDisableMSAAClipAtlas() || !shaderCaps.integerSupport() ||
!caps.drawInstancedSupport() || !shaderCaps.floatIs32Bits() ||
!defaultA8Format.isValid() || // This checks both texturable and renderable
!caps.halfFloatVertexAttributeSupport()) {
return false;
}
if (caps.internalMultisampleCount(defaultA8Format) > 1 &&
caps.sampleLocationsSupport() &&
shaderCaps.sampleMaskSupport()) {
return true;
}
return false;
}
std::unique_ptr<GrCoverageCountingPathRenderer> GrCoverageCountingPathRenderer::CreateIfSupported(
const GrRecordingContext* ctx) {
if (IsSupported(ctx)) {
return std::make_unique<GrCoverageCountingPathRenderer>();
}
return nullptr;
}
GrCCPerOpsTaskPaths* GrCoverageCountingPathRenderer::lookupPendingPaths(uint32_t opsTaskID) {
auto it = fPendingPaths.find(opsTaskID);
if (fPendingPaths.end() == it) {
sk_sp<GrCCPerOpsTaskPaths> paths = sk_make_sp<GrCCPerOpsTaskPaths>();
it = fPendingPaths.insert(std::make_pair(opsTaskID, std::move(paths))).first;
}
return it->second.get();
}
GrFPResult GrCoverageCountingPathRenderer::makeClipProcessor(
std::unique_ptr<GrFragmentProcessor> inputFP, uint32_t opsTaskID,
const SkPath& deviceSpacePath, const SkIRect& accessRect, const GrCaps& caps) {
#ifdef SK_DEBUG
SkASSERT(!fFlushing);
SkIRect pathIBounds;
deviceSpacePath.getBounds().roundOut(&pathIBounds);
SkIRect maskBounds;
if (maskBounds.intersect(accessRect, pathIBounds)) {
SkASSERT(maskBounds.height64() * maskBounds.width64() <= kMaxClipPathArea);
}
#endif
if (deviceSpacePath.isEmpty() ||
!SkIRect::Intersects(accessRect, deviceSpacePath.getBounds().roundOut())) {
// "Intersect" draws that don't intersect the clip can be dropped.
return deviceSpacePath.isInverseFillType() ? GrFPSuccess(nullptr) : GrFPFailure(nullptr);
}
uint32_t key = deviceSpacePath.getGenerationID();
key = (key << 1) | (uint32_t)GrFillRuleForSkPath(deviceSpacePath);
sk_sp<GrCCClipPath>& clipPath = this->lookupPendingPaths(opsTaskID)->fClipPaths[key];
if (!clipPath) {
// This is the first time we've accessed this clip path key in the map.
clipPath = sk_make_sp<GrCCClipPath>(deviceSpacePath, accessRect, caps);
} else {
clipPath->addAccess(accessRect);
}
auto mustCheckBounds = GrCCClipProcessor::MustCheckBounds(
!clipPath->pathDevIBounds().contains(accessRect));
return GrFPSuccess(std::make_unique<GrCCClipProcessor>(std::move(inputFP), caps, clipPath,
mustCheckBounds));
}
namespace {
// Iterates all GrCCClipPaths in an array of non-empty maps.
class ClipMapsIter {
public:
ClipMapsIter(sk_sp<GrCCPerOpsTaskPaths>* mapsList) : fMapsList(mapsList) {}
bool operator!=(const ClipMapsIter& that) {
if (fMapsList != that.fMapsList) {
return true;
}
// fPerOpsTaskClipPaths will be null when we are on the first element.
if (fPerOpsTaskClipPaths != that.fPerOpsTaskClipPaths) {
return true;
}
return fPerOpsTaskClipPaths && fClipPathsIter != that.fClipPathsIter;
}
void operator++() {
// fPerOpsTaskClipPaths is null when we are on the first element.
if (!fPerOpsTaskClipPaths) {
fPerOpsTaskClipPaths = &(*fMapsList)->fClipPaths;
SkASSERT(!fPerOpsTaskClipPaths->empty()); // We don't handle empty lists.
fClipPathsIter = fPerOpsTaskClipPaths->begin();
}
if ((++fClipPathsIter) == fPerOpsTaskClipPaths->end()) {
++fMapsList;
fPerOpsTaskClipPaths = nullptr;
}
}
GrCCClipPath* operator->() {
// fPerOpsTaskClipPaths is null when we are on the first element.
const auto& it = (!fPerOpsTaskClipPaths) ? (*fMapsList)->fClipPaths.begin()
: fClipPathsIter;
return it->second.get();
}
private:
sk_sp<GrCCPerOpsTaskPaths>* fMapsList;
std::map<uint32_t, sk_sp<GrCCClipPath>>* fPerOpsTaskClipPaths = nullptr;
std::map<uint32_t, sk_sp<GrCCClipPath>>::iterator fClipPathsIter;
};
} // namespace
static void assign_atlas_textures(GrTexture* atlasTexture, ClipMapsIter nextPathToAssign,
const ClipMapsIter& end) {
if (!atlasTexture) {
return;
}
for (; nextPathToAssign != end; ++nextPathToAssign) {
nextPathToAssign->assignAtlasTexture(sk_ref_sp(atlasTexture));
}
}
void GrCoverageCountingPathRenderer::preFlush(
GrOnFlushResourceProvider* onFlushRP, SkSpan<const uint32_t> /*taskIDs*/) {
SkASSERT(!fFlushing);
SkDEBUGCODE(fFlushing = true);
if (fPendingPaths.empty()) {
return; // Nothing to draw.
}
GrCCAtlas::Specs specs;
int maxPreferredRTSize = onFlushRP->caps()->maxPreferredRenderTargetSize();
specs.fMaxPreferredTextureSize = maxPreferredRTSize;
specs.fMinTextureSize = std::min(512, maxPreferredRTSize);
// Move the path lists from fPendingPaths to flushingPaths,
// and count them up so we can preallocate buffers.
// NOTE: This assumes a full flush, as opposed to partial flush. This CCPR atlasing technique
// is on its way out, though. skbug.com/11948
// Also, this temporary array could go away but the ClipMapsIter code would get a whole lot
// messier. Leave it be.
SkSTArray<8, sk_sp<GrCCPerOpsTaskPaths>> flushingPaths;
flushingPaths.reserve_back(fPendingPaths.size());
for (auto& [taskID, paths] : fPendingPaths) {
flushingPaths.push_back(std::move(paths));
for (const auto& clipsIter : flushingPaths.back()->fClipPaths) {
clipsIter.second->accountForOwnPath(&specs);
}
}
fPendingPaths.clear();
GrCCPerFlushResources perFlushResources(onFlushRP, specs);
// Layout the atlas(es) and render paths.
ClipMapsIter it(flushingPaths.begin());
ClipMapsIter end(flushingPaths.end());
ClipMapsIter nextPathToAssign = it; // The next GrCCClipPath to call assignAtlasTexture on.
for (; it != end; ++it) {
if (auto retiredAtlas = it->renderPathInAtlas(&perFlushResources, onFlushRP)) {
assign_atlas_textures(retiredAtlas->textureProxy()->peekTexture(), nextPathToAssign,
it);
nextPathToAssign = it;
}
}
// Allocate resources and then render the atlas(es).
auto atlas = perFlushResources.finalize(onFlushRP);
assign_atlas_textures(atlas->textureProxy()->peekTexture(), nextPathToAssign, end);
}
void GrCoverageCountingPathRenderer::postFlush(GrDeferredUploadToken,
SkSpan<const uint32_t> /* taskIDs */) {
SkASSERT(fFlushing);
SkDEBUGCODE(fFlushing = false);
}

View File

@@ -1,83 +0,0 @@
/*
* Copyright 2017 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef GrCoverageCountingPathRenderer_DEFINED
#define GrCoverageCountingPathRenderer_DEFINED
#include <map>
#include "src/gpu/GrOnFlushResourceProvider.h"
#include "src/gpu/GrOpsTask.h"
#include "src/gpu/ccpr/GrCCPerFlushResources.h"
#include "src/gpu/ccpr/GrCCPerOpsTaskPaths.h"
/**
* This is a path renderer that draws antialiased paths by counting coverage in an offscreen
* buffer. (See GrCCCoverageProcessor, GrCCPathProcessor.)
*
* It also serves as the per-render-target tracker for pending path draws, and at the start of
* flush, it compiles GPU buffers and renders a "coverage count atlas" for the upcoming paths.
*/
class GrCoverageCountingPathRenderer : public GrOnFlushCallbackObject {
public:
static bool IsSupported(const GrRecordingContext*);
static std::unique_ptr<GrCoverageCountingPathRenderer> CreateIfSupported(
const GrRecordingContext*);
using PendingPathsMap = std::map<uint32_t, sk_sp<GrCCPerOpsTaskPaths>>;
// In DDL mode, Ganesh needs to be able to move the pending GrCCPerOpsTaskPaths to the DDL
// object (detachPendingPaths) and then return them upon replay (mergePendingPaths).
PendingPathsMap detachPendingPaths() { return std::move(fPendingPaths); }
void mergePendingPaths(const PendingPathsMap& paths) {
#ifdef SK_DEBUG
// Ensure there are no duplicate opsTask IDs between the incoming path map and ours.
// This should always be true since opsTask IDs are globally unique and these are coming
// from different DDL recordings.
for (const auto& it : paths) {
SkASSERT(!fPendingPaths.count(it.first));
}
#endif
fPendingPaths.insert(paths.begin(), paths.end());
}
// The atlas can take up a lot of memory, so we only use clip processors for small paths.
// Larger clip paths should fall back to a different method, such as MSAA stencil.
constexpr static int64_t kMaxClipPathArea = 256 * 256;
GrFPResult makeClipProcessor(std::unique_ptr<GrFragmentProcessor> inputFP, uint32_t opsTaskID,
const SkPath& deviceSpacePath, const SkIRect& accessRect,
const GrCaps& caps);
// GrOnFlushCallbackObject overrides.
void preFlush(GrOnFlushResourceProvider*, SkSpan<const uint32_t> taskIDs) override;
void postFlush(GrDeferredUploadToken, SkSpan<const uint32_t> taskIDs) override;
// If a path spans more pixels than this, we need to crop it or else analytic AA can run out of
// fp32 precision. (A float carries a 24-bit significand, so at coordinates near 2^16 only about
// 7-8 bits remain for the sub-pixel fractions that coverage math relies on.)
static constexpr float kPathCropThreshold = 1 << 16;
// Maximum inflation of path bounds due to stroking (from width, miter, caps). Strokes wider
// than this will be converted to fill paths and drawn by the CCPR filler instead.
static constexpr float kMaxBoundsInflationFromStroke = 4096;
static constexpr int kDoCopiesThreshold = 100;
private:
GrCCPerOpsTaskPaths* lookupPendingPaths(uint32_t opsTaskID);
// fPendingPaths holds the GrCCPerOpsTaskPaths objects that have already been created, but not
// flushed, and those that are still being created. All GrCCPerOpsTaskPaths objects will first
// reside in fPendingPaths, then be moved to fFlushingPaths during preFlush().
PendingPathsMap fPendingPaths;
SkDEBUGCODE(bool fFlushing = false);
};
#endif
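
The DDL note at the top of this header is the piece most easily missed in review: pending paths are keyed by opsTask ID so they can survive a recording/replay round trip. A hedged sketch of that round trip, using the real detachPendingPaths()/mergePendingPaths() entry points but a hypothetical DisplayList struct in place of the actual DDL plumbing:

#include "src/gpu/ccpr/GrCoverageCountingPathRenderer.h"

// Hypothetical stand-in for the DDL object that carries recorded state.
struct DisplayList {
    GrCoverageCountingPathRenderer::PendingPathsMap fPendingPaths;
};

// When a recording ends, the recorder's CCPR surrenders its pending
// per-opsTask paths to the display list...
void finishRecording(GrCoverageCountingPathRenderer* ccpr, DisplayList* ddl) {
    ddl->fPendingPaths = ccpr->detachPendingPaths();
}

// ...and on replay the target context's CCPR absorbs them. opsTask IDs are
// globally unique, so the merge cannot collide with IDs already pending on
// the target (the SkASSERT above checks exactly this).
void replayOnto(GrCoverageCountingPathRenderer* ccpr, const DisplayList& ddl) {
    ccpr->mergePendingPaths(ddl.fPendingPaths);
}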

View File

@ -1,24 +0,0 @@
/*
* Copyright 2018 Google LLC
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include <memory>
#include "src/gpu/ccpr/GrCoverageCountingPathRenderer.h"
bool GrCoverageCountingPathRenderer::IsSupported(const GrRecordingContext*) {
return false;
}
std::unique_ptr<GrCoverageCountingPathRenderer> GrCoverageCountingPathRenderer::CreateIfSupported(
const GrRecordingContext*) {
return nullptr;
}
GrFPResult GrCoverageCountingPathRenderer::makeClipProcessor(
std::unique_ptr<GrFragmentProcessor> inputFP, uint32_t opsTaskID,
const SkPath& deviceSpacePath, const SkIRect& accessRect, const GrCaps& caps) {
return GrFPFailure(nullptr);
}

View File

@ -371,8 +371,8 @@ void GrGLCaps::init(const GrContextOptions& contextOptions,
ctxInfo.hasExtension("GL_OES_standard_derivatives");
// Mali and early Adreno both have support for geometry shaders, but they appear to be
- // implemented in software. In practice with ccpr, they are slower than the backup impl that
- // only uses vertex shaders.
+ // implemented in software. In practice with ccpr, they were slower than the backup impl
+ // that only uses vertex shaders.
if (ctxInfo.vendor() != GrGLVendor::kARM &&
ctxInfo.renderer() != GrGLRenderer::kAdreno3xx &&
ctxInfo.renderer() != GrGLRenderer::kAdreno4xx_other) {

View File

@ -280,7 +280,6 @@ protected:
// Counter for generating unique scratch variable names in a shader.
int fTmpVariableCounter;
friend class GrCCCoverageProcessor; // to access code().
friend class GrGLSLProgramBuilder;
friend class GrGLProgramBuilder;
friend class GrD3DPipelineStateBuilder;

View File

@ -401,7 +401,7 @@ void GrVkCaps::init(const GrContextOptions& contextOptions, const GrVkInterface*
this->initShaderCaps(properties, features);
if (kQualcomm_VkVendor == properties.vendorID) {
// A "clear" load for the CCPR atlas runs faster on QC than a "discard" load followed by a
// A "clear" load for atlases runs faster on QC than a "discard" load followed by a
// scissored clear.
// On NVIDIA and Intel, the discard load followed by clear is faster.
// TODO: Evaluate on ARM, Imagination, and ATI.
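
The QC comment above encodes a per-vendor load-op heuristic. A minimal illustration (not GrVkCaps code; 0x5143, Qualcomm's PCI vendor id, is an assumption standing in for kQualcomm_VkVendor):

#include <cstdint>

enum class LoadOp { kClear, kDiscard };

LoadOp chooseAtlasLoadOp(uint32_t vendorID) {
    constexpr uint32_t kQualcommVendorID = 0x5143;  // assumed Qualcomm id
    // Qualcomm: one full clear beats a discard plus scissored clear.
    // NVIDIA/Intel: the discard path wins, so it is the default here.
    return vendorID == kQualcommVendorID ? LoadOp::kClear : LoadOp::kDiscard;
}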

View File

@ -1,309 +0,0 @@
/*
* Copyright 2017 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "include/core/SkTypes.h"
#include "tests/Test.h"
#include "include/core/SkMatrix.h"
#include "include/core/SkPathBuilder.h"
#include "include/core/SkRect.h"
#include "include/gpu/GrDirectContext.h"
#include "include/gpu/GrRecordingContext.h"
#include "include/gpu/mock/GrMockTypes.h"
#include "src/core/SkPathPriv.h"
#include "src/gpu/GrClip.h"
#include "src/gpu/GrDirectContextPriv.h"
#include "src/gpu/GrDrawingManager.h"
#include "src/gpu/GrPaint.h"
#include "src/gpu/GrPathRenderer.h"
#include "src/gpu/GrRecordingContextPriv.h"
#include "src/gpu/GrSurfaceDrawContext.h"
#include "src/gpu/GrTexture.h"
#include "src/gpu/ccpr/GrCoverageCountingPathRenderer.h"
#include "src/gpu/geometry/GrStyledShape.h"
#include "tools/ToolUtils.h"
#include <cmath>
static constexpr int kCanvasSize = 100;
class CCPRClip : public GrClip {
public:
CCPRClip(GrCoverageCountingPathRenderer* ccpr, const SkPath& path) : fCCPR(ccpr), fPath(path) {}
private:
SkIRect getConservativeBounds() const final { return fPath.getBounds().roundOut(); }
Effect apply(GrRecordingContext* context, GrSurfaceDrawContext* rtc, GrAAType,
GrAppliedClip* out, SkRect* bounds) const override {
auto [success, fp] = fCCPR->makeClipProcessor(/*inputFP=*/nullptr,
rtc->getOpsTask()->uniqueID(), fPath,
SkIRect::MakeWH(rtc->width(), rtc->height()),
*context->priv().caps());
if (success) {
out->addCoverageFP(std::move(fp));
return Effect::kClipped;
} else {
return Effect::kClippedOut;
}
}
GrCoverageCountingPathRenderer* const fCCPR;
const SkPath fPath;
};
class CCPRPathDrawer {
public:
CCPRPathDrawer(sk_sp<GrDirectContext> dContext, skiatest::Reporter* reporter)
: fDContext(dContext)
, fCCPR(fDContext->priv().drawingManager()->getCoverageCountingPathRenderer())
, fSDC(GrSurfaceDrawContext::Make(
fDContext.get(), GrColorType::kRGBA_8888, nullptr, SkBackingFit::kExact,
{kCanvasSize, kCanvasSize}, SkSurfaceProps())) {
if (!fCCPR) {
ERRORF(reporter, "ccpr not enabled in GrDirectContext for ccpr tests");
}
if (!fSDC) {
ERRORF(reporter, "failed to create GrSurfaceDrawContext for ccpr tests");
}
}
GrDirectContext* dContext() const { return fDContext.get(); }
GrCoverageCountingPathRenderer* ccpr() const { return fCCPR; }
bool valid() const { return fCCPR && fSDC; }
void clear() const { fSDC->clear(SK_PMColor4fTRANSPARENT); }
void destroyGrContext() {
SkASSERT(fDContext->unique());
fSDC.reset();
fCCPR = nullptr;
fDContext.reset();
}
void clipFullscreenRect(SkPath clipPath, const SkMatrix& matrix = SkMatrix::I()) const {
SkASSERT(this->valid());
GrPaint paint;
paint.setColor4f({0, 1, 0, 1});
CCPRClip clip(fCCPR, clipPath);
fSDC->drawRect(&clip, std::move(paint), GrAA::kYes, SkMatrix::I(),
SkRect::MakeIWH(kCanvasSize, kCanvasSize));
}
void flush() const {
SkASSERT(this->valid());
fDContext->flushAndSubmit();
}
private:
sk_sp<GrDirectContext> fDContext;
GrCoverageCountingPathRenderer* fCCPR;
std::unique_ptr<GrSurfaceDrawContext> fSDC;
};
class CCPRTest {
public:
void run(skiatest::Reporter* reporter) {
GrMockOptions mockOptions;
mockOptions.fDrawInstancedSupport = true;
mockOptions.fHalfFloatVertexAttributeSupport = true;
mockOptions.fMapBufferFlags = GrCaps::kCanMap_MapFlag;
mockOptions.fConfigOptions[(int)GrColorType::kAlpha_F16].fRenderability =
GrMockOptions::ConfigOptions::Renderability::kNonMSAA;
mockOptions.fConfigOptions[(int)GrColorType::kAlpha_F16].fTexturable = true;
mockOptions.fConfigOptions[(int)GrColorType::kAlpha_8].fRenderability =
GrMockOptions::ConfigOptions::Renderability::kMSAA;
mockOptions.fConfigOptions[(int)GrColorType::kAlpha_8].fTexturable = true;
mockOptions.fGeometryShaderSupport = true;
mockOptions.fIntegerSupport = true;
mockOptions.fFlatInterpolationSupport = true;
GrContextOptions ctxOptions;
ctxOptions.fAllowPathMaskCaching = false;
ctxOptions.fGpuPathRenderers = GpuPathRenderers::kCoverageCounting;
this->customizeOptions(&mockOptions, &ctxOptions);
sk_sp<GrDirectContext> mockContext = GrDirectContext::MakeMock(&mockOptions, ctxOptions);
if (!mockContext) {
ERRORF(reporter, "could not create mock context");
return;
}
if (!mockContext->unique()) {
ERRORF(reporter, "mock context is not unique");
return;
}
CCPRPathDrawer ccpr(std::exchange(mockContext, nullptr), reporter);
if (!ccpr.valid()) {
return;
}
fPath.moveTo(0, 0);
fPath.cubicTo(50, 50, 0, 50, 50, 0);
this->onRun(reporter, ccpr);
}
virtual ~CCPRTest() {}
protected:
virtual void customizeOptions(GrMockOptions*, GrContextOptions*) {}
virtual void onRun(skiatest::Reporter* reporter, CCPRPathDrawer& ccpr) = 0;
SkPath fPath;
};
#define DEF_CCPR_TEST(name) \
DEF_GPUTEST(name, reporter, /* options */) { \
name test; \
test.run(reporter); \
}
class CCPR_cleanup : public CCPRTest {
protected:
void onRun(skiatest::Reporter* reporter, CCPRPathDrawer& ccpr) override {
REPORTER_ASSERT(reporter, SkPathPriv::TestingOnly_unique(fPath));
// Ensure paths get unreffed when we delete the context without flushing.
for (int i = 0; i < 10; ++i) {
ccpr.clipFullscreenRect(fPath);
ccpr.clipFullscreenRect(fPath);
}
REPORTER_ASSERT(reporter, !SkPathPriv::TestingOnly_unique(fPath));
ccpr.destroyGrContext();
REPORTER_ASSERT(reporter, SkPathPriv::TestingOnly_unique(fPath));
}
};
DEF_CCPR_TEST(CCPR_cleanup)
class CCPR_cleanupWithTexAllocFail : public CCPR_cleanup {
void customizeOptions(GrMockOptions* mockOptions, GrContextOptions*) override {
mockOptions->fFailTextureAllocations = true;
}
void onRun(skiatest::Reporter* reporter, CCPRPathDrawer& ccpr) override {
((GrRecordingContext*)ccpr.dContext())->priv().incrSuppressWarningMessages();
this->CCPR_cleanup::onRun(reporter, ccpr);
}
};
DEF_CCPR_TEST(CCPR_cleanupWithTexAllocFail)
class CCPR_parseEmptyPath : public CCPRTest {
void onRun(skiatest::Reporter* reporter, CCPRPathDrawer& ccpr) override {
REPORTER_ASSERT(reporter, SkPathPriv::TestingOnly_unique(fPath));
// Make a path large enough that ccpr chooses to crop it by the RT bounds, and ends up with
// an empty path.
SkPath largeOutsidePath = SkPath::Polygon({
{-1e30f, -1e30f},
{-1e30f, +1e30f},
{-1e10f, +1e30f},
}, false);
ccpr.clipFullscreenRect(largeOutsidePath);
// Normally an empty path is culled before reaching ccpr; however, we use a testing back door
// here, so this path will make it through.
SkPath emptyPath;
SkASSERT(emptyPath.isEmpty());
ccpr.clipFullscreenRect(emptyPath);
// This is the test. It will exercise various internal asserts and verify we do not crash.
ccpr.flush();
// Now try again with clips.
ccpr.clipFullscreenRect(largeOutsidePath);
ccpr.clipFullscreenRect(emptyPath);
ccpr.flush();
// ... and both.
ccpr.clipFullscreenRect(largeOutsidePath);
ccpr.clipFullscreenRect(largeOutsidePath);
ccpr.clipFullscreenRect(emptyPath);
ccpr.clipFullscreenRect(emptyPath);
ccpr.flush();
}
};
DEF_CCPR_TEST(CCPR_parseEmptyPath)
class CCPRRenderingTest {
public:
void run(skiatest::Reporter* reporter, GrDirectContext* dContext) const {
if (dContext->priv().drawingManager()->getCoverageCountingPathRenderer()) {
CCPRPathDrawer drawer(sk_ref_sp(dContext), reporter);
if (!drawer.valid()) {
return;
}
this->onRun(reporter, drawer);
}
}
virtual ~CCPRRenderingTest() {}
protected:
virtual void onRun(skiatest::Reporter* reporter, const CCPRPathDrawer& ccpr) const = 0;
};
#define DEF_CCPR_RENDERING_TEST(name) \
DEF_GPUTEST_FOR_RENDERING_CONTEXTS(name, reporter, ctxInfo) { \
name test; \
test.run(reporter, ctxInfo.directContext()); \
}
class CCPR_busyPath : public CCPRRenderingTest {
void onRun(skiatest::Reporter* reporter, const CCPRPathDrawer& ccpr) const override {
static constexpr int kNumBusyVerbs = 1 << 17;
ccpr.clear();
SkPathBuilder busyPath;
busyPath.moveTo(0, 0); // top left
busyPath.lineTo(kCanvasSize, kCanvasSize); // bottom right
for (int i = 2; i < kNumBusyVerbs; ++i) {
float offset = i * ((float)kCanvasSize / kNumBusyVerbs);
busyPath.lineTo(kCanvasSize - offset, kCanvasSize + offset); // offscreen
}
ccpr.clipFullscreenRect(busyPath.detach());
ccpr.flush(); // If this doesn't crash, the test passed.
// If it does, maybe fiddle with fMaxInstancesPerDrawArraysWithoutCrashing in
// your platform's GrGLCaps.
}
};
DEF_CCPR_RENDERING_TEST(CCPR_busyPath)
// https://bugs.chromium.org/p/chromium/issues/detail?id=1102117
class CCPR_evictCacheEntryForPendingDrawOp : public CCPRRenderingTest {
void onRun(skiatest::Reporter* reporter, const CCPRPathDrawer& ccpr) const override {
static constexpr SkRect kRect = SkRect::MakeWH(50, 50);
ccpr.clear();
// make sure path is cached.
for (int i = 0; i < 2; i++) {
SkPath path;
path.addRect(kRect);
ccpr.clipFullscreenRect(path);
ccpr.flush();
}
// make enough cached draws to trigger DoCopies.
for (int i = 0; i <= GrCoverageCountingPathRenderer::kDoCopiesThreshold; i++) {
SkPath path;
path.addRect(kRect);
ccpr.clipFullscreenRect(path);
}
// Now draw the path with an incompatible matrix. The previous draw's cached atlas should
// not be invalidated; otherwise, this flush would render more paths than were allocated for.
auto m = SkMatrix::Translate(0.1f, 0.1f);
SkPath path;
path.addRect(kRect);
ccpr.clipFullscreenRect(path, m);
ccpr.flush();
// If this test does not crash, it passes.
}
};
DEF_CCPR_RENDERING_TEST(CCPR_evictCacheEntryForPendingDrawOp)

View File

@ -30,8 +30,8 @@ static DEFINE_bool(alwaysHwTess, false,
static DEFINE_string(pr, "",
"Set of enabled gpu path renderers. Defined as a list of: "
"[~]none [~]dashline [~]ccpr [~]aahairline [~]aaconvex [~]aalinearizing "
"[~]small [~]tri [~]tess [~]all");
"[~]none [~]dashline [~]aahairline [~]aaconvex [~]aalinearizing [~]small [~]tri "
"[~]tess [~]all");
static DEFINE_int(internalSamples, 4, "Number of samples for internal draws that use MSAA.");
@ -52,8 +52,6 @@ static GpuPathRenderers get_named_pathrenderers_flags(const char* name) {
return GpuPathRenderers::kNone;
} else if (!strcmp(name, "dashline")) {
return GpuPathRenderers::kDashLine;
} else if (!strcmp(name, "ccpr")) {
return GpuPathRenderers::kCoverageCounting;
} else if (!strcmp(name, "aahairline")) {
return GpuPathRenderers::kAAHairline;
} else if (!strcmp(name, "aaconvex")) {

View File

@ -25,7 +25,6 @@
#include "src/gpu/GrSurfaceDrawContext.h"
#include "src/gpu/GrTexture.h"
#include "src/gpu/SkGr.h"
#include "src/gpu/ccpr/GrCoverageCountingPathRenderer.h"
#include "src/gpu/text/GrStrikeCache.h"
#include "src/gpu/text/GrTextBlobCache.h"
#include "src/image/SkImage_Gpu.h"

View File

@ -204,21 +204,6 @@ array of the test names and what they drew.
// keys invalid
'GrPathKeys',
// flaky crash.
// crash seems more likely the faster you hit the "Start Tests" button.
'CCPR_cache_animationAtlasReuse',
'CCPR_cache_deferredCleanup',
'CCPR_cache_hashTable',
'CCPR_cache_mostlyVisible',
'CCPR_cache_multiFlush',
'CCPR_cache_multiTileCache',
'CCPR_cache_partialInvalidate',
'CCPR_cache_recycleEntries',
'CCPR_cleanup',
'CCPR_cleanupWithTexAllocFail',
'CCPR_busyPath',
'CCPR_evictCacheEntryForPendingDrawOp',
// Creates only 35 of 36 expected fragment processor factories
'ProcessorCloneTest',
'ProcessorOptimizationValidationTest',

View File

@ -31,7 +31,6 @@
#include "src/gpu/GrGpu.h"
#include "src/gpu/GrPersistentCacheUtils.h"
#include "src/gpu/GrShaderUtils.h"
#include "src/gpu/ccpr/GrCoverageCountingPathRenderer.h"
#include "src/gpu/tessellate/GrTessellationPathRenderer.h"
#include "src/image/SkImage_Base.h"
#include "src/sksl/SkSLCompiler.h"
@ -343,7 +342,6 @@ Viewer::Viewer(int argc, char** argv, void* platformData)
gPathRendererNames[GpuPathRenderers::kDefault] = "Default Path Renderers";
gPathRendererNames[GpuPathRenderers::kTessellation] = "Tessellation";
gPathRendererNames[GpuPathRenderers::kSmall] = "Small paths (cached sdf or alpha masks)";
gPathRendererNames[GpuPathRenderers::kCoverageCounting] = "CCPR";
gPathRendererNames[GpuPathRenderers::kTriangulating] = "Triangulating";
gPathRendererNames[GpuPathRenderers::kNone] = "Software masks";
@ -1938,9 +1936,6 @@ void Viewer::drawImGui() {
}
}
if (1 == fWindow->sampleCount()) {
if (GrCoverageCountingPathRenderer::IsSupported(ctx)) {
prButton(GpuPathRenderers::kCoverageCounting);
}
prButton(GpuPathRenderers::kSmall);
}
prButton(GpuPathRenderers::kTriangulating);
@ -2772,10 +2767,6 @@ void Viewer::updateUIState() {
}
}
if (1 == fWindow->sampleCount()) {
if(GrCoverageCountingPathRenderer::IsSupported(ctx)) {
writer.appendString(
gPathRendererNames[GpuPathRenderers::kCoverageCounting].c_str());
}
writer.appendString(gPathRendererNames[GpuPathRenderers::kSmall].c_str());
}
writer.appendString(gPathRendererNames[GpuPathRenderers::kTriangulating].c_str());