diff --git a/src/gpu/GrDrawingManager.cpp b/src/gpu/GrDrawingManager.cpp
index fe44231d4f..b651700886 100644
--- a/src/gpu/GrDrawingManager.cpp
+++ b/src/gpu/GrDrawingManager.cpp
@@ -578,7 +578,7 @@ void GrDrawingManager::moveRenderTasksToDDL(SkDeferredDisplayList* ddl) {
     fDAG.swap(&ddl->fRenderTasks);
 
     for (auto renderTask : ddl->fRenderTasks) {
-        renderTask->prePrepare();
+        renderTask->prePrepare(fContext);
     }
 
     if (fPathRendererChain) {
diff --git a/src/gpu/GrOpsTask.cpp b/src/gpu/GrOpsTask.cpp
index 229dd9c8f4..4d4236b2d9 100644
--- a/src/gpu/GrOpsTask.cpp
+++ b/src/gpu/GrOpsTask.cpp
@@ -391,7 +391,7 @@ void GrOpsTask::endFlush() {
     fAuditTrail = nullptr;
 }
 
-void GrOpsTask::onPrePrepare() {
+void GrOpsTask::onPrePrepare(GrRecordingContext* context) {
     SkASSERT(this->isClosed());
 #ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
     TRACE_EVENT0("skia.gpu", TRACE_FUNC);
@@ -406,7 +406,7 @@ void GrOpsTask::onPrePrepare() {
 
     for (const auto& chain : fOpChains) {
         if (chain.shouldExecute()) {
-            chain.head()->prePrepare();
+            chain.head()->prePrepare(context);
         }
     }
 }
diff --git a/src/gpu/GrOpsTask.h b/src/gpu/GrOpsTask.h
index 9531ab98c8..c2238120b6 100644
--- a/src/gpu/GrOpsTask.h
+++ b/src/gpu/GrOpsTask.h
@@ -50,7 +50,7 @@ public:
      */
     void endFlush() override;
 
-    void onPrePrepare() override;
+    void onPrePrepare(GrRecordingContext*) override;
     /**
      * Together these two functions flush all queued up draws to GrCommandBuffer. The return value
      * of executeOps() indicates whether any commands were actually issued to the GPU.
diff --git a/src/gpu/GrRenderTask.h b/src/gpu/GrRenderTask.h
index 22bd25eb8a..3fb3a18bab 100644
--- a/src/gpu/GrRenderTask.h
+++ b/src/gpu/GrRenderTask.h
@@ -29,7 +29,7 @@ public:
 
     void makeClosed(const GrCaps&);
 
-    void prePrepare() { this->onPrePrepare(); }
+    void prePrepare(GrRecordingContext* context) { this->onPrePrepare(context); }
 
     // These two methods are only invoked at flush time
     void prepare(GrOpFlushState* flushState);
@@ -185,7 +185,8 @@ private:
         }
     };
 
-    virtual void onPrePrepare() {} // Only the GrOpsTask currently overrides this virtual
+    // Only the GrOpsTask currently overrides this virtual
+    virtual void onPrePrepare(GrRecordingContext*) {}
 
     virtual void onPrepare(GrOpFlushState*) {} // Only the GrOpsTask overrides this virtual
     virtual bool onExecute(GrOpFlushState* flushState) = 0;
diff --git a/src/gpu/ops/GrMeshDrawOp.h b/src/gpu/ops/GrMeshDrawOp.h
index b956a4be58..6bdb63768a 100644
--- a/src/gpu/ops/GrMeshDrawOp.h
+++ b/src/gpu/ops/GrMeshDrawOp.h
@@ -72,10 +72,12 @@ protected:
     };
 
 private:
-    void onPrePrepare() final { this->onPrePrepareDraws(); }
+    void onPrePrepare(GrRecordingContext* context) final { this->onPrePrepareDraws(context); }
     void onPrepare(GrOpFlushState* state) final;
 
-    virtual void onPrePrepareDraws() {} // Only the GrTextureOp currently overrides this virtual
+    // Only the GrTextureOp currently overrides this virtual
+    virtual void onPrePrepareDraws(GrRecordingContext*) {}
+
     virtual void onPrepareDraws(Target*) = 0;
 
     typedef GrDrawOp INHERITED;
 };
diff --git a/src/gpu/ops/GrOp.h b/src/gpu/ops/GrOp.h
index f42b5b5045..ac738f9af5 100644
--- a/src/gpu/ops/GrOp.h
+++ b/src/gpu/ops/GrOp.h
@@ -158,7 +158,7 @@ public:
      * onPrePrepare must be prepared to handle both cases (when onPrePrepare has been called
      * ahead of time and when it has not been called).
      */
-    void prePrepare() { this->onPrePrepare(); }
+    void prePrepare(GrRecordingContext* context) { this->onPrePrepare(context); }
 
     /**
      * Called prior to executing. The op should perform any resource creation or data transfers
@@ -289,7 +289,8 @@ private:
         return CombineResult::kCannotCombine;
     }
 
-    virtual void onPrePrepare() {} // Only GrMeshDrawOp currently overrides this virtual
+    // Only GrMeshDrawOp currently overrides this virtual
+    virtual void onPrePrepare(GrRecordingContext*) {}
     virtual void onPrepare(GrOpFlushState*) = 0;
     // If this op is chained then chainBounds is the union of the bounds of all ops in the chain.
     // Otherwise, this op's bounds.
diff --git a/src/gpu/ops/GrQuadPerEdgeAA.cpp b/src/gpu/ops/GrQuadPerEdgeAA.cpp
index ad060dd59f..27bd00eec0 100644
--- a/src/gpu/ops/GrQuadPerEdgeAA.cpp
+++ b/src/gpu/ops/GrQuadPerEdgeAA.cpp
@@ -477,32 +477,10 @@ static V4f compute_nested_persp_quad_vertices(const GrQuadAAFlags aaFlags, Verti
     return coverage;
 }
 
-enum class CoverageMode {
-    kNone,
-    kWithPosition,
-    kWithColor
-};
-
-static CoverageMode get_mode_for_spec(const GrQuadPerEdgeAA::VertexSpec& spec) {
-    if (spec.usesCoverageAA()) {
-        if (spec.compatibleWithCoverageAsAlpha() && spec.hasVertexColors() &&
-            !spec.requiresGeometryDomain()) {
-            // Using a geometric domain acts as a second source of coverage and folding the original
-            // coverage into color makes it impossible to apply the color's alpha to the geometric
-            // domain's coverage when the original shape is clipped.
-            return CoverageMode::kWithColor;
-        } else {
-            return CoverageMode::kWithPosition;
-        }
-    } else {
-        return CoverageMode::kNone;
-    }
-}
-
 // Writes four vertices in triangle strip order, including the additional data for local
 // coordinates, geometry + texture domains, color, and coverage as needed to satisfy the vertex spec
 static void write_quad(GrVertexWriter* vb, const GrQuadPerEdgeAA::VertexSpec& spec,
-                       CoverageMode mode, const V4f& coverage, SkPMColor4f color4f,
+                       GrQuadPerEdgeAA::CoverageMode mode, const V4f& coverage, SkPMColor4f color4f,
                        const SkRect& geomDomain, const SkRect& texDomain, const Vertices& quad) {
     static constexpr auto If = GrVertexWriter::If;
 
@@ -511,13 +489,14 @@ static void write_quad(GrVertexWriter* vb, const GrQuadPerEdgeAA::VertexSpec& sp
         // perspective and coverage mode.
         vb->write(quad.fX[i], quad.fY[i],
                   If(spec.deviceQuadType() == GrQuad::Type::kPerspective, quad.fW[i]),
-                  If(mode == CoverageMode::kWithPosition, coverage[i]));
+                  If(mode == GrQuadPerEdgeAA::CoverageMode::kWithPosition, coverage[i]));
 
         // save color
         if (spec.hasVertexColors()) {
             bool wide = spec.colorType() == GrQuadPerEdgeAA::ColorType::kHalf;
             vb->write(GrVertexColor(
-                color4f * (mode == CoverageMode::kWithColor ? coverage[i] : 1.f), wide));
+                color4f * (mode == GrQuadPerEdgeAA::CoverageMode::kWithColor ? coverage[i] : 1.f),
+                wide));
         }
 
         // save local position
@@ -584,7 +563,7 @@ void* Tessellate(void* vertices, const VertexSpec& spec, const GrQuad& deviceQua
     SkASSERT(deviceQuad.quadType() <= spec.deviceQuadType());
     SkASSERT(!spec.hasLocalCoords() || localQuad.quadType() <= spec.localQuadType());
 
-    CoverageMode mode = get_mode_for_spec(spec);
+    GrQuadPerEdgeAA::CoverageMode mode = spec.coverageMode();
 
     // Load position data into V4fs (always x, y, and load w to avoid branching down the road)
     Vertices outer;
@@ -685,6 +664,63 @@ int VertexSpec::localDimensionality() const {
     return fHasLocalCoords ? (this->localQuadType() == GrQuad::Type::kPerspective ? 3 : 2) : 0;
 }
 
+CoverageMode VertexSpec::coverageMode() const {
+    if (this->usesCoverageAA()) {
+        if (this->compatibleWithCoverageAsAlpha() && this->hasVertexColors() &&
+            !this->requiresGeometryDomain()) {
+            // Using a geometric domain acts as a second source of coverage and folding
+            // the original coverage into color makes it impossible to apply the color's
+            // alpha to the geometric domain's coverage when the original shape is clipped.
+            return CoverageMode::kWithColor;
+        } else {
+            return CoverageMode::kWithPosition;
+        }
+    } else {
+        return CoverageMode::kNone;
+    }
+}
+
+// This needs to stay in sync w/ QuadPerEdgeAAGeometryProcessor::initializeAttrs
+size_t VertexSpec::vertexSize() const {
+    bool needsPerspective = (this->deviceDimensionality() == 3);
+    CoverageMode coverageMode = this->coverageMode();
+
+    size_t count = 0;
+
+    if (coverageMode == CoverageMode::kWithPosition) {
+        if (needsPerspective) {
+            count += GrVertexAttribTypeSize(kFloat4_GrVertexAttribType);
+        } else {
+            count += GrVertexAttribTypeSize(kFloat2_GrVertexAttribType) +
+                     GrVertexAttribTypeSize(kFloat_GrVertexAttribType);
+        }
+    } else {
+        if (needsPerspective) {
+            count += GrVertexAttribTypeSize(kFloat3_GrVertexAttribType);
+        } else {
+            count += GrVertexAttribTypeSize(kFloat2_GrVertexAttribType);
+        }
+    }
+
+    if (this->requiresGeometryDomain()) {
+        count += GrVertexAttribTypeSize(kFloat4_GrVertexAttribType);
+    }
+
+    count += this->localDimensionality() * GrVertexAttribTypeSize(kFloat_GrVertexAttribType);
+
+    if (ColorType::kByte == this->colorType()) {
+        count += GrVertexAttribTypeSize(kUByte4_norm_GrVertexAttribType);
+    } else if (ColorType::kHalf == this->colorType()) {
+        count += GrVertexAttribTypeSize(kHalf4_GrVertexAttribType);
+    }
+
+    if (this->hasDomain()) {
+        count += GrVertexAttribTypeSize(kFloat4_GrVertexAttribType);
+    }
+
+    return count;
+}
+
 ////////////////// Geometry Processor Implementation
 
 class QuadPerEdgeAAGeometryProcessor : public GrGeometryProcessor {
@@ -918,9 +954,10 @@ private:
         this->setTextureSamplerCnt(1);
     }
 
+    // This needs to stay in sync w/ VertexSpec::vertexSize
     void initializeAttrs(const VertexSpec& spec) {
         fNeedsPerspective = spec.deviceDimensionality() == 3;
-        fCoverageMode = get_mode_for_spec(spec);
+        fCoverageMode = spec.coverageMode();
 
         if (fCoverageMode == CoverageMode::kWithPosition) {
             if (fNeedsPerspective) {
diff --git a/src/gpu/ops/GrQuadPerEdgeAA.h b/src/gpu/ops/GrQuadPerEdgeAA.h
index 3558218c52..06bb315efc 100644
--- a/src/gpu/ops/GrQuadPerEdgeAA.h
+++ b/src/gpu/ops/GrQuadPerEdgeAA.h
@@ -25,6 +25,7 @@ class GrShaderCaps;
 namespace GrQuadPerEdgeAA {
     using Saturate = GrTextureOp::Saturate;
 
+    enum class CoverageMode { kNone, kWithPosition, kWithColor };
     enum class Domain : bool { kNo = false, kYes = true };
     enum class ColorType { kNone, kByte, kHalf, kLast = kHalf };
     static const int kColorTypeCount = static_cast<int>(ColorType::kLast) + 1;
@@ -65,6 +66,10 @@ namespace GrQuadPerEdgeAA {
         int localDimensionality() const;
 
         int verticesPerQuad() const { return fUsesCoverageAA ? 8 : 4; }
+
+        CoverageMode coverageMode() const;
+        size_t vertexSize() const;
+
    private:
        static_assert(GrQuad::kTypeCount <= 4, "GrQuad::Type doesn't fit in 2 bits");
        static_assert(kColorTypeCount <= 4, "Color doesn't fit in 2 bits");
diff --git a/src/gpu/ops/GrTextureOp.cpp b/src/gpu/ops/GrTextureOp.cpp
index 880a751aeb..bd4673e26a 100644
--- a/src/gpu/ops/GrTextureOp.cpp
+++ b/src/gpu/ops/GrTextureOp.cpp
@@ -427,24 +427,50 @@ private:
         }
     }
 
-    void onPrePrepareDraws() override {
+    void onPrePrepareDraws(GrRecordingContext* context) override {
        SkASSERT(!fPrePrepared);
        // Pull forward the tessellation of the quads to here
+
+        //GrOpMemoryPool* pool = context->priv().opMemoryPool();
+
        fPrePrepared = true;
    }
 
-    // onPrePrepareDraws may or may not have been called at this point
-    void onPrepareDraws(Target* target) override {
-        TRACE_EVENT0("skia.gpu", TRACE_FUNC);
-        GrQuad::Type quadType = GrQuad::Type::kAxisAligned;
-        GrQuad::Type srcQuadType = GrQuad::Type::kAxisAligned;
-        Domain domain = Domain::kNo;
-        ColorType colorType = ColorType::kNone;
-        int numProxies = 0;
-        int numTotalQuads = 0;
+#ifdef SK_DEBUG
+    void validate() const override {
        auto textureType = fProxies[0].fProxy->textureType();
        const GrSwizzle& swizzle = fProxies[0].fProxy->textureSwizzle();
        GrAAType aaType = this->aaType();
+
+        for (const auto& op : ChainRange<TextureOp>(this)) {
+            for (unsigned p = 0; p < op.fProxyCnt; ++p) {
+                auto* proxy = op.fProxies[p].fProxy;
+                SkASSERT(proxy);
+                SkASSERT(proxy->textureType() == textureType);
+                SkASSERT(proxy->textureSwizzle() == swizzle);
+            }
+
+            // Each individual op must be a single aaType. kCoverage and kNone ops can chain
+            // together but kMSAA ones do not.
+            if (aaType == GrAAType::kCoverage || aaType == GrAAType::kNone) {
+                SkASSERT(op.aaType() == GrAAType::kCoverage || op.aaType() == GrAAType::kNone);
+            } else {
+                SkASSERT(aaType == GrAAType::kMSAA && op.aaType() == GrAAType::kMSAA);
+            }
+        }
+    }
+#endif
+
+    VertexSpec characterize(int* numProxies, int* numTotalQuads) const {
+        GrQuad::Type quadType = GrQuad::Type::kAxisAligned;
+        ColorType colorType = ColorType::kNone;
+        GrQuad::Type srcQuadType = GrQuad::Type::kAxisAligned;
+        Domain domain = Domain::kNo;
+        GrAAType overallAAType = this->aaType();
+
+        *numProxies = 0;
+        *numTotalQuads = 0;
+
        for (const auto& op : ChainRange<TextureOp>(this)) {
            if (op.fQuads.deviceQuadType() > quadType) {
                quadType = op.fQuads.deviceQuadType();
@@ -456,35 +482,28 @@ private:
                domain = Domain::kYes;
            }
            colorType = SkTMax(colorType, static_cast<ColorType>(op.fColorType));
-            numProxies += op.fProxyCnt;
+            *numProxies += op.fProxyCnt;
            for (unsigned p = 0; p < op.fProxyCnt; ++p) {
-                numTotalQuads += op.fProxies[p].fQuadCnt;
-                auto* proxy = op.fProxies[p].fProxy;
-                if (!proxy->isInstantiated()) {
-                    return;
-                }
-                SkASSERT(proxy->textureType() == textureType);
-                SkASSERT(proxy->textureSwizzle() == swizzle);
+                *numTotalQuads += op.fProxies[p].fQuadCnt;
            }
            if (op.aaType() == GrAAType::kCoverage) {
-                SkASSERT(aaType == GrAAType::kCoverage || aaType == GrAAType::kNone);
-                aaType = GrAAType::kCoverage;
+                overallAAType = GrAAType::kCoverage;
            }
        }
 
-        VertexSpec vertexSpec(quadType, colorType, srcQuadType, /* hasLocal */ true, domain, aaType,
-                              /* alpha as coverage */ true);
+        return VertexSpec(quadType, colorType, srcQuadType, /* hasLocal */ true, domain,
+                          overallAAType, /* alpha as coverage */ true);
+    }
 
-        GrSamplerState samplerState = GrSamplerState(GrSamplerState::WrapMode::kClamp,
-                                                     this->filter());
-        GrGpu* gpu = target->resourceProvider()->priv().gpu();
-        uint32_t extraSamplerKey = gpu->getExtraSamplerKeyForProgram(
-                samplerState, fProxies[0].fProxy->backendFormat());
+    // onPrePrepareDraws may or may not have been called at this point
+    void onPrepareDraws(Target* target) override {
+        TRACE_EVENT0("skia.gpu", TRACE_FUNC);
 
-        auto saturate = static_cast<GrTextureOp::Saturate>(fSaturate);
-        sk_sp<GrGeometryProcessor> gp = GrQuadPerEdgeAA::MakeTexturedProcessor(
-                vertexSpec, *target->caps().shaderCaps(), textureType, samplerState, swizzle,
-                extraSamplerKey, std::move(fTextureColorSpaceXform), saturate);
+        SkDEBUGCODE(this->validate();)
+
+        int numProxies, numTotalQuads;
+
+        const VertexSpec vertexSpec = this->characterize(&numProxies, &numTotalQuads);
 
        // We'll use a dynamic state array for the GP textures when there are multiple ops.
        // Otherwise, we use fixed dynamic state to specify the single op's proxy.
@@ -498,7 +517,7 @@ private:
            fixedDynamicState->fPrimitiveProcessorTextures[0] = fProxies[0].fProxy;
        }
 
-        size_t vertexSize = gp->vertexStride();
+        size_t vertexSize = vertexSpec.vertexSize();
 
        GrMesh* meshes = target->allocMeshes(numProxies);
        sk_sp<const GrBuffer> vbuffer;
@@ -549,6 +568,29 @@ private:
        }
        SkASSERT(!numQuadVerticesLeft);
        SkASSERT(!numAllocatedVertices);
+
+        sk_sp<GrGeometryProcessor> gp;
+
+        {
+            auto textureType = fProxies[0].fProxy->textureType();
+            const GrSwizzle& swizzle = fProxies[0].fProxy->textureSwizzle();
+
+            GrSamplerState samplerState = GrSamplerState(GrSamplerState::WrapMode::kClamp,
+                                                         this->filter());
+
+            auto saturate = static_cast<GrTextureOp::Saturate>(fSaturate);
+
+            GrGpu* gpu = target->resourceProvider()->priv().gpu();
+            uint32_t extraSamplerKey = gpu->getExtraSamplerKeyForProgram(
+                    samplerState, fProxies[0].fProxy->backendFormat());
+
+            gp = GrQuadPerEdgeAA::MakeTexturedProcessor(
+                    vertexSpec, *target->caps().shaderCaps(), textureType, samplerState, swizzle,
+                    extraSamplerKey, std::move(fTextureColorSpaceXform), saturate);
+
+            SkASSERT(vertexSize == gp->vertexStride());
+        }
+
        target->recordDraw(
                std::move(gp), meshes, numProxies, fixedDynamicState, dynamicStateArrays);
    }
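
Reviewer note on the final hunk: the new SkASSERT(vertexSize == gp->vertexStride()) is what backs the two "needs to stay in sync" comments. onPrepareDraws now sizes and fills the vertex buffer from VertexSpec::vertexSize() before the geometry processor exists, and the processor later re-derives the same stride from the attributes initializeAttrs() declares, so the assert catches the two computations drifting apart. Below is a minimal standalone sketch of that pattern; the type names, attribute sizes, and layout are hypothetical stand-ins for illustration, not the Skia API.

    // Hypothetical sketch of the "keep two stride computations in sync" pattern.
    #include <cassert>
    #include <cstddef>
    #include <vector>

    enum class AttribType { kFloat, kFloat2, kFloat3, kUByte4Norm };

    static size_t attribSize(AttribType t) {
        switch (t) {
            case AttribType::kFloat:      return 4;
            case AttribType::kFloat2:     return 8;
            case AttribType::kFloat3:     return 12;
            case AttribType::kUByte4Norm: return 4;
        }
        return 0;
    }

    struct Spec {
        bool perspective = false;
        bool coverageWithPosition = false;
        bool hasColor = true;

        // Analogous to VertexSpec::vertexSize(): add up the attributes the spec implies,
        // without ever building a processor.
        size_t vertexSize() const {
            size_t size = attribSize(perspective ? AttribType::kFloat3 : AttribType::kFloat2);
            if (coverageWithPosition) {
                size += attribSize(AttribType::kFloat);
            }
            if (hasColor) {
                size += attribSize(AttribType::kUByte4Norm);
            }
            return size;
        }
    };

    // Analogous to initializeAttrs()/vertexStride(): the processor declares its attribute
    // list from the same spec and reports the stride of what it actually declared.
    struct Processor {
        std::vector<AttribType> attrs;

        explicit Processor(const Spec& spec) {
            attrs.push_back(spec.perspective ? AttribType::kFloat3 : AttribType::kFloat2);
            if (spec.coverageWithPosition) {
                attrs.push_back(AttribType::kFloat);
            }
            if (spec.hasColor) {
                attrs.push_back(AttribType::kUByte4Norm);
            }
        }

        size_t vertexStride() const {
            size_t stride = 0;
            for (AttribType t : attrs) {
                stride += attribSize(t);
            }
            return stride;
        }
    };

    int main() {
        Spec spec;
        spec.coverageWithPosition = true;

        Processor gp(spec);
        // The processor-free computation must agree with the processor's stride;
        // this mirrors the patch's SkASSERT(vertexSize == gp->vertexStride()).
        assert(spec.vertexSize() == gp.vertexStride());
        return 0;
    }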