Extract a GrVertexChunkArray class with a builder
This will be used by the new stroke tessellator. All the other tessellators should start chopping and chunking too. That will allow us to quit cropping paths if we are afraid they might need more segments than are supported.

Bug: chromium:1172543
Change-Id: I30f0ebb581f56cac099d8c05e0e181c4657c3db8
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/390096
Commit-Queue: Chris Dalton <csmartdalton@google.com>
Reviewed-by: Robert Phillips <robertphillips@google.com>
Parent: 7724291b6e
Commit: 8ed7a8d1c6
@@ -195,9 +195,10 @@ DEF_PATH_TESS_BENCH(wangs_formula_conic_log2, make_conic_path(), SkMatrix::I())
DEF_PATH_TESS_BENCH(middle_out_triangulation,
ToolUtils::make_star(SkRect::MakeWH(500, 500), kNumCubicsInChalkboard),
SkMatrix::I()) {
sk_sp<const GrBuffer> buffer;
int baseVertex;
auto vertexData = static_cast<SkPoint*>(fTarget->makeVertexSpace(
sizeof(SkPoint), kNumCubicsInChalkboard, nullptr, &baseVertex));
sizeof(SkPoint), kNumCubicsInChalkboard, &buffer, &baseVertex));
GrMiddleOutPolygonTriangulator::WritePathInnerFan(vertexData, 3, fPath);
}

@@ -303,8 +304,8 @@ private:
SkMatrix matrix = SkMatrix::Scale(fMatrixScale, fMatrixScale);
for (int i = 0; i < loops; ++i) {
GrStrokeHardwareTessellator tessellator(fShaderFlags, fPathStrokes.data(),
fTotalVerbCount, *fTarget->caps().shaderCaps());
tessellator.prepare(fTarget.get(), matrix);
*fTarget->caps().shaderCaps());
tessellator.prepare(fTarget.get(), matrix, fTotalVerbCount);
fTarget->resetAllocator();
}
}
@@ -364,7 +365,7 @@ private:
GrStrokeIndirectTessellator tessellator(ShaderFlags::kNone, SkMatrix::I(),
&pathStroke, path.countVerbs(),
fTarget->allocator());
tessellator.prepare(fTarget.get(), SkMatrix::I());
tessellator.prepare(fTarget.get(), SkMatrix::I(), path.countVerbs());
}
fTarget->resetAllocator();
}

@@ -271,6 +271,7 @@ skia_gpu_sources = [
"$_src/gpu/GrUserStencilSettings.h",
"$_src/gpu/GrUtil.cpp",
"$_src/gpu/GrUtil.h",
"$_src/gpu/GrVertexChunkArray.h",
"$_src/gpu/GrVertexWriter.h",
"$_src/gpu/GrVx.h",
"$_src/gpu/GrWaitRenderTask.cpp",

@@ -87,7 +87,7 @@ public:

bool operator==(const GrDrawIndexedIndirectWriter& that) { return fData == that.fData; }

bool isValid() const { return fData != nullptr; }
operator bool() const { return fData != nullptr; }

GrDrawIndexedIndirectWriter makeOffset(int drawCount) const { return {fData + drawCount}; }
src/gpu/GrVertexChunkArray.h (new file, 102 lines)
@@ -0,0 +1,102 @@
/*
 * Copyright 2021 Google LLC.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrVertexChunkArray_DEFINED
#define GrVertexChunkArray_DEFINED

#include "include/private/SkNoncopyable.h"
#include "include/private/SkTArray.h"
#include "src/gpu/GrBuffer.h"
#include "src/gpu/GrVertexWriter.h"
#include "src/gpu/ops/GrMeshDrawOp.h"

// Represents a chunk of vertex data. Use with GrVertexChunkArray and GrVertexChunkBuilder. We write
// the data out in chunks when we don't start out knowing exactly how many vertices (or instances)
// we will end up writing.
struct GrVertexChunk {
    sk_sp<const GrBuffer> fBuffer;
    int fVertexCount = 0;
    int fBaseVertex;
};

// Represents an array of GrVertexChunks.
//
// We only preallocate 1 chunk because if the array needs to grow, then we're also allocating a
// brand new GPU buffer anyway.
using GrVertexChunkArray = SkSTArray<1, GrVertexChunk>;

// Builds a GrVertexChunkArray. The provided Target must not be used externally throughout the
// entire lifetime of this object.
class GrVertexChunkBuilder : SkNoncopyable {
public:
    GrVertexChunkBuilder(GrMeshDrawOp::Target* target, GrVertexChunkArray* chunks, size_t stride,
                         int minVerticesPerChunk)
            : fTarget(target)
            , fChunks(chunks)
            , fStride(stride)
            , fMinVerticesPerChunk(minVerticesPerChunk) {
        SkASSERT(fMinVerticesPerChunk > 0);
    }

    ~GrVertexChunkBuilder() {
        if (!fChunks->empty()) {
            fTarget->putBackVertices(fCurrChunkVertexCapacity - fCurrChunkVertexCount, fStride);
            fChunks->back().fVertexCount = fCurrChunkVertexCount;
        }
    }

    // Appends 'count' contiguous vertices. These vertices are not guaranteed to be contiguous with
    // previous or future calls to appendVertices.
    SK_ALWAYS_INLINE GrVertexWriter appendVertices(int count) {
        SkASSERT(count > 0);
        if (fCurrChunkVertexCount + count > fCurrChunkVertexCapacity && !this->allocChunk(count)) {
            return {nullptr};
        }
        SkASSERT(fCurrChunkVertexCount + count <= fCurrChunkVertexCapacity);
        fCurrChunkVertexCount += count;
        return std::exchange(fCurrChunkVertexWriter,
                             fCurrChunkVertexWriter.makeOffset(fStride * count));
    }

    SK_ALWAYS_INLINE GrVertexWriter appendVertex() { return this->appendVertices(1); }

private:
    bool allocChunk(int minCount) {
        if (!fChunks->empty()) {
            // No need to put back vertices; the buffer is full.
            fChunks->back().fVertexCount = fCurrChunkVertexCount;
        }
        fCurrChunkVertexCount = 0;
        GrVertexChunk* chunk = &fChunks->push_back();
        fCurrChunkVertexWriter = {fTarget->makeVertexSpaceAtLeast(fStride,
                                                                  fMinVerticesPerChunk * minCount,
                                                                  fMinVerticesPerChunk * minCount,
                                                                  &chunk->fBuffer,
                                                                  &chunk->fBaseVertex,
                                                                  &fCurrChunkVertexCapacity)};
        if (!fCurrChunkVertexWriter || !chunk->fBuffer || fCurrChunkVertexCapacity < minCount) {
            SkDebugf("WARNING: Failed to allocate vertex buffer for GrVertexChunk.\n");
            fChunks->pop_back();
            SkASSERT(fCurrChunkVertexCount == 0);
            fCurrChunkVertexCapacity = 0;
            return false;
        }
        fMinVerticesPerChunk *= 2;
        return true;
    }

    GrMeshDrawOp::Target* const fTarget;
    GrVertexChunkArray* const fChunks;
    const size_t fStride;
    size_t fMinVerticesPerChunk;

    GrVertexWriter fCurrChunkVertexWriter;
    int fCurrChunkVertexCount = 0;
    int fCurrChunkVertexCapacity = 0;
};

#endif
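For orientation, here is a minimal usage sketch of the new builder (not part of this commit). The op name, the SkPoint-per-vertex layout, and the 64-vertex chunk hint are illustrative assumptions; the chunk fields and the draw loop mirror GrStrokeHardwareTessellator::draw() further down in this change.

// Hypothetical example, assuming #include "src/gpu/GrVertexChunkArray.h".
class ExampleOp {
public:
    void onPrepare(GrMeshDrawOp::Target* target) {
        // Scope the builder: its destructor finalizes the last chunk and returns any
        // unused vertex space to the target.
        GrVertexChunkBuilder builder(target, &fChunks, sizeof(SkPoint),
                                     /*minVerticesPerChunk=*/64);
        for (int i = 0; i < 100; ++i) {
            // appendVertex() may start a brand new chunk (and GPU buffer) when the current
            // one is full; the returned writer converts to false on allocation failure.
            if (GrVertexWriter vertexWriter = builder.appendVertex()) {
                vertexWriter.write(SkPoint::Make(float(i), float(i)));
            }
        }
    }

    void onExecute(GrOpFlushState* flushState) {
        // Each chunk carries its own buffer, vertex count, and base vertex.
        for (const GrVertexChunk& chunk : fChunks) {
            flushState->bindBuffers(nullptr, nullptr, chunk.fBuffer);
            flushState->draw(chunk.fVertexCount, chunk.fBaseVertex);
        }
    }

private:
    GrVertexChunkArray fChunks;
};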
@@ -37,11 +37,8 @@ struct GrVertexWriter {
return *this;
}

bool operator==(const GrVertexWriter& that) {
return fPtr == that.fPtr;
}

bool isValid() const { return fPtr != nullptr; }
bool operator==(const GrVertexWriter& that) const { return fPtr == that.fPtr; }
operator bool() const { return fPtr != nullptr; }

GrVertexWriter makeOffset(size_t offsetInBytes) const {
return {SkTAddOffset<void>(fPtr, offsetInBytes)};
@@ -10,13 +10,20 @@

#include "include/gpu/GrDirectContext.h"
#include "src/gpu/GrDirectContextPriv.h"
#include "src/gpu/GrGpu.h"
#include "src/gpu/ops/GrMeshDrawOp.h"

// This is a mock GrMeshDrawOp::Target implementation that just gives back pointers into
// pre-allocated CPU buffers, rather than allocating and mapping GPU buffers.
class GrMockOpTarget : public GrMeshDrawOp::Target {
public:
GrMockOpTarget(sk_sp<GrDirectContext> mockContext) : fMockContext(std::move(mockContext)) {}
GrMockOpTarget(sk_sp<GrDirectContext> mockContext) : fMockContext(std::move(mockContext)) {
fStaticVertexBuffer = fMockContext->priv().getGpu()->createBuffer(
sizeof(fStaticVertexData), GrGpuBufferType::kVertex, kDynamic_GrAccessPattern);
fStaticIndirectBuffer = fMockContext->priv().getGpu()->createBuffer(
sizeof(fStaticIndirectData), GrGpuBufferType::kDrawIndirect,
kDynamic_GrAccessPattern);
}
const GrDirectContext* mockContext() const { return fMockContext.get(); }
const GrCaps& caps() const override { return *fMockContext->priv().caps(); }
GrThreadSafeCache* threadSafeCache() const override {
@@ -34,23 +41,25 @@ public:
GrXferBarrierFlags renderPassBarriers() const override { return GrXferBarrierFlags::kNone; }
GrLoadOp colorLoadOp() const override { return GrLoadOp::kLoad; }

void* makeVertexSpace(size_t vertexSize, int vertexCount, sk_sp<const GrBuffer>*,
void* makeVertexSpace(size_t vertexSize, int vertexCount, sk_sp<const GrBuffer>* buffer,
int* startVertex) override {
if (vertexSize * vertexCount > sizeof(fStaticVertexData)) {
SK_ABORT("FATAL: wanted %zu bytes of static vertex data; only have %zu.\n",
vertexSize * vertexCount, sizeof(fStaticVertexData));
}
*buffer = fStaticVertexBuffer;
*startVertex = 0;
return fStaticVertexData;
}

void* makeVertexSpaceAtLeast(size_t vertexSize, int minVertexCount, int fallbackVertexCount,
sk_sp<const GrBuffer>*, int* startVertex,
sk_sp<const GrBuffer>* buffer, int* startVertex,
int* actualVertexCount) override {
if (vertexSize * minVertexCount > sizeof(fStaticVertexData)) {
SK_ABORT("FATAL: wanted %zu bytes of static vertex data; only have %zu.\n",
vertexSize * minVertexCount, sizeof(fStaticVertexData));
}
*buffer = fStaticVertexBuffer;
*startVertex = 0;
*actualVertexCount = sizeof(fStaticVertexData) / vertexSize;
return fStaticVertexData;
@@ -62,6 +71,7 @@ public:
SK_ABORT("FATAL: wanted %zu bytes of static indirect data; only have %zu.\n",
sizeof(GrDrawIndirectCommand) * drawCount, sizeof(fStaticIndirectData));
}
*buffer = fStaticIndirectBuffer;
*offsetInBytes = 0;
return fStaticIndirectData;
}
@@ -75,6 +85,7 @@ public:
SK_ABORT("FATAL: wanted %zu bytes of static indirect data; only have %zu.\n",
sizeof(GrDrawIndexedIndirectCommand) * drawCount, sizeof(fStaticIndirectData));
}
*buffer = fStaticIndirectBuffer;
*offsetInBytes = 0;
return fStaticIndirectData;
}
@@ -103,7 +114,9 @@ public:
private:
sk_sp<GrDirectContext> fMockContext;
char fStaticVertexData[6 * 1024 * 1024];
sk_sp<GrGpuBuffer> fStaticVertexBuffer;
char fStaticIndirectData[sizeof(GrDrawIndexedIndirectCommand) * 32];
sk_sp<GrGpuBuffer> fStaticIndirectBuffer;
SkSTArenaAllocWithReset<1024 * 1024> fAllocator;
GrXferProcessor::DstProxyView fDstProxyView;
};
@@ -102,6 +102,7 @@ void GrPathIndirectTessellator::prepare(GrMeshDrawOp::Target* target, const SkMa
fIndirectIndexBuffer = GrMiddleOutCubicShader::FindOrMakeMiddleOutIndexBuffer(
target->resourceProvider());
if (!fIndirectIndexBuffer) {
vertexAlloc.unlock(0);
return;
}

@@ -111,8 +112,9 @@ void GrPathIndirectTessellator::prepare(GrMeshDrawOp::Target* target, const SkMa
int indirectLockCnt = kMaxResolveLevel + 1;
GrDrawIndexedIndirectWriter indirectWriter = target->makeDrawIndexedIndirectSpace(
indirectLockCnt, &fIndirectDrawBuffer, &fIndirectDrawOffset);
if (!indirectWriter.isValid()) {
if (!indirectWriter) {
SkASSERT(!fIndirectDrawBuffer);
vertexAlloc.unlock(0);
return;
}
@@ -42,7 +42,6 @@ static grvx::float2 pow4(grvx::float2 x) {
class PatchWriter {
public:
using ShaderFlags = GrStrokeTessellator::ShaderFlags;
using PatchChunk = GrStrokeHardwareTessellator::PatchChunk;

enum class JoinType {
kMiter = SkPaint::kMiter_Join,
@@ -52,27 +51,15 @@ public:
};

PatchWriter(ShaderFlags shaderFlags, GrMeshDrawOp::Target* target, float matrixMaxScale,
SkTArray<PatchChunk>* patchChunks, int totalCombinedVerbCnt)
GrVertexChunkArray* patchChunks, int minPatchesPerChunk)
: fShaderFlags(shaderFlags)
, fTarget(target)
, fPatchChunks(patchChunks)
, fPatchStride(GrStrokeTessellateShader::PatchStride(fShaderFlags))
, fChunkBuilder(target, patchChunks,
GrStrokeTessellateShader::PatchStride(fShaderFlags), minPatchesPerChunk)
// Subtract 2 because the tessellation shader chops every cubic at two locations, and
// each chop has the potential to introduce an extra segment.
, fMaxTessellationSegments(target->caps().shaderCaps()->maxTessellationSegments() - 2)
, fParametricIntolerance(GrStrokeTolerances::CalcParametricIntolerance(
matrixMaxScale)) {
// Pre-allocate at least enough vertex space for 1 in 4 strokes to chop, and for 8 caps.
int strokePreallocCount = totalCombinedVerbCnt * 5/4;
int capPreallocCount = 8;
fNextChunkMinPatchAllocCount = strokePreallocCount + capPreallocCount;
}

~PatchWriter() {
if (!fPatchChunks->empty()) {
fTarget->putBackVertices(fCurrChunkPatchCapacity - fCurrChunkPatchCount, fPatchStride);
fPatchChunks->back().fPatchCount = fCurrChunkPatchCount;
}
}

// This is the intolerance value, adjusted for the view matrix, to use with Wang's formulas when
@@ -239,10 +226,10 @@ public:
fLastControlPoint = p[0]; // Disables the join section of this patch.
}

if (this->allocPatch()) {
fPatchWriter.write(fLastControlPoint);
fPatchWriter.writeArray(p, 4);
this->writeDynamicAttribs();
if (GrVertexWriter patchWriter = fChunkBuilder.appendVertex()) {
patchWriter.write(fLastControlPoint);
patchWriter.writeArray(p, 4);
this->writeDynamicAttribs(&patchWriter);
}

fLastControlPoint = endControlPoint;
@@ -578,71 +565,37 @@ private:
// We should never write out joins before the first curve.
SkASSERT(fHasLastControlPoint);

if (this->allocPatch()) {
fPatchWriter.write(fLastControlPoint, junctionPoint);
if (GrVertexWriter patchWriter = fChunkBuilder.appendVertex()) {
patchWriter.write(fLastControlPoint, junctionPoint);
if (joinType == JoinType::kBowtie) {
// {prevControlPoint, [p0, p0, p0, p3]} is a reserved patch pattern that means this
// patch is a bowtie. The bowtie is anchored on p0 and its tangent angles go from
// (p0 - prevControlPoint) to (p3 - p0).
fPatchWriter.write(junctionPoint, junctionPoint);
patchWriter.write(junctionPoint, junctionPoint);
} else {
// {prevControlPoint, [p0, p3, p3, p3]} is a reserved patch pattern that means this
// patch is a join only (no curve sections in the patch). The join is anchored on p0
// and its tangent angles go from (p0 - prevControlPoint) to (p3 - p0).
fPatchWriter.write(nextControlPoint, nextControlPoint);
patchWriter.write(nextControlPoint, nextControlPoint);
}
fPatchWriter.write(nextControlPoint);
this->writeDynamicAttribs();
patchWriter.write(nextControlPoint);
this->writeDynamicAttribs(&patchWriter);
}

fLastControlPoint = nextControlPoint;
}

SK_ALWAYS_INLINE void writeDynamicAttribs() {
SK_ALWAYS_INLINE void writeDynamicAttribs(GrVertexWriter* patchWriter) {
if (fShaderFlags & ShaderFlags::kDynamicStroke) {
fPatchWriter.write(fDynamicStroke);
patchWriter->write(fDynamicStroke);
}
if (fShaderFlags & ShaderFlags::kDynamicColor) {
fPatchWriter.write(fDynamicColor);
patchWriter->write(fDynamicColor);
}
}

SK_ALWAYS_INLINE bool allocPatch() {
if (fCurrChunkPatchCount == fCurrChunkPatchCapacity && !this->allocPatchChunk()) {
return false;
}
SkASSERT(fCurrChunkPatchCount < fCurrChunkPatchCapacity);
++fCurrChunkPatchCount;
return true;
}

bool allocPatchChunk() {
if (!fPatchChunks->empty()) {
fPatchChunks->back().fPatchCount = fCurrChunkPatchCount;
// No need to put back vertices; the buffer is full.
}
fCurrChunkPatchCount = 0;
PatchChunk* chunk = &fPatchChunks->push_back();
fPatchWriter = {fTarget->makeVertexSpaceAtLeast(fPatchStride, fNextChunkMinPatchAllocCount,
fNextChunkMinPatchAllocCount,
&chunk->fPatchBuffer, &chunk->fBasePatch,
&fCurrChunkPatchCapacity)};
if (!fPatchWriter.isValid()) {
SkDebugf("WARNING: Failed to allocate vertex buffer for tessellated stroke.\n");
fPatchChunks->pop_back();
fCurrChunkPatchCapacity = 0;
return false;
}
fNextChunkMinPatchAllocCount *= 2;
return true;
}

const ShaderFlags fShaderFlags;
GrMeshDrawOp::Target* const fTarget;
SkTArray<PatchChunk>* const fPatchChunks;

// Size in bytes of a tessellation patch with our shader flags.
const size_t fPatchStride;
GrVertexChunkBuilder fChunkBuilder;

// The maximum number of tessellation segments the hardware can emit for a single patch.
const int fMaxTessellationSegments;
@@ -671,12 +624,6 @@ private:
bool fSoloRoundJoinAlwaysFitsInPatch;
JoinType fStrokeJoinType;

// Variables related to the patch chunk that we are currently writing out during prepareBuffers.
int fCurrChunkPatchCount = 0;
int fCurrChunkPatchCapacity = 0;
int fNextChunkMinPatchAllocCount;
GrVertexWriter fPatchWriter;

// Variables related to the specific contour that we are currently iterating during
// prepareBuffers().
bool fHasLastControlPoint = false;
@@ -735,7 +682,7 @@ SK_ALWAYS_INLINE static bool cubic_has_cusp(const SkPoint p[4]) {
} // namespace

void GrStrokeHardwareTessellator::prepare(GrMeshDrawOp::Target* target,
const SkMatrix& viewMatrix) {
const SkMatrix& viewMatrix, int totalCombinedVerbCnt) {
using JoinType = PatchWriter::JoinType;

std::array<float, 2> matrixMinMaxScales;
@@ -743,8 +690,13 @@ void GrStrokeHardwareTessellator::prepare(GrMeshDrawOp::Target* target,
matrixMinMaxScales.fill(1);
}

// Over-allocate enough patches for 1 in 4 strokes to chop and for 8 extra caps.
int strokePreallocCount = totalCombinedVerbCnt * 5/4;
int capPreallocCount = 8;
int minPatchesPerChunk = strokePreallocCount + capPreallocCount;
PatchWriter patchWriter(fShaderFlags, target, matrixMinMaxScales[1], &fPatchChunks,
fTotalCombinedVerbCnt);
minPatchesPerChunk);

if (!(fShaderFlags & ShaderFlags::kDynamicStroke)) {
// Strokes are static. Calculate tolerances once.
const SkStrokeRec& stroke = fPathStrokeList->fStroke;
@@ -918,9 +870,7 @@ void GrStrokeHardwareTessellator::prepare(GrMeshDrawOp::Target* target,

void GrStrokeHardwareTessellator::draw(GrOpFlushState* flushState) const {
for (const auto& chunk : fPatchChunks) {
if (chunk.fPatchBuffer) {
flushState->bindBuffers(nullptr, nullptr, std::move(chunk.fPatchBuffer));
flushState->draw(chunk.fPatchCount, chunk.fBasePatch);
}
flushState->bindBuffers(nullptr, nullptr, chunk.fBuffer);
flushState->draw(chunk.fVertexCount, chunk.fBaseVertex);
}
}
@@ -8,8 +8,7 @@
#ifndef GrStrokeHardwareTessellator_DEFINED
#define GrStrokeHardwareTessellator_DEFINED

#include "include/core/SkStrokeRec.h"
#include "src/gpu/GrVertexWriter.h"
#include "src/gpu/GrVertexChunkArray.h"
#include "src/gpu/tessellate/GrStrokeTessellator.h"

// Renders opaque, constant-color strokes by decomposing them into standalone tessellation patches.
@@ -17,30 +16,17 @@
// MSAA if antialiasing is desired.
class GrStrokeHardwareTessellator : public GrStrokeTessellator {
public:
// We generate and store patch buffers in chunks. Normally there will only be one chunk, but in
// rare cases the first can run out of space if too many cubics needed to be subdivided.
struct PatchChunk {
sk_sp<const GrBuffer> fPatchBuffer;
int fPatchCount = 0;
int fBasePatch;
};

GrStrokeHardwareTessellator(ShaderFlags shaderFlags, PathStrokeList* pathStrokeList,
int totalCombinedVerbCnt, const GrShaderCaps& shaderCaps)
: GrStrokeTessellator(shaderFlags, std::move(pathStrokeList))
, fTotalCombinedVerbCnt(totalCombinedVerbCnt) {
const GrShaderCaps&)
: GrStrokeTessellator(shaderFlags, std::move(pathStrokeList)) {
}

void prepare(GrMeshDrawOp::Target*, const SkMatrix&) override;
void prepare(GrMeshDrawOp::Target*, const SkMatrix& viewMatrix,
int totalCombinedVerbCnt) override;
void draw(GrOpFlushState*) const override;

private:
// The combined number of path verbs from all paths in fPathStrokeList.
const int fTotalCombinedVerbCnt;

SkSTArray<1, PatchChunk> fPatchChunks;

friend class GrOp; // For ctor.
GrVertexChunkArray fPatchChunks;

public:
// This class is used to benchmark prepareBuffers().
@@ -719,7 +719,7 @@ public:
#ifdef SK_DEBUG
~BinningInstanceWriter() {
for (int i = 0; i < kNumBins; ++i) {
if (fInstanceWriters[i].isValid()) {
if (fInstanceWriters[i]) {
SkASSERT(fInstanceWriters[i] == fEndWriters[i]);
}
}
@@ -749,7 +749,8 @@ private:
} // namespace

void GrStrokeIndirectTessellator::prepare(GrMeshDrawOp::Target* target,
const SkMatrix& viewMatrix) {
const SkMatrix& viewMatrix,
int /*totalCombinedVerbCnt*/) {
SkASSERT(fResolveLevels);
SkASSERT(!fDrawIndirectBuffer);
SkASSERT(!fInstanceBuffer);
@@ -775,7 +776,7 @@ void GrStrokeIndirectTessellator::prepare(GrMeshDrawOp::Target* target,
size_t instanceStride = GrStrokeTessellateShader::IndirectInstanceStride(fShaderFlags);
GrVertexWriter instanceWriter = {target->makeVertexSpace(instanceStride, fChainedInstanceCount,
&fInstanceBuffer, &baseInstance)};
if (!instanceWriter.isValid()) {
if (!instanceWriter) {
SkASSERT(!fInstanceBuffer);
fDrawIndirectBuffer.reset();
return;
@@ -22,14 +22,15 @@ public:
// become an issue if we try to draw a stroke with an astronomically wide width.
constexpr static int8_t kMaxResolveLevel = 15;

GrStrokeIndirectTessellator(ShaderFlags, const SkMatrix&, PathStrokeList*,
GrStrokeIndirectTessellator(ShaderFlags, const SkMatrix& viewMatrix, PathStrokeList*,
int totalCombinedVerbCnt, SkArenaAlloc*);

// Adds the given tessellator to our chain. The chained tessellators all append to a shared
// indirect draw list during prepare().
void addToChain(GrStrokeIndirectTessellator*);

void prepare(GrMeshDrawOp::Target*, const SkMatrix&) override;
void prepare(GrMeshDrawOp::Target*, const SkMatrix& viewMatrix,
int totalCombinedVerbCnt) override;

void draw(GrOpFlushState*) const override;
@@ -174,7 +174,6 @@ void GrStrokeTessellateOp::prePrepareTessellator(GrPathShader::ProgramArgs&& arg
((fShaderFlags & ShaderFlags::kDynamicColor) || fTotalCombinedVerbCnt > 50)) {
SkASSERT(!this->nextInChain()); // We never chain when hw tessellation is an option.
fTessellator = arena->make<GrStrokeHardwareTessellator>(fShaderFlags, &fPathStrokeList,
fTotalCombinedVerbCnt,
*caps.shaderCaps());
shaderMode = GrStrokeTessellateShader::Mode::kTessellation;
} else {
@@ -259,7 +258,7 @@ void GrStrokeTessellateOp::onPrepare(GrOpFlushState* flushState) {
flushState->detachAppliedClip());
}
SkASSERT(fTessellator);
fTessellator->prepare(flushState, fViewMatrix);
fTessellator->prepare(flushState, fViewMatrix, fTotalCombinedVerbCnt);
}

void GrStrokeTessellateOp::onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) {
@@ -29,7 +29,8 @@ public:
: fShaderFlags(shaderFlags), fPathStrokeList(pathStrokeList) {}

// Called before draw(). Prepares GPU buffers containing the geometry to tessellate.
virtual void prepare(GrMeshDrawOp::Target*, const SkMatrix& viewMatrix) = 0;
virtual void prepare(GrMeshDrawOp::Target*, const SkMatrix& viewMatrix,
int totalCombinedVerbCnt) = 0;

// Issues draw calls for the tessellated stroke. The caller is responsible for binding its
// desired pipeline ahead of time.
@@ -46,7 +46,7 @@ static void test_stroke(skiatest::Reporter* r, GrDirectContext* ctx, GrMockOpTar
matrix, &pathStrokeList, path.countVerbs(),
target->allocator());
tessellator.verifyResolveLevels(r, target, matrix, path, stroke);
tessellator.prepare(target, matrix);
tessellator.prepare(target, matrix, path.countVerbs());
tessellator.verifyBuffers(r, target, matrix, stroke);
}
}