Fill in the GrTriangulatingPathRenderer::onPrePrepareDraws
This will allow multiple DDL recording threads to share triangulated path data.

Bug: 1108408
Change-Id: Idf204c1bb3dd03d3e1d5419c320e527bf348e38b
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/325576
Commit-Queue: Robert Phillips <robertphillips@google.com>
Reviewed-by: Adlai Holler <adlai@google.com>
commit f9a1b8208e
parent 1ea7f5403b
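As a rough illustration of the sharing scheme described above (not Skia's actual GrThreadSafeCache API; the TessEntry and SharedTessCache names below are invented for the sketch), a minimal, self-contained C++ model looks like this: each DDL recording thread triangulates on the CPU, publishes its result into a shared cache keyed by the path, and a tie-breaker predicate mirroring is_newer_better in the diff decides which triangulation survives a collision.

#include <cstdint>
#include <map>
#include <memory>
#include <mutex>
#include <vector>

struct TessEntry {
    float fTolerance;           // tolerance the triangulation was generated at
    int   fNumCountedCurves;    // 0 => exact (curve-free) triangulation
    std::vector<float> fVerts;  // CPU-side vertex data
};

// Mirrors the CL's is_newer_better: keep the incumbent unless it is inexact and
// the challenger was generated at a strictly smaller tolerance.
static bool is_newer_better(const TessEntry& incumbent, const TessEntry& challenger) {
    if (incumbent.fNumCountedCurves == 0 || incumbent.fTolerance <= challenger.fTolerance) {
        return false;   // prefer the incumbent
    }
    return true;
}

class SharedTessCache {
public:
    // Returns the entry every thread should use for 'key' (possibly someone else's).
    std::shared_ptr<TessEntry> add(uint64_t key, std::shared_ptr<TessEntry> candidate) {
        std::lock_guard<std::mutex> lock(fMutex);
        auto& slot = fMap[key];
        if (!slot || is_newer_better(*slot, *candidate)) {
            slot = std::move(candidate);   // first insertion, or a better triangulation
        }
        return slot;
    }

    std::shared_ptr<TessEntry> find(uint64_t key) {
        std::lock_guard<std::mutex> lock(fMutex);
        auto it = fMap.find(key);
        return it == fMap.end() ? nullptr : it->second;
    }

private:
    std::mutex fMutex;
    std::map<uint64_t, std::shared_ptr<TessEntry>> fMap;
};

A recording thread would call find() first and only triangulate on a miss, then always draw whatever add() hands back, which is the same find/add shape as findVertsWithData()/addVertsWithData() in the hunks below.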
@@ -59,6 +59,7 @@ static sk_sp<SkData> create_data(int numVertices, int numCountedCurves, SkScalar
     return SkData::MakeWithCopy(&info, sizeof(info));
 }
 
+// TODO: remove 'actualNumVertices' and 'actualNumCountedCurves' in a follow up CL
 bool cache_match(const SkData* data, SkScalar tol,
                  int* actualNumVertices, int* actualNumCountedCurves) {
     SkASSERT(data);
@@ -76,11 +77,23 @@ bool cache_match(const SkData* data, SkScalar tol,
     return false;
 }
 
+// Should 'challenger' replace 'incumbent' in the cache if there is a collision?
+bool is_newer_better(SkData* incumbent, SkData* challenger) {
+    const TessInfo* i = static_cast<const TessInfo*>(incumbent->data());
+    const TessInfo* c = static_cast<const TessInfo*>(challenger->data());
+
+    if (i->fNumCountedCurves == 0 || i->fTolerance <= c->fTolerance) {
+        return false;  // prefer the incumbent
+    }
+
+    return true;
+}
+
 // When the SkPathRef genID changes, invalidate a corresponding GrResource described by key.
 class UniqueKeyInvalidator : public SkIDChangeListener {
 public:
     UniqueKeyInvalidator(const GrUniqueKey& key, uint32_t contextUniqueID)
-            : fMsg(key, contextUniqueID) {}
+            : fMsg(key, contextUniqueID, /* inThreadSafeCache */ true) {}
 
 private:
     GrUniqueKeyInvalidatedMessage fMsg;
@@ -92,18 +105,19 @@ class StaticVertexAllocator : public GrEagerVertexAllocator {
 public:
     StaticVertexAllocator(GrResourceProvider* resourceProvider, bool canMapVB)
             : fResourceProvider(resourceProvider)
-            , fCanMapVB(canMapVB)
-            , fVertices(nullptr) {
+            , fCanMapVB(canMapVB) {
     }
 
 #ifdef SK_DEBUG
     ~StaticVertexAllocator() override {
-        SkASSERT(!fLockStride);
+        SkASSERT(!fLockStride && !fVertices && !fVertexBuffer && !fVertexData);
     }
 #endif
 
     void* lock(size_t stride, int eagerCount) override {
-        SkASSERT(!fLockStride);
-        SkASSERT(stride);
+        SkASSERT(!fLockStride && !fVertices && !fVertexBuffer && !fVertexData);
+        SkASSERT(stride && eagerCount);
 
         size_t size = eagerCount * stride;
         fVertexBuffer = fResourceProvider->createBuffer(size, GrGpuBufferType::kVertex,
                                                         kStatic_GrAccessPattern);
@@ -118,24 +132,80 @@ public:
         fLockStride = stride;
         return fVertices;
     }
 
     void unlock(int actualCount) override {
-        SkASSERT(fLockStride);
+        SkASSERT(fLockStride && fVertices && fVertexBuffer && !fVertexData);
 
         if (fCanMapVB) {
             fVertexBuffer->unmap();
         } else {
             fVertexBuffer->updateData(fVertices, actualCount * fLockStride);
             sk_free(fVertices);
         }
 
+        fVertexData = GrThreadSafeCache::MakeVertexData(std::move(fVertexBuffer),
+                                                        actualCount, fLockStride);
+
         fVertices = nullptr;
         fLockStride = 0;
     }
-    sk_sp<GrGpuBuffer> detachVertexBuffer() { return std::move(fVertexBuffer); }
+
+    sk_sp<GrThreadSafeCache::VertexData> detachVertexData() {
+        SkASSERT(!fLockStride && !fVertices && !fVertexBuffer && fVertexData);
+
+        return std::move(fVertexData);
+    }
 
 private:
+    sk_sp<GrThreadSafeCache::VertexData> fVertexData;
     sk_sp<GrGpuBuffer> fVertexBuffer;
     GrResourceProvider* fResourceProvider;
     bool fCanMapVB;
-    void* fVertices;
+    void* fVertices = nullptr;
+    size_t fLockStride = 0;
+};
+
+class CpuVertexAllocator : public GrEagerVertexAllocator {
+public:
+    CpuVertexAllocator() = default;
+
+#ifdef SK_DEBUG
+    ~CpuVertexAllocator() override {
+        SkASSERT(!fLockStride && !fVertices && !fVertexData);
+    }
+#endif
+
+    void* lock(size_t stride, int eagerCount) override {
+        SkASSERT(!fLockStride && !fVertices && !fVertexData);
+        SkASSERT(stride && eagerCount);
+
+        fVertices = sk_malloc_throw(eagerCount * stride);
+        fLockStride = stride;
+
+        return fVertices;
+    }
+
+    void unlock(int actualCount) override {
+        SkASSERT(fLockStride && fVertices && !fVertexData);
+
+        fVertices = sk_realloc_throw(fVertices, actualCount * fLockStride);
+
+        fVertexData = GrThreadSafeCache::MakeVertexData(fVertices, actualCount, fLockStride);
+
+        fVertices = nullptr;
+        fLockStride = 0;
+    }
+
+    sk_sp<GrThreadSafeCache::VertexData> detachVertexData() {
+        SkASSERT(!fLockStride && !fVertices && fVertexData);
+
+        return std::move(fVertexData);
+    }
+
+private:
+    sk_sp<GrThreadSafeCache::VertexData> fVertexData;
+
+    void* fVertices = nullptr;
     size_t fLockStride = 0;
 };
 
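For reference, the contract that both StaticVertexAllocator and the new CpuVertexAllocator implement in the hunk above can be sketched as follows. This is an illustrative, self-contained stand-in, not Skia's GrEagerVertexAllocator or Triangulate() signatures: the triangulator over-allocates via lock(), writes some prefix of the vertices, and unlock() trims to the count actually produced before the op detaches the result.

#include <cstddef>
#include <cstring>
#include <vector>

// Simplified stand-in for the eager-allocator interface used above.
class EagerVertexAllocator {
public:
    virtual ~EagerVertexAllocator() = default;
    virtual void* lock(size_t stride, int eagerCount) = 0;  // worst-case reservation
    virtual void  unlock(int actualCount) = 0;              // trim to what was written
};

// CPU-only allocator analogous to CpuVertexAllocator: keeps the trimmed vertex
// data around so the caller can detach it after triangulation.
class CpuAllocator : public EagerVertexAllocator {
public:
    void* lock(size_t stride, int eagerCount) override {
        fStride = stride;
        fStorage.resize(stride * eagerCount);
        return fStorage.data();
    }
    void unlock(int actualCount) override {
        fStorage.resize(fStride * actualCount);             // drop the unused tail
    }
    std::vector<char> detach() { return std::move(fStorage); }

private:
    std::vector<char> fStorage;
    size_t fStride = 0;
};

// Hypothetical triangulator: reserves a worst-case vertex count, emits fewer.
int fill_verts(EagerVertexAllocator* alloc) {
    const size_t stride = 3 * sizeof(float);
    float* verts = static_cast<float*>(alloc->lock(stride, /*eagerCount=*/64));
    int actual = 3;                                          // pretend one triangle came out
    std::memset(verts, 0, actual * stride);
    alloc->unlock(actual);
    return actual;
}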
@@ -303,6 +373,7 @@ private:
     void createNonAAMesh(Target* target) {
         SkASSERT(!fAntiAlias);
         GrResourceProvider* rp = target->resourceProvider();
+        auto threadSafeCache = target->threadSafeCache();
 
         GrUniqueKey key;
         CreateKey(&key, fShape, fDevClipBounds);
@@ -310,15 +381,30 @@ private:
         SkScalar tol = GrPathUtils::scaleToleranceToSrc(GrPathUtils::kDefaultTolerance,
                                                         fViewMatrix, fShape.bounds());
 
-        sk_sp<GrGpuBuffer> cachedVertexBuffer(rp->findByUniqueKey<GrGpuBuffer>(key));
-        if (cachedVertexBuffer) {
-            int actualVertexCount;
-            if (cache_match(cachedVertexBuffer->getUniqueKey().getCustomData(), tol,
-                            &actualVertexCount, nullptr)) {
-                fMesh = CreateMesh(target, std::move(cachedVertexBuffer), 0, actualVertexCount);
-                return;
-            }
-        }
+        if (!fVertexData) {
+            auto [cachedVerts, data] = threadSafeCache->findVertsWithData(key);
+            if (cachedVerts && cache_match(data.get(), tol, nullptr, nullptr)) {
+                fVertexData = std::move(cachedVerts);
+            }
+        }
+
+        if (fVertexData) {
+            if (!fVertexData->gpuBuffer()) {
+                sk_sp<GrGpuBuffer> buffer = rp->createBuffer(fVertexData->size(),
+                                                             GrGpuBufferType::kVertex,
+                                                             kStatic_GrAccessPattern,
+                                                             fVertexData->vertices());
+                if (!buffer) {
+                    return;
+                }
+
+                // Since we have a direct context and a ref on 'fVertexData' we need not worry
+                // about any threading issues in this call.
+                fVertexData->setGpuBuffer(std::move(buffer));
+            }
+
+            fMesh = CreateMesh(target, fVertexData->refGpuBuffer(), 0, fVertexData->numVertices());
+            return;
+        }
 
         bool canMapVB = GrCaps::kNone_MapFlags != target->caps().mapBufferFlags();
@@ -331,18 +417,28 @@ private:
             return;
         }
 
-        sk_sp<GrGpuBuffer> vb = allocator.detachVertexBuffer();
+        fVertexData = allocator.detachVertexData();
 
         key.setCustomData(create_data(vertexCount, numCountedCurves, tol));
 
+        auto [tmpV, tmpD] = threadSafeCache->addVertsWithData(key, fVertexData, is_newer_better);
+        if (tmpV != fVertexData) {
+            SkASSERT(!tmpV->gpuBuffer());
+            // In this case, although the different triangulation found in the cache is better,
+            // we will continue on with the current triangulation since it is already on the gpu.
+        } else {
+            // This isn't perfect. The current triangulation is in the cache but it may have
+            // replaced a pre-existing one. A duplicated listener is unlikely and not that
+            // expensive so we just roll with it.
             fShape.addGenIDChangeListener(
                     sk_make_sp<UniqueKeyInvalidator>(key, target->contextUniqueID()));
-        rp->assignUniqueKeyToResource(key, vb.get());
+        }
 
-        fMesh = CreateMesh(target, std::move(vb), 0, vertexCount);
+        fMesh = CreateMesh(target, fVertexData->refGpuBuffer(), 0, fVertexData->numVertices());
     }
 
     void createAAMesh(Target* target) {
+        SkASSERT(!fVertexData);
         SkASSERT(fAntiAlias);
         SkPath path = this->getPath();
         if (path.isEmpty()) {
@@ -426,6 +522,58 @@ private:
 
         INHERITED::onPrePrepareDraws(rContext, writeView, clip, dstProxyView,
                                      renderPassXferBarriers);
+
+        if (fAntiAlias) {
+            // TODO: pull the triangulation work forward to the recording thread for the AA case
+            // too.
+            return;
+        }
+
+        auto threadSafeViewCache = rContext->priv().threadSafeCache();
+
+        GrUniqueKey key;
+        CreateKey(&key, fShape, fDevClipBounds);
+
+        SkScalar tol = GrPathUtils::scaleToleranceToSrc(GrPathUtils::kDefaultTolerance,
+                                                        fViewMatrix, fShape.bounds());
+
+        auto [cachedVerts, data] = threadSafeViewCache->findVertsWithData(key);
+        if (cachedVerts && cache_match(data.get(), tol, nullptr, nullptr)) {
+            fVertexData = std::move(cachedVerts);
+            return;
+        }
+
+        CpuVertexAllocator allocator;
+
+        int numCountedCurves;
+        int vertexCount = Triangulate(&allocator, fViewMatrix, fShape, fDevClipBounds, tol,
+                                      &numCountedCurves);
+        if (vertexCount == 0) {
+            return;
+        }
+
+        fVertexData = allocator.detachVertexData();
+
+        key.setCustomData(create_data(vertexCount, numCountedCurves, tol));
+
+        // If some other thread created and cached its own triangulation, the 'is_newer_better'
+        // predicate will replace the version in the cache if 'fVertexData' is a more accurate
+        // triangulation. This will leave some other recording threads using a poorer triangulation
+        // but will result in a version with greater applicability being in the cache.
+        auto [tmpV, tmpD] = threadSafeViewCache->addVertsWithData(key, fVertexData,
+                                                                  is_newer_better);
+        if (tmpV != fVertexData) {
+            // Someone beat us to creating the triangulation (and it is better than ours) so
+            // just go ahead and use it.
+            SkASSERT(cache_match(tmpD.get(), tol, nullptr, nullptr));
+            fVertexData = std::move(tmpV);
+        } else {
+            // This isn't perfect. The current triangulation is in the cache but it may have
+            // replaced a pre-existing one. A duplicated listener is unlikely and not that
+            // expensive so we just roll with it.
+            fShape.addGenIDChangeListener(
+                    sk_make_sp<UniqueKeyInvalidator>(key, rContext->priv().contextID()));
+        }
     }
 
     void onPrepareDraws(Target* target) override {
@@ -474,6 +622,8 @@ private:
     GrSimpleMesh* fMesh = nullptr;
     GrProgramInfo* fProgramInfo = nullptr;
 
+    sk_sp<GrThreadSafeCache::VertexData> fVertexData;
+
     using INHERITED = GrMeshDrawOp;
 };
 
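Putting the pieces together, a rough, self-contained model of the two-phase flow this CL enables might look like the sketch below (illustrative names only, not Skia's API): onPrePrepareDraws runs on a DDL recording thread and only produces or adopts CPU-side vertex data, while the later flush on the direct context attaches the GPU buffer exactly once, which is the situation the "direct context and a ref on 'fVertexData'" comment in the diff relies on.

#include <memory>
#include <vector>

// Illustrative stand-in for GrThreadSafeCache::VertexData.
struct VertexData {
    std::vector<float> fCpuVerts;   // produced on the recording thread
    int fGpuBufferId = 0;           // 0 => not uploaded yet
    bool hasGpuBuffer() const { return fGpuBufferId != 0; }
};

// Recording-thread phase: triangulate (or adopt a cached triangulation);
// no GPU objects are touched here, so many DDL threads can run in parallel.
std::shared_ptr<VertexData> pre_prepare() {
    auto data = std::make_shared<VertexData>();
    data->fCpuVerts = {0.f, 0.f,  1.f, 0.f,  0.f, 1.f};   // one pretend triangle
    return data;
}

// Flush-time phase: the direct-context thread uploads on first use. Holding a
// ref to 'data' while on the flush thread is the hedged analogue of the
// setGpuBuffer() call in createNonAAMesh() above.
int upload_if_needed(VertexData& data, int (*createBuffer)(const float*, size_t)) {
    if (!data.hasGpuBuffer()) {
        data.fGpuBufferId = createBuffer(data.fCpuVerts.data(), data.fCpuVerts.size());
    }
    return data.fGpuBufferId;
}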