Make deferred upload handling and draw recording virtual interfaces implemented by GrOpFlushState.

The motivation for this is to allow GrDrawOpAtlas to be used by clients other than GrOpFlushState. Making GrMeshDrawOp::Target an abstract interface as well is somewhat incidental to that goal.

Bug: skia:
Change-Id: I0987adfa8a269aa2ca94147e933a2827d734c1cc
Reviewed-on: https://skia-review.googlesource.com/65121
Commit-Queue: Brian Salomon <bsalomon@google.com>
Reviewed-by: Robert Phillips <robertphillips@google.com>
Author: Brian Salomon <bsalomon@google.com>
Date: 2017-10-31 14:42:10 -04:00
Committer: Skia Commit-Bot
Commit: 29b60c9020 (parent: f78b55cb94)
16 changed files with 397 additions and 309 deletions

src/gpu/GrDeferredUpload.h

@@ -13,6 +13,24 @@
class GrTextureProxy;
/**
* A word about deferred uploads and tokens: Ops should usually schedule their uploads to occur at
* the beginning of a frame whenever possible. These are called ASAP uploads. Of course, this
* requires that there are no not-yet-flushed draws that rely on the old texture contents. If
* such draws exist, an ASAP upload would happen prior to them and they would read the new
* (wrong) texture data. When this read-before-write data hazard exists, the op should
* schedule an inline upload instead.
*
* Ops, in conjunction with helpers such as GrDrawOpAtlas, use upload tokens to know what the most
* recent draw was that referenced a resource (or portion of a resource). Each draw is assigned a
* token. A resource (or portion thereof) can be tagged with the most recent reading draw's token.
* The deferred uploads target provides a facility for testing whether the draw corresponding to the
* token has been flushed. If it has not been flushed then the op must perform an inline upload
* instead so that the upload occurs after the draw depending on the old contents and before the
* draw depending on the updated contents. When scheduling an inline upload the op provides the
* token of the draw that the upload must occur before.
*/
/**
* GrDeferredUploadToken is used to sequence the uploads relative to each other and to draws.
*/
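To make the hazard rule above concrete, here is a minimal sketch of the decision a client makes when scheduling an upload. 'CachedEntry' and its token accessors are hypothetical stand-ins; only the target calls come from the GrDeferredUploadTarget interface declared below.

// 'entry' tracks the token of the most recent draw that read it. If that draw
// has already been flushed, the new contents can be uploaded at the start of
// the flush (ASAP); otherwise the upload must be interleaved inline so that it
// lands after the draw that reads the old contents.
void scheduleUpload(GrDeferredUploadTarget* target, sk_sp<CachedEntry> entry) {
    auto doUpload = [entry](GrDeferredTextureUploadWritePixelsFn& writePixels) {
        entry->performUpload(writePixels);  // hypothetical upload routine
    };
    if (entry->lastReadToken() < target->nextTokenToFlush()) {
        entry->setLastUploadToken(target->addASAPUpload(std::move(doUpload)));
    } else {
        entry->setLastUploadToken(target->addInlineUpload(std::move(doUpload)));
    }
}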
@@ -22,19 +40,44 @@ public:
GrDeferredUploadToken(const GrDeferredUploadToken&) = default;
GrDeferredUploadToken& operator=(const GrDeferredUploadToken&) = default;
bool operator==(const GrDeferredUploadToken& that) const {
return fSequenceNumber == that.fSequenceNumber;
}
bool operator!=(const GrDeferredUploadToken& that) const { return !(*this == that); }
bool inInterval(const GrDeferredUploadToken& start, const GrDeferredUploadToken& finish) {
return fSequenceNumber >= start.fSequenceNumber &&
fSequenceNumber <= finish.fSequenceNumber;
bool operator<(const GrDeferredUploadToken that) const {
return fSequenceNumber < that.fSequenceNumber;
}
bool operator<=(const GrDeferredUploadToken that) const {
return fSequenceNumber <= that.fSequenceNumber;
}
bool operator>(const GrDeferredUploadToken that) const {
return fSequenceNumber > that.fSequenceNumber;
}
bool operator>=(const GrDeferredUploadToken that) const {
return fSequenceNumber >= that.fSequenceNumber;
}
GrDeferredUploadToken& operator++() {
++fSequenceNumber;
return *this;
}
GrDeferredUploadToken operator++(int) {
auto old = fSequenceNumber;
++fSequenceNumber;
return GrDeferredUploadToken(old);
}
GrDeferredUploadToken next() const { return GrDeferredUploadToken(fSequenceNumber + 1); }
/** Is this token in the [start, end] inclusive interval? */
bool inInterval(const GrDeferredUploadToken& start, const GrDeferredUploadToken& end) {
return *this >= start && *this <= end;
}
private:
GrDeferredUploadToken();
GrDeferredUploadToken() = delete;
explicit GrDeferredUploadToken(uint64_t sequenceNumber) : fSequenceNumber(sequenceNumber) {}
friend class GrOpFlushState;
uint64_t fSequenceNumber;
};
@@ -53,4 +96,29 @@ using GrDeferredTextureUploadWritePixelsFn =
*/
using GrDeferredTextureUploadFn = std::function<void(GrDeferredTextureUploadWritePixelsFn&)>;
/**
* An interface for scheduling deferred uploads. It provides sequence tokens and accepts ASAP
* and inline uploads.
*/
class GrDeferredUploadTarget {
public:
virtual ~GrDeferredUploadTarget() {}
/** Returns the token of the draw that this upload will occur before. */
virtual GrDeferredUploadToken addInlineUpload(GrDeferredTextureUploadFn&&) = 0;
/** Returns the token of the draw that this upload will occur before. Since ASAP uploads
are done first during a flush, this will be the first token since the most recent
flush. */
virtual GrDeferredUploadToken addASAPUpload(GrDeferredTextureUploadFn&& upload) = 0;
/** Gets the token one beyond the last token that has been flushed. */
virtual GrDeferredUploadToken nextTokenToFlush() const = 0;
/** Gets the next draw token that will be issued by this target. This can be used by an op
to record that the next draw it issues will use a resource (e.g. texture) while preparing
that draw. */
virtual GrDeferredUploadToken nextDrawToken() const = 0;
};
#endif
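Because the upload target is now a pure interface, code other than GrOpFlushState can implement it, which is what opens GrDrawOpAtlas to other clients. Below is a minimal test-double sketch, assuming the static GrDeferredUploadToken::AlreadyFlushedToken() factory (used elsewhere in this change) is publicly accessible; 'RecordingUploadTarget' is an invented name, not part of this commit.

#include "SkTArray.h"

// Records uploads and mimics GrOpFlushState's token bookkeeping using only the
// public token API (AlreadyFlushedToken(), next(), operator++).
class RecordingUploadTarget final : public GrDeferredUploadTarget {
public:
    GrDeferredUploadToken addInlineUpload(GrDeferredTextureUploadFn&& upload) final {
        fInlineUploads.push_back(std::move(upload));
        return this->nextDrawToken();
    }
    GrDeferredUploadToken addASAPUpload(GrDeferredTextureUploadFn&& upload) final {
        fASAPUploads.push_back(std::move(upload));
        return this->nextTokenToFlush();
    }
    GrDeferredUploadToken nextTokenToFlush() const final { return fLastFlushedToken.next(); }
    GrDeferredUploadToken nextDrawToken() const final { return fLastIssuedToken.next(); }

    // Test hooks mirroring GrOpFlushState::issueDrawToken()/flushToken().
    GrDeferredUploadToken issueDrawToken() { return ++fLastIssuedToken; }
    void flushToken() { ++fLastFlushedToken; }

private:
    SkTArray<GrDeferredTextureUploadFn> fInlineUploads;
    SkTArray<GrDeferredTextureUploadFn> fASAPUploads;
    GrDeferredUploadToken fLastIssuedToken = GrDeferredUploadToken::AlreadyFlushedToken();
    GrDeferredUploadToken fLastFlushedToken = GrDeferredUploadToken::AlreadyFlushedToken();
};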

src/gpu/GrDrawOpAtlas.cpp

@@ -173,14 +173,14 @@ inline void GrDrawOpAtlas::processEviction(AtlasID id) {
++fAtlasGeneration;
}
inline bool GrDrawOpAtlas::updatePlot(GrDrawOp::Target* target, AtlasID* id, Plot* plot) {
inline bool GrDrawOpAtlas::updatePlot(GrDeferredUploadTarget* target, AtlasID* id, Plot* plot) {
int pageIdx = GetPageIndexFromID(plot->id());
this->makeMRU(plot, pageIdx);
// If our most recent upload has already occurred then we have to insert a new
// upload. Otherwise, we already have a scheduled upload that hasn't yet occurred.
// This new update will piggyback on that previously scheduled update.
if (target->hasDrawBeenFlushed(plot->lastUploadToken())) {
if (plot->lastUploadToken() < target->nextTokenToFlush()) {
// With C++14 we could move sk_sp into the lambda to only ref once.
sk_sp<Plot> plotsp(SkRef(plot));
@@ -192,7 +192,7 @@ inline bool GrDrawOpAtlas::updatePlot(GrDrawOp::Target* target, AtlasID* id, Plo
GrTextureProxy* proxy = fProxies[pageIdx].get();
GrDeferredUploadToken lastUploadToken = target->addAsapUpload(
GrDeferredUploadToken lastUploadToken = target->addASAPUpload(
[plotsp, proxy](GrDeferredTextureUploadWritePixelsFn& writePixels) {
plotsp->uploadToTexture(writePixels, proxy);
});
@@ -210,7 +210,7 @@ inline bool GrDrawOpAtlas::updatePlot(GrDrawOp::Target* target, AtlasID* id, Plo
// are rare; i.e., we are not continually refreshing the frame.
static constexpr auto kRecentlyUsedCount = 256;
bool GrDrawOpAtlas::addToAtlas(AtlasID* id, GrDrawOp::Target* target, int width, int height,
bool GrDrawOpAtlas::addToAtlas(AtlasID* id, GrDeferredUploadTarget* target, int width, int height,
const void* image, SkIPoint16* loc) {
if (width > fPlotWidth || height > fPlotHeight) {
return false;
@@ -242,7 +242,7 @@ bool GrDrawOpAtlas::addToAtlas(AtlasID* id, GrDrawOp::Target* target, int width,
for (unsigned int pageIdx = 0; pageIdx < fNumPages; ++pageIdx) {
Plot* plot = fPages[pageIdx].fPlotList.tail();
SkASSERT(plot);
if ((fNumPages == kMaxPages && target->hasDrawBeenFlushed(plot->lastUseToken())) ||
if ((fNumPages == kMaxPages && plot->lastUseToken() < target->nextTokenToFlush()) ||
plot->flushesSinceLastUsed() >= kRecentlyUsedCount) {
this->processEvictionAndResetRects(plot);
SkASSERT(GrBytesPerPixel(fProxies[pageIdx]->config()) == plot->bpp());
@@ -302,7 +302,7 @@ bool GrDrawOpAtlas::addToAtlas(AtlasID* id, GrDrawOp::Target* target, int width,
SkASSERT(verify);
// Note that this plot will be uploaded inline with the draws whereas the
// one it displaced most likely was uploaded asap.
// one it displaced most likely was uploaded ASAP.
// With C++14 we could move sk_sp into the lambda to only ref once.
sk_sp<Plot> plotsp(SkRef(newPlot.get()));
// MDB TODO: this is currently fine since the atlas' proxy is always pre-instantiated.

src/gpu/GrDrawOpAtlas.h

@@ -29,7 +29,7 @@ struct GrDrawOpAtlasConfig {
* This class manages one or more atlas textures on behalf of GrDrawOps. The draw ops that use the
* atlas perform texture uploads when preparing their draws during flush. The class provides
* facilities for using GrDeferredUploadToken to detect data hazards. An op's uploads are performed in
* "asap" mode until it is impossible to add data without overwriting texels read by draws that
* "ASAP" mode until it is impossible to add data without overwriting texels read by draws that
* have not yet executed on the gpu. At that point, the atlas will attempt to allocate a new
* atlas texture (or "page") of the same size, up to a maximum number of textures, and upload
* to that texture. If that's not possible, the uploads are performed "inline" between draws. If a
@@ -100,7 +100,7 @@ public:
* 'setUseToken' with the current token from the GrDeferredUploadTarget, otherwise the next call to
* addToAtlas might cause the previous data to be overwritten before it has been read.
*/
bool addToAtlas(AtlasID*, GrDrawOp::Target*, int width, int height, const void* image,
bool addToAtlas(AtlasID*, GrDeferredUploadTarget*, int width, int height, const void* image,
SkIPoint16* loc);
GrContext* context() const { return fContext; }
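The use-token contract is easiest to read at a call site. The following is condensed from the GrSmallPathRenderer changes later in this commit; 'pixels', 'flushInfo', and the flush helper stand in for the op's own state:

GrDrawOpAtlas::AtlasID id;
SkIPoint16 atlasLocation;
auto uploadTarget = target->deferredUploadTarget();
// On failure, flush this op's queued draws (freeing plots whose last use has
// now been flushed) and retry once.
if (!atlas->addToAtlas(&id, uploadTarget, width, height, pixels, &atlasLocation)) {
    this->flush(target, flushInfo);
    if (!atlas->addToAtlas(&id, uploadTarget, width, height, pixels, &atlasLocation)) {
        return false;
    }
}
// Tag the new entry with the token of the draw that is about to read it so it
// cannot be evicted before that draw executes.
atlas->setLastUseToken(id, uploadTarget->nextDrawToken());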
@@ -327,7 +327,7 @@ private:
return (id >> 16) & 0xffffffffffff;
}
inline bool updatePlot(GrDrawOp::Target*, AtlasID*, Plot*);
inline bool updatePlot(GrDeferredUploadTarget*, AtlasID*, Plot*);
inline void makeMRU(Plot* plot, int pageIdx) {
if (fPages[pageIdx].fPlotList.head() == plot) {

src/gpu/GrOpFlushState.cpp

@@ -30,30 +30,6 @@ GrGpuRTCommandBuffer* GrOpFlushState::rtCommandBuffer() {
return fCommandBuffer->asRTCommandBuffer();
}
void* GrOpFlushState::makeVertexSpace(size_t vertexSize, int vertexCount,
const GrBuffer** buffer, int* startVertex) {
return fVertexPool.makeSpace(vertexSize, vertexCount, buffer, startVertex);
}
uint16_t* GrOpFlushState::makeIndexSpace(int indexCount,
const GrBuffer** buffer, int* startIndex) {
return reinterpret_cast<uint16_t*>(fIndexPool.makeSpace(indexCount, buffer, startIndex));
}
void* GrOpFlushState::makeVertexSpaceAtLeast(size_t vertexSize, int minVertexCount,
int fallbackVertexCount, const GrBuffer** buffer,
int* startVertex, int* actualVertexCount) {
return fVertexPool.makeSpaceAtLeast(vertexSize, minVertexCount, fallbackVertexCount, buffer,
startVertex, actualVertexCount);
}
uint16_t* GrOpFlushState::makeIndexSpaceAtLeast(int minIndexCount, int fallbackIndexCount,
const GrBuffer** buffer, int* startIndex,
int* actualIndexCount) {
return reinterpret_cast<uint16_t*>(fIndexPool.makeSpaceAtLeast(
minIndexCount, fallbackIndexCount, buffer, startIndex, actualIndexCount));
}
void GrOpFlushState::doUpload(GrDeferredTextureUploadFn& upload) {
GrDeferredTextureUploadWritePixelsFn wp = [this](GrTextureProxy* proxy, int left, int top,
int width, int height, GrPixelConfig config,
@@ -86,3 +62,85 @@ void GrOpFlushState::doUpload(GrDeferredTextureUploadFn& upload) {
};
upload(wp);
}
GrDeferredUploadToken GrOpFlushState::addInlineUpload(GrDeferredTextureUploadFn&& upload) {
SkASSERT(fOpArgs);
SkASSERT(fOpArgs->fOp);
// Here we're dangerously relying on only GrDrawOps calling this method. This gets fixed by
// storing inline uploads on GrOpFlushState and removing GrDrawOp::FlushStateAccess.
auto op = static_cast<GrDrawOp*>(fOpArgs->fOp);
auto token = this->nextDrawToken();
GrDrawOp::FlushStateAccess(op).addInlineUpload(std::move(upload), token);
return token;
}
GrDeferredUploadToken GrOpFlushState::addASAPUpload(GrDeferredTextureUploadFn&& upload) {
fASAPUploads.emplace_back(std::move(upload));
return this->nextTokenToFlush();
}
void GrOpFlushState::draw(const GrGeometryProcessor* gp, const GrPipeline* pipeline,
const GrMesh& mesh) {
SkASSERT(fOpArgs);
SkASSERT(fOpArgs->fOp);
// Here we're dangerously relying on only GrMeshDrawOps calling this method. This gets fixed by
// storing draw data on GrOpFlushState and removing GrMeshDrawOp::FlushStateAccess.
auto op = static_cast<GrMeshDrawOp*>(fOpArgs->fOp);
GrMeshDrawOp::FlushStateAccess fsa(op);
fsa.addMesh(mesh);
GrMeshDrawOp::FlushStateAccess::QueuedDraw* lastDraw = fsa.lastDraw();
if (lastDraw) {
// If the last draw shares a geometry processor and pipeline and there are no intervening
// uploads, add this mesh to it.
if (lastDraw->fGeometryProcessor == gp && lastDraw->fPipeline == pipeline &&
(fsa.lastUploadToken() != this->nextDrawToken())) {
++lastDraw->fMeshCnt;
return;
}
}
GrMeshDrawOp::FlushStateAccess::QueuedDraw* draw = fsa.addDraw();
GrDeferredUploadToken token = this->issueDrawToken();
draw->fGeometryProcessor.reset(gp);
draw->fPipeline = pipeline;
draw->fMeshCnt = 1;
if (!lastDraw) {
fsa.setBaseDrawToken(token);
}
}
void* GrOpFlushState::makeVertexSpace(size_t vertexSize, int vertexCount, const GrBuffer** buffer,
int* startVertex) {
return fVertexPool.makeSpace(vertexSize, vertexCount, buffer, startVertex);
}
uint16_t* GrOpFlushState::makeIndexSpace(int indexCount, const GrBuffer** buffer, int* startIndex) {
return reinterpret_cast<uint16_t*>(fIndexPool.makeSpace(indexCount, buffer, startIndex));
}
void* GrOpFlushState::makeVertexSpaceAtLeast(size_t vertexSize, int minVertexCount,
int fallbackVertexCount, const GrBuffer** buffer,
int* startVertex, int* actualVertexCount) {
return fVertexPool.makeSpaceAtLeast(vertexSize, minVertexCount, fallbackVertexCount, buffer,
startVertex, actualVertexCount);
}
uint16_t* GrOpFlushState::makeIndexSpaceAtLeast(int minIndexCount, int fallbackIndexCount,
const GrBuffer** buffer, int* startIndex,
int* actualIndexCount) {
return reinterpret_cast<uint16_t*>(fIndexPool.makeSpaceAtLeast(
minIndexCount, fallbackIndexCount, buffer, startIndex, actualIndexCount));
}
void GrOpFlushState::putBackIndices(int indexCount) {
fIndexPool.putBack(indexCount * sizeof(uint16_t));
}
void GrOpFlushState::putBackVertices(int vertices, size_t vertexStride) {
fVertexPool.putBack(vertices * vertexStride);
}
GrAppliedClip GrOpFlushState::detachAppliedClip() {
return fOpArgs->fAppliedClip ? std::move(*fOpArgs->fAppliedClip) : GrAppliedClip();
}

src/gpu/GrOpFlushState.h

@@ -10,6 +10,7 @@
#include "GrAppliedClip.h"
#include "GrBufferAllocPool.h"
#include "GrDeferredUpload.h"
#include "SkArenaAlloc.h"
#include "ops/GrMeshDrawOp.h"
@@ -18,74 +19,76 @@ class GrGpuCommandBuffer;
class GrGpuRTCommandBuffer;
class GrResourceProvider;
// TODO: Store uploads on GrOpFlushState rather than GrDrawOp and remove this.
class GrDrawOp::FlushStateAccess {
private:
friend class GrOpFlushState;
explicit FlushStateAccess(GrDrawOp* op) : fOp(op) {}
void addInlineUpload(GrDeferredTextureUploadFn&& upload, GrDeferredUploadToken token) {
fOp->fInlineUploads.emplace_back(std::move(upload), token);
}
GrDrawOp* fOp;
};
// TODO: Store draw related data on GrOpFlushState rather than GrMeshDrawOp and remove this.
class GrMeshDrawOp::FlushStateAccess {
private:
friend class GrOpFlushState;
using QueuedDraw = GrMeshDrawOp::QueuedDraw;
explicit FlushStateAccess(GrMeshDrawOp* op) : fOp(op) {}
void addMesh(const GrMesh& mesh) { fOp->fMeshes.push_back(mesh); }
QueuedDraw* lastDraw() {
return fOp->fQueuedDraws.empty() ? nullptr : &fOp->fQueuedDraws.back();
}
QueuedDraw* addDraw() { return &fOp->fQueuedDraws.push_back(); }
GrDeferredUploadToken lastUploadToken() const {
if (fOp->fInlineUploads.empty()) {
return GrDeferredUploadToken::AlreadyFlushedToken();
}
return fOp->fInlineUploads.back().fUploadBeforeToken;
}
void setBaseDrawToken(GrDeferredUploadToken token) { fOp->fBaseDrawToken = token; }
GrMeshDrawOp* fOp;
};
/** Tracks the state across all the GrOps (really just the GrDrawOps) in a GrOpList flush. */
class GrOpFlushState {
class GrOpFlushState final : public GrDeferredUploadTarget, public GrMeshDrawOp::Target {
public:
GrOpFlushState(GrGpu*, GrResourceProvider*);
~GrOpFlushState() { this->reset(); }
/** Inserts an upload to be executed after all ops in the flush prepared their draws but before
the draws are executed to the backend 3D API. */
void addASAPUpload(GrDeferredTextureUploadFn&& upload) {
fAsapUploads.emplace_back(std::move(upload));
}
const GrCaps& caps() const;
GrResourceProvider* resourceProvider() const { return fResourceProvider; }
/** Has the token been flushed to the backend 3D API. */
bool hasDrawBeenFlushed(GrDeferredUploadToken token) const {
return token.fSequenceNumber <= fLastFlushedToken.fSequenceNumber;
}
~GrOpFlushState() final { this->reset(); }
/** Issue a token to an operation that is being enqueued. */
GrDeferredUploadToken issueDrawToken() {
return GrDeferredUploadToken(++fLastIssuedToken.fSequenceNumber);
}
GrDeferredUploadToken issueDrawToken() { return ++fLastIssuedToken; }
/** Call every time a draw that was issued a token is flushed */
void flushToken() { ++fLastFlushedToken.fSequenceNumber; }
/** Gets the next draw token that will be issued. */
GrDeferredUploadToken nextDrawToken() const {
return GrDeferredUploadToken(fLastIssuedToken.fSequenceNumber + 1);
}
/** The last token flushed to all the way to the backend API. */
GrDeferredUploadToken nextTokenToFlush() const {
return GrDeferredUploadToken(fLastFlushedToken.fSequenceNumber + 1);
}
void* makeVertexSpace(size_t vertexSize, int vertexCount,
const GrBuffer** buffer, int* startVertex);
uint16_t* makeIndexSpace(int indexCount, const GrBuffer** buffer, int* startIndex);
void* makeVertexSpaceAtLeast(size_t vertexSize, int minVertexCount, int fallbackVertexCount,
const GrBuffer** buffer, int* startVertex, int* actualVertexCount);
uint16_t* makeIndexSpaceAtLeast(int minIndexCount, int fallbackIndexCount,
const GrBuffer** buffer, int* startIndex,
int* actualIndexCount);
void flushToken() { ++fLastFlushedToken; }
/** This is called after each op has a chance to prepare its draws and before the draws are
issued. */
void preIssueDraws() {
fVertexPool.unmap();
fIndexPool.unmap();
int uploadCount = fAsapUploads.count();
int uploadCount = fASAPUploads.count();
for (int i = 0; i < uploadCount; i++) {
this->doUpload(fAsapUploads[i]);
this->doUpload(fASAPUploads[i]);
}
fAsapUploads.reset();
fASAPUploads.reset();
}
void doUpload(GrDeferredTextureUploadFn&);
void putBackIndices(size_t indices) { fIndexPool.putBack(indices * sizeof(uint16_t)); }
void putBackVertexSpace(size_t sizeInBytes) { fVertexPool.putBack(sizeInBytes); }
GrGpuCommandBuffer* commandBuffer() { return fCommandBuffer; }
// Helper function used by Ops that are only called via RenderTargetOpLists
GrGpuRTCommandBuffer* rtCommandBuffer();
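For orientation, here is a hedged sketch of how an op list drives this object across a flush, condensed from the GrTextureOpList changes later in this commit. 'flushOps' is an invented free function; real callers also set up command buffers, render target proxies, and clips.

void flushOps(GrOpFlushState* state, const SkTArray<std::unique_ptr<GrOp>>& ops) {
    // Prepare: ops record their draws; each queued draw is assigned a token
    // via issueDrawToken(), advancing nextDrawToken().
    for (const auto& op : ops) {
        GrOpFlushState::OpArgs opArgs = { op.get(), nullptr, nullptr,
                                          GrXferProcessor::DstProxy() };
        state->setOpArgs(&opArgs);
        op->prepare(state);
        state->setOpArgs(nullptr);
    }
    // Unmap the vertex/index pools and run every ASAP upload; all of these
    // precede the first unflushed draw (nextTokenToFlush()).
    state->preIssueDraws();
    // Execute: as each recorded draw is issued to the backend, the op (e.g.
    // GrMeshDrawOp::onExecute) calls flushToken(), advancing
    // nextTokenToFlush() toward nextDrawToken().
    for (const auto& op : ops) {
        GrOpFlushState::OpArgs opArgs = { op.get(), nullptr, nullptr,
                                          GrXferProcessor::DstProxy() };
        state->setOpArgs(&opArgs);
        op->execute(state);
        state->setOpArgs(nullptr);
    }
}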
@@ -99,184 +102,65 @@ public:
fPipelines.reset();
}
/** Additional data required on a per-op basis when executing GrDrawOps. */
struct DrawOpArgs {
/** Additional data required on a per-op basis when executing GrOps. */
struct OpArgs {
GrRenderTarget* renderTarget() const { return fProxy->priv().peekRenderTarget(); }
GrOp* fOp;
// TODO: do we still need the dst proxy here?
GrRenderTargetProxy* fProxy;
GrAppliedClip* fAppliedClip;
GrXferProcessor::DstProxy fDstProxy;
};
void setDrawOpArgs(DrawOpArgs* opArgs) { fOpArgs = opArgs; }
void setOpArgs(OpArgs* opArgs) { fOpArgs = opArgs; }
const DrawOpArgs& drawOpArgs() const {
const OpArgs& drawOpArgs() const {
SkASSERT(fOpArgs);
SkASSERT(fOpArgs->fOp);
return *fOpArgs;
}
GrAppliedClip detachAppliedClip() {
SkASSERT(fOpArgs);
return fOpArgs->fAppliedClip ? std::move(*fOpArgs->fAppliedClip) : GrAppliedClip();
}
/** Overrides of GrDeferredUploadTarget. */
template <typename... Args>
GrPipeline* allocPipeline(Args&&... args) {
return fPipelines.make<GrPipeline>(std::forward<Args>(args)...);
}
GrDeferredUploadToken addInlineUpload(GrDeferredTextureUploadFn&&) final;
GrDeferredUploadToken addASAPUpload(GrDeferredTextureUploadFn&&) final;
GrDeferredUploadToken nextDrawToken() const final { return fLastIssuedToken.next(); }
GrDeferredUploadToken nextTokenToFlush() const override { return fLastFlushedToken.next(); }
/** Overrides of GrMeshDrawOp::Target. */
void draw(const GrGeometryProcessor*, const GrPipeline*, const GrMesh&) final;
void* makeVertexSpace(size_t vertexSize, int vertexCount, const GrBuffer**,
int* startVertex) final;
uint16_t* makeIndexSpace(int indexCount, const GrBuffer**, int* startIndex) final;
void* makeVertexSpaceAtLeast(size_t vertexSize, int minVertexCount, int fallbackVertexCount,
const GrBuffer**, int* startVertex, int* actualVertexCount) final;
uint16_t* makeIndexSpaceAtLeast(int minIndexCount, int fallbackIndexCount, const GrBuffer**,
int* startIndex, int* actualIndexCount) final;
void putBackIndices(int indexCount) final;
void putBackVertices(int vertices, size_t vertexStride) final;
GrRenderTargetProxy* proxy() const final { return fOpArgs->fProxy; }
GrAppliedClip detachAppliedClip() final;
const GrXferProcessor::DstProxy& dstProxy() const final { return fOpArgs->fDstProxy; }
GrDeferredUploadTarget* deferredUploadTarget() final { return this; }
const GrCaps& caps() const final;
GrResourceProvider* resourceProvider() const final { return fResourceProvider; }
private:
/** GrMeshDrawOp::Target override. */
SkArenaAlloc* pipelineArena() override { return &fPipelines; }
GrGpu* fGpu;
GrResourceProvider* fResourceProvider;
GrGpuCommandBuffer* fCommandBuffer;
GrVertexBufferAllocPool fVertexPool;
GrIndexBufferAllocPool fIndexPool;
SkSTArray<4, GrDeferredTextureUploadFn> fAsapUploads;
SkSTArray<4, GrDeferredTextureUploadFn> fASAPUploads;
GrDeferredUploadToken fLastIssuedToken;
GrDeferredUploadToken fLastFlushedToken;
DrawOpArgs* fOpArgs;
OpArgs* fOpArgs;
SkArenaAlloc fPipelines{sizeof(GrPipeline) * 100};
};
/**
* A word about uploads and tokens: Ops should usually schedule their uploads to occur at the
* beginning of a frame whenever possible. These are called ASAP uploads. Of course, this requires
* that there are no draws that have yet to be flushed that rely on the old texture contents. In
* that case the ASAP upload would happen prior to the previous draw causing the draw to read the
* new (wrong) texture data. In that case they should schedule an inline upload.
*
* Ops, in conjunction with helpers such as GrDrawOpAtlas, can use the token system to know
* what the most recent draw was that referenced a resource (or portion of a resource). Each draw
* is assigned a token. A resource (or portion) can be tagged with the most recent draw's
* token. The target provides a facility for testing whether the draw corresponding to the token
* has been flushed. If it has not been flushed then the op must perform an inline upload instead.
* When scheduling an inline upload the op provides the token of the draw that the upload must occur
* before. The upload will then occur between the draw that requires the new data but after the
* token that requires the old data.
*
* TODO: Currently the token/upload interface is spread over GrDrawOp, GrMeshDrawOp,
* GrDrawOp::Target, and GrMeshDrawOp::Target. However, the interface at the GrDrawOp level is not
* complete and isn't useful. We should push it down to GrMeshDrawOp until it is required at the
* GrDrawOp level.
*/
/**
* GrDrawOp instances use this object to allocate space for their geometry and to issue the draws
* that render their op.
*/
class GrDrawOp::Target {
public:
Target(GrOpFlushState* state, GrDrawOp* op) : fState(state), fOp(op) {}
/** Returns the token of the draw that this upload will occur before. */
GrDeferredUploadToken addInlineUpload(GrDeferredTextureUploadFn&& upload) {
fOp->fInlineUploads.emplace_back(std::move(upload), fState->nextDrawToken());
return fOp->fInlineUploads.back().fUploadBeforeToken;
}
/** Returns the token of the draw that this upload will occur before. Since ASAP uploads
are done first during a flush, this will be the first token since the most recent
flush. */
GrDeferredUploadToken addAsapUpload(GrDeferredTextureUploadFn&& upload) {
fState->addASAPUpload(std::move(upload));
return fState->nextTokenToFlush();
}
bool hasDrawBeenFlushed(GrDeferredUploadToken token) const {
return fState->hasDrawBeenFlushed(token);
}
/** Gets the next draw token that will be issued by this target. This can be used by an op
to record that the next draw it issues will use a resource (e.g. texture) while preparing
that draw. */
GrDeferredUploadToken nextDrawToken() const { return fState->nextDrawToken(); }
const GrCaps& caps() const { return fState->caps(); }
GrResourceProvider* resourceProvider() const { return fState->resourceProvider(); }
protected:
GrDrawOp* op() { return fOp; }
GrOpFlushState* state() { return fState; }
const GrOpFlushState* state() const { return fState; }
private:
GrOpFlushState* fState;
GrDrawOp* fOp;
};
/** Extension of GrDrawOp::Target for use by GrMeshDrawOp. Adds the ability to create vertex
draws. */
class GrMeshDrawOp::Target : public GrDrawOp::Target {
public:
Target(GrOpFlushState* state, GrMeshDrawOp* op) : INHERITED(state, op) {}
void draw(const GrGeometryProcessor* gp, const GrPipeline* pipeline, const GrMesh& mesh);
void* makeVertexSpace(size_t vertexSize, int vertexCount,
const GrBuffer** buffer, int* startVertex) {
return this->state()->makeVertexSpace(vertexSize, vertexCount, buffer, startVertex);
}
uint16_t* makeIndexSpace(int indexCount, const GrBuffer** buffer, int* startIndex) {
return this->state()->makeIndexSpace(indexCount, buffer, startIndex);
}
void* makeVertexSpaceAtLeast(size_t vertexSize, int minVertexCount, int fallbackVertexCount,
const GrBuffer** buffer, int* startVertex,
int* actualVertexCount) {
return this->state()->makeVertexSpaceAtLeast(vertexSize, minVertexCount,
fallbackVertexCount, buffer, startVertex,
actualVertexCount);
}
uint16_t* makeIndexSpaceAtLeast(int minIndexCount, int fallbackIndexCount,
const GrBuffer** buffer, int* startIndex,
int* actualIndexCount) {
return this->state()->makeIndexSpaceAtLeast(minIndexCount, fallbackIndexCount, buffer,
startIndex, actualIndexCount);
}
/** Helpers for ops which over-allocate and then return data to the pool. */
void putBackIndices(int indices) { this->state()->putBackIndices(indices); }
void putBackVertices(int vertices, size_t vertexStride) {
this->state()->putBackVertexSpace(vertices * vertexStride);
}
GrRenderTargetProxy* proxy() const { return this->state()->drawOpArgs().fProxy; }
const GrAppliedClip* clip() const { return this->state()->drawOpArgs().fAppliedClip; }
GrAppliedClip detachAppliedClip() { return this->state()->detachAppliedClip(); }
const GrXferProcessor::DstProxy& dstProxy() const {
return this->state()->drawOpArgs().fDstProxy;
}
template <typename... Args>
GrPipeline* allocPipeline(Args&&... args) {
return this->state()->allocPipeline(std::forward<Args>(args)...);
}
/**
* Helper that makes a pipeline targeting the op's render target that incorporates the op's
* GrAppliedClip.
*/
GrPipeline* makePipeline(uint32_t pipelineFlags, GrProcessorSet&& processorSet,
GrAppliedClip&& clip) {
GrPipeline::InitArgs pipelineArgs;
pipelineArgs.fFlags = pipelineFlags;
pipelineArgs.fProxy = this->proxy();
pipelineArgs.fDstProxy = this->dstProxy();
pipelineArgs.fCaps = &this->caps();
pipelineArgs.fResourceProvider = this->resourceProvider();
return this->allocPipeline(pipelineArgs, std::move(processorSet), std::move(clip));
}
private:
GrMeshDrawOp* meshDrawOp() { return static_cast<GrMeshDrawOp*>(this->op()); }
typedef GrDrawOp::Target INHERITED;
};
#endif

src/gpu/GrRenderTargetOpList.cpp

@@ -75,15 +75,16 @@ void GrRenderTargetOpList::onPrepare(GrOpFlushState* flushState) {
#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
TRACE_EVENT0("skia", fRecordedOps[i].fOp->name());
#endif
GrOpFlushState::DrawOpArgs opArgs = {
GrOpFlushState::OpArgs opArgs = {
fRecordedOps[i].fOp.get(),
fTarget.get()->asRenderTargetProxy(),
fRecordedOps[i].fAppliedClip,
fRecordedOps[i].fDstProxy
};
flushState->setDrawOpArgs(&opArgs);
flushState->setOpArgs(&opArgs);
fRecordedOps[i].fOp->prepare(flushState);
flushState->setDrawOpArgs(nullptr);
flushState->setOpArgs(nullptr);
}
}
@@ -163,15 +164,16 @@ bool GrRenderTargetOpList::onExecute(GrOpFlushState* flushState) {
TRACE_EVENT0("skia", fRecordedOps[i].fOp->name());
#endif
GrOpFlushState::DrawOpArgs opArgs {
GrOpFlushState::OpArgs opArgs {
fRecordedOps[i].fOp.get(),
fTarget.get()->asRenderTargetProxy(),
fRecordedOps[i].fAppliedClip,
fRecordedOps[i].fDstProxy
};
flushState->setDrawOpArgs(&opArgs);
flushState->setOpArgs(&opArgs);
fRecordedOps[i].fOp->execute(flushState);
flushState->setDrawOpArgs(nullptr);
flushState->setOpArgs(nullptr);
}
finish_command_buffer(commandBuffer.get());

src/gpu/GrTextureOpList.cpp

@@ -52,8 +52,15 @@ void GrTextureOpList::onPrepare(GrOpFlushState* flushState) {
// Loop over the ops that haven't yet generated their geometry
for (int i = 0; i < fRecordedOps.count(); ++i) {
if (fRecordedOps[i]) {
// We do not call flushState->setDrawOpArgs as this op list does not support GrDrawOps.
GrOpFlushState::OpArgs opArgs = {
fRecordedOps[i].get(),
nullptr,
nullptr,
GrXferProcessor::DstProxy()
};
flushState->setOpArgs(&opArgs);
fRecordedOps[i]->prepare(flushState);
flushState->setOpArgs(nullptr);
}
}
}
@@ -69,8 +76,15 @@ bool GrTextureOpList::onExecute(GrOpFlushState* flushState) {
flushState->setCommandBuffer(commandBuffer.get());
for (int i = 0; i < fRecordedOps.count(); ++i) {
// We do not call flushState->setDrawOpArgs as this op list does not support GrDrawOps.
GrOpFlushState::OpArgs opArgs = {
fRecordedOps[i].get(),
nullptr,
nullptr,
GrXferProcessor::DstProxy()
};
flushState->setOpArgs(&opArgs);
fRecordedOps[i]->execute(flushState);
flushState->setOpArgs(nullptr);
}
commandBuffer->submit();

src/gpu/ops/GrAtlasTextOp.cpp

@@ -235,9 +235,9 @@ void GrAtlasTextOp::onPrepareDraws(Target* target) {
size_t byteCount;
void* blobVertices;
int subRunGlyphCount;
blob->regenInOp(target, fFontCache, &helper, args.fRun, args.fSubRun, &glyphCache,
vertexStride, args.fViewMatrix, args.fX, args.fY, args.fColor,
&blobVertices, &byteCount, &subRunGlyphCount);
blob->regenInOp(target->deferredUploadTarget(), fFontCache, &helper, args.fRun,
args.fSubRun, &glyphCache, vertexStride, args.fViewMatrix, args.fX, args.fY,
args.fColor, &blobVertices, &byteCount, &subRunGlyphCount);
// now copy all vertices
if (args.fClipRect.isEmpty()) {

src/gpu/ops/GrDrawOp.h

@@ -16,11 +16,12 @@
class GrAppliedClip;
/**
* Base class for GrOps that draw. These ops have a GrPipeline installed by GrOpList.
* Base class for GrOps that draw. These ops can draw into an op list's GrRenderTarget.
*/
class GrDrawOp : public GrOp {
public:
class Target;
/** Provides GrOpFlushState with privileged access to GrDrawOp. */
class FlushStateAccess;
GrDrawOp(uint32_t classID) : INHERITED(classID) {}

src/gpu/ops/GrMeshDrawOp.cpp

@@ -13,10 +13,7 @@
GrMeshDrawOp::GrMeshDrawOp(uint32_t classID)
: INHERITED(classID), fBaseDrawToken(GrDeferredUploadToken::AlreadyFlushedToken()) {}
void GrMeshDrawOp::onPrepare(GrOpFlushState* state) {
Target target(state, this);
this->onPrepareDraws(&target);
}
void GrMeshDrawOp::onPrepare(GrOpFlushState* state) { this->onPrepareDraws(state); }
void* GrMeshDrawOp::PatternHelper::init(Target* target, size_t vertexStride,
const GrBuffer* indexBuffer, int verticesPerRepetition,
@@ -85,30 +82,3 @@ void GrMeshDrawOp::onExecute(GrOpFlushState* state) {
fQueuedDraws.reset();
fInlineUploads.reset();
}
//////////////////////////////////////////////////////////////////////////////
void GrMeshDrawOp::Target::draw(const GrGeometryProcessor* gp, const GrPipeline* pipeline,
const GrMesh& mesh) {
GrMeshDrawOp* op = this->meshDrawOp();
op->fMeshes.push_back(mesh);
if (!op->fQueuedDraws.empty()) {
// If the last draw shares a geometry processor and pipeline and there are no intervening
// uploads, add this mesh to it.
GrMeshDrawOp::QueuedDraw& lastDraw = op->fQueuedDraws.back();
if (lastDraw.fGeometryProcessor == gp && lastDraw.fPipeline == pipeline &&
(op->fInlineUploads.empty() ||
op->fInlineUploads.back().fUploadBeforeToken != this->nextDrawToken())) {
++lastDraw.fMeshCnt;
return;
}
}
GrMeshDrawOp::QueuedDraw& draw = op->fQueuedDraws.push_back();
GrDeferredUploadToken token = this->state()->issueDrawToken();
draw.fGeometryProcessor.reset(gp);
draw.fPipeline = pipeline;
draw.fMeshCnt = 1;
if (op->fQueuedDraws.count() == 1) {
op->fBaseDrawToken = token;
}
}

src/gpu/ops/GrMeshDrawOp.h

@@ -23,7 +23,10 @@ class GrOpFlushState;
*/
class GrMeshDrawOp : public GrDrawOp {
public:
/** Abstract interface that represents a destination for a GrMeshDrawOp. */
class Target;
/** Provides GrOpFlushState with privileged access to GrMeshDrawOp. */
class FlushStateAccess;
protected:
GrMeshDrawOp(uint32_t classID);
@@ -90,4 +93,93 @@ private:
typedef GrDrawOp INHERITED;
};
class GrMeshDrawOp::Target {
public:
virtual ~Target() {}
/** Adds a draw of a mesh. */
virtual void draw(const GrGeometryProcessor*, const GrPipeline*, const GrMesh&) = 0;
/**
* Makes space for vertex data. The returned pointer is the location where vertex data
* should be written. On return, 'buffer' identifies the buffer that will hold the data and
* 'startVertex' is the offset into that buffer (in 'vertexSize' units) where the data will
* be placed.
*/
virtual void* makeVertexSpace(size_t vertexSize, int vertexCount, const GrBuffer**,
int* startVertex) = 0;
/**
* Makes space for index data. The returned pointer is the location where index data
* should be written. On return, 'buffer' identifies the buffer that will hold the data and
* 'startIndex' is the offset into that buffer (in uint16_t units) where the data will be
* placed.
*/
virtual uint16_t* makeIndexSpace(int indexCount, const GrBuffer**, int* startIndex) = 0;
/**
* This is similar to makeVertexSpace. It allows the caller to use up to 'actualVertexCount'
* vertices in the returned pointer, which may exceed 'minVertexCount'.
* 'fallbackVertexCount' is the maximum number of vertices that should be allocated if a new
* buffer is allocated on behalf of this request.
*/
virtual void* makeVertexSpaceAtLeast(size_t vertexSize, int minVertexCount,
int fallbackVertexCount, const GrBuffer**,
int* startVertex, int* actualVertexCount) = 0;
/**
* This is similar to makeIndexSpace. It allows the caller to use up to 'actualIndexCount'
* indices in the returned pointer, which may exceed 'minIndexCount'.
* 'fallbackIndexCount' is the maximum number of indices that should be allocated if a new
* buffer is allocated on behalf of this request.
*/
virtual uint16_t* makeIndexSpaceAtLeast(int minIndexCount, int fallbackIndexCount,
const GrBuffer**, int* startIndex,
int* actualIndexCount) = 0;
/** Helpers for ops which over-allocate and then return excess data to the pool. */
virtual void putBackIndices(int indices) = 0;
virtual void putBackVertices(int vertices, size_t vertexStride) = 0;
/**
* Allocates space for a pipeline and constructs it from 'args'. The target guarantees the
* pipeline's lifetime is at least as long as any deferred execution of draws added via
* draw().
*/
template <typename... Args>
GrPipeline* allocPipeline(Args&&... args) {
return this->pipelineArena()->make<GrPipeline>(std::forward<Args>(args)...);
}
/**
* Helper that makes a pipeline targeting the op's render target and incorporating the op's
* GrAppliedClip.
*/
GrPipeline* makePipeline(uint32_t pipelineFlags, GrProcessorSet&& processorSet,
GrAppliedClip&& clip) {
GrPipeline::InitArgs pipelineArgs;
pipelineArgs.fFlags = pipelineFlags;
pipelineArgs.fProxy = this->proxy();
pipelineArgs.fDstProxy = this->dstProxy();
pipelineArgs.fCaps = &this->caps();
pipelineArgs.fResourceProvider = this->resourceProvider();
return this->allocPipeline(pipelineArgs, std::move(processorSet), std::move(clip));
}
virtual GrRenderTargetProxy* proxy() const = 0;
virtual GrAppliedClip detachAppliedClip() = 0;
virtual const GrXferProcessor::DstProxy& dstProxy() const = 0;
virtual GrResourceProvider* resourceProvider() const = 0;
virtual const GrCaps& caps() const = 0;
virtual GrDeferredUploadTarget* deferredUploadTarget() = 0;
private:
virtual SkArenaAlloc* pipelineArena() = 0;
};
#endif
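Putting the pieces together, a typical onPrepareDraws() against this interface looks roughly like the sketch below. 'QuadOp', 'writeQuad', 'fProcessorSet', and 'fGeometryProcessor' are invented for illustration, and the GrMesh setup follows the API of this era of Skia; treat it as a sketch rather than a drop-in op.

void QuadOp::onPrepareDraws(Target* target) {
    static constexpr size_t kVertexStride = sizeof(SkPoint) + sizeof(GrColor);
    const GrBuffer* vertexBuffer;
    int firstVertex;
    void* verts = target->makeVertexSpace(kVertexStride, 4, &vertexBuffer, &firstVertex);
    if (!verts) {
        return;  // allocation failed; drop the draw
    }
    this->writeQuad(verts);  // write four position+color vertices (hypothetical helper)

    // The target guarantees the pipeline outlives the deferred draws.
    GrPipeline* pipeline = target->makePipeline(0, std::move(fProcessorSet),
                                                target->detachAppliedClip());
    GrMesh mesh(GrPrimitiveType::kTriangleStrip);
    mesh.setNonIndexedNonInstanced(4);
    mesh.setVertexData(vertexBuffer, firstVertex);
    target->draw(fGeometryProcessor.get(), pipeline, mesh);
}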

src/gpu/ops/GrSmallPathRenderer.cpp

@@ -383,10 +383,9 @@ private:
}
}
atlas->setLastUseToken(shapeData->fID, target->nextDrawToken());
atlas->setLastUseToken(shapeData->fID, target->deferredUploadTarget()->nextDrawToken());
this->writePathVertices(target,
atlas,
this->writePathVertices(atlas,
offset,
args.fColor,
vertexStride,
@@ -489,9 +488,11 @@ private:
// add to atlas
SkIPoint16 atlasLocation;
GrDrawOpAtlas::AtlasID id;
if (!atlas->addToAtlas(&id, target, width, height, dfStorage.get(), &atlasLocation)) {
auto uploadTarget = target->deferredUploadTarget();
if (!atlas->addToAtlas(&id, uploadTarget, width, height, dfStorage.get(), &atlasLocation)) {
this->flush(target, flushInfo);
if (!atlas->addToAtlas(&id, target, width, height, dfStorage.get(), &atlasLocation)) {
if (!atlas->addToAtlas(&id, uploadTarget, width, height, dfStorage.get(),
&atlasLocation)) {
return false;
}
}
@@ -589,10 +590,11 @@ private:
// add to atlas
SkIPoint16 atlasLocation;
GrDrawOpAtlas::AtlasID id;
if (!atlas->addToAtlas(&id, target, dst.width(), dst.height(), dst.addr(),
auto uploadTarget = target->deferredUploadTarget();
if (!atlas->addToAtlas(&id, uploadTarget, dst.width(), dst.height(), dst.addr(),
&atlasLocation)) {
this->flush(target, flushInfo);
if (!atlas->addToAtlas(&id, target, dst.width(), dst.height(), dst.addr(),
if (!atlas->addToAtlas(&id, uploadTarget, dst.width(), dst.height(), dst.addr(),
&atlasLocation)) {
return false;
}
@@ -622,8 +624,7 @@ private:
return true;
}
void writePathVertices(GrDrawOp::Target* target,
GrDrawOpAtlas* atlas,
void writePathVertices(GrDrawOpAtlas* atlas,
intptr_t offset,
GrColor color,
size_t vertexStride,

src/gpu/text/GrAtlasGlyphCache.cpp

@@ -443,7 +443,7 @@ void GrAtlasTextStrike::removeID(GrDrawOpAtlas::AtlasID id) {
}
}
bool GrAtlasTextStrike::addGlyphToAtlas(GrDrawOp::Target* target,
bool GrAtlasTextStrike::addGlyphToAtlas(GrDeferredUploadTarget* target,
GrGlyph* glyph,
SkGlyphCache* cache,
GrMaskFormat expectedMaskFormat) {

src/gpu/text/GrAtlasGlyphCache.h

@@ -65,7 +65,7 @@ public:
// happen.
// TODO we can handle some of these cases if we really want to, but the long term solution is to
// get the actual glyph image itself when we get the glyph metrics.
bool addGlyphToAtlas(GrDrawOp::Target*, GrGlyph*, SkGlyphCache*,
bool addGlyphToAtlas(GrDeferredUploadTarget*, GrGlyph*, SkGlyphCache*,
GrMaskFormat expectedMaskFormat);
// testing
@@ -168,9 +168,9 @@ public:
}
// add to texture atlas that matches this format
bool addToAtlas(GrAtlasTextStrike* strike, GrDrawOpAtlas::AtlasID* id, GrDrawOp::Target* target,
GrMaskFormat format, int width, int height, const void* image,
SkIPoint16* loc) {
bool addToAtlas(GrAtlasTextStrike* strike, GrDrawOpAtlas::AtlasID* id,
GrDeferredUploadTarget* target, GrMaskFormat format, int width, int height,
const void* image, SkIPoint16* loc) {
fPreserveStrike = strike;
return this->getAtlas(format)->addToAtlas(id, target, width, height, image, loc);
}

src/gpu/text/GrAtlasTextBlob.h

@@ -255,10 +255,10 @@ public:
* SkAutoGlyphCache is passed to multiple calls of regenInOp then it can save the cost of
* multiple detach/attach operations of SkGlyphCache.
*/
void regenInOp(GrDrawOp::Target* target, GrAtlasGlyphCache* fontCache,
GrBlobRegenHelper* helper, int run, int subRun, SkAutoGlyphCache*,
size_t vertexStride, const SkMatrix& viewMatrix, SkScalar x, SkScalar y,
GrColor color, void** vertices, size_t* byteCount, int* glyphCount);
void regenInOp(GrDeferredUploadTarget*, GrAtlasGlyphCache* fontCache, GrBlobRegenHelper* helper,
int run, int subRun, SkAutoGlyphCache*, size_t vertexStride,
const SkMatrix& viewMatrix, SkScalar x, SkScalar y, GrColor color,
void** vertices, size_t* byteCount, int* glyphCount);
const Key& key() const { return fKey; }
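The cache-reuse note above describes call sites like GrAtlasTextOp::onPrepareDraws, seen earlier in this commit, which share one lazily-attached cache across all sub-runs. A condensed sketch follows; 'fGeoData', 'vertexBlock', 'helper', and the other surrounding locals mirror that op's own state and are illustrative here.

// One SkAutoGlyphCache shared across every regenInOp() call avoids repeated
// SkGlyphCache detach/attach; it is attached lazily on first use.
SkAutoGlyphCache glyphCache;
char* currVertex = reinterpret_cast<char*>(vertexBlock);
for (const auto& args : fGeoData) {
    void* blobVertices;
    size_t byteCount;
    int subRunGlyphCount;
    args.fBlob->regenInOp(target->deferredUploadTarget(), fFontCache, &helper, args.fRun,
                          args.fSubRun, &glyphCache, vertexStride, args.fViewMatrix, args.fX,
                          args.fY, args.fColor, &blobVertices, &byteCount, &subRunGlyphCount);
    memcpy(currVertex, blobVertices, byteCount);  // copy the regenerated vertices
    currVertex += byteCount;
}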
@@ -493,7 +493,7 @@ private:
}; // Run
template <bool regenPos, bool regenCol, bool regenTexCoords, bool regenGlyphs>
void regenInOp(GrDrawOp::Target* target, GrAtlasGlyphCache* fontCache, GrBlobRegenHelper* helper,
void regenInOp(GrDeferredUploadTarget*, GrAtlasGlyphCache* fontCache, GrBlobRegenHelper* helper,
Run* run, Run::SubRunInfo* info, SkAutoGlyphCache*, int glyphCount,
size_t vertexStride, GrColor color, SkScalar transX, SkScalar transY) const;

src/gpu/text/GrAtlasTextBlob.cpp

@@ -136,7 +136,7 @@ inline void regen_vertices(intptr_t vertex, const GrGlyph* glyph, size_t vertexS
}
template <bool regenPos, bool regenCol, bool regenTexCoords, bool regenGlyphs>
void GrAtlasTextBlob::regenInOp(GrDrawOp::Target* target, GrAtlasGlyphCache* fontCache,
void GrAtlasTextBlob::regenInOp(GrDeferredUploadTarget* target, GrAtlasGlyphCache* fontCache,
GrBlobRegenHelper* helper, Run* run, Run::SubRunInfo* info,
SkAutoGlyphCache* lazyCache, int glyphCount, size_t vertexStride,
GrColor color, SkScalar transX, SkScalar transY) const {
@ -236,12 +236,10 @@ enum RegenMask {
#define REGEN_ARGS target, fontCache, helper, &run, &info, lazyCache, \
*glyphCount, vertexStride, color, transX, transY
void GrAtlasTextBlob::regenInOp(GrDrawOp::Target* target,
GrAtlasGlyphCache* fontCache,
GrBlobRegenHelper* helper,
int runIndex, int subRunIndex, SkAutoGlyphCache* lazyCache,
size_t vertexStride, const SkMatrix& viewMatrix,
SkScalar x, SkScalar y, GrColor color,
void GrAtlasTextBlob::regenInOp(GrDeferredUploadTarget* target, GrAtlasGlyphCache* fontCache,
GrBlobRegenHelper* helper, int runIndex, int subRunIndex,
SkAutoGlyphCache* lazyCache, size_t vertexStride,
const SkMatrix& viewMatrix, SkScalar x, SkScalar y, GrColor color,
void** vertices, size_t* byteCount, int* glyphCount) {
Run& run = fRuns[runIndex];
Run::SubRunInfo& info = run.fSubRunInfo[subRunIndex];