Shrink GrDrawOpAtlases when no longer needed, take 2.

Bug: skia:3550
Change-Id: Id483a76b9edcf29f7ea0aad0dd8946a3655ba8f2
Reviewed-on: https://skia-review.googlesource.com/50600
Commit-Queue: Jim Van Verth <jvanverth@google.com>
Reviewed-by: Robert Phillips <robertphillips@google.com>
Jim Van Verth 2017-09-26 12:45:29 -04:00 committed by Skia Commit-Bot
parent 8d17769700
commit 106b5c4917
12 changed files with 202 additions and 18 deletions

View File

@ -45,6 +45,11 @@ public:
, fTail(nullptr) {
}
void reset() {
fHead = nullptr;
fTail = nullptr;
}
void remove(T* entry) {
SkASSERT(fHead && fTail);
SkASSERT(this->isInList(entry));

View File

@ -205,6 +205,7 @@ bool GrContext::init(const GrContextOptions& options) {
fDrawingManager.reset(new GrDrawingManager(this, prcOptions, &fSingleOwner));
fAtlasGlyphCache = new GrAtlasGlyphCache(this, options.fGlyphCacheTextureMaximumBytes);
this->contextPriv().addOnFlushCallbackObject(fAtlasGlyphCache);
fTextBlobCache.reset(new GrTextBlobCache(TextBlobCacheOverBudgetCB, this));

View File

@ -36,6 +36,7 @@ GrDrawOpAtlas::Plot::Plot(int pageIndex, int plotIndex, uint64_t genID, int offX
int width, int height, GrPixelConfig config)
: fLastUpload(GrDrawOpUploadToken::AlreadyFlushedToken())
, fLastUse(GrDrawOpUploadToken::AlreadyFlushedToken())
, fFlushesSinceLastUse(0)
, fPageIndex(pageIndex)
, fPlotIndex(plotIndex)
, fGenID(genID)
@ -147,6 +148,7 @@ GrDrawOpAtlas::GrDrawOpAtlas(GrContext* context, GrPixelConfig config, int width
, fTextureWidth(width)
, fTextureHeight(height)
, fAtlasGeneration(kInvalidAtlasGeneration + 1)
, fPrevFlushToken(GrDrawOpUploadToken::AlreadyFlushedToken())
, fNumPages(0) {
fPlotWidth = fTextureWidth / numPlotsX;
@ -311,6 +313,86 @@ bool GrDrawOpAtlas::addToAtlas(AtlasID* id, GrDrawOp::Target* target, int width,
return true;
}
void GrDrawOpAtlas::compact(GrDrawOpUploadToken startTokenForNextFlush) {
// Number of atlas-related flushes beyond which we consider a plot to no longer be in use.
//
// This value is somewhat arbitrary -- the idea is to keep it low enough that
// a page with unused plots will get removed reasonably quickly, but allow it
// to hang around for a bit in case it's needed. The assumption is that flushes
// are rare; i.e., we are not continually refreshing the frame.
static constexpr auto kRecentlyUsedCount = 8;
if (fNumPages <= 1) {
fPrevFlushToken = startTokenForNextFlush;
return;
}
// For all plots, update the number of flushes since they were last used, and count how many
// aged-out plots in the earlier pages could accept uploads that would otherwise go to the
// last page.
PlotList::Iter plotIter;
int availablePlots = 0;
uint32_t lastPageIndex = fNumPages-1;
bool atlasUsedThisFlush = false;
for (uint32_t pageIndex = 0; pageIndex < fNumPages; ++pageIndex) {
plotIter.init(fPages[pageIndex].fPlotList, PlotList::Iter::kHead_IterStart);
while (Plot* plot = plotIter.get()) {
// Update number of flushes since plot was last used
if (plot->lastUseToken().inInterval(fPrevFlushToken, startTokenForNextFlush)) {
plot->resetFlushesSinceLastUsed();
atlasUsedThisFlush = true;
} else {
plot->incFlushesSinceLastUsed();
}
// Count plots we can potentially upload to in all pages except the last one
// (the potential compactee).
if (pageIndex < lastPageIndex && plot->flushesSinceLastUsed() > kRecentlyUsedCount) {
++availablePlots;
}
plotIter.next();
}
}
// We only try to compact if the atlas was used in the recently completed flush.
// This is to handle the case where a lot of text rendering has occurred but then just a
// blinking cursor is drawn.
// TODO: consider if we should also do this if it's been a long time since the last atlas use
if (atlasUsedThisFlush) {
// Count recently used plots in the last page and evict them if there's available space
// in earlier pages. Since we prioritize uploading to the first pages, this will eventually
// clear out usage of this page unless we have a large need.
plotIter.init(fPages[lastPageIndex].fPlotList, PlotList::Iter::kHead_IterStart);
int usedPlots = 0;
while (Plot* plot = plotIter.get()) {
// If this plot was used recently
if (plot->flushesSinceLastUsed() <= kRecentlyUsedCount) {
usedPlots++;
// see if there's room in an earlier page and if so evict.
// We need to be somewhat harsh here so that one plot that is consistently in use
// doesn't end up locking the page in memory.
if (availablePlots) {
this->processEviction(plot->id());
plot->resetRects();
--availablePlots;
}
} else {
// otherwise if aged out just evict it.
this->processEviction(plot->id());
plot->resetRects();
}
plotIter.next();
}
// If none of the plots in the last page have been used recently, delete it.
if (!usedPlots) {
this->deleteLastPage();
}
}
fPrevFlushToken = startTokenForNextFlush;
}
bool GrDrawOpAtlas::createNewPage() {
if (fNumPages == kMaxPages) {
return false;
@ -363,3 +445,13 @@ bool GrDrawOpAtlas::createNewPage() {
fNumPages++;
return true;
}
inline void GrDrawOpAtlas::deleteLastPage() {
uint32_t lastPageIndex = fNumPages - 1;
// clean out the plots
fPages[lastPageIndex].fPlotList.reset();
fPages[lastPageIndex].fPlotArray.reset(nullptr);
// remove ref to texture proxy
fProxies[lastPageIndex].reset(nullptr);
--fNumPages;
}

View File

@ -26,14 +26,29 @@ struct GrDrawOpAtlasConfig {
};
/**
* This class manages an atlas texture on behalf of GrDrawOps. The draw ops that use the atlas
* perform texture uploads when preparing their draws during flush. The class provides facilities
* for using GrDrawOpUploadToken to detect data hazards. Op's uploads are performed in "asap" mode
* until it is impossible to add data without overwriting texels read by draws that have not yet
* executed on the gpu. At that point the uploads are performed "inline" between draws. If a single
* draw would use enough subimage space to overflow the atlas texture then the atlas will fail to
* add a subimage. This gives the op the chance to end the draw and begin a new one. Additional
* uploads will then succeed in inline mode.
* This class manages one or more atlas textures on behalf of GrDrawOps. The draw ops that use the
* atlas perform texture uploads when preparing their draws during flush. The class provides
* facilities for using GrDrawOpUploadToken to detect data hazards. An op's uploads are performed
* in "asap" mode until it is impossible to add data without overwriting texels read by draws that
* have not yet executed on the GPU. At that point, the atlas will attempt to allocate a new
* atlas texture (or "page") of the same size, up to a maximum number of textures, and upload
* to that texture. If that's not possible, the uploads are performed "inline" between draws. If a
* single draw would use enough subimage space to overflow the atlas texture, then the atlas will
* fail to add a subimage. This gives the op the chance to end the draw and begin a new one.
* Additional uploads will then succeed in inline mode.
*
* When the atlas has multiple pages, new uploads are prioritized to the lower-index pages, i.e.,
* it will try to upload to page 0 before page 1 or 2. To keep the atlas from continually using
* excess space, periodic garbage collection is needed to shift data from the higher-index pages to
* the lower ones, and then eventually remove any pages that are no longer in use. "In use" is
* determined by using the GrDrawOpUploadToken system: after a flush, each subarea of the page
* is checked to see whether it was used in that flush; if it was not, a counter is incremented.
* Once that counter reaches a threshold, that subarea is considered to be no longer in use.
*
* Garbage collection is initiated by the GrDrawOpAtlas's client via the compact() method. One
* solution is to make the client a subclass of GrOnFlushCallbackObject, register it with the
* GrContext via addOnFlushCallbackObject(), and have its postFlush() method call compact(),
* passing in the given GrDrawOpUploadToken.
*/
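As a reference for the comment above, here is a minimal sketch, not part of this change, of a client that drives compaction from postFlush(). The class name MyAtlasClient and the unique_ptr ownership are assumptions for illustration; GrSmallPathRenderer and GrAtlasGlyphCache later in this commit follow the same pattern.

#include <memory>

#include "GrDrawOpAtlas.h"
#include "GrOnFlushResourceProvider.h"

// Hypothetical client (name invented for this sketch).
class MyAtlasClient : public GrOnFlushCallbackObject {
public:
    // Nothing to do before the flush in this sketch.
    void preFlush(GrOnFlushResourceProvider*, const uint32_t*, int,
                  SkTArray<sk_sp<GrRenderTargetContext>>*) override {}

    // After each flush, let the atlas age out unused plots and, if possible,
    // drop its last page.
    void postFlush(GrDrawOpUploadToken startTokenForNextFlush) override {
        if (fAtlas) {
            fAtlas->compact(startTokenForNextFlush);
        }
    }

private:
    std::unique_ptr<GrDrawOpAtlas> fAtlas;   // ownership type assumed for the sketch
};

// Registration, as done elsewhere in this commit for the glyph cache and small-path renderer:
//     context->contextPriv().addOnFlushCallbackObject(&client);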
class GrDrawOpAtlas {
public:
@ -186,6 +201,8 @@ public:
}
}
void compact(GrDrawOpUploadToken startTokenForNextFlush);
static constexpr auto kGlyphMaxDim = 256;
static bool GlyphTooLargeForAtlas(int width, int height) {
return width > kGlyphMaxDim || height > kGlyphMaxDim;
@ -240,6 +257,10 @@ private:
void uploadToTexture(GrDrawOp::WritePixelsFn&, GrTextureProxy*);
void resetRects();
int flushesSinceLastUsed() { return fFlushesSinceLastUse; }
void resetFlushesSinceLastUsed() { fFlushesSinceLastUse = 0; }
void incFlushesSinceLastUsed() { fFlushesSinceLastUse++; }
private:
Plot(int pageIndex, int plotIndex, uint64_t genID, int offX, int offY, int width, int height,
GrPixelConfig config);
@ -265,6 +286,8 @@ private:
GrDrawOpUploadToken fLastUpload;
GrDrawOpUploadToken fLastUse;
// the number of flushes since this plot was last used
int fFlushesSinceLastUse;
struct {
const uint32_t fPageIndex : 16;
@ -310,10 +333,12 @@ private:
fPages[pageIdx].fPlotList.remove(plot);
fPages[pageIdx].fPlotList.addToHead(plot);
// TODO: make page MRU
// No MRU update for pages -- since we will always try to add from
// the front and remove from the back there is no need for MRU.
}
bool createNewPage();
void deleteLastPage();
inline void processEviction(AtlasID);
@ -326,6 +351,8 @@ private:
SkDEBUGCODE(uint32_t fNumPlots;)
uint64_t fAtlasGeneration;
// nextTokenToFlush() value at the end of the previous flush
GrDrawOpUploadToken fPrevFlushToken;
struct EvictionData {
EvictionFunc fFunc;

View File

@ -44,6 +44,8 @@ void GrDrawingManager::cleanup() {
delete fPathRendererChain;
fPathRendererChain = nullptr;
SkSafeSetNull(fSoftwarePathRenderer);
fOnFlushCBObjects.reset();
}
GrDrawingManager::~GrDrawingManager() {
@ -59,6 +61,13 @@ void GrDrawingManager::abandon() {
}
void GrDrawingManager::freeGpuResources() {
for (int i = fOnFlushCBObjects.count() - 1; i >= 0; --i) {
if (!fOnFlushCBObjects[i]->retainOnFreeGpuResources()) {
// it's safe to just do this because we're iterating in reverse
fOnFlushCBObjects.removeShuffle(i);
}
}
// a path renderer may be holding onto resources
delete fPathRendererChain;
fPathRendererChain = nullptr;
@ -66,6 +75,7 @@ void GrDrawingManager::freeGpuResources() {
for (int i = 0; i < fOpLists.count(); ++i) {
fOpLists[i]->freeGpuResources();
}
}
void GrDrawingManager::reset() {
@ -214,7 +224,7 @@ GrSemaphoresSubmitted GrDrawingManager::internalFlush(GrSurfaceProxy*,
fContext->getResourceCache()->notifyFlushOccurred(type);
}
for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
onFlushCBObject->postFlush();
onFlushCBObject->postFlush(fFlushState.nextTokenToFlush());
}
fFlushing = false;

View File

@ -9,6 +9,7 @@
#define GrOnFlushResourceProvider_DEFINED
#include "GrTypes.h"
#include "GrOpFlushState.h"
#include "GrResourceProvider.h"
#include "SkRefCnt.h"
#include "SkTArray.h"
@ -43,9 +44,17 @@ public:
/**
* Called once flushing is complete and all ops indicated by preFlush have been executed and
* released.
* released. startTokenForNextFlush can be used to track resources used in the current flush.
*/
virtual void postFlush() {}
virtual void postFlush(GrDrawOpUploadToken startTokenForNextFlush) {}
/**
* Tells the callback owner whether to hold onto this object when freeing GPU resources.
*
* In particular, GrDrawingManager::freeGpuResources() deletes all the path renderers.
* Any OnFlushCallbackObject associated with a path renderer will need to be deleted.
*/
virtual bool retainOnFreeGpuResources() { return false; }
private:
typedef SkRefCnt INHERITED;
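For contrast with the default above, a minimal sketch, not part of this change, of a callback object that opts in to surviving freeGpuResources; the class name is invented. GrAtlasGlyphCache later in this commit does exactly this, while GrSmallPathRenderer keeps the default and is dropped from the list.

#include "GrOnFlushResourceProvider.h"

// Hypothetical long-lived callback (name invented for this sketch).
class PersistentFlushCallback : public GrOnFlushCallbackObject {
public:
    void preFlush(GrOnFlushResourceProvider*, const uint32_t*, int,
                  SkTArray<sk_sp<GrRenderTargetContext>>*) override {}
    void postFlush(GrDrawOpUploadToken) override {}

    // Returning true keeps this object in GrDrawingManager's callback list when
    // freeGpuResources() purges callbacks tied to path renderers.
    bool retainOnFreeGpuResources() override { return true; }
};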

View File

@ -63,7 +63,9 @@ GrPathRendererChain::GrPathRendererChain(GrContext* context, const Options& opti
fChain.push_back(sk_make_sp<GrAALinearizingConvexPathRenderer>());
}
if (options.fGpuPathRenderers & GpuPathRenderers::kSmall) {
fChain.push_back(sk_make_sp<GrSmallPathRenderer>());
auto spr = sk_make_sp<GrSmallPathRenderer>();
context->contextPriv().addOnFlushCallbackObject(spr.get());
fChain.push_back(std::move(spr));
}
if (options.fGpuPathRenderers & GpuPathRenderers::kTessellating) {
fChain.push_back(sk_make_sp<GrTessellatingPathRenderer>());

View File

@ -387,7 +387,7 @@ void DrawPathsOp::onExecute(GrOpFlushState* flushState) {
SkASSERT(baseInstance == fBaseInstance + fDebugInstanceCount - fDebugSkippedInstances);
}
void GrCoverageCountingPathRenderer::postFlush() {
void GrCoverageCountingPathRenderer::postFlush(GrDrawOpUploadToken) {
SkASSERT(fFlushing);
fPerFlushAtlases.reset();
fPerFlushInstanceBuffer.reset();

View File

@ -44,7 +44,7 @@ public:
// GrOnFlushCallbackObject overrides.
void preFlush(GrOnFlushResourceProvider*, const uint32_t* opListIDs, int numOpListIDs,
SkTArray<sk_sp<GrRenderTargetContext>>* results) override;
void postFlush() override;
void postFlush(GrDrawOpUploadToken) override;
// This is the Op that ultimately draws a path into its final destination, using the atlas we
// generate at flush time.

View File

@ -33,6 +33,10 @@ public:
return fSequenceNumber == that.fSequenceNumber;
}
bool operator!=(const GrDrawOpUploadToken& that) const { return !(*this == that); }
bool inInterval(const GrDrawOpUploadToken& start, const GrDrawOpUploadToken& finish) {
return fSequenceNumber >= start.fSequenceNumber &&
fSequenceNumber <= finish.fSequenceNumber;
}
private:
GrDrawOpUploadToken();
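To make the inclusive interval semantics concrete, here is a tiny standalone analogue, not Skia code, of the recency test compact() performs; plain integers stand in for token sequence numbers, and the values are invented.

#include <cstdint>
#include <cstdio>

// Same check as GrDrawOpUploadToken::inInterval above: inclusive on both ends.
static bool inInterval(uint64_t seq, uint64_t start, uint64_t finish) {
    return seq >= start && seq <= finish;
}

int main() {
    uint64_t prevFlushToken = 10;          // fPrevFlushToken recorded at the end of the last compact()
    uint64_t startTokenForNextFlush = 15;  // token passed to compact() after the current flush

    // Last used at token 12: the plot was touched during this flush, so
    // compact() resets its flushesSinceLastUsed counter.
    std::printf("%d\n", inInterval(12, prevFlushToken, startTokenForNextFlush));  // prints 1

    // Last used at token 7: not touched this flush, so the counter grows;
    // after kRecentlyUsedCount (8) such flushes the plot is considered unused.
    std::printf("%d\n", inInterval(7, prevFlushToken, startTokenForNextFlush));   // prints 0
    return 0;
}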

View File

@ -9,6 +9,7 @@
#define GrSmallPathRenderer_DEFINED
#include "GrDrawOpAtlas.h"
#include "GrOnFlushResourceProvider.h"
#include "GrPathRenderer.h"
#include "GrRect.h"
#include "GrShape.h"
@ -18,7 +19,7 @@
class GrContext;
class GrSmallPathRenderer : public GrPathRenderer {
class GrSmallPathRenderer : public GrPathRenderer, public GrOnFlushCallbackObject {
public:
GrSmallPathRenderer();
~GrSmallPathRenderer() override;
@ -26,6 +27,21 @@ public:
class SmallPathOp;
struct PathTestStruct;
// GrOnFlushCallbackObject overrides
//
// Note: because this class is associated with a path renderer, we want it to be removed from
// the list of active OnFlushCallbackObjects in a freeGpuResources call (i.e., we accept the
// default retainOnFreeGpuResources implementation).
void preFlush(GrOnFlushResourceProvider*, const uint32_t*, int,
SkTArray<sk_sp<GrRenderTargetContext>>*) override {}
void postFlush(GrDrawOpUploadToken startTokenForNextFlush) override {
if (fAtlas) {
fAtlas->compact(startTokenForNextFlush);
}
}
private:
StencilSupport onGetStencilSupport(const GrShape&) const override {
return GrPathRenderer::kNoSupport_StencilSupport;

View File

@ -11,6 +11,7 @@
#include "GrCaps.h"
#include "GrDrawOpAtlas.h"
#include "GrGlyph.h"
#include "GrOnFlushResourceProvider.h"
#include "SkArenaAlloc.h"
#include "SkGlyphCache.h"
#include "SkTDynamicHash.h"
@ -108,10 +109,10 @@ private:
though this is more or less transparent to the client (aside from atlasGeneration, described
* below).
*/
class GrAtlasGlyphCache {
class GrAtlasGlyphCache : public GrOnFlushCallbackObject {
public:
GrAtlasGlyphCache(GrContext*, float maxTextureBytes);
~GrAtlasGlyphCache();
~GrAtlasGlyphCache() override;
// The user of the cache may hold a long-lived ref to the returned strike. However, actions by
// another client of the cache may cause the strike to be purged while it is still reffed.
// Therefore, the caller must check GrAtlasTextStrike::isAbandoned() if there are other
@ -181,6 +182,23 @@ public:
return this->getAtlas(format)->atlasGeneration();
}
// GrOnFlushCallbackObject overrides
void preFlush(GrOnFlushResourceProvider*, const uint32_t*, int,
SkTArray<sk_sp<GrRenderTargetContext>>*) override {}
void postFlush(GrDrawOpUploadToken startTokenForNextFlush) override {
for (int i = 0; i < kMaskFormatCount; ++i) {
if (fAtlases[i]) {
fAtlases[i]->compact(startTokenForNextFlush);
}
}
}
// The atlas glyph cache always survives freeGpuResources, so we want it to remain in the active
// OnFlushCallbackObject list.
bool retainOnFreeGpuResources() override { return true; }
///////////////////////////////////////////////////////////////////////////
// Functions intended for debug only
#ifdef SK_DEBUG