Add UMA histogram tracking for the number of render passes per submit.

This UMA stat lets us look at a few different things. First, we can
look at the distribution of render passes per submit (including mean,
median, etc.). Additionally, we can do things like sum the total number
of render passes from all submits and divide by the total number of
frames (Compositing.Display.DrawToSwapUs::TotalCount) to get the
average number of render passes per frame, not just per submit.
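
As a worked example with hypothetical numbers: if the SubmitRenderPasses
histogram sums to 12,000 render passes across 5,000 submits while
Compositing.Display.DrawToSwapUs::TotalCount reports 4,000 frames, that
works out to 12,000 / 5,000 = 2.4 render passes per submit but
12,000 / 4,000 = 3.0 render passes per frame.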

Technically, under the hood the ENUMERATION and EXACT_LINEAR macros do
the same thing, and we could just use ENUMERATION here. However, to
match how Chrome uses its UMA macros, it is more correct to use
EXACT_LINEAR since we aren't counting enum values. Chrome's macros
actually have static asserts that the values are (or are not) enums,
but the Skia implementations do not.
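
For context, here is a rough sketch of the Chrome-side distinction this is
mirroring. The enum, histogram names, and sample values below are made up for
illustration; UMA_HISTOGRAM_ENUMERATION and UMA_HISTOGRAM_EXACT_LINEAR are the
Chrome macros being referred to.

#include "base/metrics/histogram_macros.h"

// Hypothetical enum; the enum-class form of UMA_HISTOGRAM_ENUMERATION
// expects a kMaxValue member and checks the sample type at compile time.
enum class WidgetKind { kPlain, kFancy, kMaxValue = kFancy };

void RecordExampleHistograms(int renderPassCount) {
    // Enum-valued sample: the enumeration macro is the right fit.
    UMA_HISTOGRAM_ENUMERATION("MyFeature.WidgetKind", WidgetKind::kFancy);

    // Plain integer count: exact-linear is the semantically correct choice,
    // even though both macros end up with the same linear bucket layout.
    UMA_HISTOGRAM_EXACT_LINEAR("MyFeature.SubmitRenderPasses", renderPassCount,
                               /*exclusive_max=*/100);
}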

Includes some minor updates to parameter names to match Chrome's UMA macro values.

This still requires the Chrome-side implementation of the new macro.
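
A minimal sketch of what that Chrome-side implementation could look like,
assuming the embedder simply forwards to Chrome's UMA macros (the "Skia."
histogram prefix and the forwarding shown here are illustrative assumptions,
not the actual Chrome change):

// Defined by the embedder before including Skia's headers, so that
// SK_HISTOGRAMS_ENABLED ends up set to 1.
#include "base/metrics/histogram_macros.h"

#define SK_HISTOGRAM_BOOLEAN(name, sample) \
    UMA_HISTOGRAM_BOOLEAN("Skia." name, sample)
#define SK_HISTOGRAM_ENUMERATION(name, sample, enum_size) \
    UMA_HISTOGRAM_ENUMERATION("Skia." name, sample, enum_size)
#define SK_HISTOGRAM_EXACT_LINEAR(name, sample, value_max) \
    UMA_HISTOGRAM_EXACT_LINEAR("Skia." name, sample, value_max)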

Bug: skia:10871
Change-Id: Idbc4d2fc649bbdefd0952a002c3327cb700b552b
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/329776
Commit-Queue: Greg Daniel <egdaniel@google.com>
Reviewed-by: Brian Salomon <bsalomon@google.com>
Reviewed-by: Adlai Holler <adlai@google.com>
Author: Greg Daniel, 2020-10-27 09:20:20 -04:00 (committed by Skia Commit-Bot)
parent 890363ae34
commit 65476e0f5b
16 changed files with 121 additions and 72 deletions

@ -81,7 +81,8 @@
* Skia consumers can provide their own definitions of these macros to
* integrate with their histogram collection backend.
*/
//#define SK_HISTOGRAM_BOOLEAN(name, value)
//#define SK_HISTOGRAM_ENUMERATION(name, value, boundary_value)
//#define SK_HISTOGRAM_BOOLEAN(name, sample)
//#define SK_HISTOGRAM_ENUMERATION(name, sample, enum_size)
//#define SK_HISTOGRAM_EXACT_LINEAR(name, sample, value_max)
#endif

@ -396,18 +396,24 @@
# define GR_TEST_UTILS 0
#endif
#if defined(SK_HISTOGRAM_ENUMERATION) && defined(SK_HISTOGRAM_BOOLEAN)
#if defined(SK_HISTOGRAM_ENUMERATION) || \
defined(SK_HISTOGRAM_BOOLEAN) || \
defined(SK_HISTOGRAM_EXACT_LINEAR)
# define SK_HISTOGRAMS_ENABLED 1
#else
# define SK_HISTOGRAMS_ENABLED 0
#endif
#ifndef SK_HISTOGRAM_BOOLEAN
# define SK_HISTOGRAM_BOOLEAN(name, value)
# define SK_HISTOGRAM_BOOLEAN(name, sample)
#endif
#ifndef SK_HISTOGRAM_ENUMERATION
# define SK_HISTOGRAM_ENUMERATION(name, value, boundary_value)
# define SK_HISTOGRAM_ENUMERATION(name, sample, enum_size)
#endif
#ifndef SK_HISTOGRAM_EXACT_LINEAR
# define SK_HISTOGRAM_EXACT_LINEAR(name, sample, value_max)
#endif
#ifndef SK_DISABLE_LEGACY_SHADERCONTEXT

@ -631,6 +631,22 @@ void GrGpu::executeFlushInfo(GrSurfaceProxy* proxies[],
this->prepareSurfacesForBackendAccessAndStateUpdates(proxies, numProxies, access, newState);
}
GrOpsRenderPass* GrGpu::getOpsRenderPass(
GrRenderTarget* renderTarget,
GrAttachment* stencil,
GrSurfaceOrigin origin,
const SkIRect& bounds,
const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
GrXferBarrierFlags renderPassXferBarriers) {
#if SK_HISTOGRAMS_ENABLED
fCurrentSubmitRenderPassCount++;
#endif
return this->onGetOpsRenderPass(renderTarget, stencil, origin, bounds, colorInfo, stencilInfo,
sampledProxies, renderPassXferBarriers);
}
bool GrGpu::submitToGpu(bool syncCpu) {
this->stats()->incNumSubmitToGpus();
@ -646,6 +662,17 @@ bool GrGpu::submitToGpu(bool syncCpu) {
this->callSubmittedProcs(submitted);
#if SK_HISTOGRAMS_ENABLED
// The max allowed value for SK_HISTOGRAM_EXACT_LINEAR is 100. If we want to support higher
// values we can add SK_HISTOGRAM_CUSTOM_COUNTS, but that macro uses a number of buckets that
// is less than the number of distinct values it records.
static constexpr int kMaxRenderPassBucketValue = 100;
SK_HISTOGRAM_EXACT_LINEAR("SubmitRenderPasses",
std::min(fCurrentSubmitRenderPassCount, kMaxRenderPassBucketValue),
kMaxRenderPassBucketValue);
fCurrentSubmitRenderPassCount = 0;
#endif
return submitted;
}
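
The custom-counts alternative mentioned in the comment above is not added by
this change; if it were, a hedged sketch of an embedder-side forwarding could
look like the following (the macro name mirrors the comment, the "Skia."
prefix is an illustrative assumption, and UMA_HISTOGRAM_CUSTOM_COUNTS is
Chrome's existing macro):

// Covers samples above 100, but with a fixed bucket_count it no longer keeps
// one bucket per distinct value the way EXACT_LINEAR does.
#define SK_HISTOGRAM_CUSTOM_COUNTS(name, sample, count_min, count_max, bucket_count) \
    UMA_HISTOGRAM_CUSTOM_COUNTS("Skia." name, sample, count_min, count_max, bucket_count)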

@ -348,14 +348,14 @@ public:
// If a 'stencil' is provided it will be the one bound to 'renderTarget'. If one is not
// provided but 'renderTarget' has a stencil buffer then that is a signal that the
// render target's stencil buffer should be ignored.
virtual GrOpsRenderPass* getOpsRenderPass(GrRenderTarget* renderTarget,
GrAttachment* stencil,
GrSurfaceOrigin,
const SkIRect& bounds,
const GrOpsRenderPass::LoadAndStoreInfo&,
const GrOpsRenderPass::StencilLoadAndStoreInfo&,
const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
GrXferBarrierFlags renderPassXferBarriers) = 0;
GrOpsRenderPass* getOpsRenderPass(GrRenderTarget* renderTarget,
GrAttachment* stencil,
GrSurfaceOrigin,
const SkIRect& bounds,
const GrOpsRenderPass::LoadAndStoreInfo&,
const GrOpsRenderPass::StencilLoadAndStoreInfo&,
const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
GrXferBarrierFlags renderPassXferBarriers);
// Called by GrDrawingManager when flushing.
// Provides a hook for post-flush actions (e.g. Vulkan command buffer submits). This will also
@ -857,6 +857,16 @@ private:
virtual bool onCopySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
const SkIPoint& dstPoint) = 0;
virtual GrOpsRenderPass* onGetOpsRenderPass(
GrRenderTarget* renderTarget,
GrAttachment* stencil,
GrSurfaceOrigin,
const SkIRect& bounds,
const GrOpsRenderPass::LoadAndStoreInfo&,
const GrOpsRenderPass::StencilLoadAndStoreInfo&,
const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
GrXferBarrierFlags renderPassXferBarriers) = 0;
virtual void prepareSurfacesForBackendAccessAndStateUpdates(
GrSurfaceProxy* proxies[],
int numProxies,
@ -901,6 +911,10 @@ private:
bool fOOMed = false;
#if SK_HISTOGRAMS_ENABLED
int fCurrentSubmitRenderPassCount = 0;
#endif
friend class GrPathRendering;
using INHERITED = SkRefCnt;
};

@ -115,7 +115,7 @@ void GrD3DGpu::destroyResources() {
fResourceProvider.destroyResources();
}
GrOpsRenderPass* GrD3DGpu::getOpsRenderPass(
GrOpsRenderPass* GrD3DGpu::onGetOpsRenderPass(
GrRenderTarget* rt,
GrAttachment*,
GrSurfaceOrigin origin,

@ -96,15 +96,6 @@ public:
return nullptr;
}
GrOpsRenderPass* getOpsRenderPass(GrRenderTarget*,
GrAttachment*,
GrSurfaceOrigin,
const SkIRect&,
const GrOpsRenderPass::LoadAndStoreInfo&,
const GrOpsRenderPass::StencilLoadAndStoreInfo&,
const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
GrXferBarrierFlags renderPassXferBarriers) override;
void addResourceBarriers(sk_sp<GrManagedResource> resource,
int numBarriers,
D3D12_RESOURCE_TRANSITION_BARRIER* barriers) const;
@ -213,6 +204,15 @@ private:
GrGpuFinishedContext finishedContext) override;
void addFinishedCallback(sk_sp<GrRefCntedCallback> finishedCallback);
GrOpsRenderPass* onGetOpsRenderPass(GrRenderTarget*,
GrAttachment*,
GrSurfaceOrigin,
const SkIRect&,
const GrOpsRenderPass::LoadAndStoreInfo&,
const GrOpsRenderPass::StencilLoadAndStoreInfo&,
const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
GrXferBarrierFlags renderPassXferBarriers) override;
void prepareSurfacesForBackendAccessAndStateUpdates(
GrSurfaceProxy* proxies[],
int numProxies,

@ -150,7 +150,7 @@ void GrDawnGpu::disconnect(DisconnectType type) {
///////////////////////////////////////////////////////////////////////////////
GrOpsRenderPass* GrDawnGpu::getOpsRenderPass(
GrOpsRenderPass* GrDawnGpu::onGetOpsRenderPass(
GrRenderTarget* rt,
GrAttachment*,
GrSurfaceOrigin origin,

@ -77,15 +77,6 @@ public:
return nullptr;
}
GrOpsRenderPass* getOpsRenderPass(GrRenderTarget*,
GrAttachment*,
GrSurfaceOrigin,
const SkIRect& bounds,
const GrOpsRenderPass::LoadAndStoreInfo&,
const GrOpsRenderPass::StencilLoadAndStoreInfo&,
const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
GrXferBarrierFlags renderPassXferBarriers) override;
SkSL::Compiler* shaderCompiler() const {
return fCompiler.get();
}
@ -206,6 +197,15 @@ private:
void addFinishedProc(GrGpuFinishedProc finishedProc,
GrGpuFinishedContext finishedContext) override;
GrOpsRenderPass* onGetOpsRenderPass(GrRenderTarget*,
GrAttachment*,
GrSurfaceOrigin,
const SkIRect&,
const GrOpsRenderPass::LoadAndStoreInfo&,
const GrOpsRenderPass::StencilLoadAndStoreInfo&,
const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
GrXferBarrierFlags renderPassXferBarriers) override;
bool onSubmitToGpu(bool syncCpu) override;
void uploadTextureData(GrColorType srcColorType, const GrMipLevel texels[], int mipLevelCount,

@ -2153,7 +2153,7 @@ bool GrGLGpu::onReadPixels(GrSurface* surface, int left, int top, int width, int
dstColorType, buffer, rowPixelWidth);
}
GrOpsRenderPass* GrGLGpu::getOpsRenderPass(
GrOpsRenderPass* GrGLGpu::onGetOpsRenderPass(
GrRenderTarget* rt,
GrAttachment*,
GrSurfaceOrigin origin,

@ -119,15 +119,6 @@ public:
void endCommandBuffer(GrRenderTarget*, const GrOpsRenderPass::LoadAndStoreInfo& colorLoadStore,
const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilLoadStore);
GrOpsRenderPass* getOpsRenderPass(GrRenderTarget*,
GrAttachment*,
GrSurfaceOrigin,
const SkIRect&,
const GrOpsRenderPass::LoadAndStoreInfo&,
const GrOpsRenderPass::StencilLoadAndStoreInfo&,
const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
GrXferBarrierFlags renderPassXferBarriers) override;
void invalidateBoundRenderTarget() {
fHWBoundRenderTargetUniqueID.makeInvalid();
}
@ -334,6 +325,15 @@ private:
void addFinishedProc(GrGpuFinishedProc finishedProc,
GrGpuFinishedContext finishedContext) override;
GrOpsRenderPass* onGetOpsRenderPass(GrRenderTarget*,
GrAttachment*,
GrSurfaceOrigin,
const SkIRect&,
const GrOpsRenderPass::LoadAndStoreInfo&,
const GrOpsRenderPass::StencilLoadAndStoreInfo&,
const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
GrXferBarrierFlags renderPassXferBarriers) override;
bool onSubmitToGpu(bool syncCpu) override;
bool waitSync(GrGLsync, uint64_t timeout, bool flush);

@ -54,7 +54,7 @@ sk_sp<GrGpu> GrMockGpu::Make(const GrMockOptions* mockOptions,
return sk_sp<GrGpu>(new GrMockGpu(direct, *mockOptions, contextOptions));
}
GrOpsRenderPass* GrMockGpu::getOpsRenderPass(GrRenderTarget* rt,
GrOpsRenderPass* GrMockGpu::onGetOpsRenderPass(GrRenderTarget* rt,
GrAttachment*,
GrSurfaceOrigin origin,
const SkIRect& bounds,

@ -24,15 +24,6 @@ public:
~GrMockGpu() override {}
GrOpsRenderPass* getOpsRenderPass(GrRenderTarget*,
GrAttachment*,
GrSurfaceOrigin,
const SkIRect&,
const GrOpsRenderPass::LoadAndStoreInfo&,
const GrOpsRenderPass::StencilLoadAndStoreInfo&,
const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
GrXferBarrierFlags renderPassXferBarriers) override;
GrFence SK_WARN_UNUSED_RESULT insertFence() override { return 0; }
bool waitFence(GrFence) override { return true; }
void deleteFence(GrFence) const override {}
@ -137,6 +128,15 @@ private:
finishedProc(finishedContext);
}
GrOpsRenderPass* onGetOpsRenderPass(GrRenderTarget*,
GrAttachment*,
GrSurfaceOrigin,
const SkIRect&,
const GrOpsRenderPass::LoadAndStoreInfo&,
const GrOpsRenderPass::StencilLoadAndStoreInfo&,
const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
GrXferBarrierFlags renderPassXferBarriers) override;
bool onSubmitToGpu(bool syncCpu) override {
return true;
}

@ -83,14 +83,6 @@ public:
bool onCopySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
const SkIPoint& dstPoint) override;
GrOpsRenderPass* getOpsRenderPass(
GrRenderTarget*, GrAttachment*,
GrSurfaceOrigin, const SkIRect& bounds,
const GrOpsRenderPass::LoadAndStoreInfo&,
const GrOpsRenderPass::StencilLoadAndStoreInfo&,
const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
GrXferBarrierFlags renderPassXferBarriers) override;
SkSL::Compiler* shaderCompiler() const { return fCompiler.get(); }
void submit(GrOpsRenderPass* renderPass) override;
@ -215,6 +207,15 @@ private:
GrGpuFinishedContext finishedContext) override;
void addFinishedCallback(sk_sp<GrRefCntedCallback> finishedCallback);
GrOpsRenderPass* onGetOpsRenderPass(GrRenderTarget*,
GrAttachment*,
GrSurfaceOrigin,
const SkIRect&,
const GrOpsRenderPass::LoadAndStoreInfo&,
const GrOpsRenderPass::StencilLoadAndStoreInfo&,
const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
GrXferBarrierFlags renderPassXferBarriers) override;
bool onSubmitToGpu(bool syncCpu) override;
// Commits the current command buffer to the queue and then creates a new command buffer. If

@ -182,7 +182,7 @@ void GrMtlGpu::destroyResources() {
fDevice = nil;
}
GrOpsRenderPass* GrMtlGpu::getOpsRenderPass(
GrOpsRenderPass* GrMtlGpu::onGetOpsRenderPass(
GrRenderTarget* renderTarget, GrAttachment*,
GrSurfaceOrigin origin, const SkIRect& bounds,
const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,

@ -320,7 +320,7 @@ void GrVkGpu::disconnect(DisconnectType type) {
///////////////////////////////////////////////////////////////////////////////
GrOpsRenderPass* GrVkGpu::getOpsRenderPass(
GrOpsRenderPass* GrVkGpu::onGetOpsRenderPass(
GrRenderTarget* rt,
GrAttachment* stencil,
GrSurfaceOrigin origin,

@ -120,15 +120,6 @@ public:
int numSamples,
GrProtected isProtected) override;
GrOpsRenderPass* getOpsRenderPass(GrRenderTarget*,
GrAttachment*,
GrSurfaceOrigin,
const SkIRect&,
const GrOpsRenderPass::LoadAndStoreInfo&,
const GrOpsRenderPass::StencilLoadAndStoreInfo&,
const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
GrXferBarrierFlags renderPassXferBarriers) override;
void addBufferMemoryBarrier(const GrManagedResource*,
VkPipelineStageFlags srcStageMask,
VkPipelineStageFlags dstStageMask,
@ -292,6 +283,15 @@ private:
void addFinishedCallback(sk_sp<GrRefCntedCallback> finishedCallback);
GrOpsRenderPass* onGetOpsRenderPass(GrRenderTarget*,
GrAttachment*,
GrSurfaceOrigin,
const SkIRect&,
const GrOpsRenderPass::LoadAndStoreInfo&,
const GrOpsRenderPass::StencilLoadAndStoreInfo&,
const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
GrXferBarrierFlags renderPassXferBarriers) override;
void prepareSurfacesForBackendAccessAndStateUpdates(
GrSurfaceProxy* proxies[],
int numProxies,