Move Vulkan memory UMA stats to report on submit.

The current model is biased towards users who make lots of allocations, so the
overall histogram gets more samples from clients with high allocation/memory
use. Reporting at submit time instead should make the samples much more even
across all users.

Change-Id: I269df9ea5e54439f0cca5e7637b0f39d1eaf903a
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/336957
Commit-Queue: Greg Daniel <egdaniel@google.com>
Reviewed-by: Brian Salomon <bsalomon@google.com>
Greg Daniel, 2020-11-20 10:50:50 -05:00 (committed by Skia Commit-Bot)
commit f77b6e6a68 (parent df29db4c41)
5 changed files with 25 additions and 18 deletions
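
For orientation, a minimal, self-contained sketch of the call flow this change sets up, assembled from the hunks below. The "Sketch" class names and everything outside the reporting path are illustrative, not the actual Skia sources. To see the bias with made-up numbers: with per-allocation reporting, a client making 1,000 allocations per frame contributed 100x more histogram samples than one making 10, whereas per-submit reporting gives each roughly one sample per submit.

// A minimal sketch (not the Skia sources) of the reporting path this CL sets up.
// Names ending in "Sketch" and all bodies are illustrative only.
#include <cstdio>

class GrGpuSketch {
public:
    virtual ~GrGpuSketch() = default;

    bool submitToGpu(bool syncCpu) {
        bool submitted = this->onSubmitToGpu(syncCpu);
        // Histograms are now tied to submits rather than to individual allocations.
        this->reportSubmitHistograms();
        return submitted;
    }

private:
    void reportSubmitHistograms() {
        // Backend-independent histograms (e.g. the SubmitRenderPasses count guarded by
        // SK_HISTOGRAMS_ENABLED in the real code) would be recorded here...
        this->onReportSubmitHistograms();  // ...then each backend adds its own.
    }

    virtual bool onSubmitToGpu(bool syncCpu) = 0;
    virtual void onReportSubmitHistograms() {}  // default: nothing extra to report
};

class GrVkGpuSketch : public GrGpuSketch {
private:
    bool onSubmitToGpu(bool /*syncCpu*/) override { return true; }

    void onReportSubmitHistograms() override {
        // The Vulkan backend reports its memory-allocator stats (PercentUsed,
        // AmountAllocated) here, replacing the per-allocation report_memory_usage().
        std::printf("VulkanMemoryAllocator stats reported for this submit\n");
    }
};

int main() {
    GrVkGpuSketch gpu;
    gpu.submitToGpu(/*syncCpu=*/false);  // one submit -> one round of histogram samples
    return 0;
}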


@@ -661,6 +661,12 @@ bool GrGpu::submitToGpu(bool syncCpu) {
     this->callSubmittedProcs(submitted);
+    this->reportSubmitHistograms();
+    return submitted;
+}
+
+void GrGpu::reportSubmitHistograms() {
 #if SK_HISTOGRAMS_ENABLED
     // The max allowed value for SK_HISTOGRAM_EXACT_LINEAR is 100. If we want to support higher
     // values we can add SK_HISTOGRAM_CUSTOM_COUNTS but this has a number of buckets that is less
@@ -672,7 +678,7 @@ bool GrGpu::submitToGpu(bool syncCpu) {
     fCurrentSubmitRenderPassCount = 0;
 #endif
-    return submitted;
+    this->onReportSubmitHistograms();
 }
 
 bool GrGpu::checkAndResetOOMed() {


@@ -875,6 +875,9 @@ private:
     virtual bool onSubmitToGpu(bool syncCpu) = 0;
 
+    void reportSubmitHistograms();
+    virtual void onReportSubmitHistograms() {}
+
 #ifdef SK_ENABLE_DUMP_GPU
     virtual void onDumpJSON(SkJSONWriter*) const {}
 #endif


@@ -2128,6 +2128,19 @@ bool GrVkGpu::onSubmitToGpu(bool syncCpu) {
     }
 }
 
+void GrVkGpu::onReportSubmitHistograms() {
+#if SK_HISTOGRAMS_ENABLED
+    uint64_t allocatedMemory = fMemoryAllocator->totalAllocatedMemory();
+    uint64_t usedMemory = fMemoryAllocator->totalUsedMemory();
+    SkASSERT(usedMemory <= allocatedMemory);
+    SK_HISTOGRAM_PERCENTAGE("VulkanMemoryAllocator.PercentUsed",
+                            (usedMemory * 100) / allocatedMemory);
+    // allocatedMemory is in bytes and needs to be reported in kilobytes. SK_HISTOGRAM_MEMORY_KB
+    // supports samples up to around 500MB which should support the amounts of memory we allocate.
+    SK_HISTOGRAM_MEMORY_KB("VulkanMemoryAllocator.AmountAllocated", allocatedMemory >> 10);
+#endif
+}
+
 static int get_surface_sample_cnt(GrSurface* surf) {
     if (const GrRenderTarget* rt = surf->asRenderTarget()) {
         return rt->numSamples();
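
As a quick sanity check on the units in the hunk above (illustrative numbers, not part of the CL): the allocator totals are in bytes, the percentage is computed with integer math, and the allocated total is shifted right by 10 to convert bytes to kilobytes for SK_HISTOGRAM_MEMORY_KB, whose range tops out around 500 MB (~512000 KB):

// Illustrative check of the math used above, with made-up numbers.
#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
    // Hypothetical allocator totals, in bytes.
    uint64_t allocatedMemory = 256ull * 1024 * 1024;  // 256 MiB allocated from the driver
    uint64_t usedMemory      = 192ull * 1024 * 1024;  // 192 MiB actually in use

    assert(usedMemory <= allocatedMemory);

    // Integer percentage, as in the CL: (192 / 256) * 100 == 75.
    uint64_t percentUsed = (usedMemory * 100) / allocatedMemory;

    // Bytes -> kilobytes via >> 10; 256 MiB becomes 262144 KB, well under the
    // ~500 MB (~512000 KB) ceiling mentioned in the comment.
    uint64_t allocatedKB = allocatedMemory >> 10;

    std::printf("PercentUsed = %llu%%, AmountAllocated = %llu KB\n",
                (unsigned long long)percentUsed, (unsigned long long)allocatedKB);
    return 0;
}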


@@ -299,6 +299,8 @@ private:
     bool onSubmitToGpu(bool syncCpu) override;
 
+    void onReportSubmitHistograms() override;
+
     // Ends and submits the current command buffer to the queue and then creates a new command
     // buffer and begins it. If sync is set to kForce_SyncQueue, the function will wait for all
     // work in the queue to finish before returning. If this GrVkGpu object has any semaphores in


@@ -14,19 +14,6 @@
 using AllocationPropertyFlags = GrVkMemoryAllocator::AllocationPropertyFlags;
 using BufferUsage = GrVkMemoryAllocator::BufferUsage;
 
-static void report_memory_usage(GrVkMemoryAllocator* allocator) {
-#if SK_HISTOGRAMS_ENABLED
-    uint64_t allocatedMemory = allocator->totalAllocatedMemory();
-    uint64_t usedMemory = allocator->totalUsedMemory();
-    SkASSERT(usedMemory <= allocatedMemory);
-    SK_HISTOGRAM_PERCENTAGE("VulkanMemoryAllocator.PercentUsed",
-                            (usedMemory * 100) / allocatedMemory);
-    // allocatedMemory is in bytes and need to be reported it in kilobytes. SK_HISTOGRAM_MEMORY_KB
-    // supports samples up to around 500MB which should support the amounts of memory we allocate.
-    SK_HISTOGRAM_MEMORY_KB("VulkanMemoryAllocator.AmountAllocated", allocatedMemory >> 10);
-#endif
-}
-
 static BufferUsage get_buffer_usage(GrVkBuffer::Type type, bool dynamic) {
     switch (type) {
         case GrVkBuffer::kVertex_Type: // fall through
@@ -85,8 +72,6 @@ bool GrVkMemory::AllocAndBindBufferMemory(GrVkGpu* gpu,
         return false;
     }
 
-    report_memory_usage(allocator);
-
     return true;
 }
@@ -139,8 +124,6 @@ bool GrVkMemory::AllocAndBindImageMemory(GrVkGpu* gpu,
         return false;
     }
 
-    report_memory_usage(allocator);
-
     return true;
 }