Add option to report discarded allocations in sampling heap profiler
A couple of customers have asked about using DevTools to get information about temporary allocations, with the goal of reducing GC time and/or peak memory usage. Currently, the sampling heap profiler reports only objects which are still alive at the end of the profiling session. In this change, I propose adding configuration options when starting the sampling heap profiler so that it can optionally include information about objects which were discarded by the GC before the end of the profiling session. A user could run the sampling heap profiler in several different modes depending on their goals:

1. To find memory leaks or determine which functions contribute most to steady-state memory consumption, the current default mode is best.
2. To find functions which cause large temporary memory spikes or long GC pauses, the user can request data about both live objects and those collected by major GC.
3. To tune for minimal GC activity in latency-sensitive applications like real-time audio processing, the user can request data about every allocation, including objects collected by major or minor GC.
4. I'm not sure why anybody would want data about objects collected by minor GC but not objects collected by major GC, but it is also a valid combination of flags.

Change-Id: If55d5965a1de04fed3ae640a02ca369723f64fdf
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3868522
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Camillo Bruni <cbruni@chromium.org>
Reviewed-by: Simon Zünd <szuend@chromium.org>
Commit-Queue: Seth Brenith <seth.brenith@microsoft.com>
Cr-Commit-Position: refs/heads/main@{#83202}
commit 3d59a3c2c1
parent 95216968f5
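The three useful modes listed above map directly onto the new SamplingFlags bits added to v8::HeapProfiler in this change. As a rough illustration for embedders (not part of this CL; StartProfilingForMode is a hypothetical helper, and the isolate is assumed to already exist), the modes could be selected like this:

#include "v8-profiler.h"
#include "v8.h"

// Sketch only: picks the sampling mode described in the commit message.
// Assumes |isolate| is an already-initialized v8::Isolate owned by the embedder
// and |mode| is one of the numbered modes (1-3) above.
void StartProfilingForMode(v8::Isolate* isolate, int mode) {
  v8::HeapProfiler* profiler = isolate->GetHeapProfiler();
  int flags = v8::HeapProfiler::kSamplingNoFlags;
  if (mode >= 2) {
    // Mode 2: also report objects discarded by major GC (temporary memory
    // spikes, long GC pauses).
    flags |= v8::HeapProfiler::kSamplingIncludeObjectsCollectedByMajorGC;
  }
  if (mode >= 3) {
    // Mode 3: additionally report objects discarded by minor GC (tuning for
    // minimal GC activity).
    flags |= v8::HeapProfiler::kSamplingIncludeObjectsCollectedByMinorGC;
  }
  // A 32768-byte sample interval and 128 stack frames mirror the values the
  // inspector agent uses in this change; embedders can pick their own.
  profiler->StartSamplingHeapProfiler(
      32768, 128, static_cast<v8::HeapProfiler::SamplingFlags>(flags));
}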
@@ -766,6 +766,22 @@ experimental domain HeapProfiler
       # Average sample interval in bytes. Poisson distribution is used for the intervals. The
       # default value is 32768 bytes.
       optional number samplingInterval
+      # By default, the sampling heap profiler reports only objects which are
+      # still alive when the profile is returned via getSamplingProfile or
+      # stopSampling, which is useful for determining what functions contribute
+      # the most to steady-state memory usage. This flag instructs the sampling
+      # heap profiler to also include information about objects discarded by
+      # major GC, which will show which functions cause large temporary memory
+      # usage or long GC pauses.
+      optional boolean includeObjectsCollectedByMajorGC
+      # By default, the sampling heap profiler reports only objects which are
+      # still alive when the profile is returned via getSamplingProfile or
+      # stopSampling, which is useful for determining what functions contribute
+      # the most to steady-state memory usage. This flag instructs the sampling
+      # heap profiler to also include information about objects discarded by
+      # minor GC, which is useful when tuning a latency-sensitive application
+      # for minimal GC activity.
+      optional boolean includeObjectsCollectedByMinorGC

   command startTrackingHeapObjects
     parameters
@@ -903,6 +903,8 @@ class V8_EXPORT HeapProfiler {
   enum SamplingFlags {
     kSamplingNoFlags = 0,
     kSamplingForceGC = 1 << 0,
+    kSamplingIncludeObjectsCollectedByMajorGC = 1 << 1,
+    kSamplingIncludeObjectsCollectedByMinorGC = 1 << 2,
   };

   /**
@@ -1097,10 +1099,8 @@ class V8_EXPORT HeapProfiler {
    * |stack_depth| parameter controls the maximum number of stack frames to be
    * captured on each allocation.
    *
-   * NOTE: This is a proof-of-concept at this point. Right now we only sample
-   * newspace allocations. Support for paged space allocation (e.g. pre-tenured
-   * objects, large objects, code objects, etc.) and native allocations
-   * doesn't exist yet, but is anticipated in the future.
+   * NOTE: Support for native allocations doesn't exist yet, but is anticipated
+   * in the future.
    *
    * Objects allocated before the sampling is started will not be included in
    * the profile.
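For context (not part of the diff): once sampling is started with these flags, the result is still read through the existing GetAllocationProfile API in this same header; the new flags only change which samples remain in the reported tree. A minimal sketch of summing the raw sampled bytes, assuming an isolate whose sampling heap profiler is already running (no scaling by the sampling rate is attempted here):

#include <memory>

#include "v8-profiler.h"
#include "v8.h"

// Sketch only: sums the sampled allocation sizes recorded in an
// AllocationProfile. With kSamplingIncludeObjectsCollectedByMajorGC and/or
// kSamplingIncludeObjectsCollectedByMinorGC set, this total also counts
// objects the GC has already discarded.
size_t SampledBytes(v8::AllocationProfile::Node* node) {
  size_t total = 0;
  for (const auto& allocation : node->allocations) {
    total += allocation.size * allocation.count;
  }
  for (v8::AllocationProfile::Node* child : node->children) {
    total += SampledBytes(child);
  }
  return total;
}

size_t SampledBytes(v8::Isolate* isolate) {
  // GetAllocationProfile() returns a snapshot whose ownership is transferred
  // to the caller.
  std::unique_ptr<v8::AllocationProfile> profile(
      isolate->GetHeapProfiler()->GetAllocationProfile());
  return profile ? SampledBytes(profile->GetRootNode()) : 0;
}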
@@ -1819,6 +1819,8 @@ bool Heap::CollectGarbage(AllocationSpace space,

   collector = SelectGarbageCollector(space, gc_reason, &collector_reason);

+  current_or_last_garbage_collector_ = collector;
+
   if (collector == GarbageCollector::MARK_COMPACTOR &&
       incremental_marking()->IsMinorMarking()) {
     CollectGarbage(NEW_SPACE, GarbageCollectionReason::kFinalizeMinorMC);
@@ -1455,6 +1455,10 @@ class Heap {

   bool is_current_gc_forced() const { return is_current_gc_forced_; }

+  GarbageCollector current_or_last_garbage_collector() const {
+    return current_or_last_garbage_collector_;
+  }
+
   // Returns whether the currently in-progress GC should avoid increasing the
   // ages on any objects that live for a set number of collections.
   bool ShouldCurrentGCKeepAgesUnchanged() const {
@@ -2389,6 +2393,8 @@ class Heap {

   bool is_current_gc_forced_ = false;
   bool is_current_gc_for_heap_profiler_ = false;
+  GarbageCollector current_or_last_garbage_collector_ =
+      GarbageCollector::SCAVENGER;

   ExternalStringTable external_string_table_;

@@ -29,6 +29,7 @@ static const char allocationTrackingEnabled[] = "allocationTrackingEnabled";
 static const char samplingHeapProfilerEnabled[] = "samplingHeapProfilerEnabled";
 static const char samplingHeapProfilerInterval[] =
     "samplingHeapProfilerInterval";
+static const char samplingHeapProfilerFlags[] = "samplingHeapProfilerFlags";
 }  // namespace HeapProfilerAgentState

 class HeapSnapshotProgress final : public v8::ActivityControl {
@@ -208,7 +209,16 @@ void V8HeapProfilerAgentImpl::restore() {
     double samplingInterval = m_state->doubleProperty(
         HeapProfilerAgentState::samplingHeapProfilerInterval, -1);
     DCHECK_GE(samplingInterval, 0);
-    startSampling(Maybe<double>(samplingInterval));
+    int flags = m_state->integerProperty(
+        HeapProfilerAgentState::samplingHeapProfilerFlags, 0);
+    startSampling(
+        Maybe<double>(samplingInterval),
+        Maybe<bool>(
+            flags &
+            v8::HeapProfiler::kSamplingIncludeObjectsCollectedByMajorGC),
+        Maybe<bool>(
+            flags &
+            v8::HeapProfiler::kSamplingIncludeObjectsCollectedByMinorGC));
   }
 }

@@ -387,7 +397,9 @@ void V8HeapProfilerAgentImpl::stopTrackingHeapObjectsInternal() {
 }

 Response V8HeapProfilerAgentImpl::startSampling(
-    Maybe<double> samplingInterval) {
+    Maybe<double> samplingInterval,
+    Maybe<bool> includeObjectsCollectedByMajorGC,
+    Maybe<bool> includeObjectsCollectedByMinorGC) {
   v8::HeapProfiler* profiler = m_isolate->GetHeapProfiler();
   if (!profiler) return Response::ServerError("Cannot access v8 heap profiler");
   const unsigned defaultSamplingInterval = 1 << 15;
@@ -400,9 +412,17 @@ Response V8HeapProfilerAgentImpl::startSampling(
                      samplingIntervalValue);
   m_state->setBoolean(HeapProfilerAgentState::samplingHeapProfilerEnabled,
                       true);
+  int flags = v8::HeapProfiler::kSamplingForceGC;
+  if (includeObjectsCollectedByMajorGC.fromMaybe(false)) {
+    flags |= v8::HeapProfiler::kSamplingIncludeObjectsCollectedByMajorGC;
+  }
+  if (includeObjectsCollectedByMinorGC.fromMaybe(false)) {
+    flags |= v8::HeapProfiler::kSamplingIncludeObjectsCollectedByMinorGC;
+  }
+  m_state->setInteger(HeapProfilerAgentState::samplingHeapProfilerFlags, flags);
   profiler->StartSamplingHeapProfiler(
       static_cast<uint64_t>(samplingIntervalValue), 128,
-      v8::HeapProfiler::kSamplingForceGC);
+      static_cast<v8::HeapProfiler::SamplingFlags>(flags));
   return Response::Success();
 }

@@ -56,7 +56,9 @@ class V8HeapProfilerAgentImpl : public protocol::HeapProfiler::Backend {
   Response getHeapObjectId(const String16& objectId,
                            String16* heapSnapshotObjectId) override;

-  Response startSampling(Maybe<double> samplingInterval) override;
+  Response startSampling(Maybe<double> samplingInterval,
+                         Maybe<bool> includeObjectsCollectedByMajorGC,
+                         Maybe<bool> includeObjectsCollectedByMinorGC) override;
   Response stopSampling(
       std::unique_ptr<protocol::HeapProfiler::SamplingHeapProfile>*) override;
   Response getSamplingProfile(
@@ -95,6 +95,19 @@ void SamplingHeapProfiler::SampleObject(Address soon_object, size_t size) {
 void SamplingHeapProfiler::OnWeakCallback(
     const WeakCallbackInfo<Sample>& data) {
   Sample* sample = data.GetParameter();
+  Heap* heap = reinterpret_cast<Isolate*>(data.GetIsolate())->heap();
+  bool is_minor_gc =
+      heap->current_or_last_garbage_collector() == GarbageCollector::SCAVENGER;
+  bool should_keep_sample =
+      is_minor_gc
+          ? (sample->profiler->flags_ &
+             v8::HeapProfiler::kSamplingIncludeObjectsCollectedByMinorGC)
+          : (sample->profiler->flags_ &
+             v8::HeapProfiler::kSamplingIncludeObjectsCollectedByMajorGC);
+  if (should_keep_sample) {
+    sample->global.Reset();
+    return;
+  }
   AllocationNode* node = sample->owner;
   DCHECK_GT(node->allocations_[sample->size], 0);
   node->allocations_[sample->size]--;
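The new OnWeakCallback logic above boils down to a small decision: a sample whose object has just been collected is kept only if the matching include flag was set when profiling started; otherwise the allocation recorded for its call site is decremented so the object disappears from the profile. Restated as a self-contained sketch (ShouldKeepCollectedSample and its parameters are illustrative, not V8 internals):

#include "v8-profiler.h"

// Sketch only: mirrors the decision made in OnWeakCallback above. |flags| is
// the SamplingFlags value passed to StartSamplingHeapProfiler, and
// |collected_by_minor_gc| reflects which collector reclaimed the object.
bool ShouldKeepCollectedSample(int flags, bool collected_by_minor_gc) {
  int required_flag =
      collected_by_minor_gc
          ? v8::HeapProfiler::kSamplingIncludeObjectsCollectedByMinorGC
          : v8::HeapProfiler::kSamplingIncludeObjectsCollectedByMajorGC;
  return (flags & required_flag) != 0;
}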
@@ -0,0 +1,6 @@
+Checks sampling heap profiler methods.
+Retained size is less than 10KB: true
+Including major GC increases size: true
+Minor GC collected more: true
+Total allocation is greater than 100KB: true
+Successfully finished
test/inspector/heap-profiler/sampling-heap-profiler-flags.js (new file, 61 lines)
@@ -0,0 +1,61 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --sampling-heap-profiler-suppress-randomness
+
+(async function() {
+  let {contextGroup, Protocol} = InspectorTest.start('Checks sampling heap profiler methods.');
+
+  contextGroup.addScript(`
+    function generateTrash() {
+      var arr = new Array(100);
+      for (var i = 0; i < 3000; ++i) {
+        var s = {a:i, b: new Array(100).fill(42)};
+        arr[i % 100] = s;
+      }
+      return arr[30];
+    }
+    //# sourceURL=test.js`);
+
+  Protocol.HeapProfiler.enable();
+
+  await Protocol.HeapProfiler.startSampling({
+    samplingInterval: 1e4,
+    includeObjectsCollectedByMajorGC: false,
+    includeObjectsCollectedByMinorGC: false,
+  });
+  await Protocol.Runtime.evaluate({ expression: 'generateTrash()' });
+  const profile1 = await Protocol.HeapProfiler.stopSampling();
+  const size1 = nodeSize(profile1.result.profile.head);
+  InspectorTest.log('Retained size is less than 10KB:', size1 < 10000);
+
+  await Protocol.HeapProfiler.startSampling({
+    samplingInterval: 100,
+    includeObjectsCollectedByMajorGC: true,
+    includeObjectsCollectedByMinorGC: false,
+  });
+  await Protocol.Runtime.evaluate({ expression: 'generateTrash()' });
+  const profile2 = await Protocol.HeapProfiler.stopSampling();
+  const size2 = nodeSize(profile2.result.profile.head);
+  InspectorTest.log('Including major GC increases size:', size1 < size2);
+
+  await Protocol.HeapProfiler.startSampling({
+    samplingInterval: 100,
+    includeObjectsCollectedByMajorGC: true,
+    includeObjectsCollectedByMinorGC: true,
+  });
+  await Protocol.Runtime.evaluate({ expression: 'generateTrash()' });
+  const profile3 = await Protocol.HeapProfiler.stopSampling();
+  const size3 = nodeSize(profile3.result.profile.head);
+  InspectorTest.log('Minor GC collected more:', size3 > size2);
+  InspectorTest.log('Total allocation is greater than 100KB:', size3 > 100000);
+
+  InspectorTest.log('Successfully finished');
+  InspectorTest.completeTest();
+
+  function nodeSize(node) {
+    return node.children.reduce((res, child) => res + nodeSize(child),
+        node.callFrame.functionName === 'generateTrash' ? node.selfSize : 0);
+  }
+})();