Revert "Reland "[heap] Add global memory controller""
This reverts commit dac86be251.

Reason for revert: Still failing MSan: https://ci.chromium.org/p/v8/builders/ci/V8%20Linux%20-%20arm64%20-%20sim%20-%20MSAN/26904

Original change's description:
> Reland "[heap] Add global memory controller"
>
> Provide a global memory controller used to compute limits for combined
> on-heap and embedder memory. The global controller uses the same
> mechanism (gc speed, mutator speed) and growing factors as the regular
> on-heap controller.
>
> Rely on V8's mechanisms for configured state that stops shrinking the
> limit.
>
> This reverts commit 5e043f2773.
>
> Tbr: ulan@chromium.org
> Bug: chromium:948807
> Change-Id: Id4f94e7dcb458d1d0d2f872194f8f3ea0959a73f
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1622968
> Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
> Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#61715}

TBR=ulan@chromium.org,mlippautz@chromium.org

Change-Id: If30649f158a08fd185f2771a13b8e09cf53fb667
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: chromium:948807
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1622849
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#61716}
This commit is contained in:
parent: dac86be251
commit: 7a1f7e8861
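For context, the reverted controller sizes the heap from the ratio R of GC speed to mutator speed and a target mutator utilization MU, per the derivation quoted in the heap-controller.cc hunks further below. A minimal standalone C++ sketch of that factor computation follows; the free-function form and the target_mu parameter are simplifications, not the V8 API:

// Growing factor F = R * (1 - MU) / (R * (1 - MU) - MU), capped at
// max_factor; MU defaults to 0.97 as in V8's HeapController.
double GrowingFactor(double gc_speed, double mutator_speed, double max_factor,
                     double target_mu = 0.97) {
  if (gc_speed == 0 || mutator_speed == 0) return max_factor;
  const double speed_ratio = gc_speed / mutator_speed;
  const double a = speed_ratio * (1 - target_mu);
  const double b = speed_ratio * (1 - target_mu) - target_mu;
  // The factor is a / b, but guard against small or negative b first.
  return (a < b * max_factor) ? a / b : max_factor;
}

When the collector is fast relative to the mutator (large R) the factor approaches 1 and the heap stays tight; when it is slow, b turns negative or small and the factor saturates at max_factor.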
include/v8.h (33 lines changed)
@@ -7138,24 +7138,6 @@ class V8_EXPORT EmbedderHeapTracer {
   virtual void VisitTracedGlobalHandle(const TracedGlobal<Value>& value) = 0;
 };

-  /**
-   * Summary of a garbage collection cycle. See |TraceEpilogue| on how the
-   * summary is reported.
-   */
-  struct TraceSummary {
-    /**
-     * Time spent managing the retained memory in milliseconds. This can e.g.
-     * include the time tracing through objects in the embedder.
-     */
-    double time;
-
-    /**
-     * Memory retained by the embedder through the |EmbedderHeapTracer|
-     * mechanism in bytes.
-     */
-    size_t allocated_size;
-  };
-
  virtual ~EmbedderHeapTracer() = default;

  /**
@@ -7202,12 +7184,9 @@ class V8_EXPORT EmbedderHeapTracer {
  /**
   * Called at the end of a GC cycle.
   *
-   * Note that allocation is *not* allowed within |TraceEpilogue|. Can be
-   * overridden to fill a |TraceSummary| that is used by V8 to schedule future
-   * garbage collections.
+   * Note that allocation is *not* allowed within |TraceEpilogue|.
   */
-  virtual void TraceEpilogue() {}
-  virtual void TraceEpilogue(TraceSummary* trace_summary) { TraceEpilogue(); }
+  virtual void TraceEpilogue() = 0;

  /**
   * Called upon entering the final marking pause. No more incremental marking
@@ -7244,14 +7223,6 @@ class V8_EXPORT EmbedderHeapTracer {
   */
  void GarbageCollectionForTesting(EmbedderStackState stack_state);

-  /*
-   * Called by the embedder to signal newly allocated memory. Not bound to
-   * tracing phases. Embedders should trade off when increments are reported as
-   * V8 may consult global heuristics on whether to trigger garbage collection
-   * on this change.
-   */
-  void IncreaseAllocatedSize(size_t bytes);
-
  /*
   * Returns the v8::Isolate this tracer is attached to and |nullptr| if it
   * is not attached to any v8::Isolate.
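To make the removed v8.h surface concrete, here is a hypothetical embedder tracer built on the pre-revert API. MyTracer, NotifyAllocation, trace_time_ms_ and live_bytes_ are illustrative names; the virtual signatures follow the API as it stood before this revert:

#include <v8.h>

class MyTracer final : public v8::EmbedderHeapTracer {
 public:
  // Minimal no-op implementations of the remaining required virtuals.
  void RegisterV8References(
      const std::vector<std::pair<void*, void*>>& refs) override {}
  void TracePrologue(TraceFlags) override {}
  bool AdvanceTracing(double deadline_in_ms) override { return true; }
  bool IsTracingDone() override { return true; }
  void EnterFinalPause(EmbedderStackState) override {}

  // The reverted API: fill the summary V8 used for global GC scheduling.
  void TraceEpilogue(TraceSummary* summary) override {
    summary->time = trace_time_ms_;         // ms spent managing memory
    summary->allocated_size = live_bytes_;  // embedder bytes still retained
  }

  // Hypothetical helper called from the embedder's allocator.
  void NotifyAllocation(size_t bytes) {
    live_bytes_ += bytes;
    IncreaseAllocatedSize(bytes);  // may trigger incremental marking in V8
  }

 private:
  double trace_time_ms_ = 0;
  size_t live_bytes_ = 0;
};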
@@ -41,7 +41,6 @@
 #include "src/frames-inl.h"
 #include "src/global-handles.h"
 #include "src/globals.h"
-#include "src/heap/embedder-tracing.h"
 #include "src/heap/heap-inl.h"
 #include "src/init/bootstrapper.h"
 #include "src/init/icu_util.h"
@@ -10147,17 +10146,6 @@ void EmbedderHeapTracer::GarbageCollectionForTesting(
       kGCCallbackFlagForced);
 }

-void EmbedderHeapTracer::IncreaseAllocatedSize(size_t bytes) {
-  if (isolate_) {
-    i::LocalEmbedderHeapTracer* const tracer =
-        reinterpret_cast<i::Isolate*>(isolate_)
-            ->heap()
-            ->local_embedder_heap_tracer();
-    DCHECK_NOT_NULL(tracer);
-    tracer->IncreaseAllocatedSize(bytes);
-  }
-}
-
 void EmbedderHeapTracer::RegisterEmbedderReference(
     const TracedGlobal<v8::Value>& ref) {
   if (ref.IsEmpty()) return;
@@ -747,8 +747,6 @@ DEFINE_BOOL(huge_max_old_generation_size, false,
             "Increase max size of the old space to 4 GB for x64 systems with"
             "the physical memory bigger than 16 GB")
 DEFINE_SIZE_T(initial_old_space_size, 0, "initial old space size (in Mbytes)")
-DEFINE_BOOL(global_gc_scheduling, false,
-            "enable GC scheduling based on global memory")
 DEFINE_BOOL(gc_global, false, "always perform global GCs")
 DEFINE_INT(random_gc_interval, 0,
            "Collect garbage after random(0, X) allocations. It overrides "
@ -5,7 +5,6 @@
|
||||
#include "src/heap/embedder-tracing.h"
|
||||
|
||||
#include "src/base/logging.h"
|
||||
#include "src/heap/gc-tracer.h"
|
||||
#include "src/objects/embedder-data-slot.h"
|
||||
#include "src/objects/js-objects-inl.h"
|
||||
|
||||
@ -32,17 +31,7 @@ void LocalEmbedderHeapTracer::TracePrologue(
|
||||
void LocalEmbedderHeapTracer::TraceEpilogue() {
|
||||
if (!InUse()) return;
|
||||
|
||||
EmbedderHeapTracer::TraceSummary summary;
|
||||
remote_tracer_->TraceEpilogue(&summary);
|
||||
remote_stats_.allocated_size = summary.allocated_size;
|
||||
// Force a check next time increased memory is reported. This allows for
|
||||
// setting limits close to actual heap sizes.
|
||||
remote_stats_.allocated_size_limit_for_check = 0;
|
||||
constexpr double kMinReportingTimeMs = 0.5;
|
||||
if (summary.time > kMinReportingTimeMs) {
|
||||
isolate_->heap()->tracer()->RecordEmbedderSpeed(summary.allocated_size,
|
||||
summary.time);
|
||||
}
|
||||
remote_tracer_->TraceEpilogue();
|
||||
}
|
||||
|
||||
void LocalEmbedderHeapTracer::EnterFinalPause() {
|
||||
@ -111,12 +100,5 @@ void LocalEmbedderHeapTracer::ProcessingScope::AddWrapperInfoForTesting(
|
||||
FlushWrapperCacheIfFull();
|
||||
}
|
||||
|
||||
void LocalEmbedderHeapTracer::StartIncrementalMarkingIfNeeded() {
|
||||
Heap* heap = isolate_->heap();
|
||||
heap->StartIncrementalMarkingIfAllocationLimitIsReached(
|
||||
heap->GCFlagsForIncrementalMarking(),
|
||||
kGCCallbackScheduleIdleGarbageCollection);
|
||||
}
|
||||
|
||||
} // namespace internal
|
||||
} // namespace v8
|
||||
|
@@ -76,27 +76,7 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
     embedder_worklist_empty_ = is_empty;
   }

-  void IncreaseAllocatedSize(size_t bytes) {
-    remote_stats_.allocated_size += bytes;
-    remote_stats_.accumulated_allocated_size += bytes;
-    if (remote_stats_.allocated_size >
-        remote_stats_.allocated_size_limit_for_check) {
-      StartIncrementalMarkingIfNeeded();
-      remote_stats_.allocated_size_limit_for_check =
-          remote_stats_.allocated_size + kEmbedderAllocatedThreshold;
-    }
-  }
-
-  void StartIncrementalMarkingIfNeeded();
-
-  size_t allocated_size() const { return remote_stats_.allocated_size; }
-  size_t accumulated_allocated_size() const {
-    return remote_stats_.accumulated_allocated_size;
-  }
-
  private:
-  static constexpr size_t kEmbedderAllocatedThreshold = 128 * KB;
-
   Isolate* const isolate_;
   EmbedderHeapTracer* remote_tracer_ = nullptr;

@@ -108,19 +88,6 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
   // segments of potential embedder fields to move to the main thread.
   bool embedder_worklist_empty_ = false;

-  struct RemoteStatistics {
-    // Allocated size of objects in bytes reported by the embedder. Updated via
-    // TraceSummary at the end of tracing and incrementally when the GC is not
-    // in progress.
-    size_t allocated_size = 0;
-    // Limit for |allocated_size_| in bytes to avoid checking for starting a GC
-    // on each increment.
-    size_t allocated_size_limit_for_check = 0;
-    // Totally accumulated bytes allocated by the embedder. Monotonically
-    // increasing value. Used to approximate allocation rate.
-    size_t accumulated_allocated_size = 0;
-  } remote_stats_;
-
   friend class EmbedderStackStateScope;
 };
@@ -191,7 +191,6 @@ void GCTracer::ResetForTesting() {
   recorded_incremental_mark_compacts_.Reset();
   recorded_new_generation_allocations_.Reset();
   recorded_old_generation_allocations_.Reset();
-  recorded_embedder_generation_allocations_.Reset();
   recorded_context_disposal_times_.Reset();
   recorded_survival_ratios_.Reset();
   start_counter_ = 0;
@@ -222,8 +221,7 @@ void GCTracer::Start(GarbageCollector collector,
   previous_ = current_;
   double start_time = heap_->MonotonicallyIncreasingTimeInMs();
   SampleAllocation(start_time, heap_->NewSpaceAllocationCounter(),
-                   heap_->OldGenerationAllocationCounter(),
-                   heap_->EmbedderAllocationCounter());
+                   heap_->OldGenerationAllocationCounter());

   switch (collector) {
     case SCAVENGER:
@@ -377,16 +375,15 @@ void GCTracer::Stop(GarbageCollector collector) {
   }
 }


 void GCTracer::SampleAllocation(double current_ms,
                                 size_t new_space_counter_bytes,
-                                size_t old_generation_counter_bytes,
-                                size_t embedder_allocation_bytes) {
+                                size_t old_generation_counter_bytes) {
   if (allocation_time_ms_ == 0) {
     // It is the first sample.
     allocation_time_ms_ = current_ms;
     new_space_allocation_counter_bytes_ = new_space_counter_bytes;
     old_generation_allocation_counter_bytes_ = old_generation_counter_bytes;
-    embedder_allocation_counter_bytes_ = embedder_allocation_bytes;
     return;
   }
   // This assumes that counters are unsigned integers so that the subtraction
@@ -395,8 +392,6 @@ void GCTracer::SampleAllocation(double current_ms,
       new_space_counter_bytes - new_space_allocation_counter_bytes_;
   size_t old_generation_allocated_bytes =
       old_generation_counter_bytes - old_generation_allocation_counter_bytes_;
-  size_t embedder_allocated_bytes =
-      embedder_allocation_bytes - embedder_allocation_counter_bytes_;
   double duration = current_ms - allocation_time_ms_;
   allocation_time_ms_ = current_ms;
   new_space_allocation_counter_bytes_ = new_space_counter_bytes;
@@ -405,9 +400,9 @@ void GCTracer::SampleAllocation(double current_ms,
   new_space_allocation_in_bytes_since_gc_ += new_space_allocated_bytes;
   old_generation_allocation_in_bytes_since_gc_ +=
       old_generation_allocated_bytes;
-  embedder_allocation_in_bytes_since_gc_ += embedder_allocated_bytes;
 }


 void GCTracer::AddAllocation(double current_ms) {
   allocation_time_ms_ = current_ms;
   if (allocation_duration_since_gc_ > 0) {
@@ -417,13 +412,10 @@ void GCTracer::AddAllocation(double current_ms) {
     recorded_old_generation_allocations_.Push(
         MakeBytesAndDuration(old_generation_allocation_in_bytes_since_gc_,
                              allocation_duration_since_gc_));
-    recorded_embedder_generation_allocations_.Push(MakeBytesAndDuration(
-        embedder_allocation_in_bytes_since_gc_, allocation_duration_since_gc_));
   }
   allocation_duration_since_gc_ = 0;
   new_space_allocation_in_bytes_since_gc_ = 0;
   old_generation_allocation_in_bytes_since_gc_ = 0;
-  embedder_allocation_in_bytes_since_gc_ = 0;
 }


@@ -889,16 +881,6 @@ void GCTracer::RecordIncrementalMarkingSpeed(size_t bytes, double duration) {
   }
 }

-void GCTracer::RecordEmbedderSpeed(size_t bytes, double duration) {
-  if (duration == 0 || bytes == 0) return;
-  double current_speed = bytes / duration;
-  if (recorded_embedder_speed_ == 0.0) {
-    recorded_embedder_speed_ = current_speed;
-  } else {
-    recorded_embedder_speed_ = (recorded_embedder_speed_ + current_speed) / 2;
-  }
-}
-
 void GCTracer::RecordMutatorUtilization(double mark_compact_end_time,
                                         double mark_compact_duration) {
   if (previous_mark_compact_end_time_ == 0) {
@@ -937,6 +919,7 @@ double GCTracer::CurrentMarkCompactMutatorUtilization() const {
 }

 double GCTracer::IncrementalMarkingSpeedInBytesPerMillisecond() const {
+  const int kConservativeSpeedInBytesPerMillisecond = 128 * KB;
   if (recorded_incremental_marking_speed_ != 0) {
     return recorded_incremental_marking_speed_;
   }
@@ -946,13 +929,6 @@ double GCTracer::IncrementalMarkingSpeedInBytesPerMillisecond() const {
   return kConservativeSpeedInBytesPerMillisecond;
 }

-double GCTracer::EmbedderSpeedInBytesPerMillisecond() const {
-  if (recorded_embedder_speed_ != 0.0) {
-    return recorded_embedder_speed_;
-  }
-  return kConservativeSpeedInBytesPerMillisecond;
-}
-
 double GCTracer::ScavengeSpeedInBytesPerMillisecond(
     ScavengeSpeedMode mode) const {
   if (mode == kForAllObjects) {
@@ -999,15 +975,6 @@ double GCTracer::CombinedMarkCompactSpeedInBytesPerMillisecond() {
   return combined_mark_compact_speed_cache_;
 }

-double GCTracer::CombineSpeedsInBytesPerMillisecond(double default_speed,
-                                                    double optional_speed) {
-  constexpr double kMinimumSpeed = 0.5;
-  if (optional_speed < kMinimumSpeed) {
-    return default_speed;
-  }
-  return default_speed * optional_speed / (default_speed + optional_speed);
-}
-
 double GCTracer::NewSpaceAllocationThroughputInBytesPerMillisecond(
     double time_ms) const {
   size_t bytes = new_space_allocation_in_bytes_since_gc_;
@@ -1024,14 +991,6 @@ double GCTracer::OldGenerationAllocationThroughputInBytesPerMillisecond(
       MakeBytesAndDuration(bytes, durations), time_ms);
 }

-double GCTracer::EmbedderAllocationThroughputInBytesPerMillisecond(
-    double time_ms) const {
-  size_t bytes = embedder_allocation_in_bytes_since_gc_;
-  double durations = allocation_duration_since_gc_;
-  return AverageSpeed(recorded_embedder_generation_allocations_,
-                      MakeBytesAndDuration(bytes, durations), time_ms);
-}
-
 double GCTracer::AllocationThroughputInBytesPerMillisecond(
     double time_ms) const {
   return NewSpaceAllocationThroughputInBytesPerMillisecond(time_ms) +
@@ -1048,12 +1007,6 @@ double GCTracer::CurrentOldGenerationAllocationThroughputInBytesPerMillisecond()
       kThroughputTimeFrameMs);
 }

-double GCTracer::CurrentEmbedderAllocationThroughputInBytesPerMillisecond()
-    const {
-  return EmbedderAllocationThroughputInBytesPerMillisecond(
-      kThroughputTimeFrameMs);
-}
-
 double GCTracer::ContextDisposalRateInMilliseconds() const {
   if (recorded_context_disposal_times_.Count() <
       recorded_context_disposal_times_.kSize)
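One removed helper above deserves a note: CombineSpeedsInBytesPerMillisecond merged V8's throughput with the embedder's. If each component must process the same payload back to back, B bytes take B/d + B/o milliseconds, so the combined throughput is d * o / (d + o). A sketch under that sequential-work assumption, mirroring the deleted code:

// Combined throughput of two sequential phases over the same payload:
// B / (B/d + B/o) = d * o / (d + o). Below 0.5 bytes/ms the optional
// (embedder) speed is treated as absent, as in the deleted helper.
double CombineSpeeds(double default_speed, double optional_speed) {
  constexpr double kMinimumSpeed = 0.5;
  if (optional_speed < kMinimumSpeed) return default_speed;
  return default_speed * optional_speed / (default_speed + optional_speed);
}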
@@ -200,10 +200,6 @@ class V8_EXPORT_PRIVATE GCTracer {
   };

   static const int kThroughputTimeFrameMs = 5000;
-  static constexpr double kConservativeSpeedInBytesPerMillisecond = 128 * KB;
-
-  static double CombineSpeedsInBytesPerMillisecond(double default_speed,
-                                                   double optional_speed);

   static RuntimeCallCounterId RCSCounterFromScope(Scope::ScopeId id);

@@ -221,8 +217,7 @@ class V8_EXPORT_PRIVATE GCTracer {

   // Sample and accumulate bytes allocated since the last GC.
   void SampleAllocation(double current_ms, size_t new_space_counter_bytes,
-                        size_t old_generation_counter_bytes,
-                        size_t embedder_allocation_bytes);
+                        size_t old_generation_counter_bytes);

   // Log the accumulated new space allocation bytes.
   void AddAllocation(double current_ms);
@@ -237,13 +232,9 @@ class V8_EXPORT_PRIVATE GCTracer {
   void AddIncrementalMarkingStep(double duration, size_t bytes);

   // Compute the average incremental marking speed in bytes/millisecond.
-  // Returns a conservative value if no events have been recorded.
+  // Returns 0 if no events have been recorded.
   double IncrementalMarkingSpeedInBytesPerMillisecond() const;

-  // Compute the average embedder speed in bytes/millisecond.
-  // Returns a conservative value if no events have been recorded.
-  double EmbedderSpeedInBytesPerMillisecond() const;
-
   // Compute the average scavenge speed in bytes/millisecond.
   // Returns 0 if no events have been recorded.
   double ScavengeSpeedInBytesPerMillisecond(
@@ -277,12 +268,6 @@ class V8_EXPORT_PRIVATE GCTracer {
   double OldGenerationAllocationThroughputInBytesPerMillisecond(
       double time_ms = 0) const;

-  // Allocation throughput in the embedder in bytes/millisecond in the
-  // last time_ms milliseconds. Reported through v8::EmbedderHeapTracer.
-  // Returns 0 if no allocation events have been recorded.
-  double EmbedderAllocationThroughputInBytesPerMillisecond(
-      double time_ms = 0) const;
-
   // Allocation throughput in heap in bytes/millisecond in the last time_ms
   // milliseconds.
   // Returns 0 if no allocation events have been recorded.
@@ -298,11 +283,6 @@ class V8_EXPORT_PRIVATE GCTracer {
   // Returns 0 if no allocation events have been recorded.
   double CurrentOldGenerationAllocationThroughputInBytesPerMillisecond() const;

-  // Allocation throughput in the embedder in bytes/milliseconds in the last
-  // kThroughputTimeFrameMs seconds. Reported through v8::EmbedderHeapTracer.
-  // Returns 0 if no allocation events have been recorded.
-  double CurrentEmbedderAllocationThroughputInBytesPerMillisecond() const;
-
   // Computes the context disposal rate in milliseconds. It takes the time
   // frame of the first recorded context disposal to the current time and
   // divides it by the number of recorded events.
@@ -343,8 +323,6 @@ class V8_EXPORT_PRIVATE GCTracer {

   void RecordGCPhasesHistograms(TimedHistogram* gc_timer);

-  void RecordEmbedderSpeed(size_t bytes, double duration);
-
  private:
   FRIEND_TEST(GCTracer, AverageSpeed);
   FRIEND_TEST(GCTracerTest, AllocationThroughput);
@@ -436,8 +414,6 @@ class V8_EXPORT_PRIVATE GCTracer {

   double recorded_incremental_marking_speed_;

-  double recorded_embedder_speed_ = 0.0;
-
   // Incremental scopes carry more information than just the duration. The infos
   // here are merged back upon starting/stopping the GC tracer.
   IncrementalMarkingInfos
@@ -448,13 +424,11 @@ class V8_EXPORT_PRIVATE GCTracer {
   double allocation_time_ms_;
   size_t new_space_allocation_counter_bytes_;
   size_t old_generation_allocation_counter_bytes_;
-  size_t embedder_allocation_counter_bytes_;

   // Accumulated duration and allocated bytes since the last GC.
   double allocation_duration_since_gc_;
   size_t new_space_allocation_in_bytes_since_gc_;
   size_t old_generation_allocation_in_bytes_since_gc_;
-  size_t embedder_allocation_in_bytes_since_gc_;

   double combined_mark_compact_speed_cache_;

@@ -474,7 +448,6 @@ class V8_EXPORT_PRIVATE GCTracer {
   base::RingBuffer<BytesAndDuration> recorded_mark_compacts_;
   base::RingBuffer<BytesAndDuration> recorded_new_generation_allocations_;
   base::RingBuffer<BytesAndDuration> recorded_old_generation_allocations_;
-  base::RingBuffer<BytesAndDuration> recorded_embedder_generation_allocations_;
   base::RingBuffer<double> recorded_context_disposal_times_;
   base::RingBuffer<double> recorded_survival_ratios_;
@@ -49,17 +49,17 @@ namespace internal {
 // F * (1 - MU / (R * (1 - MU))) = 1
 // F * (R * (1 - MU) - MU) / (R * (1 - MU)) = 1
 // F = R * (1 - MU) / (R * (1 - MU) - MU)
-double MemoryController::GrowingFactor(double gc_speed, double mutator_speed,
-                                       double max_factor) {
-  DCHECK_LE(min_growing_factor_, max_factor);
-  DCHECK_GE(max_growing_factor_, max_factor);
+double HeapController::GrowingFactor(double gc_speed, double mutator_speed,
+                                     double max_factor) {
   if (gc_speed == 0 || mutator_speed == 0) return max_factor;

   const double speed_ratio = gc_speed / mutator_speed;

-  const double a = speed_ratio * (1 - target_mutator_utlization_);
-  const double b = speed_ratio * (1 - target_mutator_utlization_) -
-                   target_mutator_utlization_;
+  const double a = speed_ratio * (1 - kTargetMutatorUtilization);
+  const double b =
+      speed_ratio * (1 - kTargetMutatorUtilization) - kTargetMutatorUtilization;

   // The factor is a / b, but we need to check for small b first.
   double factor = (a < b * max_factor) ? a / b : max_factor;
@@ -140,31 +140,6 @@ double HeapController::MaxGrowingFactor(size_t curr_max_size) {
   return factor;
 }

-double GlobalMemoryController::MaxGrowingFactor(size_t curr_max_size) {
-  constexpr double kMinSmallFactor = 1.3;
-  constexpr double kMaxSmallFactor = 2.0;
-  constexpr double kHighFactor = 4.0;
-
-  size_t max_size_in_mb = curr_max_size / MB;
-  max_size_in_mb = Max(max_size_in_mb, kMinSize);
-
-  // If we are on a device with lots of memory, we allow a high heap
-  // growing factor.
-  if (max_size_in_mb >= kMaxSize) {
-    return kHighFactor;
-  }
-
-  DCHECK_GE(max_size_in_mb, kMinSize);
-  DCHECK_LT(max_size_in_mb, kMaxSize);
-
-  // On smaller devices we linearly scale the factor: (X-A)/(B-A)*(D-C)+C
-  double factor = (max_size_in_mb - kMinSize) *
-                      (kMaxSmallFactor - kMinSmallFactor) /
-                      (kMaxSize - kMinSize) +
-                  kMinSmallFactor;
-  return factor;
-}
-
 size_t HeapController::CalculateAllocationLimit(
     size_t curr_size, size_t max_size, double gc_speed, double mutator_speed,
     size_t new_space_capacity, Heap::HeapGrowingMode growing_mode) {
@@ -175,25 +150,7 @@ size_t HeapController::CalculateAllocationLimit(
     Isolate::FromHeap(heap_)->PrintWithTimestamp(
         "[%s] factor %.1f based on mu=%.3f, speed_ratio=%.f "
         "(gc=%.f, mutator=%.f)\n",
-        ControllerName(), factor, target_mutator_utlization_,
+        ControllerName(), factor, kTargetMutatorUtilization,
         gc_speed / mutator_speed, gc_speed, mutator_speed);
   }

   return CalculateAllocationLimitBase(curr_size, max_size, factor,
                                       new_space_capacity, growing_mode);
 }
-
-size_t GlobalMemoryController::CalculateAllocationLimit(
-    size_t curr_size, size_t max_size, double gc_speed, double mutator_speed,
-    size_t new_space_capacity, Heap::HeapGrowingMode growing_mode) {
-  const double max_factor = MaxGrowingFactor(max_size);
-  const double factor = GrowingFactor(gc_speed, mutator_speed, max_factor);
-
-  if (FLAG_trace_gc_verbose) {
-    Isolate::FromHeap(heap_)->PrintWithTimestamp(
-        "[%s] factor %.1f based on mu=%.3f, speed_ratio=%.f "
-        "(gc=%.f, mutator=%.f)\n",
-        ControllerName(), factor, target_mutator_utlization_,
-        gc_speed / mutator_speed, gc_speed, mutator_speed);
-  }
@@ -15,38 +15,31 @@ namespace internal {

 class V8_EXPORT_PRIVATE MemoryController {
  public:
-  // Computes the growing step when the limit increases.
-  static size_t MinimumAllocationLimitGrowingStep(
-      Heap::HeapGrowingMode growing_mode);
-
   virtual ~MemoryController() = default;

+  // Computes the growing step when the limit increases.
+  size_t MinimumAllocationLimitGrowingStep(Heap::HeapGrowingMode growing_mode);
+
  protected:
   MemoryController(Heap* heap, double min_growing_factor,
                    double max_growing_factor,
-                   double conservative_growing_factor,
-                   double target_mutator_utlization)
+                   double conservative_growing_factor)
       : heap_(heap),
         min_growing_factor_(min_growing_factor),
         max_growing_factor_(max_growing_factor),
-        conservative_growing_factor_(conservative_growing_factor),
-        target_mutator_utlization_(target_mutator_utlization) {}
+        conservative_growing_factor_(conservative_growing_factor) {}

   // Computes the allocation limit to trigger the next garbage collection.
   size_t CalculateAllocationLimitBase(size_t curr_size, size_t max_size,
                                       double factor, size_t additional_bytes,
                                       Heap::HeapGrowingMode growing_mode);

-  double GrowingFactor(double gc_speed, double mutator_speed,
-                       double max_factor);
-
   virtual const char* ControllerName() = 0;

   Heap* const heap_;
   const double min_growing_factor_;
   const double max_growing_factor_;
   const double conservative_growing_factor_;
-  const double target_mutator_utlization_;
 };

 class V8_EXPORT_PRIVATE HeapController : public MemoryController {
@@ -54,9 +47,9 @@ class V8_EXPORT_PRIVATE HeapController : public MemoryController {
   // Sizes are in MB.
   static constexpr size_t kMinSize = 128 * Heap::kPointerMultiplier;
   static constexpr size_t kMaxSize = 1024 * Heap::kPointerMultiplier;
+  static constexpr double kTargetMutatorUtilization = 0.97;

-  explicit HeapController(Heap* heap)
-      : MemoryController(heap, 1.1, 4.0, 1.3, 0.97) {}
+  explicit HeapController(Heap* heap) : MemoryController(heap, 1.1, 4.0, 1.3) {}

   size_t CalculateAllocationLimit(size_t curr_size, size_t max_size,
                                   double gc_speed, double mutator_speed,
@@ -64,6 +57,9 @@ class V8_EXPORT_PRIVATE HeapController : public MemoryController {
                                   Heap::HeapGrowingMode growing_mode);

  protected:
+  double GrowingFactor(double gc_speed, double mutator_speed,
+                       double max_factor);
+
   double MaxGrowingFactor(size_t curr_max_size);

   const char* ControllerName() override { return "HeapController"; }
@@ -74,26 +70,6 @@ class V8_EXPORT_PRIVATE HeapController : public MemoryController {
   FRIEND_TEST(HeapControllerTest, OldGenerationAllocationLimit);
 };

-class V8_EXPORT_PRIVATE GlobalMemoryController : public MemoryController {
- public:
-  // Sizes are in MB.
-  static constexpr size_t kMinSize = 128 * Heap::kPointerMultiplier;
-  static constexpr size_t kMaxSize = 1024 * Heap::kPointerMultiplier;
-
-  explicit GlobalMemoryController(Heap* heap)
-      : MemoryController(heap, 1.1, 4.0, 1.3, 0.97) {}
-
-  size_t CalculateAllocationLimit(size_t curr_size, size_t max_size,
-                                  double gc_speed, double mutator_speed,
-                                  size_t new_space_capacity,
-                                  Heap::HeapGrowingMode growing_mode);
-
- protected:
-  double MaxGrowingFactor(size_t curr_max_size);
-
-  const char* ControllerName() override { return "GlobalMemoryController"; }
-};
-
 }  // namespace internal
 }  // namespace v8
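The heap.cc hunks that follow remove the scheduling glue. The global size was simply the on-heap old generation plus whatever the embedder last reported, and incremental marking was forced once no headroom remained against the global limit. A simplified sketch of that bookkeeping, using free functions instead of the Heap methods:

// Simplified version of the reverted logic: global size is on-heap
// old-generation bytes plus embedder-reported bytes; the remaining
// headroom against the global limit gates incremental marking.
size_t GlobalSizeOfObjects(size_t old_gen_bytes, size_t embedder_bytes) {
  return old_gen_bytes + embedder_bytes;
}

size_t GlobalMemoryAvailable(size_t global_size, size_t global_limit) {
  return global_size < global_limit ? global_limit - global_size : 0;
}
// A result of 0 corresponded to the kHardLimit path in
// Heap::IncrementalMarkingLimitReached.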
src/heap/heap.cc (106 lines changed)
@@ -183,7 +183,6 @@ Heap::Heap()
           Min(max_old_generation_size_, kMaxInitialOldGenerationSize)),
       memory_pressure_level_(MemoryPressureLevel::kNone),
       old_generation_allocation_limit_(initial_old_generation_size_),
-      global_allocation_limit_(initial_old_generation_size_),
       global_pretenuring_feedback_(kInitialFeedbackCapacity),
       current_gc_callback_flags_(GCCallbackFlags::kNoGCCallbackFlags),
       is_current_gc_forced_(false),
@@ -1527,12 +1526,9 @@ void Heap::StartIncrementalMarkingIfAllocationLimitIsReached(
     if (reached_limit == IncrementalMarkingLimit::kSoftLimit) {
       incremental_marking()->incremental_marking_job()->ScheduleTask(this);
     } else if (reached_limit == IncrementalMarkingLimit::kHardLimit) {
-      StartIncrementalMarking(
-          gc_flags,
-          OldGenerationSpaceAvailable() <= new_space_->Capacity()
-              ? GarbageCollectionReason::kAllocationLimit
-              : GarbageCollectionReason::kGlobalAllocationLimit,
-          gc_callback_flags);
+      StartIncrementalMarking(gc_flags,
+                              GarbageCollectionReason::kAllocationLimit,
+                              gc_callback_flags);
     }
   }
 }
@@ -1933,24 +1929,6 @@ bool Heap::PerformGarbageCollection(
   double mutator_speed =
       tracer()->CurrentOldGenerationAllocationThroughputInBytesPerMillisecond();
   size_t old_gen_size = OldGenerationSizeOfObjects();
-
-  double global_mutator_speed;
-  double global_gc_speed;
-  size_t global_memory_size;
-  if (UseGlobalMemoryScheduling()) {
-    global_mutator_speed = GCTracer::CombineSpeedsInBytesPerMillisecond(
-        mutator_speed,
-        local_embedder_heap_tracer()
-            ? tracer()
-                  ->CurrentEmbedderAllocationThroughputInBytesPerMillisecond()
-            : 0.0);
-    global_gc_speed = GCTracer::CombineSpeedsInBytesPerMillisecond(
-        gc_speed, local_embedder_heap_tracer()
-                      ? tracer()->EmbedderSpeedInBytesPerMillisecond()
-                      : 0.0);
-    global_memory_size = GlobalSizeOfObjects();
-  }

   if (collector == MARK_COMPACTOR) {
     // Register the amount of external allocated memory.
     isolate()->isolate_data()->external_memory_at_last_mark_compact_ =
@@ -1963,13 +1941,7 @@ bool Heap::PerformGarbageCollection(
         heap_controller()->CalculateAllocationLimit(
             old_gen_size, max_old_generation_size_, gc_speed, mutator_speed,
             new_space()->Capacity(), CurrentHeapGrowingMode());
-    if (UseGlobalMemoryScheduling()) {
-      global_allocation_limit_ =
-          global_memory_controller()->CalculateAllocationLimit(
-              global_memory_size, max_global_memory_size_, global_gc_speed,
-              global_mutator_speed, new_space()->Capacity(),
-              CurrentHeapGrowingMode());
-    }

     CheckIneffectiveMarkCompact(
         old_gen_size, tracer()->AverageMarkCompactMutatorUtilization());
   } else if (HasLowYoungGenerationAllocationRate() &&
@@ -1980,16 +1952,6 @@ bool Heap::PerformGarbageCollection(
     if (new_limit < old_generation_allocation_limit_) {
       old_generation_allocation_limit_ = new_limit;
     }
-    if (UseGlobalMemoryScheduling()) {
-      const size_t new_global_limit =
-          global_memory_controller()->CalculateAllocationLimit(
-              global_memory_size, max_global_memory_size_, global_gc_speed,
-              global_mutator_speed, new_space()->Capacity(),
-              CurrentHeapGrowingMode());
-      if (new_global_limit < global_allocation_limit_) {
-        global_allocation_limit_ = new_global_limit;
-      }
-    }
   }

   {
@@ -2646,29 +2608,18 @@ void Heap::UnregisterArrayBuffer(JSArrayBuffer buffer) {

 void Heap::ConfigureInitialOldGenerationSize() {
   if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) {
-    const size_t minimum_growing_step =
-        MemoryController::MinimumAllocationLimitGrowingStep(
-            CurrentHeapGrowingMode());
-    const size_t new_old_generation_allocation_limit =
-        Max(OldGenerationSizeOfObjects() + minimum_growing_step,
+    const size_t new_limit =
+        Max(OldGenerationSizeOfObjects() +
+                heap_controller()->MinimumAllocationLimitGrowingStep(
+                    CurrentHeapGrowingMode()),
             static_cast<size_t>(
                 static_cast<double>(old_generation_allocation_limit_) *
                 (tracer()->AverageSurvivalRatio() / 100)));
-    if (new_old_generation_allocation_limit <
-        old_generation_allocation_limit_) {
-      old_generation_allocation_limit_ = new_old_generation_allocation_limit;
+    if (new_limit < old_generation_allocation_limit_) {
+      old_generation_allocation_limit_ = new_limit;
     } else {
       old_generation_size_configured_ = true;
     }
-    if (UseGlobalMemoryScheduling()) {
-      const size_t new_global_memory_limit = Max(
-          GlobalSizeOfObjects() + minimum_growing_step,
-          static_cast<size_t>(static_cast<double>(global_allocation_limit_) *
-                              (tracer()->AverageSurvivalRatio() / 100)));
-      if (new_global_memory_limit < global_allocation_limit_) {
-        global_allocation_limit_ = new_global_memory_limit;
-      }
-    }
   }
 }
@@ -3430,8 +3381,7 @@ bool Heap::IdleNotification(double deadline_in_seconds) {
   double idle_time_in_ms = deadline_in_ms - start_ms;

   tracer()->SampleAllocation(start_ms, NewSpaceAllocationCounter(),
-                             OldGenerationAllocationCounter(),
-                             EmbedderAllocationCounter());
+                             OldGenerationAllocationCounter());

   GCIdleTimeHeapState heap_state = ComputeHeapState();

@@ -3684,8 +3634,6 @@ const char* Heap::GarbageCollectionReasonToString(
       return "testing";
     case GarbageCollectionReason::kExternalFinalize:
       return "external finalize";
-    case GarbageCollectionReason::kGlobalAllocationLimit:
-      return "global allocation limit";
     case GarbageCollectionReason::kUnknown:
       return "unknown";
   }
@@ -4424,15 +4372,6 @@ size_t Heap::OldGenerationSizeOfObjects() {
   return total + lo_space_->SizeOfObjects();
 }

-size_t Heap::GlobalSizeOfObjects() {
-  const size_t on_heap_size = OldGenerationSizeOfObjects();
-  const size_t embedder_size =
-      local_embedder_heap_tracer()
-          ? local_embedder_heap_tracer()->allocated_size()
-          : 0;
-  return on_heap_size + embedder_size;
-}
-
 uint64_t Heap::PromotedExternalMemorySize() {
   IsolateData* isolate_data = isolate()->isolate_data();
   if (isolate_data->external_memory_ <=
@@ -4492,14 +4431,6 @@ Heap::HeapGrowingMode Heap::CurrentHeapGrowingMode() {
   return Heap::HeapGrowingMode::kDefault;
 }

-size_t Heap::GlobalMemoryAvailable() {
-  return UseGlobalMemoryScheduling()
-             ? GlobalSizeOfObjects() < global_allocation_limit_
-                   ? global_allocation_limit_ - GlobalSizeOfObjects()
-                   : 0
-             : 1;
-}
-
 // This function returns either kNoLimit, kSoftLimit, or kHardLimit.
 // The kNoLimit means that either incremental marking is disabled or it is too
 // early to start incremental marking.
@@ -4560,10 +4491,8 @@ Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
   }

   size_t old_generation_space_available = OldGenerationSpaceAvailable();
-  const size_t global_memory_available = GlobalMemoryAvailable();

-  if (old_generation_space_available > new_space_->Capacity() &&
-      (global_memory_available > 0)) {
+  if (old_generation_space_available > new_space_->Capacity()) {
     return IncrementalMarkingLimit::kNoLimit;
   }
   if (ShouldOptimizeForMemoryUsage()) {
@@ -4575,9 +4504,6 @@ Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
   if (old_generation_space_available == 0) {
     return IncrementalMarkingLimit::kHardLimit;
   }
-  if (global_memory_available == 0) {
-    return IncrementalMarkingLimit::kHardLimit;
-  }
   return IncrementalMarkingLimit::kSoftLimit;
 }

@@ -4731,7 +4657,6 @@ void Heap::SetUp() {
   store_buffer_.reset(new StoreBuffer(this));

   heap_controller_.reset(new HeapController(this));
-  global_memory_controller_.reset(new GlobalMemoryController(this));

   mark_compact_collector_.reset(new MarkCompactCollector(this));

@@ -5009,7 +4934,6 @@ void Heap::TearDown() {
   }

   heap_controller_.reset();
-  global_memory_controller_.reset();

   if (mark_compact_collector_) {
     mark_compact_collector_->TearDown();
@@ -5857,12 +5781,6 @@ bool Heap::AllowedToBeMigrated(Map map, HeapObject obj, AllocationSpace dst) {
   UNREACHABLE();
 }

-size_t Heap::EmbedderAllocationCounter() const {
-  return local_embedder_heap_tracer()
-             ? local_embedder_heap_tracer()->accumulated_allocated_size()
-             : 0;
-}
-
 void Heap::CreateObjectStats() {
   if (V8_LIKELY(!TracingFlags::is_gc_stats_enabled())) return;
   if (!live_object_stats_) {
@@ -62,7 +62,6 @@ class ConcurrentMarking;
 class GCIdleTimeHandler;
 class GCIdleTimeHeapState;
 class GCTracer;
-class GlobalMemoryController;
 class HeapController;
 class HeapObjectAllocationTracker;
 class HeapObjectsFilter;
@@ -130,8 +129,7 @@ enum class GarbageCollectionReason {
   kSamplingProfiler = 19,
   kSnapshotCreator = 20,
   kTesting = 21,
-  kExternalFinalize = 22,
-  kGlobalAllocationLimit = 23,
+  kExternalFinalize = 22
   // If you add new items here, then update the incremental_marking_reason,
   // mark_compact_reason, and scavenge_reason counters in counters.h.
   // Also update src/tools/metrics/histograms/histograms.xml in chromium.
@@ -1149,8 +1147,6 @@ class Heap {
     PromotedSinceLastGC();
   }

-  size_t EmbedderAllocationCounter() const;
-
   // This should be used only for testing.
   void set_old_generation_allocation_counter_at_last_gc(size_t new_value) {
     old_generation_allocation_counter_at_last_gc_ = new_value;
@@ -1182,8 +1178,6 @@ class Heap {
   // Excludes external memory held by those objects.
   V8_EXPORT_PRIVATE size_t OldGenerationSizeOfObjects();

-  V8_EXPORT_PRIVATE size_t GlobalSizeOfObjects();
-
   // ===========================================================================
   // Prologue/epilogue callback methods.========================================
   // ===========================================================================
@@ -1693,9 +1687,6 @@ class Heap {
   // ===========================================================================

   HeapController* heap_controller() { return heap_controller_.get(); }
-  GlobalMemoryController* global_memory_controller() const {
-    return global_memory_controller_.get();
-  }
   MemoryReducer* memory_reducer() { return memory_reducer_.get(); }

   // For some webpages RAIL mode does not switch from PERFORMANCE_LOAD.
@@ -1723,12 +1714,6 @@ class Heap {
   enum class IncrementalMarkingLimit { kNoLimit, kSoftLimit, kHardLimit };
   IncrementalMarkingLimit IncrementalMarkingLimitReached();

-  bool UseGlobalMemoryScheduling() const {
-    return FLAG_global_gc_scheduling && local_embedder_heap_tracer();
-  }
-
-  size_t GlobalMemoryAvailable();
-
   // ===========================================================================
   // Idle notification. ========================================================
   // ===========================================================================
@@ -1822,11 +1807,6 @@ class Heap {
   size_t max_semi_space_size_ = 8 * (kSystemPointerSize / 4) * MB;
   size_t initial_semispace_size_ = kMinSemiSpaceSizeInKB * KB;
   size_t max_old_generation_size_ = 700ul * (kSystemPointerSize / 4) * MB;
-  // TODO(mlippautz): Clarify whether this should be take some embedder
-  // configurable limit into account.
-  size_t max_global_memory_size_ =
-      Min(static_cast<uint64_t>(std::numeric_limits<size_t>::max()),
-          static_cast<uint64_t>(max_old_generation_size_) * 2);
   size_t initial_max_old_generation_size_;
   size_t initial_max_old_generation_size_threshold_;
   size_t initial_old_generation_size_;
@@ -1935,7 +1915,6 @@ class Heap {
   // which collector to invoke, before expanding a paged space in the old
   // generation and on every allocation in large object space.
   size_t old_generation_allocation_limit_;
-  size_t global_allocation_limit_;

   // Indicates that inline bump-pointer allocation has been globally disabled
   // for all spaces. This is used to disable allocations in generated code.
@@ -1986,7 +1965,6 @@ class Heap {
   std::unique_ptr<MemoryAllocator> memory_allocator_;
   std::unique_ptr<StoreBuffer> store_buffer_;
   std::unique_ptr<HeapController> heap_controller_;
-  std::unique_ptr<GlobalMemoryController> global_memory_controller_;
   std::unique_ptr<IncrementalMarking> incremental_marking_;
   std::unique_ptr<ConcurrentMarking> concurrent_marking_;
   std::unique_ptr<GCIdleTimeHandler> gc_idle_time_handler_;
@@ -2086,7 +2064,6 @@ class Heap {
   friend class ConcurrentMarking;
   friend class GCCallbacksScope;
   friend class GCTracer;
-  friend class GlobalMemoryController;
   friend class HeapController;
   friend class MemoryController;
   friend class HeapIterator;
@@ -39,8 +39,7 @@ void MemoryReducer::TimerTask::RunInternal() {
   Event event;
   double time_ms = heap->MonotonicallyIncreasingTimeInMs();
   heap->tracer()->SampleAllocation(time_ms, heap->NewSpaceAllocationCounter(),
-                                   heap->OldGenerationAllocationCounter(),
-                                   heap->EmbedderAllocationCounter());
+                                   heap->OldGenerationAllocationCounter());
   bool low_allocation_rate = heap->HasLowAllocationRate();
   bool optimize_for_memory = heap->ShouldOptimizeForMemoryUsage();
   if (FLAG_trace_gc_verbose) {
@@ -17,9 +17,9 @@ namespace internal {
   HR(code_cache_reject_reason, V8.CodeCacheRejectReason, 1, 6, 6)           \
   HR(errors_thrown_per_context, V8.ErrorsThrownPerContext, 0, 200, 20)      \
   HR(debug_feature_usage, V8.DebugFeatureUsage, 1, 7, 7)                    \
-  HR(incremental_marking_reason, V8.GCIncrementalMarkingReason, 0, 22, 23)  \
+  HR(incremental_marking_reason, V8.GCIncrementalMarkingReason, 0, 21, 22)  \
   HR(incremental_marking_sum, V8.GCIncrementalMarkingSum, 0, 10000, 101)    \
-  HR(mark_compact_reason, V8.GCMarkCompactReason, 0, 22, 23)                \
+  HR(mark_compact_reason, V8.GCMarkCompactReason, 0, 21, 22)                \
   HR(gc_finalize_clear, V8.GCFinalizeMC.Clear, 0, 10000, 101)               \
   HR(gc_finalize_epilogue, V8.GCFinalizeMC.Epilogue, 0, 10000, 101)         \
   HR(gc_finalize_evacuate, V8.GCFinalizeMC.Evacuate, 0, 10000, 101)         \
@@ -34,7 +34,7 @@ namespace internal {
   /* Range and bucket matches BlinkGC.MainThreadMarkingThroughput. */       \
   HR(gc_main_thread_marking_throughput, V8.GCMainThreadMarkingThroughput, 0, \
      100000, 50)                                                            \
-  HR(scavenge_reason, V8.GCScavengeReason, 0, 22, 23)                       \
+  HR(scavenge_reason, V8.GCScavengeReason, 0, 21, 22)                       \
   HR(young_generation_handling, V8.GCYoungGenerationHandling, 0, 2, 3)      \
   /* Asm/Wasm. */                                                           \
   HR(wasm_functions_per_asm_module, V8.WasmFunctionsPerModule.asm, 1, 1000000, \
@@ -32,7 +32,7 @@ LocalEmbedderHeapTracer::WrapperInfo CreateWrapperInfo() {
 class MockEmbedderHeapTracer : public EmbedderHeapTracer {
  public:
   MOCK_METHOD1(TracePrologue, void(EmbedderHeapTracer::TraceFlags));
-  MOCK_METHOD1(TraceEpilogue, void(EmbedderHeapTracer::TraceSummary*));
+  MOCK_METHOD0(TraceEpilogue, void());
   MOCK_METHOD1(EnterFinalPause, void(EmbedderHeapTracer::EmbedderStackState));
   MOCK_METHOD0(IsTracingDone, bool());
   MOCK_METHOD1(RegisterV8References,
@@ -80,7 +80,7 @@ TEST(LocalEmbedderHeapTracer, TraceEpilogueForwards) {
   StrictMock<MockEmbedderHeapTracer> remote_tracer;
   LocalEmbedderHeapTracer local_tracer(nullptr);
   local_tracer.SetRemoteTracer(&remote_tracer);
-  EXPECT_CALL(remote_tracer, TraceEpilogue(_));
+  EXPECT_CALL(remote_tracer, TraceEpilogue());
   local_tracer.TraceEpilogue();
 }
@@ -53,13 +53,11 @@ TEST(GCTracer, AverageSpeed) {

 namespace {

-constexpr size_t kNoGlobalMemory = 0;
-
 void SampleAndAddAllocaton(v8::internal::GCTracer* tracer, double time_ms,
                            size_t new_space_counter_bytes,
                            size_t old_generation_counter_bytes) {
   tracer->SampleAllocation(time_ms, new_space_counter_bytes,
-                           old_generation_counter_bytes, kNoGlobalMemory);
+                           old_generation_counter_bytes);
   tracer->AddAllocation(time_ms);
 }

@@ -72,7 +70,7 @@ TEST_F(GCTracerTest, AllocationThroughput) {
   int time1 = 100;
   size_t counter1 = 1000;
   // First sample creates baseline but is not part of the recorded samples.
-  tracer->SampleAllocation(time1, counter1, counter1, kNoGlobalMemory);
+  tracer->SampleAllocation(time1, counter1, counter1);
   SampleAndAddAllocaton(tracer, time1, counter1, counter1);
   int time2 = 200;
   size_t counter2 = 2000;