[cleanup] Heap: Initialize trivial members at declaration site
Bug: v8:8015
Change-Id: Ic247b288294259dea1795c909874920e9f37a871
Reviewed-on: https://chromium-review.googlesource.com/1238575
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#56133}
parent 8a7464fe16 · commit 32c0619740
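The change follows a single mechanical pattern: members whose initial value is a compile-time constant move out of the constructor's mem-initializer list and become C++11 default member initializers at the declaration site, so a constructor (or a newly added member) cannot silently forget them. A minimal sketch of the before/after shape, with invented names rather than V8 code:

```cpp
// Before: every trivial default lives in the constructor, far away from
// the declaration it belongs to.
//
//   class Cache {
//    public:
//     Cache() : size_(0), enabled_(false), backing_(nullptr) {}
//    private:
//     size_t size_;
//     bool enabled_;
//     char* backing_;
//   };

// After: default member initializers. Any constructor that does not
// mention a member in its mem-initializer list picks up the default.
#include <cstddef>

class Cache {
 public:
  Cache() = default;

 private:
  std::size_t size_ = 0;
  bool enabled_ = false;
  char* backing_ = nullptr;
};
```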
src/heap/heap.cc

@@ -129,108 +129,14 @@ class IdleScavengeObserver : public AllocationObserver {
 };
 
 Heap::Heap()
-    : external_memory_(0),
-      external_memory_limit_(kExternalAllocationSoftLimit),
-      external_memory_at_last_mark_compact_(0),
-      external_memory_concurrently_freed_(0),
-      isolate_(nullptr),
-      code_range_size_(0),
-      // semispace_size_ should be a power of 2 and old_generation_size_ should
-      // be a multiple of Page::kPageSize.
-      max_semi_space_size_(8 * (kPointerSize / 4) * MB),
-      initial_semispace_size_(kMinSemiSpaceSizeInKB * KB),
-      max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
-      initial_max_old_generation_size_(max_old_generation_size_),
+    : initial_max_old_generation_size_(max_old_generation_size_),
       initial_old_generation_size_(max_old_generation_size_ /
                                    kInitalOldGenerationLimitFactor),
-      old_generation_size_configured_(false),
-      // Variables set based on semispace_size_ and old_generation_size_ in
-      // ConfigureHeap.
-      // Will be 4 * reserved_semispace_size_ to ensure that young
-      // generation can be aligned to its size.
-      maximum_committed_(0),
-      backing_store_bytes_(0),
-      survived_since_last_expansion_(0),
-      survived_last_scavenge_(0),
-      always_allocate_scope_count_(0),
       memory_pressure_level_(MemoryPressureLevel::kNone),
-      contexts_disposed_(0),
-      number_of_disposed_maps_(0),
-      new_space_(nullptr),
-      old_space_(nullptr),
-      code_space_(nullptr),
-      map_space_(nullptr),
-      lo_space_(nullptr),
-      new_lo_space_(nullptr),
-      read_only_space_(nullptr),
-      write_protect_code_memory_(false),
-      code_space_memory_modification_scope_depth_(0),
-      gc_state_(NOT_IN_GC),
-      gc_post_processing_depth_(0),
-      allocations_count_(0),
-      raw_allocations_hash_(0),
-      stress_marking_observer_(nullptr),
-      stress_scavenge_observer_(nullptr),
-      allocation_step_in_progress_(false),
-      max_marking_limit_reached_(0.0),
-      ms_count_(0),
-      gc_count_(0),
-      consecutive_ineffective_mark_compacts_(0),
-      mmap_region_base_(0),
-      remembered_unmapped_pages_index_(0),
       old_generation_allocation_limit_(initial_old_generation_size_),
-      inline_allocation_disabled_(false),
-      tracer_(nullptr),
-      promoted_objects_size_(0),
-      promotion_ratio_(0),
-      semi_space_copied_object_size_(0),
-      previous_semi_space_copied_object_size_(0),
-      semi_space_copied_rate_(0),
-      nodes_died_in_new_space_(0),
-      nodes_copied_in_new_space_(0),
-      nodes_promoted_(0),
-      maximum_size_scavenges_(0),
-      last_idle_notification_time_(0.0),
-      last_gc_time_(0.0),
-      mark_compact_collector_(nullptr),
-      minor_mark_compact_collector_(nullptr),
-      array_buffer_collector_(nullptr),
-      memory_allocator_(nullptr),
-      store_buffer_(nullptr),
-      incremental_marking_(nullptr),
-      concurrent_marking_(nullptr),
-      gc_idle_time_handler_(nullptr),
-      memory_reducer_(nullptr),
-      live_object_stats_(nullptr),
-      dead_object_stats_(nullptr),
-      scavenge_job_(nullptr),
-      idle_scavenge_observer_(nullptr),
-      new_space_allocation_counter_(0),
-      old_generation_allocation_counter_at_last_gc_(0),
-      old_generation_size_at_last_gc_(0),
       global_pretenuring_feedback_(kInitialFeedbackCapacity),
-      is_marking_flag_(false),
-      ring_buffer_full_(false),
-      ring_buffer_end_(0),
-      configured_(false),
       current_gc_flags_(Heap::kNoGCFlags),
       current_gc_callback_flags_(GCCallbackFlags::kNoGCCallbackFlags),
-      external_string_table_(this),
-      gc_callbacks_depth_(0),
-      deserialization_complete_(false),
-      strong_roots_list_(nullptr),
-      heap_iterator_depth_(0),
-      local_embedder_heap_tracer_(nullptr),
-      fast_promotion_mode_(false),
-      force_oom_(false),
-      delay_sweeper_tasks_for_testing_(false),
-      pending_layout_change_object_(nullptr),
-      unprotected_memory_chunks_registry_enabled_(false)
-#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
-      ,
-      allocation_timeout_(0)
-#endif  // V8_ENABLE_ALLOCATION_TIMEOUT
-{
+      external_string_table_(this) {
   // Ensure old_generation_size_ is a multiple of kPageSize.
   DCHECK_EQ(0, max_old_generation_size_ & (Page::kPageSize - 1));
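Note what stays behind in the mem-initializer list: only members whose initial value depends on another member or is not a plain constant (for example, initial_max_old_generation_size_ copies max_old_generation_size_, and old_generation_allocation_limit_ is derived from initial_old_generation_size_), which is what the commit title means by "trivial". The retained DCHECK_EQ uses the standard power-of-two alignment trick; a small standalone illustration (the page-size value below is invented for the example, only its power-of-two property matters):

```cpp
#include <cassert>
#include <cstddef>

int main() {
  // Illustrative page size (not taken from this diff).
  constexpr std::size_t kPageSize = std::size_t{1} << 19;
  static_assert((kPageSize & (kPageSize - 1)) == 0, "power of two");

  // For a power of two N, x is a multiple of N exactly when its low
  // log2(N) bits are all zero, i.e. (x & (N - 1)) == 0. This is what
  // DCHECK_EQ(0, max_old_generation_size_ & (Page::kPageSize - 1)) checks.
  assert(((3 * kPageSize) & (kPageSize - 1)) == 0);      // multiple
  assert(((3 * kPageSize + 1) & (kPageSize - 1)) != 0);  // not a multiple
  return 0;
}
```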
src/heap/heap.h
@@ -1915,20 +1915,20 @@ class Heap {
   void PrintRetainingPath(HeapObject* object, RetainingPathOption option);
 
   // The amount of external memory registered through the API.
-  int64_t external_memory_;
+  int64_t external_memory_ = 0;
 
   // The limit when to trigger memory pressure from the API.
-  int64_t external_memory_limit_;
+  int64_t external_memory_limit_ = kExternalAllocationSoftLimit;
 
   // Caches the amount of external memory registered at the last MC.
-  int64_t external_memory_at_last_mark_compact_;
+  int64_t external_memory_at_last_mark_compact_ = 0;
 
   // The amount of memory that has been freed concurrently.
-  std::atomic<intptr_t> external_memory_concurrently_freed_;
+  std::atomic<intptr_t> external_memory_concurrently_freed_{0};
 
   // This can be calculated directly from a pointer to the heap; however, it is
   // more expedient to get at the isolate directly from within Heap methods.
-  Isolate* isolate_;
+  Isolate* isolate_ = nullptr;
 
   RootsTable roots_;
 
@@ -1953,28 +1953,28 @@ class Heap {
   static constexpr int kRootRegisterAddressableEndOffset =
       kRootsBuiltinsOffset + Builtins::builtin_count * kPointerSize;
 
-  size_t code_range_size_;
-  size_t max_semi_space_size_;
-  size_t initial_semispace_size_;
-  size_t max_old_generation_size_;
+  size_t code_range_size_ = 0;
+  size_t max_semi_space_size_ = 8 * (kPointerSize / 4) * MB;
+  size_t initial_semispace_size_ = kMinSemiSpaceSizeInKB * KB;
+  size_t max_old_generation_size_ = 700ul * (kPointerSize / 4) * MB;
   size_t initial_max_old_generation_size_;
   size_t initial_old_generation_size_;
-  bool old_generation_size_configured_;
-  size_t maximum_committed_;
+  bool old_generation_size_configured_ = false;
+  size_t maximum_committed_ = 0;
 
   // Backing store bytes (array buffers and external strings).
-  std::atomic<size_t> backing_store_bytes_;
+  std::atomic<size_t> backing_store_bytes_{0};
 
   // For keeping track of how much data has survived
   // scavenge since last new space expansion.
-  size_t survived_since_last_expansion_;
+  size_t survived_since_last_expansion_ = 0;
 
   // ... and since the last scavenge.
-  size_t survived_last_scavenge_;
+  size_t survived_last_scavenge_ = 0;
 
   // This is not the depth of nested AlwaysAllocateScope's but rather a single
   // count, as scopes can be acquired from multiple tasks (read: threads).
-  std::atomic<size_t> always_allocate_scope_count_;
+  std::atomic<size_t> always_allocate_scope_count_{0};
 
   // Stores the memory pressure level that set by MemoryPressureNotification
   // and reset by a mark-compact garbage collection.
@@ -1984,74 +1984,75 @@ class Heap {
       near_heap_limit_callbacks_;
 
   // For keeping track of context disposals.
-  int contexts_disposed_;
+  int contexts_disposed_ = 0;
 
   // The length of the retained_maps array at the time of context disposal.
   // This separates maps in the retained_maps array that were created before
   // and after context disposal.
-  int number_of_disposed_maps_;
+  int number_of_disposed_maps_ = 0;
 
-  NewSpace* new_space_;
-  OldSpace* old_space_;
-  CodeSpace* code_space_;
-  MapSpace* map_space_;
-  LargeObjectSpace* lo_space_;
-  NewLargeObjectSpace* new_lo_space_;
-  ReadOnlySpace* read_only_space_;
+  NewSpace* new_space_ = nullptr;
+  OldSpace* old_space_ = nullptr;
+  CodeSpace* code_space_ = nullptr;
+  MapSpace* map_space_ = nullptr;
+  LargeObjectSpace* lo_space_ = nullptr;
+  NewLargeObjectSpace* new_lo_space_ = nullptr;
+  ReadOnlySpace* read_only_space_ = nullptr;
   // Map from the space id to the space.
   Space* space_[LAST_SPACE + 1];
 
   // Determines whether code space is write-protected. This is essentially a
   // race-free copy of the {FLAG_write_protect_code_memory} flag.
-  bool write_protect_code_memory_;
+  bool write_protect_code_memory_ = false;
 
   // Holds the number of open CodeSpaceMemoryModificationScopes.
-  uintptr_t code_space_memory_modification_scope_depth_;
+  uintptr_t code_space_memory_modification_scope_depth_ = 0;
 
-  HeapState gc_state_;
-  int gc_post_processing_depth_;
+  HeapState gc_state_ = NOT_IN_GC;
+
+  int gc_post_processing_depth_ = 0;
 
   // Returns the amount of external memory registered since last global gc.
   uint64_t PromotedExternalMemorySize();
 
   // How many "runtime allocations" happened.
-  uint32_t allocations_count_;
+  uint32_t allocations_count_ = 0;
 
   // Running hash over allocations performed.
-  uint32_t raw_allocations_hash_;
+  uint32_t raw_allocations_hash_ = 0;
 
   // Starts marking when stress_marking_percentage_% of the marking start limit
   // is reached.
-  int stress_marking_percentage_;
+  int stress_marking_percentage_ = 0;
 
   // Observer that causes more frequent checks for reached incremental marking
   // limit.
-  AllocationObserver* stress_marking_observer_;
+  AllocationObserver* stress_marking_observer_ = nullptr;
 
   // Observer that can cause early scavenge start.
-  StressScavengeObserver* stress_scavenge_observer_;
+  StressScavengeObserver* stress_scavenge_observer_ = nullptr;
 
-  bool allocation_step_in_progress_;
+  bool allocation_step_in_progress_ = false;
 
   // The maximum percent of the marking limit reached wihout causing marking.
   // This is tracked when specyfing --fuzzer-gc-analysis.
-  double max_marking_limit_reached_;
+  double max_marking_limit_reached_ = 0.0;
 
   // How many mark-sweep collections happened.
-  unsigned int ms_count_;
+  unsigned int ms_count_ = 0;
 
   // How many gc happened.
-  unsigned int gc_count_;
+  unsigned int gc_count_ = 0;
 
   // The number of Mark-Compact garbage collections that are considered as
   // ineffective. See IsIneffectiveMarkCompact() predicate.
-  int consecutive_ineffective_mark_compacts_;
+  int consecutive_ineffective_mark_compacts_ = 0;
 
   static const uintptr_t kMmapRegionMask = 0xFFFFFFFFu;
-  uintptr_t mmap_region_base_;
+  uintptr_t mmap_region_base_ = 0;
 
   // For post mortem debugging.
-  int remembered_unmapped_pages_index_;
+  int remembered_unmapped_pages_index_ = 0;
   Address remembered_unmapped_pages_[kRememberedUnmappedPages];
 
   // Limit that triggers a global GC on the next (normally caused) GC. This
@@ -2062,7 +2063,7 @@ class Heap {
 
   // Indicates that inline bump-pointer allocation has been globally disabled
   // for all spaces. This is used to disable allocations in generated code.
-  bool inline_allocation_disabled_;
+  bool inline_allocation_disabled_ = false;
 
   // Weak list heads, threaded through the objects.
   // List heads are initialized lazily and contain the undefined_value at start.
@@ -2076,70 +2077,61 @@ class Heap {
 
   int deferred_counters_[v8::Isolate::kUseCounterFeatureCount];
 
-  GCTracer* tracer_;
-
-  size_t promoted_objects_size_;
-  double promotion_ratio_;
-  double promotion_rate_;
-  size_t semi_space_copied_object_size_;
-  size_t previous_semi_space_copied_object_size_;
-  double semi_space_copied_rate_;
-  int nodes_died_in_new_space_;
-  int nodes_copied_in_new_space_;
-  int nodes_promoted_;
+  size_t promoted_objects_size_ = 0;
+  double promotion_ratio_ = 0.0;
+  double promotion_rate_ = 0.0;
+  size_t semi_space_copied_object_size_ = 0;
+  size_t previous_semi_space_copied_object_size_ = 0;
+  double semi_space_copied_rate_ = 0.0;
+  int nodes_died_in_new_space_ = 0;
+  int nodes_copied_in_new_space_ = 0;
+  int nodes_promoted_ = 0;
 
   // This is the pretenuring trigger for allocation sites that are in maybe
   // tenure state. When we switched to the maximum new space size we deoptimize
   // the code that belongs to the allocation site and derive the lifetime
   // of the allocation site.
-  unsigned int maximum_size_scavenges_;
+  unsigned int maximum_size_scavenges_ = 0;
 
   // Total time spent in GC.
   double total_gc_time_ms_;
 
   // Last time an idle notification happened.
-  double last_idle_notification_time_;
+  double last_idle_notification_time_ = 0.0;
 
   // Last time a garbage collection happened.
-  double last_gc_time_;
+  double last_gc_time_ = 0.0;
 
-  MarkCompactCollector* mark_compact_collector_;
-  MinorMarkCompactCollector* minor_mark_compact_collector_;
-
-  ArrayBufferCollector* array_buffer_collector_;
-
-  MemoryAllocator* memory_allocator_;
-
-  StoreBuffer* store_buffer_;
-
-  HeapController* heap_controller_;
-
-  IncrementalMarking* incremental_marking_;
-  ConcurrentMarking* concurrent_marking_;
-
-  GCIdleTimeHandler* gc_idle_time_handler_;
-
-  MemoryReducer* memory_reducer_;
-
-  ObjectStats* live_object_stats_;
-  ObjectStats* dead_object_stats_;
-
-  ScavengeJob* scavenge_job_;
-
-  AllocationObserver* idle_scavenge_observer_;
+  GCTracer* tracer_ = nullptr;
+  MarkCompactCollector* mark_compact_collector_ = nullptr;
+  MinorMarkCompactCollector* minor_mark_compact_collector_ = nullptr;
+  ArrayBufferCollector* array_buffer_collector_ = nullptr;
+  MemoryAllocator* memory_allocator_ = nullptr;
+  StoreBuffer* store_buffer_ = nullptr;
+  HeapController* heap_controller_ = nullptr;
+  IncrementalMarking* incremental_marking_ = nullptr;
+  ConcurrentMarking* concurrent_marking_ = nullptr;
+  GCIdleTimeHandler* gc_idle_time_handler_ = nullptr;
+  MemoryReducer* memory_reducer_ = nullptr;
+  ObjectStats* live_object_stats_ = nullptr;
+  ObjectStats* dead_object_stats_ = nullptr;
+  ScavengeJob* scavenge_job_ = nullptr;
+  AllocationObserver* idle_scavenge_observer_ = nullptr;
+  LocalEmbedderHeapTracer* local_embedder_heap_tracer_ = nullptr;
+  StrongRootsList* strong_roots_list_ = nullptr;
 
   // This counter is increased before each GC and never reset.
   // To account for the bytes allocated since the last GC, use the
   // NewSpaceAllocationCounter() function.
-  size_t new_space_allocation_counter_;
+  size_t new_space_allocation_counter_ = 0;
 
   // This counter is increased before each GC and never reset. To
   // account for the bytes allocated since the last GC, use the
   // OldGenerationAllocationCounter() function.
-  size_t old_generation_allocation_counter_at_last_gc_;
+  size_t old_generation_allocation_counter_at_last_gc_ = 0;
 
   // The size of objects in old generation after the last MarkCompact GC.
-  size_t old_generation_size_at_last_gc_;
+  size_t old_generation_size_at_last_gc_ = 0;
 
   // The feedback storage is used to store allocation sites (keys) and how often
   // they have been visited (values) by finding a memento behind an object. The
@@ -2151,20 +2143,20 @@ class Heap {
   char trace_ring_buffer_[kTraceRingBufferSize];
 
   // Used as boolean.
-  uint8_t is_marking_flag_;
+  uint8_t is_marking_flag_ = 0;
 
   // If it's not full then the data is from 0 to ring_buffer_end_. If it's
   // full then the data is from ring_buffer_end_ to the end of the buffer and
   // from 0 to ring_buffer_end_.
-  bool ring_buffer_full_;
-  size_t ring_buffer_end_;
+  bool ring_buffer_full_ = false;
+  size_t ring_buffer_end_ = 0;
 
   // Flag is set when the heap has been configured. The heap can be repeatedly
   // configured through the API until it is set up.
-  bool configured_;
+  bool configured_ = false;
 
   // Currently set GC flags that are respected by all GC components.
-  int current_gc_flags_;
+  int current_gc_flags_ = Heap::kNoGCFlags;
 
   // Currently set GC callback flags that are used to pass information between
   // the embedder and V8's GC.
@@ -2174,34 +2166,30 @@ class Heap {
 
   base::Mutex relocation_mutex_;
 
-  int gc_callbacks_depth_;
+  int gc_callbacks_depth_ = 0;
 
-  bool deserialization_complete_;
-
-  StrongRootsList* strong_roots_list_;
+  bool deserialization_complete_ = false;
 
   // The depth of HeapIterator nestings.
-  int heap_iterator_depth_;
+  int heap_iterator_depth_ = 0;
 
-  LocalEmbedderHeapTracer* local_embedder_heap_tracer_;
-
-  bool fast_promotion_mode_;
+  bool fast_promotion_mode_ = false;
 
   // Used for testing purposes.
-  bool force_oom_;
-  bool delay_sweeper_tasks_for_testing_;
+  bool force_oom_ = false;
+  bool delay_sweeper_tasks_for_testing_ = false;
 
-  HeapObject* pending_layout_change_object_;
+  HeapObject* pending_layout_change_object_ = nullptr;
 
   base::Mutex unprotected_memory_chunks_mutex_;
   std::unordered_set<MemoryChunk*> unprotected_memory_chunks_;
-  bool unprotected_memory_chunks_registry_enabled_;
+  bool unprotected_memory_chunks_registry_enabled_ = false;
 
 #ifdef V8_ENABLE_ALLOCATION_TIMEOUT
   // If the --gc-interval flag is set to a positive value, this
   // variable holds the value indicating the number of allocations
   // remain until the next failure and garbage collection.
-  int allocation_timeout_;
+  int allocation_timeout_ = 0;
 #endif  // V8_ENABLE_ALLOCATION_TIMEOUT
 
   std::map<HeapObject*, HeapObject*> retainer_;
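One detail of the header diff worth calling out: plain members are initialized with `= value`, while the std::atomic members (external_memory_concurrently_freed_, backing_store_bytes_, always_allocate_scope_count_) use brace initialization `{0}`. A plausible reason is that before C++17, copy-initializing an atomic is ill-formed: it would go through std::atomic's deleted copy constructor, whereas direct-list-initialization constructs the value in place. A compilable sketch with invented names:

```cpp
#include <atomic>
#include <cstddef>

struct Counters {
  std::atomic<std::size_t> bytes_{0};  // OK in C++11 and later
  // std::atomic<std::size_t> bad_ = 0;  // ill-formed before C++17:
  //                                     // needs the deleted copy ctor
  std::size_t plain_ = 0;              // non-atomic members can use '='
};

int main() {
  Counters c;
  c.bytes_.fetch_add(64, std::memory_order_relaxed);
  return c.bytes_.load() == 64 ? 0 : 1;
}
```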