[test] Introduce flag to stress atomic gc.

--fuzzer-gc-analysis prints the allocation count without requiring
predictable mode.
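
As a rough standalone sketch (not V8 code; the struct, its members and main()
are made up for illustration, only the flag names and the "### Allocations"
output mirror the diff below), the counting behaviour amounts to:

#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for the heap-side counter touched by this change.
struct AnalysisCounter {
  bool fuzzer_gc_analysis = false;  // --fuzzer-gc-analysis
  bool verify_predictable = false;  // --verify-predictable
  uint32_t allocations_count = 0;

  void OnAllocationEvent() {
    // Predictable mode already counts every allocation; analysis mode now
    // counts too, without forcing the slower predictable configuration.
    if (verify_predictable || fuzzer_gc_analysis) ++allocations_count;
  }

  void TearDown() const {
    if (verify_predictable || fuzzer_gc_analysis) {
      std::printf("### Allocations = %u\n", allocations_count);
    }
  }
};

int main() {
  AnalysisCounter heap;
  heap.fuzzer_gc_analysis = true;
  for (int i = 0; i < 1000; ++i) heap.OnAllocationEvent();
  heap.TearDown();  // prints "### Allocations = 1000"
  return 0;
}

In the real heap the count is printed together with the allocations hash by
Heap::PrintAllocationsHash() during Heap::TearDown(), see the diff below.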

--random_gc_interval is similar to --gc-interval, with two differences:
1) It triggers a GC after random(0, X) allocations instead of after a
constant X.
2) It doesn't reset the allocation timeout when a GC happens for a reason
other than the timeout reaching 0 (see the sketch after this list).
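
A standalone sketch (not V8 code) of the timeout selection described in
point 2, mirroring Heap::NextAllocationTimeout from the diff below; the
constants, the rng parameter and main() are made up for illustration:

#include <cstdio>
#include <random>

// Stand-ins for the real flags; the values here are invented.
constexpr int kRandomGcInterval = 800;  // --random_gc_interval
constexpr int kGcInterval = -1;         // --gc-interval (disabled by default)

int NextAllocationTimeout(std::mt19937& rng, int current_timeout) {
  if (kRandomGcInterval > 0) {
    // Only draw a fresh timeout once the previous one has run out; a GC
    // triggered for any other reason leaves the countdown untouched.
    if (current_timeout <= 0) {
      return std::uniform_int_distribution<int>(0, kRandomGcInterval)(rng);
    }
    return current_timeout;
  }
  return kGcInterval;
}

int main() {
  std::mt19937 rng(42);
  int timeout = NextAllocationTimeout(rng, 0);  // initial draw at heap setup
  std::printf("first timeout: %d\n", timeout);
  // A GC for an unrelated reason (timeout still positive) keeps the value.
  std::printf("kept: %d\n", NextAllocationTimeout(rng, 5));
  return 0;
}

In the real heap the draw uses isolate()->fuzzer_rng(), and after each
collection the result is clamped to at least 6 allocations (see
Heap::CollectGarbage below).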

Bug: v8:6972
Change-Id: I07ad935fc264a61069255c7358b4fcbe42bfb17a
Reviewed-on: https://chromium-review.googlesource.com/815214
Commit-Queue: Michał Majewski <majeski@google.com>
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Cr-Commit-Position: refs/heads/master@{#50456}
4 changed files with 38 additions and 18 deletions


@@ -602,6 +602,9 @@ DEFINE_BOOL(experimental_new_space_growth_heuristic, false,
 DEFINE_INT(max_old_space_size, 0, "max size of the old space (in Mbytes)")
 DEFINE_INT(initial_old_space_size, 0, "initial old space size (in Mbytes)")
 DEFINE_BOOL(gc_global, false, "always perform global GCs")
+DEFINE_INT(random_gc_interval, 0,
+           "Collect garbage after random(0, X) allocations. It overrides "
+           "gc_interval.")
 DEFINE_INT(gc_interval, -1, "garbage collect after <n> allocations")
 DEFINE_INT(retain_maps_for_n_gc, 2,
            "keeps maps alive for <n> old space garbage collections")
@@ -709,8 +712,8 @@ DEFINE_BOOL(stress_incremental_marking, false,
             "force incremental marking for small heaps and run it more often")
 DEFINE_BOOL(fuzzer_gc_analysis, false,
-            "enables analysis mode for gc fuzz testing, e.g. --stress-marking, "
-            "--stress-scavenge")
+            "prints number of allocations and enables analysis mode for gc "
+            "fuzz testing, e.g. --stress-marking, --stress-scavenge")
 DEFINE_INT(stress_marking, 0,
            "force marking at random points between 0 and X (inclusive) percent "
            "of the regular marking start limit")


@@ -251,9 +251,10 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
   DCHECK(AllowHeapAllocation::IsAllowed());
   DCHECK(gc_state_ == NOT_IN_GC);
 #ifdef DEBUG
-  if (FLAG_gc_interval >= 0 && !always_allocate() &&
-      Heap::allocation_timeout_-- <= 0) {
-    return AllocationResult::Retry(space);
+  if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
+    if (!always_allocate() && Heap::allocation_timeout_-- <= 0) {
+      return AllocationResult::Retry(space);
+    }
   }
   isolate_->counters()->objs_since_last_full()->Increment();
   isolate_->counters()->objs_since_last_young()->Increment();
@@ -321,10 +322,10 @@ void Heap::OnAllocationEvent(HeapObject* object, int size_in_bytes) {
     if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
       PrintAllocationsHash();
     }
-  }
-  if (FLAG_trace_allocation_stack_interval > 0) {
-    if (!FLAG_verify_predictable) ++allocations_count_;
+  } else if (FLAG_fuzzer_gc_analysis) {
+    ++allocations_count_;
+  } else if (FLAG_trace_allocation_stack_interval > 0) {
+    ++allocations_count_;
     if (allocations_count_ % FLAG_trace_allocation_stack_interval == 0) {
       isolate()->PrintStack(stdout, Isolate::kPrintStackConcise);
     }
@@ -356,10 +357,11 @@ void Heap::OnMoveEvent(HeapObject* target, HeapObject* source,
     if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
       PrintAllocationsHash();
     }
+  } else if (FLAG_fuzzer_gc_analysis) {
+    ++allocations_count_;
   }
 }
 void Heap::UpdateAllocationsHash(HeapObject* object) {
   Address object_address = object->address();
   MemoryChunk* memory_chunk = MemoryChunk::FromAddress(object_address);


@@ -1215,12 +1215,13 @@ bool Heap::CollectGarbage(AllocationSpace space,
   GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
 #ifdef DEBUG
-  // Reset the allocation timeout to the GC interval, but make sure to
-  // allow at least a few allocations after a collection. The reason
-  // for this is that we have a lot of allocation sequences and we
-  // assume that a garbage collection will allow the subsequent
-  // allocation attempts to go through.
-  allocation_timeout_ = Max(6, FLAG_gc_interval);
+  // Reset the allocation timeout, but make sure to allow at least a few
+  // allocations after a collection. The reason for this is that we have a lot
+  // of allocation sequences and we assume that a garbage collection will allow
+  // the subsequent allocation attempts to go through.
+  if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
+    allocation_timeout_ = Max(6, NextAllocationTimeout(allocation_timeout_));
+  }
 #endif
   EnsureFillerObjectAtTop();
@@ -5513,7 +5514,7 @@ void Heap::DisableInlineAllocation() {
 bool Heap::SetUp() {
 #ifdef DEBUG
-  allocation_timeout_ = FLAG_gc_interval;
+  allocation_timeout_ = NextAllocationTimeout();
 #endif
   // Initialize heap spaces and initial maps and objects. Whenever something
@@ -5660,6 +5661,19 @@ void Heap::ClearStackLimits() {
   roots_[kRealStackLimitRootIndex] = Smi::kZero;
 }
+int Heap::NextAllocationTimeout(int current_timeout) {
+  if (FLAG_random_gc_interval > 0) {
+    // If current timeout hasn't reached 0 the GC was caused by something
+    // different than --stress-atomic-gc flag and we don't update the timeout.
+    if (current_timeout <= 0) {
+      return isolate()->fuzzer_rng()->NextInt(FLAG_random_gc_interval + 1);
+    } else {
+      return current_timeout;
+    }
+  }
+  return FLAG_gc_interval;
+}
 void Heap::PrintAllocationsHash() {
   uint32_t hash = StringHasher::GetHashCore(raw_allocations_hash_);
   PrintF("\n### Allocations = %u, hash = 0x%08x\n", allocations_count(), hash);
@@ -5737,7 +5751,7 @@ void Heap::TearDown() {
   UpdateMaximumCommitted();
-  if (FLAG_verify_predictable) {
+  if (FLAG_verify_predictable || FLAG_fuzzer_gc_analysis) {
     PrintAllocationsHash();
   }


@@ -1865,6 +1865,7 @@ class Heap {
                                GCIdleTimeHeapState heap_state, double start_ms,
                                double deadline_in_ms);
+  int NextAllocationTimeout(int current_timeout = 0);
   inline void UpdateAllocationsHash(HeapObject* object);
   inline void UpdateAllocationsHash(uint32_t value);
   void PrintAllocationsHash();