Fixing --verify-predictable mode.

This CL fixes several sources of non-predictability by making Platform::MonotonicallyIncreasingTime() the single bottleneck for all time-querying functions and by providing a PredictablePlatform implementation in d8.

Review URL: https://codereview.chromium.org/1415383004

Cr-Commit-Position: refs/heads/master@{#31959}
ishell authored 2015-11-12 05:42:49 -08:00; committed by Commit bot
parent 0b0581421e
commit 722e19efd6
13 changed files with 111 additions and 49 deletions
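
The change is easiest to see in miniature. The sketch below (illustrative C++ only, not V8 source; all names are made up) shows why funneling every time query through one synthetic clock makes runs reproducible: the returned values depend only on how many queries were made, never on the OS clock.

#include <cstdio>

// A deterministic stand-in for a wall clock: each query advances time by a
// fixed tick, so two identical runs observe identical "timestamps".
class SyntheticClock {
 public:
  double MonotonicallyIncreasingTime() { return time_sec_ += 0.00001; }

 private:
  double time_sec_ = 0.0;
};

int main() {
  SyntheticClock clock;
  // Any number of timing call sites all funnel through the same clock.
  for (int i = 0; i < 3; ++i) {
    std::printf("%f\n", clock.MonotonicallyIncreasingTime());
  }
  // Prints 0.000010, 0.000020, 0.000030 on every run.
  return 0;
}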

src/d8.cc

@@ -101,6 +101,48 @@ class MockArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
 };
+
+#ifndef V8_SHARED
+// Predictable v8::Platform implementation. All background and foreground
+// tasks are run immediately, delayed tasks are not executed at all.
+class PredictablePlatform : public Platform {
+ public:
+  PredictablePlatform() {}
+
+  void CallOnBackgroundThread(Task* task,
+                              ExpectedRuntime expected_runtime) override {
+    task->Run();
+    delete task;
+  }
+
+  void CallOnForegroundThread(v8::Isolate* isolate, Task* task) override {
+    task->Run();
+    delete task;
+  }
+
+  void CallDelayedOnForegroundThread(v8::Isolate* isolate, Task* task,
+                                     double delay_in_seconds) override {
+    delete task;
+  }
+
+  void CallIdleOnForegroundThread(v8::Isolate* isolate,
+                                  IdleTask* task) override {
+    UNREACHABLE();
+  }
+
+  bool IdleTasksEnabled(v8::Isolate* isolate) override { return false; }
+
+  double MonotonicallyIncreasingTime() override {
+    return synthetic_time_in_sec_ += 0.00001;
+  }
+
+ private:
+  double synthetic_time_in_sec_ = 0.0;
+
+  DISALLOW_COPY_AND_ASSIGN(PredictablePlatform);
+};
+#endif  // !V8_SHARED
+
 v8::Platform* g_platform = NULL;
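
Design note on the class above: running every background and foreground task inline on the posting thread removes scheduling order as a source of nondeterminism, and silently dropping delayed tasks means nothing can depend on wall-clock timeouts. The fixed 0.00001 s (10 µs) step keeps MonotonicallyIncreasingTime() strictly increasing while tying it to the number of time queries rather than to real time. This is also presumably why EmptyMessageQueues below stops pumping the message loop in predictable mode: v8::platform::PumpMessageLoop() works with the platform returned by CreateDefaultPlatform(), not with this custom one.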
@@ -425,13 +467,11 @@ int PerIsolateData::RealmIndexOrThrow(
 #ifndef V8_SHARED
 // performance.now() returns a time stamp as double, measured in milliseconds.
-// When FLAG_verify_predictable mode is enabled it returns current value
-// of Heap::allocations_count().
+// When FLAG_verify_predictable mode is enabled it returns result of
+// v8::Platform::MonotonicallyIncreasingTime().
 void Shell::PerformanceNow(const v8::FunctionCallbackInfo<v8::Value>& args) {
   if (i::FLAG_verify_predictable) {
-    Isolate* v8_isolate = args.GetIsolate();
-    i::Heap* heap = reinterpret_cast<i::Isolate*>(v8_isolate)->heap();
-    args.GetReturnValue().Set(heap->synthetic_time());
+    args.GetReturnValue().Set(g_platform->MonotonicallyIncreasingTime());
   } else {
     base::TimeDelta delta =
         base::TimeTicks::HighResolutionNow() - kInitialTicks;
@@ -2016,7 +2056,13 @@ void Shell::CollectGarbage(Isolate* isolate) {
 void Shell::EmptyMessageQueues(Isolate* isolate) {
-  while (v8::platform::PumpMessageLoop(g_platform, isolate)) continue;
+#ifndef V8_SHARED
+  if (!i::FLAG_verify_predictable) {
+#endif
+    while (v8::platform::PumpMessageLoop(g_platform, isolate)) continue;
+#ifndef V8_SHARED
+  }
+#endif
 }
@@ -2358,7 +2404,14 @@ int Shell::Main(int argc, char* argv[]) {
 #endif  // defined(_WIN32) || defined(_WIN64)
   if (!SetOptions(argc, argv)) return 1;
   v8::V8::InitializeICU(options.icu_data_file);
+#ifndef V8_SHARED
+  g_platform = i::FLAG_verify_predictable
+                   ? new PredictablePlatform()
+                   : v8::platform::CreateDefaultPlatform();
+#else
   g_platform = v8::platform::CreateDefaultPlatform();
+#endif  // !V8_SHARED
   v8::V8::InitializePlatform(g_platform);
   v8::V8::Initialize();
   if (options.natives_blob || options.snapshot_blob) {

src/execution.cc

@@ -497,6 +497,11 @@ void StackGuard::HandleGCInterrupt() {
 Object* StackGuard::HandleInterrupts() {
+  if (FLAG_verify_predictable) {
+    // Advance synthetic time by making a time request.
+    isolate_->heap()->MonotonicallyIncreasingTimeInMs();
+  }
+
   if (CheckAndClearInterrupt(GC_REQUEST)) {
     isolate_->heap()->HandleGCRequest();
   }

src/flag-definitions.h

@@ -829,6 +829,7 @@ DEFINE_NEG_IMPLICATION(predictable, concurrent_recompilation)
 DEFINE_NEG_IMPLICATION(predictable, concurrent_osr)
 DEFINE_NEG_IMPLICATION(predictable, concurrent_sweeping)
 DEFINE_NEG_IMPLICATION(predictable, parallel_compaction)
+DEFINE_NEG_IMPLICATION(predictable, memory_reducer)
 
 // mark-compact.cc
 DEFINE_BOOL(force_marking_deque_overflows, false,
@@ -1076,7 +1077,7 @@ DEFINE_IMPLICATION(print_all_code, trace_codegen)
 DEFINE_BOOL(verify_predictable, false,
             "this mode is used for checking that V8 behaves predictably")
-DEFINE_INT(dump_allocations_digest_at_alloc, 0,
+DEFINE_INT(dump_allocations_digest_at_alloc, -1,
            "dump allocations digest each n-th allocation")

src/heap/gc-tracer.cc

@@ -21,6 +21,19 @@ static intptr_t CountTotalHolesSize(Heap* heap) {
 }
 
+GCTracer::Scope::Scope(GCTracer* tracer, ScopeId scope)
+    : tracer_(tracer), scope_(scope) {
+  start_time_ = tracer_->heap_->MonotonicallyIncreasingTimeInMs();
+}
+
+GCTracer::Scope::~Scope() {
+  DCHECK(scope_ < NUMBER_OF_SCOPES);  // scope_ is unsigned.
+  tracer_->current_.scopes[scope_] +=
+      tracer_->heap_->MonotonicallyIncreasingTimeInMs() - start_time_;
+}
+
 GCTracer::AllocationEvent::AllocationEvent(double duration,
                                            size_t allocation_in_bytes) {
   duration_ = duration;
@@ -111,7 +124,7 @@ GCTracer::GCTracer(Heap* heap)
       combined_mark_compact_speed_cache_(0.0),
       start_counter_(0) {
   current_ = Event(Event::START, NULL, NULL);
-  current_.end_time = base::OS::TimeCurrentMillis();
+  current_.end_time = heap_->MonotonicallyIncreasingTimeInMs();
   previous_ = previous_incremental_mark_compactor_event_ = current_;
 }
@@ -851,7 +864,7 @@ size_t GCTracer::CurrentOldGenerationAllocationThroughputInBytesPerMillisecond()
 double GCTracer::ContextDisposalRateInMilliseconds() const {
   if (context_disposal_events_.size() < kRingBufferMaxSize) return 0.0;
 
-  double begin = base::OS::TimeCurrentMillis();
+  double begin = heap_->MonotonicallyIncreasingTimeInMs();
   double end = 0.0;
   ContextDisposalEventBuffer::const_iterator iter =
       context_disposal_events_.begin();

src/heap/gc-tracer.h

@@ -143,15 +143,8 @@ class GCTracer {
       NUMBER_OF_SCOPES
     };
 
-    Scope(GCTracer* tracer, ScopeId scope) : tracer_(tracer), scope_(scope) {
-      start_time_ = base::OS::TimeCurrentMillis();
-    }
-
-    ~Scope() {
-      DCHECK(scope_ < NUMBER_OF_SCOPES);  // scope_ is unsigned.
-      tracer_->current_.scopes[scope_] +=
-          base::OS::TimeCurrentMillis() - start_time_;
-    }
+    Scope(GCTracer* tracer, ScopeId scope);
+    ~Scope();
 
    private:
     GCTracer* tracer_;
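
Moving Scope's constructor and destructor out of line here is presumably what allows them to call heap_->MonotonicallyIncreasingTimeInMs(): the definitions now live in gc-tracer.cc (above), where the full Heap type is available, instead of in this header.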

src/heap/heap-inl.h

@@ -257,20 +257,21 @@ void Heap::OnAllocationEvent(HeapObject* object, int size_in_bytes) {
     profiler->AllocationEvent(object->address(), size_in_bytes);
   }
 
-  ++allocations_count_;
-
   if (FLAG_verify_predictable) {
+    ++allocations_count_;
+    // Advance synthetic time by making a time request.
+    MonotonicallyIncreasingTimeInMs();
+
     UpdateAllocationsHash(object);
     UpdateAllocationsHash(size_in_bytes);
 
-    if ((FLAG_dump_allocations_digest_at_alloc > 0) &&
-        (--dump_allocations_hash_countdown_ == 0)) {
-      dump_allocations_hash_countdown_ = FLAG_dump_allocations_digest_at_alloc;
+    if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
       PrintAlloctionsHash();
     }
   }
 
   if (FLAG_trace_allocation_stack_interval > 0) {
+    if (!FLAG_verify_predictable) ++allocations_count_;
     if (allocations_count_ % FLAG_trace_allocation_stack_interval == 0) {
       isolate()->PrintStack(stdout, Isolate::kPrintStackConcise);
     }
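
The countdown idiom and the new modulo test fire on the same allocations; the modulo form just derives the condition from allocations_count_ itself, which is what lets the dump_allocations_hash_countdown_ member be deleted in heap.h/heap.cc below. A small self-contained comparison (illustrative code, not V8 source):

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t kN = 3;    // dump every 3rd event
  uint32_t countdown = kN;  // old style: dedicated countdown member
  for (uint32_t count = 1; count <= 9; ++count) {
    bool old_style = (--countdown == 0);
    if (old_style) countdown = kN;       // must remember to reset
    bool new_style = (count % kN == 0);  // stateless, derived from the counter
    std::printf("%u: old=%d new=%d\n", count, old_style, new_style);
  }
  return 0;  // both styles fire at counts 3, 6 and 9
}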
@@ -292,14 +293,14 @@ void Heap::OnMoveEvent(HeapObject* target, HeapObject* source,
   if (FLAG_verify_predictable) {
     ++allocations_count_;
+    // Advance synthetic time by making a time request.
+    MonotonicallyIncreasingTimeInMs();
 
     UpdateAllocationsHash(source);
     UpdateAllocationsHash(target);
     UpdateAllocationsHash(size_in_bytes);
 
-    if ((FLAG_dump_allocations_digest_at_alloc > 0) &&
-        (--dump_allocations_hash_countdown_ == 0)) {
-      dump_allocations_hash_countdown_ = FLAG_dump_allocations_digest_at_alloc;
+    if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
       PrintAlloctionsHash();
     }
   }

src/heap/heap.cc

@@ -102,7 +102,6 @@ Heap::Heap()
       gc_post_processing_depth_(0),
       allocations_count_(0),
       raw_allocations_hash_(0),
-      dump_allocations_hash_countdown_(FLAG_dump_allocations_digest_at_alloc),
       ms_count_(0),
       gc_count_(0),
       remembered_unmapped_pages_index_(0),
@@ -1056,7 +1055,7 @@ int Heap::NotifyContextDisposed(bool dependant_context) {
   }
   AgeInlineCaches();
   set_retained_maps(ArrayList::cast(empty_fixed_array()));
-  tracer()->AddContextDisposalTime(base::OS::TimeCurrentMillis());
+  tracer()->AddContextDisposalTime(MonotonicallyIncreasingTimeInMs());
   MemoryReducer::Event event;
   event.type = MemoryReducer::kContextDisposed;
   event.time_ms = MonotonicallyIncreasingTimeInMs();

src/heap/heap.h

@@ -888,10 +888,6 @@ class Heap {
   // Number of "runtime allocations" done so far.
   uint32_t allocations_count() { return allocations_count_; }
 
-  // Returns deterministic "time" value in ms. Works only with
-  // FLAG_verify_predictable.
-  double synthetic_time() { return allocations_count() / 2.0; }
-
   // Print short heap statistics.
   void PrintShortHeapStatistics();
@@ -2163,9 +2159,6 @@ class Heap {
   // Running hash over allocations performed.
   uint32_t raw_allocations_hash_;
 
-  // Countdown counter, dumps allocation hash when 0.
-  uint32_t dump_allocations_hash_countdown_;
-
   // How many mark-sweep collections happened.
   unsigned int ms_count_;

src/heap/incremental-marking.cc

@@ -786,7 +786,7 @@ void IncrementalMarking::Hurry() {
   if (state() == MARKING) {
     double start = 0.0;
     if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
-      start = base::OS::TimeCurrentMillis();
+      start = heap_->MonotonicallyIncreasingTimeInMs();
       if (FLAG_trace_incremental_marking) {
         PrintF("[IncrementalMarking] Hurry\n");
       }
@@ -796,7 +796,7 @@ void IncrementalMarking::Hurry() {
     ProcessMarkingDeque();
     state_ = COMPLETE;
     if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
-      double end = base::OS::TimeCurrentMillis();
+      double end = heap_->MonotonicallyIncreasingTimeInMs();
       double delta = end - start;
       heap_->tracer()->AddMarkingTime(delta);
       if (FLAG_trace_incremental_marking) {
@@ -1058,7 +1058,7 @@ intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
   {
     HistogramTimerScope incremental_marking_scope(
        heap_->isolate()->counters()->gc_incremental_marking());
-    double start = base::OS::TimeCurrentMillis();
+    double start = heap_->MonotonicallyIncreasingTimeInMs();
 
     // The marking speed is driven either by the allocation rate or by the rate
     // at which we are having to check the color of objects in the write
@@ -1109,7 +1109,7 @@ intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
     // with marking.
     SpeedUp();
 
-    double end = base::OS::TimeCurrentMillis();
+    double end = heap_->MonotonicallyIncreasingTimeInMs();
     double duration = (end - start);
 
     // Note that we report zero bytes here when sweeping was in progress or
     // when we just started incremental marking. In these cases we did not

src/heap/mark-compact.cc

@@ -668,7 +668,8 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
   int area_size = space->AreaSize();
 
   // Pairs of (live_bytes_in_page, page).
-  std::vector<std::pair<int, Page*> > pages;
+  typedef std::pair<int, Page*> LiveBytesPagePair;
+  std::vector<LiveBytesPagePair> pages;
   pages.reserve(number_of_pages);
 
   PageIterator it(space);
@@ -739,7 +740,10 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
   //    - the total size of evacuated objects does not exceed the specified
   //      limit.
   //    - fragmentation of (n+1)-th page does not exceed the specified limit.
-  std::sort(pages.begin(), pages.end());
+  std::sort(pages.begin(), pages.end(),
+            [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
+              return a.first < b.first;
+            });
   for (size_t i = 0; i < pages.size(); i++) {
     int live_bytes = pages[i].first;
     int free_bytes = area_size - live_bytes;
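
The comparator is the predictability fix here: std::sort on a std::pair<int, Page*> falls back to comparing the Page* pointers whenever two pages have equal live-byte counts, and pointer values vary from run to run, so the candidate order could too. Comparing only the live-byte count removes that dependence on where pages happen to be allocated. A minimal sketch (stand-in types, not V8 source):

#include <algorithm>
#include <cstdio>
#include <utility>
#include <vector>

struct Page {};  // stand-in for v8::internal::Page

int main() {
  using LiveBytesPagePair = std::pair<int, Page*>;
  Page a, b;
  std::vector<LiveBytesPagePair> pages = {{200, &b}, {100, &a}};
  std::sort(pages.begin(), pages.end(),
            [](const LiveBytesPagePair& x, const LiveBytesPagePair& y) {
              return x.first < y.first;  // ignore the pointer entirely
            });
  std::printf("%d %d\n", pages[0].first, pages[1].first);  // prints: 100 200
  return 0;
}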
@@ -1968,7 +1972,7 @@ void MarkCompactCollector::MarkLiveObjects() {
   GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK);
   double start_time = 0.0;
   if (FLAG_print_cumulative_gc_stat) {
-    start_time = base::OS::TimeCurrentMillis();
+    start_time = heap_->MonotonicallyIncreasingTimeInMs();
   }
 
   // The recursive GC marker detects when it is nearing stack overflow,
   // and switches to a different marking system. JS interrupts interfere
@@ -2058,7 +2062,8 @@ void MarkCompactCollector::MarkLiveObjects() {
   AfterMarking();
 
   if (FLAG_print_cumulative_gc_stat) {
-    heap_->tracer()->AddMarkingTime(base::OS::TimeCurrentMillis() - start_time);
+    heap_->tracer()->AddMarkingTime(heap_->MonotonicallyIncreasingTimeInMs() -
+                                    start_time);
   }
 }
@@ -4404,7 +4409,7 @@ void MarkCompactCollector::SweepSpaces() {
   GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
   double start_time = 0.0;
   if (FLAG_print_cumulative_gc_stat) {
-    start_time = base::OS::TimeCurrentMillis();
+    start_time = heap_->MonotonicallyIncreasingTimeInMs();
   }
 
 #ifdef DEBUG
@@ -4456,7 +4461,7 @@ void MarkCompactCollector::SweepSpaces() {
   ReleaseEvacuationCandidates();
 
   if (FLAG_print_cumulative_gc_stat) {
-    heap_->tracer()->AddSweepingTime(base::OS::TimeCurrentMillis() -
+    heap_->tracer()->AddSweepingTime(heap_->MonotonicallyIncreasingTimeInMs() -
                                      start_time);
   }

src/isolate.cc

@@ -2242,7 +2242,7 @@ bool Isolate::Init(Deserializer* des) {
               heap_.amount_of_external_allocated_memory_at_last_global_gc_)),
       Internals::kAmountOfExternalAllocatedMemoryAtLastGlobalGCOffset);
 
-  time_millis_at_init_ = base::OS::TimeCurrentMillis();
+  time_millis_at_init_ = heap_.MonotonicallyIncreasingTimeInMs();
 
   heap_.NotifyDeserializationComplete();

src/isolate.h

@@ -939,7 +939,7 @@ class Isolate {
   bool initialized_from_snapshot() { return initialized_from_snapshot_; }
 
   double time_millis_since_init() {
-    return base::OS::TimeCurrentMillis() - time_millis_at_init_;
+    return heap_.MonotonicallyIncreasingTimeInMs() - time_millis_at_init_;
   }
 
   DateCache* date_cache() {

src/runtime/runtime-date.cc

@@ -88,8 +88,7 @@ RUNTIME_FUNCTION(Runtime_DateCurrentTime) {
   // the OS time.
   double millis;
   if (FLAG_verify_predictable) {
-    millis = 1388534400000.0;  // Jan 1 2014 00:00:00 GMT+0000
-    millis += Floor(isolate->heap()->synthetic_time());
+    millis = Floor(isolate->heap()->MonotonicallyIncreasingTimeInMs());
   } else {
     millis = Floor(base::OS::TimeCurrentMillis());
   }