diff --git a/src/profiler/cpu-profiler.cc b/src/profiler/cpu-profiler.cc
index 154edae60a..f8641063a8 100644
--- a/src/profiler/cpu-profiler.cc
+++ b/src/profiler/cpu-profiler.cc
@@ -90,7 +90,7 @@ ProfilingScope::ProfilingScope(Isolate* isolate, ProfilerListener* listener)
   // callbacks on the heap.
   DCHECK(isolate_->heap()->HasBeenSetUp());
 
-  if (!FLAG_prof_browser_mode) {
+  if (!v8_flags.prof_browser_mode) {
     logger->LogCodeObjects();
   }
   logger->LogCompiledFunctions();
@@ -511,7 +511,7 @@ CpuProfiler::CpuProfiler(Isolate* isolate, CpuProfilingNamingMode naming_mode,
       naming_mode_(naming_mode),
       logging_mode_(logging_mode),
       base_sampling_interval_(base::TimeDelta::FromMicroseconds(
-          FLAG_cpu_profiler_sampling_interval)),
+          v8_flags.cpu_profiler_sampling_interval)),
      code_observer_(test_code_observer),
      profiles_(test_profiles),
      symbolizer_(test_symbolizer),
diff --git a/src/profiler/heap-snapshot-generator.cc b/src/profiler/heap-snapshot-generator.cc
index 3921dc7647..edbdb4c91a 100644
--- a/src/profiler/heap-snapshot-generator.cc
+++ b/src/profiler/heap-snapshot-generator.cc
@@ -547,7 +547,7 @@ bool HeapObjectsMap::MoveObject(Address from, Address to, int object_size) {
   // Size of an object can change during its life, so to keep information
   // about the object in entries_ consistent, we have to adjust size when the
   // object is migrated.
-  if (FLAG_heap_profiler_trace_objects) {
+  if (v8_flags.heap_profiler_trace_objects) {
     PrintF("Move object from %p to %p old size %6d new size %6d\n",
            reinterpret_cast<void*>(from), reinterpret_cast<void*>(to),
            entries_.at(from_entry_info_index).size, object_size);
@@ -586,7 +586,7 @@ SnapshotObjectId HeapObjectsMap::FindOrAddEntry(Address addr,
         static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
     EntryInfo& entry_info = entries_.at(entry_index);
     entry_info.accessed = accessed;
-    if (FLAG_heap_profiler_trace_objects) {
+    if (v8_flags.heap_profiler_trace_objects) {
       PrintF("Update object size : %p with old size %d and new size %d\n",
              reinterpret_cast<void*>(addr), entry_info.size, size);
     }
@@ -622,7 +622,7 @@ void HeapObjectsMap::AddMergedNativeEntry(NativeObject addr,
 void HeapObjectsMap::StopHeapObjectsTracking() { time_intervals_.clear(); }
 
 void HeapObjectsMap::UpdateHeapObjectsMap() {
-  if (FLAG_heap_profiler_trace_objects) {
+  if (v8_flags.heap_profiler_trace_objects) {
     PrintF("Begin HeapObjectsMap::UpdateHeapObjectsMap. map has %d entries.\n",
            entries_map_.occupancy());
   }
@@ -634,14 +634,14 @@ void HeapObjectsMap::UpdateHeapObjectsMap() {
        obj = iterator.Next()) {
     int object_size = obj.Size(cage_base);
     FindOrAddEntry(obj.address(), object_size);
-    if (FLAG_heap_profiler_trace_objects) {
+    if (v8_flags.heap_profiler_trace_objects) {
       PrintF("Update object      : %p %6d. Next address is %p\n",
              reinterpret_cast<void*>(obj.address()), object_size,
             reinterpret_cast<void*>(obj.address() + object_size));
     }
   }
   RemoveDeadEntries();
-  if (FLAG_heap_profiler_trace_objects) {
+  if (v8_flags.heap_profiler_trace_objects) {
     PrintF("End HeapObjectsMap::UpdateHeapObjectsMap. map has %d entries.\n",
            entries_map_.occupancy());
   }
@@ -877,7 +877,8 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject object) {
 
 HeapEntry* V8HeapExplorer::AddEntry(HeapObject object, HeapEntry::Type type,
                                     const char* name) {
-  if (FLAG_heap_profiler_show_hidden_objects && type == HeapEntry::kHidden) {
+  if (v8_flags.heap_profiler_show_hidden_objects &&
+      type == HeapEntry::kHidden) {
     type = HeapEntry::kNative;
   }
   PtrComprCageBase cage_base(isolate());
@@ -2094,7 +2095,7 @@ bool V8HeapExplorer::IterateAndExtractReferences(
    // objects, and fails DCHECKs if we attempt to. Read-only objects can
    // never retain read-write objects, so there is no risk in skipping
    // verification for them.
-    if (FLAG_heap_snapshot_verify &&
+    if (v8_flags.heap_snapshot_verify &&
        !BasicMemoryChunk::FromHeapObject(obj)->InReadOnlySpace()) {
      verifier = std::make_unique<HeapEntryVerifier>(generator, obj);
    }
@@ -2643,7 +2644,7 @@ bool NativeObjectsExplorer::IterateAndExtractReferences(
     HeapSnapshotGenerator* generator) {
   generator_ = generator;
 
-  if (FLAG_heap_profiler_use_embedder_graph &&
+  if (v8_flags.heap_profiler_use_embedder_graph &&
       snapshot_->profiler()->HasBuildEmbedderGraphCallback()) {
     v8::HandleScope scope(reinterpret_cast<v8::Isolate*>(isolate_));
     DisallowGarbageCollection no_gc;
@@ -2726,7 +2727,7 @@ bool HeapSnapshotGenerator::GenerateSnapshot() {
 
 #ifdef VERIFY_HEAP
   Heap* debug_heap = heap_;
-  if (FLAG_verify_heap) {
+  if (v8_flags.verify_heap) {
     HeapVerifier::VerifyHeap(debug_heap);
   }
 #endif
@@ -2734,7 +2735,7 @@ bool HeapSnapshotGenerator::GenerateSnapshot() {
   InitProgressCounter();
 
 #ifdef VERIFY_HEAP
-  if (FLAG_verify_heap) {
+  if (v8_flags.verify_heap) {
     HeapVerifier::VerifyHeap(debug_heap);
   }
 #endif
diff --git a/src/profiler/heap-snapshot-generator.h b/src/profiler/heap-snapshot-generator.h
index 50d97de298..7750c2b5c2 100644
--- a/src/profiler/heap-snapshot-generator.h
+++ b/src/profiler/heap-snapshot-generator.h
@@ -591,7 +591,7 @@ class HeapSnapshotGenerator : public SnapshottingProgressReportingInterface {
     HeapEntry* result =
         entries_map_.emplace(ptr, allocator->AllocateEntry(ptr)).first->second;
 #ifdef V8_ENABLE_HEAP_SNAPSHOT_VERIFY
-    if (FLAG_heap_snapshot_verify) {
+    if (v8_flags.heap_snapshot_verify) {
       reverse_entries_map_.emplace(result, ptr);
     }
 #endif
@@ -602,7 +602,7 @@ class HeapSnapshotGenerator : public SnapshottingProgressReportingInterface {
   HeapThing FindHeapThingForHeapEntry(HeapEntry* entry) {
     // The reverse lookup map is only populated if the verification flag is
     // enabled.
-    DCHECK(FLAG_heap_snapshot_verify);
+    DCHECK(v8_flags.heap_snapshot_verify);
     auto it = reverse_entries_map_.find(entry);
     return it == reverse_entries_map_.end() ? nullptr : it->second;
   }
diff --git a/src/profiler/sampling-heap-profiler.cc b/src/profiler/sampling-heap-profiler.cc
index b38d235e98..402a240188 100644
--- a/src/profiler/sampling-heap-profiler.cc
+++ b/src/profiler/sampling-heap-profiler.cc
@@ -25,7 +25,7 @@ namespace internal {
 // Let u be a uniformly distributed random number between 0 and 1, then
 // next_sample = (- ln u) / λ
 intptr_t SamplingHeapProfiler::Observer::GetNextSampleInterval(uint64_t rate) {
-  if (FLAG_sampling_heap_profiler_suppress_randomness)
+  if (v8_flags.sampling_heap_profiler_suppress_randomness)
     return static_cast<intptr_t>(rate);
   double u = random_->NextDouble();
   double next = (-base::ieee754::log(u)) * rate;
diff --git a/src/profiler/strings-storage.cc b/src/profiler/strings-storage.cc
index 592d101e68..a1bae849bc 100644
--- a/src/profiler/strings-storage.cc
+++ b/src/profiler/strings-storage.cc
@@ -81,8 +81,8 @@ const char* StringsStorage::GetSymbol(Symbol sym) {
     return "";
   }
   String description = String::cast(sym.description());
-  int length =
-      std::min(FLAG_heap_snapshot_string_limit.value(), description.length());
+  int length = std::min(v8_flags.heap_snapshot_string_limit.value(),
+                        description.length());
   auto data = description.ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL, 0,
                                     length, &length);
   if (sym.is_private_name()) {
@@ -98,7 +98,7 @@ const char* StringsStorage::GetName(Name name) {
   if (name.IsString()) {
     String str = String::cast(name);
     int length =
-        std::min(FLAG_heap_snapshot_string_limit.value(), str.length());
+        std::min(v8_flags.heap_snapshot_string_limit.value(), str.length());
     int actual_length = 0;
     std::unique_ptr<char[]> data = str.ToCString(
         DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL, 0, length, &actual_length);
@@ -117,7 +117,7 @@ const char* StringsStorage::GetConsName(const char* prefix, Name name) {
   if (name.IsString()) {
     String str = String::cast(name);
     int length =
-        std::min(FLAG_heap_snapshot_string_limit.value(), str.length());
+        std::min(v8_flags.heap_snapshot_string_limit.value(), str.length());
     int actual_length = 0;
     std::unique_ptr<char[]> data = str.ToCString(
         DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL, 0, length, &actual_length);
diff --git a/src/profiler/symbolizer.cc b/src/profiler/symbolizer.cc
index 6c9d92b2c1..8528b62693 100644
--- a/src/profiler/symbolizer.cc
+++ b/src/profiler/symbolizer.cc
@@ -161,7 +161,7 @@ Symbolizer::SymbolizedSample Symbolizer::SymbolizeTickSample(
     }
   }
 
-  if (FLAG_prof_browser_mode) {
+  if (v8_flags.prof_browser_mode) {
     bool no_symbolized_entries = true;
     for (auto e : stack_trace) {
       if (e.code_entry != nullptr) {
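
Aside on the sampling-heap-profiler hunk above: the comment over GetNextSampleInterval describes inverse-transform sampling. Drawing the next interval as (-ln u) * rate, with u uniform on (0, 1], makes the byte offsets at which allocations are sampled follow a Poisson process whose mean interval is `rate`. A self-contained C++ sketch of the same computation follows; the function and parameter names here are illustrative stand-ins, not V8 API.

#include <cmath>
#include <cstdint>
#include <random>

// Inverse-transform sampling of an exponential distribution: if u ~ U(0, 1],
// then (-ln u) * mean is exponentially distributed with the given mean, so
// successive draws form a Poisson process over allocated bytes.
int64_t NextSampleInterval(uint64_t mean_interval_bytes, std::mt19937_64& rng) {
  std::uniform_real_distribution<double> dist(0.0, 1.0);
  double u = 1.0 - dist(rng);  // shift [0, 1) to (0, 1] so log(u) stays finite
  double next = -std::log(u) * static_cast<double>(mean_interval_bytes);
  return next < 1.0 ? 1 : static_cast<int64_t>(next);  // sample at least 1 byte out
}

The suppress_randomness branch in the hunk is the deterministic escape hatch for tests: it skips the draw and returns the mean interval directly.
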
diff --git a/test/cctest/test-cpu-profiler.cc b/test/cctest/test-cpu-profiler.cc
index cf13e48735..b1e7fb830e 100644
--- a/test/cctest/test-cpu-profiler.cc
+++ b/test/cctest/test-cpu-profiler.cc
@@ -125,11 +125,11 @@ namespace {
 
 class TestSetup {
  public:
-  TestSetup() : old_flag_prof_browser_mode_(i::FLAG_prof_browser_mode) {
-    i::FLAG_prof_browser_mode = false;
+  TestSetup() : old_flag_prof_browser_mode_(v8_flags.prof_browser_mode) {
+    v8_flags.prof_browser_mode = false;
   }
 
-  ~TestSetup() { i::FLAG_prof_browser_mode = old_flag_prof_browser_mode_; }
+  ~TestSetup() { v8_flags.prof_browser_mode = old_flag_prof_browser_mode_; }
 
  private:
   bool old_flag_prof_browser_mode_;
@@ -762,9 +762,9 @@ static const char* cpu_profiler_test_source =
 TEST(CollectCpuProfile) {
   // Skip test if concurrent sparkplug is enabled. The test becomes flaky,
   // since it requires a precise trace.
-  if (i::FLAG_concurrent_sparkplug) return;
+  if (v8_flags.concurrent_sparkplug) return;
 
-  i::FLAG_allow_natives_syntax = true;
+  v8_flags.allow_natives_syntax = true;
   LocalContext env;
   v8::HandleScope scope(env->GetIsolate());
@@ -795,9 +795,9 @@ TEST(CollectCpuProfile) {
 TEST(CollectCpuProfileCallerLineNumbers) {
   // Skip test if concurrent sparkplug is enabled. The test becomes flaky,
   // since it requires a precise trace.
-  if (i::FLAG_concurrent_sparkplug) return;
+  if (v8_flags.concurrent_sparkplug) return;
 
-  i::FLAG_allow_natives_syntax = true;
+  v8_flags.allow_natives_syntax = true;
   LocalContext env;
   v8::HandleScope scope(env->GetIsolate());
@@ -859,7 +859,7 @@ static const char* hot_deopt_no_frame_entry_test_source =
 // If 'foo' has no ranges the samples falling into the prologue will miss the
 // 'start' function on the stack, so 'foo' will be attached to the (root).
 TEST(HotDeoptNoFrameEntry) {
-  i::FLAG_allow_natives_syntax = true;
+  v8_flags.allow_natives_syntax = true;
   LocalContext env;
   v8::HandleScope scope(env->GetIsolate());
 
@@ -882,7 +882,7 @@ TEST(HotDeoptNoFrameEntry) {
 }
 
 TEST(CollectCpuProfileSamples) {
-  i::FLAG_allow_natives_syntax = true;
+  v8_flags.allow_natives_syntax = true;
   LocalContext env;
   v8::HandleScope scope(env->GetIsolate());
 
@@ -936,7 +936,7 @@ static const char* cpu_profiler_test_source2 =
 //  16    16        loop [-1] #5
 //  14    14    (program) [-1] #2
 TEST(SampleWhenFrameIsNotSetup) {
-  i::FLAG_allow_natives_syntax = true;
+  v8_flags.allow_natives_syntax = true;
   LocalContext env;
   v8::HandleScope scope(env->GetIsolate());
 
@@ -1235,15 +1235,15 @@ TEST(BoundFunctionCall) {
 // This tests checks distribution of the samples through the source lines.
 static void TickLines(bool optimize) {
 #ifndef V8_LITE_MODE
-  FLAG_turbofan = optimize;
+  v8_flags.turbofan = optimize;
 #ifdef V8_ENABLE_MAGLEV
   // TODO(v8:7700): Also test maglev here.
-  FLAG_maglev = false;
+  v8_flags.maglev = false;
 #endif  // V8_ENABLE_MAGLEV
 #endif  // V8_LITE_MODE
   CcTest::InitializeVM();
   LocalContext env;
-  i::FLAG_allow_natives_syntax = true;
+  i::v8_flags.allow_natives_syntax = true;
   i::Isolate* isolate = CcTest::i_isolate();
   i::Factory* factory = isolate->factory();
   i::HandleScope scope(isolate);
@@ -1399,9 +1399,9 @@ static const char* call_function_test_source =
 TEST(FunctionCallSample) {
   // Skip test if concurrent sparkplug is enabled. The test becomes flaky,
   // since it requires a precise trace.
-  if (i::FLAG_concurrent_sparkplug) return;
+  if (i::v8_flags.concurrent_sparkplug) return;
 
-  i::FLAG_allow_natives_syntax = true;
+  i::v8_flags.allow_natives_syntax = true;
   LocalContext env;
   v8::HandleScope scope(env->GetIsolate());
@@ -1460,9 +1460,9 @@ static const char* function_apply_test_source =
 TEST(FunctionApplySample) {
   // Skip test if concurrent sparkplug is enabled. The test becomes flaky,
   // since it requires a precise trace.
-  if (i::FLAG_concurrent_sparkplug) return;
+  if (i::v8_flags.concurrent_sparkplug) return;
 
-  i::FLAG_allow_natives_syntax = true;
+  i::v8_flags.allow_natives_syntax = true;
   LocalContext env;
   v8::HandleScope scope(env->GetIsolate());
@@ -1569,7 +1569,7 @@ static void CallJsFunction(const v8::FunctionCallbackInfo<v8::Value>& info) {
 //  55     1      bar #16 5
 //  54    54        foo #16 6
 TEST(JsNativeJsSample) {
-  i::FLAG_allow_natives_syntax = true;
+  i::v8_flags.allow_natives_syntax = true;
   v8::HandleScope scope(CcTest::isolate());
   v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
   v8::Context::Scope context_scope(env);
@@ -1622,7 +1622,7 @@ static const char* js_native_js_runtime_js_test_source =
 //  51    51        foo #16 6
 //   2     2    (program) #0 2
 TEST(JsNativeJsRuntimeJsSample) {
-  i::FLAG_allow_natives_syntax = true;
+  i::v8_flags.allow_natives_syntax = true;
   v8::HandleScope scope(CcTest::isolate());
   v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
   v8::Context::Scope context_scope(env);
@@ -1679,7 +1679,7 @@ static const char* js_native1_js_native2_js_test_source =
 //  54    54        foo #16 7
 //   2     2    (program) #0 2
 TEST(JsNative1JsNative2JsSample) {
-  i::FLAG_allow_natives_syntax = true;
+  i::v8_flags.allow_natives_syntax = true;
   v8::HandleScope scope(CcTest::isolate());
   v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
   v8::Context::Scope context_scope(env);
@@ -1779,7 +1779,7 @@ static const char* js_native_js_runtime_multiple_test_source =
 //     foo #16 6
 //   (program) #0 2
 TEST(JsNativeJsRuntimeJsSampleMultiple) {
-  i::FLAG_allow_natives_syntax = true;
+  i::v8_flags.allow_natives_syntax = true;
   v8::HandleScope scope(CcTest::isolate());
   v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
   v8::Context::Scope context_scope(env);
@@ -1847,7 +1847,7 @@ static const char* inlining_test_source =
 //     action #16 7
 //   (program) #0 2
 TEST(Inlining) {
-  i::FLAG_allow_natives_syntax = true;
+  i::v8_flags.allow_natives_syntax = true;
   v8::HandleScope scope(CcTest::isolate());
   v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
   v8::Context::Scope context_scope(env);
@@ -1945,9 +1945,9 @@ static const char* inlining_test_source2 = R"(
 TEST(Inlining2) {
   // Skip test if concurrent sparkplug is enabled. The test becomes flaky,
   // since it requires a precise trace.
-  if (FLAG_concurrent_sparkplug) return;
+  if (v8_flags.concurrent_sparkplug) return;
 
-  FLAG_allow_natives_syntax = true;
+  v8_flags.allow_natives_syntax = true;
   v8::Isolate* isolate = CcTest::isolate();
   LocalContext env;
   v8::CpuProfiler::UseDetailedSourcePositionsForProfiling(isolate);
@@ -2037,9 +2037,9 @@ static const char* cross_script_source_b = R"(
 TEST(CrossScriptInliningCallerLineNumbers) {
   // Skip test if concurrent sparkplug is enabled. The test becomes flaky,
   // since it requires a precise trace.
-  if (i::FLAG_concurrent_sparkplug) return;
+  if (i::v8_flags.concurrent_sparkplug) return;
 
-  i::FLAG_allow_natives_syntax = true;
+  i::v8_flags.allow_natives_syntax = true;
   v8::Isolate* isolate = CcTest::isolate();
   LocalContext env;
   v8::CpuProfiler::UseDetailedSourcePositionsForProfiling(isolate);
@@ -2132,9 +2132,9 @@ static const char* cross_script_source_f = R"(
 TEST(CrossScriptInliningCallerLineNumbers2) {
   // Skip test if concurrent sparkplug is enabled. The test becomes flaky,
   // since it requires a precise trace.
-  if (i::FLAG_concurrent_sparkplug) return;
+  if (i::v8_flags.concurrent_sparkplug) return;
 
-  i::FLAG_allow_natives_syntax = true;
+  i::v8_flags.allow_natives_syntax = true;
   LocalContext env;
   v8::HandleScope scope(CcTest::isolate());
   ProfilerHelper helper(env.local());
@@ -2251,7 +2251,7 @@ static void CheckFunctionDetails(v8::Isolate* isolate,
 }
 
 TEST(FunctionDetails) {
-  i::FLAG_allow_natives_syntax = true;
+  i::v8_flags.allow_natives_syntax = true;
   v8::HandleScope scope(CcTest::isolate());
   v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
   v8::Context::Scope context_scope(env);
@@ -2302,8 +2302,9 @@ TEST(FunctionDetails) {
 }
 
 TEST(FunctionDetailsInlining) {
-  if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_turbofan) return;
-  i::FLAG_allow_natives_syntax = true;
+  if (!CcTest::i_isolate()->use_optimizer() || i::v8_flags.always_turbofan)
+    return;
+  i::v8_flags.allow_natives_syntax = true;
   v8::HandleScope scope(CcTest::isolate());
   v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
   v8::Context::Scope context_scope(env);
@@ -2434,7 +2435,7 @@ static const char* pre_profiling_osr_script = R"(
 //     0  startProfiling:0 2 0 #4
 
 TEST(StartProfilingAfterOsr) {
-  i::FLAG_allow_natives_syntax = true;
+  i::v8_flags.allow_natives_syntax = true;
   v8::HandleScope scope(CcTest::isolate());
   v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
   v8::Context::Scope context_scope(env);
@@ -2510,8 +2511,9 @@ const char* GetBranchDeoptReason(v8::Local<v8::Context> context,
 
 // deopt at top function
 TEST(CollectDeoptEvents) {
-  if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_turbofan) return;
-  i::FLAG_allow_natives_syntax = true;
+  if (!CcTest::i_isolate()->use_optimizer() || i::v8_flags.always_turbofan)
+    return;
+  i::v8_flags.allow_natives_syntax = true;
   v8::HandleScope scope(CcTest::isolate());
   v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
   v8::Context::Scope context_scope(env);
@@ -2625,7 +2627,7 @@ TEST(CollectDeoptEvents) {
 }
 
 TEST(SourceLocation) {
-  i::FLAG_always_turbofan = true;
+  i::v8_flags.always_turbofan = true;
   LocalContext env;
   v8::HandleScope scope(CcTest::isolate());
 
@@ -2648,8 +2650,9 @@ static const char* inlined_source =
 
 // deopt at the first level inlined function
 TEST(DeoptAtFirstLevelInlinedSource) {
-  if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_turbofan) return;
-  i::FLAG_allow_natives_syntax = true;
+  if (!CcTest::i_isolate()->use_optimizer() || i::v8_flags.always_turbofan)
+    return;
+  i::v8_flags.allow_natives_syntax = true;
   v8::HandleScope scope(CcTest::isolate());
   v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
   v8::Context::Scope context_scope(env);
@@ -2720,8 +2723,9 @@ TEST(DeoptAtFirstLevelInlinedSource) {
 
 // deopt at the second level inlined function
 TEST(DeoptAtSecondLevelInlinedSource) {
-  if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_turbofan) return;
-  i::FLAG_allow_natives_syntax = true;
+  if (!CcTest::i_isolate()->use_optimizer() || i::v8_flags.always_turbofan)
+    return;
+  i::v8_flags.allow_natives_syntax = true;
   v8::HandleScope scope(CcTest::isolate());
   v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
   v8::Context::Scope context_scope(env);
@@ -2798,8 +2802,9 @@ TEST(DeoptAtSecondLevelInlinedSource) {
 
 // deopt in untracked function
 TEST(DeoptUntrackedFunction) {
-  if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_turbofan) return;
-  i::FLAG_allow_natives_syntax = true;
+  if (!CcTest::i_isolate()->use_optimizer() || i::v8_flags.always_turbofan)
+    return;
+  i::v8_flags.allow_natives_syntax = true;
   v8::HandleScope scope(CcTest::isolate());
   v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
   v8::Context::Scope context_scope(env);
@@ -3017,15 +3022,15 @@ TEST(Issue763073) {
 class AllowNativesSyntax {
  public:
   AllowNativesSyntax()
-      : allow_natives_syntax_(i::FLAG_allow_natives_syntax),
-        trace_deopt_(i::FLAG_trace_deopt) {
-    i::FLAG_allow_natives_syntax = true;
-    i::FLAG_trace_deopt = true;
+      : allow_natives_syntax_(i::v8_flags.allow_natives_syntax),
+        trace_deopt_(i::v8_flags.trace_deopt) {
+    i::v8_flags.allow_natives_syntax = true;
+    i::v8_flags.trace_deopt = true;
   }
 
   ~AllowNativesSyntax() {
-    i::FLAG_allow_natives_syntax = allow_natives_syntax_;
-    i::FLAG_trace_deopt = trace_deopt_;
+    i::v8_flags.allow_natives_syntax = allow_natives_syntax_;
+    i::v8_flags.trace_deopt = trace_deopt_;
   }
 
  private:
@@ -3079,7 +3084,7 @@ static void CallStaticCollectSample(
 }
 
 TEST(StaticCollectSampleAPI) {
-  i::FLAG_allow_natives_syntax = true;
+  i::v8_flags.allow_natives_syntax = true;
   LocalContext env;
   v8::HandleScope scope(env->GetIsolate());
 
@@ -3431,7 +3436,7 @@ class UnlockingThread : public v8::base::Thread {
 
 // Checking for crashes with multiple thread/single Isolate profiling.
 TEST(MultipleThreadsSingleIsolate) {
-  i::FLAG_allow_natives_syntax = true;
+  i::v8_flags.allow_natives_syntax = true;
   v8::Isolate* isolate = CcTest::isolate();
   v8::Locker locker(isolate);
   v8::HandleScope scope(isolate);
@@ -3891,7 +3896,7 @@ TEST(Bug9151StaleCodeEntries) {
 // Tests that functions from other contexts aren't recorded when filtering for
 // another context.
 TEST(ContextIsolation) {
-  i::FLAG_allow_natives_syntax = true;
+  i::v8_flags.allow_natives_syntax = true;
   LocalContext execution_env;
   i::HandleScope scope(CcTest::i_isolate());
 
@@ -3984,7 +3989,7 @@ void ValidateEmbedderState(v8::CpuProfile* profile,
 
 // Tests that embedder states from other contexts aren't recorded
 TEST(EmbedderContextIsolation) {
-  i::FLAG_allow_natives_syntax = true;
+  i::v8_flags.allow_natives_syntax = true;
   LocalContext execution_env;
   i::HandleScope scope(CcTest::i_isolate());
 
@@ -4047,7 +4052,7 @@ TEST(EmbedderContextIsolation) {
 
 // Tests that embedder states from same context are recorded
 TEST(EmbedderStatePropagate) {
-  i::FLAG_allow_natives_syntax = true;
+  i::v8_flags.allow_natives_syntax = true;
   LocalContext execution_env;
   i::HandleScope scope(CcTest::i_isolate());
 
@@ -4110,12 +4115,13 @@ TEST(EmbedderStatePropagate) {
 // even after native context move
 TEST(EmbedderStatePropagateNativeContextMove) {
   // Reusing context addresses will cause this test to fail.
-  if (i::FLAG_gc_global || i::FLAG_stress_compaction ||
-      i::FLAG_stress_incremental_marking || i::FLAG_enable_third_party_heap) {
+  if (i::v8_flags.gc_global || i::v8_flags.stress_compaction ||
+      i::v8_flags.stress_incremental_marking ||
+      i::v8_flags.enable_third_party_heap) {
     return;
   }
-  i::FLAG_allow_natives_syntax = true;
-  i::FLAG_manual_evacuation_candidates_selection = true;
+  i::v8_flags.allow_natives_syntax = true;
+  i::v8_flags.manual_evacuation_candidates_selection = true;
   LocalContext execution_env;
   i::HandleScope scope(CcTest::i_isolate());
 
@@ -4184,9 +4190,9 @@ TEST(EmbedderStatePropagateNativeContextMove) {
 
 // Tests that when a native context that's being filtered is moved, we continue
 // to track its execution.
 TEST(ContextFilterMovedNativeContext) {
-  if (i::FLAG_enable_third_party_heap) return;
-  i::FLAG_allow_natives_syntax = true;
-  i::FLAG_manual_evacuation_candidates_selection = true;
+  if (i::v8_flags.enable_third_party_heap) return;
+  i::v8_flags.allow_natives_syntax = true;
+  i::v8_flags.manual_evacuation_candidates_selection = true;
   LocalContext env;
   i::HandleScope scope(CcTest::i_isolate());
@@ -4267,8 +4273,8 @@ int GetSourcePositionEntryCount(i::Isolate* isolate, const char* source,
 }
 
 UNINITIALIZED_TEST(DetailedSourcePositionAPI) {
-  i::FLAG_detailed_line_info = false;
-  i::FLAG_allow_natives_syntax = true;
+  i::v8_flags.detailed_line_info = false;
+  i::v8_flags.allow_natives_syntax = true;
   v8::Isolate::CreateParams create_params;
   create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
   v8::Isolate* isolate = v8::Isolate::New(create_params);
@@ -4308,11 +4314,11 @@ UNINITIALIZED_TEST(DetailedSourcePositionAPI) {
 }
 
 UNINITIALIZED_TEST(DetailedSourcePositionAPI_Inlining) {
-  i::FLAG_detailed_line_info = false;
-  i::FLAG_turbo_inlining = true;
-  i::FLAG_stress_inline = true;
-  i::FLAG_always_turbofan = false;
-  i::FLAG_allow_natives_syntax = true;
+  i::v8_flags.detailed_line_info = false;
+  i::v8_flags.turbo_inlining = true;
+  i::v8_flags.stress_inline = true;
+  i::v8_flags.always_turbofan = false;
+  i::v8_flags.allow_natives_syntax = true;
   v8::Isolate::CreateParams create_params;
   create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
   v8::Isolate* isolate = v8::Isolate::New(create_params);
@@ -4457,7 +4463,7 @@ TEST(CanStartStopProfilerWithTitlesAndIds) {
 TEST(FastApiCPUProfiler) {
 #if !defined(V8_LITE_MODE) && !defined(USE_SIMULATOR)
   // None of the following configurations include JSCallReducer.
-  if (i::FLAG_jitless) return;
+  if (i::v8_flags.jitless) return;
 
   FLAG_SCOPE(turbofan);
   FLAG_SCOPE(turbo_fast_api_calls);
@@ -4556,15 +4562,15 @@ TEST(FastApiCPUProfiler) {
 
 TEST(BytecodeFlushEventsEagerLogging) {
 #ifndef V8_LITE_MODE
-  FLAG_turbofan = false;
-  FLAG_always_turbofan = false;
-  i::FLAG_optimize_for_size = false;
+  v8_flags.turbofan = false;
+  v8_flags.always_turbofan = false;
+  v8_flags.optimize_for_size = false;
 #endif  // V8_LITE_MODE
 #if ENABLE_SPARKPLUG
-  FLAG_always_sparkplug = false;
+  v8_flags.always_sparkplug = false;
 #endif  // ENABLE_SPARKPLUG
-  i::FLAG_flush_bytecode = true;
-  i::FLAG_allow_natives_syntax = true;
+  v8_flags.flush_bytecode = true;
+  v8_flags.allow_natives_syntax = true;
 
   TestSetup test_setup;
   ManualGCScope manual_gc_scope;
diff --git a/test/cctest/test-heap-profiler.cc b/test/cctest/test-heap-profiler.cc
index 5fbea256c3..188c9ef867 100644
--- a/test/cctest/test-heap-profiler.cc
+++ b/test/cctest/test-heap-profiler.cc
@@ -53,10 +53,10 @@ using i::AllocationTraceNode;
 using i::AllocationTraceTree;
 using i::AllocationTracker;
 using i::SourceLocation;
+using i::heap::GrowNewSpaceToMaximumCapacity;
 using v8::base::ArrayVector;
 using v8::base::Optional;
 using v8::base::Vector;
-using v8::internal::heap::GrowNewSpaceToMaximumCapacity;
 
 namespace {
@@ -1297,7 +1297,7 @@ static TestStatsStream GetHeapStatsUpdate(
 
 TEST(HeapSnapshotObjectsStats) {
   // Concurrent allocation might break results
-  v8::internal::v8_flags.stress_concurrent_allocation = false;
+  i::v8_flags.stress_concurrent_allocation = false;
 
   LocalContext env;
   v8::HandleScope scope(env->GetIsolate());
@@ -2639,7 +2639,7 @@ TEST(ManyLocalsInSharedContext) {
       env->GetIsolate(), ok_object, v8::HeapGraphEdge::kInternal, "context");
   CHECK(context_object);
   // Check the objects are not duplicated in the context.
-  CHECK_EQ(v8::internal::Context::MIN_CONTEXT_EXTENDED_SLOTS + num_objects - 1,
+  CHECK_EQ(i::Context::MIN_CONTEXT_EXTENDED_SLOTS + num_objects - 1,
            context_object->GetChildrenCount());
   // Check all the objects have got their names.
   // ... well check just every 15th because otherwise it's too slow in debug.
@@ -2695,7 +2695,7 @@ TEST(AllocationSitesAreVisible) {
                                  v8::HeapGraphEdge::kInternal, "elements");
   CHECK(elements);
   CHECK_EQ(v8::HeapGraphNode::kCode, elements->GetType());
-  CHECK_EQ(v8::internal::FixedArray::SizeFor(3),
+  CHECK_EQ(i::FixedArray::SizeFor(3),
            static_cast<int>(elements->GetShallowSize()));
 
   v8::Local<v8::Value> array_val =
@@ -3704,10 +3704,10 @@ TEST(SamplingHeapProfiler) {
 
   // Turn off always_turbofan. Inlining can cause stack traces to be shorter
   // than what we expect in this test.
-  v8::internal::v8_flags.always_turbofan = false;
+  i::v8_flags.always_turbofan = false;
 
   // Suppress randomness to avoid flakiness in tests.
-  v8::internal::v8_flags.sampling_heap_profiler_suppress_randomness = true;
+  i::v8_flags.sampling_heap_profiler_suppress_randomness = true;
 
   // Sample should be empty if requested before sampling has started.
   {
@@ -3788,16 +3788,16 @@ TEST(SamplingHeapProfilerRateAgnosticEstimates) {
 
   // Turn off always_turbofan. Inlining can cause stack traces to be shorter
   // than what we expect in this test.
-  v8::internal::v8_flags.always_turbofan = false;
+  i::v8_flags.always_turbofan = false;
 
   // Disable compilation cache to force compilation in both cases
-  v8::internal::v8_flags.compilation_cache = false;
+  i::v8_flags.compilation_cache = false;
 
   // Suppress randomness to avoid flakiness in tests.
-  v8::internal::v8_flags.sampling_heap_profiler_suppress_randomness = true;
+  i::v8_flags.sampling_heap_profiler_suppress_randomness = true;
 
   // stress_incremental_marking adds randomness to the test.
-  v8::internal::v8_flags.stress_incremental_marking = false;
+  i::v8_flags.stress_incremental_marking = false;
 
   // warmup compilation
   CompileRun(simple_sampling_heap_profiler_script);
@@ -3869,7 +3869,7 @@ TEST(SamplingHeapProfilerApiAllocation) {
   v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
 
   // Suppress randomness to avoid flakiness in tests.
-  v8::internal::v8_flags.sampling_heap_profiler_suppress_randomness = true;
+  i::v8_flags.sampling_heap_profiler_suppress_randomness = true;
 
   heap_profiler->StartSamplingHeapProfiler(256);
 
@@ -3892,7 +3892,7 @@ TEST(SamplingHeapProfilerApiSamples) {
   v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
 
   // Suppress randomness to avoid flakiness in tests.
-  v8::internal::v8_flags.sampling_heap_profiler_suppress_randomness = true;
+  i::v8_flags.sampling_heap_profiler_suppress_randomness = true;
 
   heap_profiler->StartSamplingHeapProfiler(1024);
 
@@ -3937,7 +3937,7 @@ TEST(SamplingHeapProfilerLeftTrimming) {
   v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
 
   // Suppress randomness to avoid flakiness in tests.
-  v8::internal::v8_flags.sampling_heap_profiler_suppress_randomness = true;
+  i::v8_flags.sampling_heap_profiler_suppress_randomness = true;
 
   heap_profiler->StartSamplingHeapProfiler(64);
 
@@ -3950,7 +3950,7 @@ TEST(SamplingHeapProfilerLeftTrimming) {
       "  a.shift();\n"
      "}\n");
 
-  CcTest::CollectGarbage(v8::internal::NEW_SPACE);
+  CcTest::CollectGarbage(i::NEW_SPACE);
   // Should not crash.
 
   heap_profiler->StopSamplingHeapProfiler();
@@ -3975,7 +3975,7 @@ TEST(SamplingHeapProfilerPretenuredInlineAllocations) {
   v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
 
   // Suppress randomness to avoid flakiness in tests.
-  v8::internal::v8_flags.sampling_heap_profiler_suppress_randomness = true;
+  i::v8_flags.sampling_heap_profiler_suppress_randomness = true;
 
   GrowNewSpaceToMaximumCapacity(CcTest::heap());
@@ -4037,7 +4037,7 @@ TEST(SamplingHeapProfilerLargeInterval) {
   v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
 
   // Suppress randomness to avoid flakiness in tests.
-  v8::internal::v8_flags.sampling_heap_profiler_suppress_randomness = true;
+  i::v8_flags.sampling_heap_profiler_suppress_randomness = true;
 
   heap_profiler->StartSamplingHeapProfiler(512 * 1024);
@@ -4075,7 +4075,7 @@ TEST(SamplingHeapProfilerSampleDuringDeopt) {
   v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
 
   // Suppress randomness to avoid flakiness in tests.
-  v8::internal::v8_flags.sampling_heap_profiler_suppress_randomness = true;
+  i::v8_flags.sampling_heap_profiler_suppress_randomness = true;
 
   // Small sample interval to force each object to be sampled.
   heap_profiler->StartSamplingHeapProfiler(i::kTaggedSize);
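
Closing note on the pattern: every hunk in this change is the same mechanical rewrite, replacing a per-flag global (FLAG_foo, reached as i::FLAG_foo from the test files) with a field on the single v8_flags struct instance. A compilable toy sketch of the before/after shape is below; the struct members are simplified stand-ins taken from the hunks above, not V8's generated flag definitions.

#include <cstdint>

// After the migration, every flag is a member of one struct instance.
// V8 generates the real members from its flag definition lists; these two
// are just examples modeled on flags that appear in this diff.
struct FlagValues {
  bool prof_browser_mode = false;
  int cpu_profiler_sampling_interval = 100;  // microseconds
};
inline FlagValues v8_flags;  // single global instance (C++17 inline variable)

// Call sites change mechanically:
//   before: if (!FLAG_prof_browser_mode) { ... }
//   after:  if (!v8_flags.prof_browser_mode) { ... }
bool ShouldLogCodeObjects() { return !v8_flags.prof_browser_mode; }

One practical upside of this layout over scattered globals is that all flag storage becomes one contiguous block, which makes it possible, for example, to write-protect or snapshot the whole set in a single operation after initialization.
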