From 0aacfb2a6ecbeda1d1d97ca113afd8253a1b9670 Mon Sep 17 00:00:00 2001 From: Andrew Comminos Date: Thu, 15 Apr 2021 01:41:48 -0700 Subject: [PATCH] [cpu-profiler] Reintroduce support for context filtering As we still intend to run the web-exposed profiler outside of an origin-isolated environment, add support back for filtering by v8::Context. This reverts commit 05af36810076ace617805a2343bc5e6bcd2006d0. Bug: chromium:956688 Change-Id: Idd98bea3213b5963f689a04de6c3743073efc587 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2785806 Reviewed-by: Ulan Degenbaev Reviewed-by: Yang Guo Commit-Queue: Andrew Comminos Cr-Commit-Position: refs/heads/master@{#74112} --- include/v8-profiler.h | 8 +- include/v8.h | 2 +- src/api/api.cc | 16 +++- src/heap/heap.cc | 3 + src/logging/code-events.h | 6 ++ src/logging/log.h | 3 + src/profiler/cpu-profiler.cc | 19 ++-- src/profiler/cpu-profiler.h | 20 ++++- src/profiler/profile-generator.cc | 33 ++++++- src/profiler/profile-generator.h | 33 ++++++- src/profiler/profiler-listener.cc | 7 ++ src/profiler/profiler-listener.h | 1 + src/profiler/tick-sample.cc | 9 ++ src/profiler/tick-sample.h | 1 + src/runtime/runtime-test.cc | 1 + test/cctest/test-cpu-profiler.cc | 142 +++++++++++++++++++++++++++++- 16 files changed, 284 insertions(+), 20 deletions(-) diff --git a/include/v8-profiler.h b/include/v8-profiler.h index b398ae4d08..aabd21f845 100644 --- a/include/v8-profiler.h +++ b/include/v8-profiler.h @@ -289,8 +289,8 @@ class V8_EXPORT CpuProfilingOptions { * interval, set via SetSamplingInterval(). If * zero, the sampling interval will be equal to * the profiler's sampling interval. - * \param filter_context Deprecated option to filter by context, currently a - * no-op. + * \param filter_context If specified, profiles will only contain frames + * using this context. Other frames will be elided. 
*/ CpuProfilingOptions( CpuProfilingMode mode = kLeafNodeLineNumbers, @@ -304,9 +304,13 @@ class V8_EXPORT CpuProfilingOptions { private: friend class internal::CpuProfile; + bool has_filter_context() const { return !filter_context_.IsEmpty(); } + void* raw_filter_context() const; + CpuProfilingMode mode_; unsigned max_samples_; int sampling_interval_us_; + CopyablePersistentTraits::CopyablePersistent filter_context_; }; /** diff --git a/include/v8.h b/include/v8.h index 1a321d436f..ecc33d23d5 100644 --- a/include/v8.h +++ b/include/v8.h @@ -2424,7 +2424,7 @@ struct SampleInfo { StateTag vm_state; // Current VM state. void* external_callback_entry; // External callback address if VM is // executing an external callback. - void* top_context; // Incumbent native context address. + void* context; // Incumbent native context address. }; struct MemoryRange { diff --git a/src/api/api.cc b/src/api/api.cc index 7bf52ec39e..5840ec25d0 100644 --- a/src/api/api.cc +++ b/src/api/api.cc @@ -9478,7 +9478,21 @@ CpuProfilingOptions::CpuProfilingOptions(CpuProfilingMode mode, MaybeLocal filter_context) : mode_(mode), max_samples_(max_samples), - sampling_interval_us_(sampling_interval_us) {} + sampling_interval_us_(sampling_interval_us) { + if (!filter_context.IsEmpty()) { + Local local_filter_context = filter_context.ToLocalChecked(); + filter_context_.Reset(local_filter_context->GetIsolate(), + local_filter_context); + filter_context_.SetWeak(); + } +} + +void* CpuProfilingOptions::raw_filter_context() const { + return reinterpret_cast( + i::Context::cast(*Utils::OpenPersistent(filter_context_)) + .native_context() + .address()); +} void CpuProfiler::Dispose() { delete reinterpret_cast(this); } diff --git a/src/heap/heap.cc b/src/heap/heap.cc index f629a33c33..d858a268c2 100644 --- a/src/heap/heap.cc +++ b/src/heap/heap.cc @@ -3195,6 +3195,9 @@ void Heap::OnMoveEvent(HeapObject target, HeapObject source, if (target.IsSharedFunctionInfo()) { LOG_CODE_EVENT(isolate_, 
SharedFunctionInfoMoveEvent(source.address(), target.address())); + } else if (target.IsNativeContext()) { + PROFILE(isolate_, + NativeContextMoveEvent(source.address(), target.address())); } if (FLAG_verify_predictable) { diff --git a/src/logging/code-events.h b/src/logging/code-events.h index c009ba0b15..cda7c39fc7 100644 --- a/src/logging/code-events.h +++ b/src/logging/code-events.h @@ -98,6 +98,7 @@ class CodeEventListener { // Not handlified as this happens during GC. No allocation allowed. virtual void CodeMoveEvent(AbstractCode from, AbstractCode to) = 0; virtual void SharedFunctionInfoMoveEvent(Address from, Address to) = 0; + virtual void NativeContextMoveEvent(Address from, Address to) = 0; virtual void CodeMovingGCEvent() = 0; virtual void CodeDisableOptEvent(Handle code, Handle shared) = 0; @@ -217,6 +218,11 @@ class CodeEventDispatcher : public CodeEventListener { listener->SharedFunctionInfoMoveEvent(from, to); }); } + void NativeContextMoveEvent(Address from, Address to) override { + DispatchEventToListeners([=](CodeEventListener* listener) { + listener->NativeContextMoveEvent(from, to); + }); + } void CodeMovingGCEvent() override { DispatchEventToListeners( [](CodeEventListener* listener) { listener->CodeMovingGCEvent(); }); diff --git a/src/logging/log.h b/src/logging/log.h index e52f9f2833..ec19032060 100644 --- a/src/logging/log.h +++ b/src/logging/log.h @@ -208,6 +208,7 @@ class Logger : public CodeEventListener { Handle source) override; void CodeMoveEvent(AbstractCode from, AbstractCode to) override; void SharedFunctionInfoMoveEvent(Address from, Address to) override; + void NativeContextMoveEvent(Address from, Address to) override {} void CodeMovingGCEvent() override; void CodeDisableOptEvent(Handle code, Handle shared) override; @@ -410,6 +411,7 @@ class V8_EXPORT_PRIVATE CodeEventLogger : public CodeEventListener { void GetterCallbackEvent(Handle name, Address entry_point) override {} void SetterCallbackEvent(Handle name, Address 
entry_point) override {} void SharedFunctionInfoMoveEvent(Address from, Address to) override {} + void NativeContextMoveEvent(Address from, Address to) override {} void CodeMovingGCEvent() override {} void CodeDeoptEvent(Handle code, DeoptimizeKind kind, Address pc, int fp_to_sp_delta, bool reuse_code) override {} @@ -475,6 +477,7 @@ class ExternalCodeEventListener : public CodeEventListener { void GetterCallbackEvent(Handle name, Address entry_point) override {} void SetterCallbackEvent(Handle name, Address entry_point) override {} void SharedFunctionInfoMoveEvent(Address from, Address to) override {} + void NativeContextMoveEvent(Address from, Address to) override {} void CodeMoveEvent(AbstractCode from, AbstractCode to) override; void CodeDisableOptEvent(Handle code, Handle shared) override {} diff --git a/src/profiler/cpu-profiler.cc b/src/profiler/cpu-profiler.cc index 46a82a7719..ba9022be1b 100644 --- a/src/profiler/cpu-profiler.cc +++ b/src/profiler/cpu-profiler.cc @@ -104,10 +104,11 @@ ProfilingScope::~ProfilingScope() { ProfilerEventsProcessor::ProfilerEventsProcessor( Isolate* isolate, Symbolizer* symbolizer, - ProfilerCodeObserver* code_observer) + ProfilerCodeObserver* code_observer, CpuProfilesCollection* profiles) : Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)), symbolizer_(symbolizer), code_observer_(code_observer), + profiles_(profiles), last_code_event_id_(0), last_processed_code_event_id_(0), isolate_(isolate) { @@ -119,9 +120,8 @@ SamplingEventsProcessor::SamplingEventsProcessor( Isolate* isolate, Symbolizer* symbolizer, ProfilerCodeObserver* code_observer, CpuProfilesCollection* profiles, base::TimeDelta period, bool use_precise_sampling) - : ProfilerEventsProcessor(isolate, symbolizer, code_observer), + : ProfilerEventsProcessor(isolate, symbolizer, code_observer, profiles), sampler_(new CpuSampler(isolate, this)), - profiles_(profiles), period_(period), use_precise_sampling_(use_precise_sampling) { sampler_->Start(); @@ -188,7 
+188,14 @@ void ProfilerEventsProcessor::StopSynchronously() { bool ProfilerEventsProcessor::ProcessCodeEvent() { CodeEventsContainer record; if (events_buffer_.Dequeue(&record)) { - code_observer_->CodeEventHandlerInternal(record); + if (record.generic.type == CodeEventRecord::NATIVE_CONTEXT_MOVE) { + NativeContextMoveEventRecord& nc_record = + record.NativeContextMoveEventRecord_; + profiles_->UpdateNativeContextAddressForCurrentProfiles( + nc_record.from_address, nc_record.to_address); + } else { + code_observer_->CodeEventHandlerInternal(record); + } last_processed_code_event_id_ = record.generic.order; return true; } @@ -202,6 +209,7 @@ void ProfilerEventsProcessor::CodeEventHandler( case CodeEventRecord::CODE_MOVE: case CodeEventRecord::CODE_DISABLE_OPT: case CodeEventRecord::CODE_DELETE: + case CodeEventRecord::NATIVE_CONTEXT_MOVE: Enqueue(evt_rec); break; case CodeEventRecord::CODE_DEOPT: { @@ -224,7 +232,8 @@ void SamplingEventsProcessor::SymbolizeAndAddToProfiles( symbolizer_->SymbolizeTickSample(record->sample); profiles_->AddPathToCurrentProfiles( record->sample.timestamp, symbolized.stack_trace, symbolized.src_line, - record->sample.update_stats, record->sample.sampling_interval); + record->sample.update_stats, record->sample.sampling_interval, + reinterpret_cast
(record->sample.context)); } ProfilerEventsProcessor::SampleProcessingResult diff --git a/src/profiler/cpu-profiler.h b/src/profiler/cpu-profiler.h index d605a8c3d3..ced37e4ade 100644 --- a/src/profiler/cpu-profiler.h +++ b/src/profiler/cpu-profiler.h @@ -37,10 +37,14 @@ class Symbolizer; V(REPORT_BUILTIN, ReportBuiltinEventRecord) \ V(CODE_DELETE, CodeDeleteEventRecord) +#define VM_EVENTS_TYPE_LIST(V) \ + CODE_EVENTS_TYPE_LIST(V) \ + V(NATIVE_CONTEXT_MOVE, NativeContextMoveEventRecord) + class CodeEventRecord { public: #define DECLARE_TYPE(type, ignore) type, - enum Type { NONE = 0, CODE_EVENTS_TYPE_LIST(DECLARE_TYPE) }; + enum Type { NONE = 0, VM_EVENTS_TYPE_LIST(DECLARE_TYPE) }; #undef DECLARE_TYPE Type type; @@ -99,6 +103,13 @@ class ReportBuiltinEventRecord : public CodeEventRecord { V8_INLINE void UpdateCodeMap(CodeMap* code_map); }; +// Signals that a native context's address has changed. +class NativeContextMoveEventRecord : public CodeEventRecord { + public: + Address from_address; + Address to_address; +}; + // A record type for sending samples from the main thread/signal handler to the // profiling thread. class TickSampleEventRecord { @@ -130,7 +141,7 @@ class CodeEventsContainer { union { CodeEventRecord generic; #define DECLARE_CLASS(ignore, type) type type##_; - CODE_EVENTS_TYPE_LIST(DECLARE_CLASS) + VM_EVENTS_TYPE_LIST(DECLARE_CLASS) #undef DECLARE_CLASS }; }; @@ -174,7 +185,8 @@ class V8_EXPORT_PRIVATE ProfilerEventsProcessor : public base::Thread, protected: ProfilerEventsProcessor(Isolate* isolate, Symbolizer* symbolizer, - ProfilerCodeObserver* code_observer); + ProfilerCodeObserver* code_observer, + CpuProfilesCollection* profiles); // Called from events processing thread (Run() method.) 
bool ProcessCodeEvent(); @@ -188,6 +200,7 @@ class V8_EXPORT_PRIVATE ProfilerEventsProcessor : public base::Thread, Symbolizer* symbolizer_; ProfilerCodeObserver* code_observer_; + CpuProfilesCollection* profiles_; std::atomic_bool running_{true}; base::ConditionVariable running_cond_; base::Mutex running_mutex_; @@ -238,7 +251,6 @@ class V8_EXPORT_PRIVATE SamplingEventsProcessor SamplingCircularQueue ticks_buffer_; std::unique_ptr sampler_; - CpuProfilesCollection* profiles_; base::TimeDelta period_; // Samples & code events processing period. const bool use_precise_sampling_; // Whether or not busy-waiting is used for // low sampling intervals on Windows. diff --git a/src/profiler/profile-generator.cc b/src/profiler/profile-generator.cc index 375079de3e..93075d4f7c 100644 --- a/src/profiler/profile-generator.cc +++ b/src/profiler/profile-generator.cc @@ -533,6 +533,12 @@ void ProfileTree::TraverseDepthFirst(Callback* callback) { } } +void ContextFilter::OnMoveEvent(Address from_address, Address to_address) { + if (native_context_address() != from_address) return; + + set_native_context_address(to_address); +} + using v8::tracing::TracedValue; std::atomic CpuProfile::last_id_; @@ -557,6 +563,13 @@ CpuProfile::CpuProfile(CpuProfiler* profiler, const char* title, value->SetDouble("startTime", start_time_.since_origin().InMicroseconds()); TRACE_EVENT_SAMPLE_WITH_ID1(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler"), "Profile", id_, "data", std::move(value)); + + DisallowHeapAllocation no_gc; + if (options_.has_filter_context()) { + i::Address raw_filter_context = + reinterpret_cast(options_.raw_filter_context()); + context_filter_.set_native_context_address(raw_filter_context); + } } bool CpuProfile::CheckSubsample(base::TimeDelta source_sampling_interval) { @@ -706,6 +719,8 @@ void CpuProfile::StreamPendingTraceEvents() { void CpuProfile::FinishProfile() { end_time_ = base::TimeTicks::HighResolutionNow(); + // Stop tracking context movements after profiling stops. 
+ context_filter_.set_native_context_address(kNullAddress); StreamPendingTraceEvents(); auto value = TracedValue::Create(); // The endTime timestamp is not converted to Perfetto's clock domain and will @@ -942,14 +957,26 @@ base::TimeDelta CpuProfilesCollection::GetCommonSamplingInterval() const { void CpuProfilesCollection::AddPathToCurrentProfiles( base::TimeTicks timestamp, const ProfileStackTrace& path, int src_line, - bool update_stats, base::TimeDelta sampling_interval) { + bool update_stats, base::TimeDelta sampling_interval, + Address native_context_address) { // As starting / stopping profiles is rare relatively to this // method, we don't bother minimizing the duration of lock holding, // e.g. copying contents of the list to a local vector. current_profiles_semaphore_.Wait(); for (const std::unique_ptr& profile : current_profiles_) { - profile->AddPath(timestamp, path, src_line, update_stats, - sampling_interval); + if (profile->context_filter().Accept(native_context_address)) { + profile->AddPath(timestamp, path, src_line, update_stats, + sampling_interval); + } + } + current_profiles_semaphore_.Signal(); +} + +void CpuProfilesCollection::UpdateNativeContextAddressForCurrentProfiles( + Address from, Address to) { + current_profiles_semaphore_.Wait(); + for (const std::unique_ptr& profile : current_profiles_) { + profile->context_filter().OnMoveEvent(from, to); } current_profiles_semaphore_.Signal(); } diff --git a/src/profiler/profile-generator.h b/src/profiler/profile-generator.h index 551dfdf591..c4bffa945a 100644 --- a/src/profiler/profile-generator.h +++ b/src/profiler/profile-generator.h @@ -237,6 +237,31 @@ struct CodeEntryAndLineNumber { using ProfileStackTrace = std::vector; +// Filters stack frames from sources other than a target native context. 
+class ContextFilter { + public: + explicit ContextFilter(Address native_context_address = kNullAddress) + : native_context_address_(native_context_address) {} + + // Invoked when a native context has changed address. + void OnMoveEvent(Address from_address, Address to_address); + + bool Accept(Address native_context_address) const { + if (native_context_address_ == kNullAddress) return true; + return (native_context_address & ~kHeapObjectTag) == + native_context_address_; + } + + // Update the context's tracked address based on VM-thread events. + void set_native_context_address(Address address) { + native_context_address_ = address; + } + Address native_context_address() const { return native_context_address_; } + + private: + Address native_context_address_; +}; + class ProfileTree; class V8_EXPORT_PRIVATE ProfileNode { @@ -386,6 +411,7 @@ class CpuProfile { base::TimeTicks start_time() const { return start_time_; } base::TimeTicks end_time() const { return end_time_; } CpuProfiler* cpu_profiler() const { return profiler_; } + ContextFilter& context_filter() { return context_filter_; } void UpdateTicksScale(); @@ -397,6 +423,7 @@ class CpuProfile { const char* title_; const CpuProfilingOptions options_; std::unique_ptr delegate_; + ContextFilter context_filter_; base::TimeTicks start_time_; base::TimeTicks end_time_; std::deque samples_; @@ -486,7 +513,11 @@ class V8_EXPORT_PRIVATE CpuProfilesCollection { void AddPathToCurrentProfiles(base::TimeTicks timestamp, const ProfileStackTrace& path, int src_line, bool update_stats, - base::TimeDelta sampling_interval); + base::TimeDelta sampling_interval, + Address native_context_address = kNullAddress); + + // Called from profile generator thread. + void UpdateNativeContextAddressForCurrentProfiles(Address from, Address to); // Limits the number of profiles that can be simultaneously collected. 
static const int kMaxSimultaneousProfiles = 100; diff --git a/src/profiler/profiler-listener.cc b/src/profiler/profiler-listener.cc index 8b253fb472..a2cfb8b07b 100644 --- a/src/profiler/profiler-listener.cc +++ b/src/profiler/profiler-listener.cc @@ -302,6 +302,13 @@ void ProfilerListener::CodeMoveEvent(AbstractCode from, AbstractCode to) { DispatchCodeEvent(evt_rec); } +void ProfilerListener::NativeContextMoveEvent(Address from, Address to) { + CodeEventsContainer evt_rec(CodeEventRecord::NATIVE_CONTEXT_MOVE); + evt_rec.NativeContextMoveEventRecord_.from_address = from; + evt_rec.NativeContextMoveEventRecord_.to_address = to; + DispatchCodeEvent(evt_rec); +} + void ProfilerListener::CodeDisableOptEvent(Handle code, Handle shared) { CodeEventsContainer evt_rec(CodeEventRecord::CODE_DISABLE_OPT); diff --git a/src/profiler/profiler-listener.h b/src/profiler/profiler-listener.h index 49e7db32ba..50a9b81893 100644 --- a/src/profiler/profiler-listener.h +++ b/src/profiler/profiler-listener.h @@ -59,6 +59,7 @@ class V8_EXPORT_PRIVATE ProfilerListener : public CodeEventListener, Handle source) override; void CodeMoveEvent(AbstractCode from, AbstractCode to) override; void SharedFunctionInfoMoveEvent(Address from, Address to) override {} + void NativeContextMoveEvent(Address from, Address to) override; void CodeMovingGCEvent() override {} void CodeDisableOptEvent(Handle code, Handle shared) override; diff --git a/src/profiler/tick-sample.cc b/src/profiler/tick-sample.cc index dfc5a7e1f4..2dc82fef6c 100644 --- a/src/profiler/tick-sample.cc +++ b/src/profiler/tick-sample.cc @@ -177,6 +177,7 @@ DISABLE_ASAN void TickSample::Init(Isolate* v8_isolate, pc = regs.pc; frames_count = static_cast(info.frames_count); has_external_callback = info.external_callback_entry != nullptr; + context = info.context; if (has_external_callback) { external_callback_entry = info.external_callback_entry; } else if (frames_count) { @@ -209,6 +210,7 @@ bool TickSample::GetStackSample(Isolate* 
v8_isolate, RegisterState* regs, sample_info->frames_count = 0; sample_info->vm_state = isolate->current_vm_state(); sample_info->external_callback_entry = nullptr; + sample_info->context = nullptr; if (sample_info->vm_state == GC) return true; i::Address js_entry_sp = isolate->js_entry_sp(); @@ -278,6 +280,13 @@ bool TickSample::GetStackSample(Isolate* v8_isolate, RegisterState* regs, reinterpret_cast(regs->lr), js_entry_sp); + Context top_context = isolate->context(); + if (top_context.ptr() != i::Context::kNoContext && + top_context.ptr() != i::Context::kInvalidContext) { + NativeContext top_native_context = top_context.native_context(); + sample_info->context = reinterpret_cast(top_native_context.ptr()); + } + if (it.done()) return true; size_t i = 0; diff --git a/src/profiler/tick-sample.h b/src/profiler/tick-sample.h index 777c3d192d..1bfcb7d097 100644 --- a/src/profiler/tick-sample.h +++ b/src/profiler/tick-sample.h @@ -90,6 +90,7 @@ struct V8_EXPORT TickSample { static const unsigned kMaxFramesCountLog2 = 8; static const unsigned kMaxFramesCount = (1 << kMaxFramesCountLog2) - 1; void* stack[kMaxFramesCount]; // Call stack. + void* context = nullptr; // Address of the incumbent native context. unsigned frames_count : kMaxFramesCountLog2; // Number of captured frames. bool has_external_callback : 1; bool update_stats : 1; // Whether the sample should update aggregated stats. 
diff --git a/src/runtime/runtime-test.cc b/src/runtime/runtime-test.cc index 5a3cf9b950..5e6e659f2e 100644 --- a/src/runtime/runtime-test.cc +++ b/src/runtime/runtime-test.cc @@ -1310,6 +1310,7 @@ RUNTIME_FUNCTION(Runtime_EnableCodeLoggingForTesting) { Handle source) final {} void CodeMoveEvent(AbstractCode from, AbstractCode to) final {} void SharedFunctionInfoMoveEvent(Address from, Address to) final {} + void NativeContextMoveEvent(Address from, Address to) final {} void CodeMovingGCEvent() final {} void CodeDisableOptEvent(Handle code, Handle shared) final {} diff --git a/test/cctest/test-cpu-profiler.cc b/test/cctest/test-cpu-profiler.cc index a9fb73ba52..2a94d368f1 100644 --- a/test/cctest/test-cpu-profiler.cc +++ b/test/cctest/test-cpu-profiler.cc @@ -546,7 +546,8 @@ class ProfilerHelper { v8::Local function, v8::Local argv[], int argc, unsigned min_js_samples = 0, unsigned min_external_samples = 0, ProfilingMode mode = ProfilingMode::kLeafNodeLineNumbers, - unsigned max_samples = v8::CpuProfilingOptions::kNoSampleLimit); + unsigned max_samples = v8::CpuProfilingOptions::kNoSampleLimit, + v8::Local context = v8::Local()); v8::CpuProfiler* profiler() { return profiler_; } @@ -559,11 +560,12 @@ v8::CpuProfile* ProfilerHelper::Run(v8::Local function, v8::Local argv[], int argc, unsigned min_js_samples, unsigned min_external_samples, - ProfilingMode mode, unsigned max_samples) { + ProfilingMode mode, unsigned max_samples, + v8::Local context) { v8::Local profile_name = v8_str("my_profile"); profiler_->SetSamplingInterval(50); - profiler_->StartProfiling(profile_name, {mode, max_samples, 0}); + profiler_->StartProfiling(profile_name, {mode, max_samples, 0, context}); v8::internal::CpuProfiler* iprofiler = reinterpret_cast(profiler_); @@ -3803,6 +3805,140 @@ TEST(Bug9151StaleCodeEntries) { CHECK(callback); } +// Tests that functions from other contexts aren't recorded when filtering for +// another context. 
+TEST(ContextIsolation) { + i::FLAG_allow_natives_syntax = true; + LocalContext execution_env; + i::HandleScope scope(CcTest::i_isolate()); + + // Install CollectSample callback for more deterministic sampling. + v8::Local func_template = v8::FunctionTemplate::New( + execution_env.local()->GetIsolate(), CallCollectSample); + v8::Local func = + func_template->GetFunction(execution_env.local()).ToLocalChecked(); + func->SetName(v8_str("CallCollectSample")); + execution_env->Global() + ->Set(execution_env.local(), v8_str("CallCollectSample"), func) + .FromJust(); + + ProfilerHelper helper(execution_env.local()); + CompileRun(R"( + function optimized() { + CallCollectSample(); + } + + function unoptimized() { + CallCollectSample(); + } + + function start() { + // Test optimized functions + %PrepareFunctionForOptimization(optimized); + optimized(); + optimized(); + %OptimizeFunctionOnNextCall(optimized); + optimized(); + + // Test unoptimized functions + %NeverOptimizeFunction(unoptimized); + unoptimized(); + + // Test callback + CallCollectSample(); + } + )"); + v8::Local function = + GetFunction(execution_env.local(), "start"); + + v8::CpuProfile* same_context_profile = helper.Run( + function, nullptr, 0, 0, 0, v8::CpuProfilingMode::kLeafNodeLineNumbers, + v8::CpuProfilingOptions::kNoSampleLimit, execution_env.local()); + const v8::CpuProfileNode* root = same_context_profile->GetTopDownRoot(); + const v8::CpuProfileNode* start_node = FindChild(root, "start"); + CHECK(start_node); + const v8::CpuProfileNode* optimized_node = FindChild(start_node, "optimized"); + CHECK(optimized_node); + const v8::CpuProfileNode* unoptimized_node = + FindChild(start_node, "unoptimized"); + CHECK(unoptimized_node); + const v8::CpuProfileNode* callback_node = + FindChild(start_node, "CallCollectSample"); + CHECK(callback_node); + + { + LocalContext filter_env; + v8::CpuProfile* diff_context_profile = helper.Run( + function, nullptr, 0, 0, 0, v8::CpuProfilingMode::kLeafNodeLineNumbers, + 
v8::CpuProfilingOptions::kNoSampleLimit, filter_env.local()); + const v8::CpuProfileNode* diff_root = + diff_context_profile->GetTopDownRoot(); + // Ensure that no children were recorded (including callbacks, builtins). + CHECK(!FindChild(diff_root, "start")); + } +} + +// Tests that when a native context that's being filtered is moved, we continue +// to track its execution. +TEST(ContextFilterMovedNativeContext) { + i::FLAG_allow_natives_syntax = true; + i::FLAG_manual_evacuation_candidates_selection = true; + LocalContext env; + i::HandleScope scope(CcTest::i_isolate()); + + { + // Install CollectSample callback for more deterministic sampling. + v8::Local sample_func_template = + v8::FunctionTemplate::New(env.local()->GetIsolate(), CallCollectSample); + v8::Local sample_func = + sample_func_template->GetFunction(env.local()).ToLocalChecked(); + sample_func->SetName(v8_str("CallCollectSample")); + env->Global() + ->Set(env.local(), v8_str("CallCollectSample"), sample_func) + .FromJust(); + + // Install a function that triggers the native context to be moved. 
+ v8::Local move_func_template = + v8::FunctionTemplate::New( + env.local()->GetIsolate(), + [](const v8::FunctionCallbackInfo& info) { + i::Isolate* isolate = + reinterpret_cast(info.GetIsolate()); + i::heap::ForceEvacuationCandidate( + i::Page::FromHeapObject(isolate->raw_native_context())); + CcTest::CollectAllGarbage(); + }); + v8::Local move_func = + move_func_template->GetFunction(env.local()).ToLocalChecked(); + move_func->SetName(v8_str("ForceNativeContextMove")); + env->Global() + ->Set(env.local(), v8_str("ForceNativeContextMove"), move_func) + .FromJust(); + + ProfilerHelper helper(env.local()); + CompileRun(R"( + function start() { + ForceNativeContextMove(); + CallCollectSample(); + } + )"); + v8::Local function = GetFunction(env.local(), "start"); + + v8::CpuProfile* profile = helper.Run( + function, nullptr, 0, 0, 0, v8::CpuProfilingMode::kLeafNodeLineNumbers, + v8::CpuProfilingOptions::kNoSampleLimit, env.local()); + const v8::CpuProfileNode* root = profile->GetTopDownRoot(); + const v8::CpuProfileNode* start_node = FindChild(root, "start"); + CHECK(start_node); + + // Verify that after moving the native context, CallCollectSample is still + // recorded. + const v8::CpuProfileNode* callback_node = + FindChild(start_node, "CallCollectSample"); + CHECK(callback_node); + } +} + enum class EntryCountMode { kAll, kOnlyInlined }; // Count the number of unique source positions.