// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Tests of the CPU profiler and utilities.

#include <limits>
#include <memory>
#include "include/libplatform/v8-tracing.h"
|
2020-11-18 11:19:56 +00:00
|
|
|
#include "include/v8-fast-api-calls.h"
|
2021-08-23 13:01:06 +00:00
|
|
|
#include "include/v8-function.h"
|
|
|
|
#include "include/v8-locker.h"
|
2014-06-03 08:12:43 +00:00
|
|
|
#include "include/v8-profiler.h"
|
2019-05-17 12:13:44 +00:00
|
|
|
#include "src/api/api-inl.h"
|
2014-06-30 13:25:46 +00:00
|
|
|
#include "src/base/platform/platform.h"
|
2021-06-22 13:27:00 +00:00
|
|
|
#include "src/base/strings.h"
|
2021-03-25 13:23:20 +00:00
|
|
|
#include "src/codegen/compilation-cache.h"
|
2019-05-21 09:30:15 +00:00
|
|
|
#include "src/codegen/source-position-table.h"
|
2022-07-18 10:54:58 +00:00
|
|
|
#include "src/deoptimizer/deoptimize-reason.h"
|
2021-12-07 07:28:08 +00:00
|
|
|
#include "src/execution/embedder-state.h"
|
2019-08-19 22:58:21 +00:00
|
|
|
#include "src/heap/spaces.h"
|
[runtime] Move string table off-heap
Changes the isolate's string table into an off-heap structure. This
allows the string table to be resized without allocating on the V8 heap,
and potentially triggering a GC. This allows existing strings to be
inserted into the string table without requiring allocation.
This has two important benefits:
1) It allows the deserializer to insert strings directly into the
string table, rather than having to defer string insertion until
deserialization completes.
2) It simplifies the concurrent string table lookup to allow resizing
the table inside the write lock, therefore eliminating the race
where two concurrent lookups could both resize the table.
The off-heap string table has the following properties:
1) The general hashmap behaviour matches the HashTable, i.e. open
addressing, power-of-two sized, quadratic probing. This could, of
course, now be changed.
2) The empty and deleted sentinels are changed to Smi 0 and 1,
respectively, to make those comparisons a bit cheaper and not
require roots access.
3) When the HashTable is resized, the old elements array is kept
alive in a linked list of previous arrays, so that concurrent
lookups don't lose the data they're accessing. This linked list
is cleared by the GC, as then we know that all threads are in
a safepoint.
4) The GC treats the hash table entries as weak roots, and only walks
them for non-live reference clearing and for evacuation.
5) Since there is no longer a FixedArray to serialize for the startup
snapshot, there is now a custom serialization of the string table,
and the string table root is considered unserializable during weak
root iteration. As a bonus, the custom serialization is more
efficient, as it skips non-string entries.
As a drive-by, rename LookupStringExists_NoAllocate to
TryStringToIndexOrLookupExisting, to make it clearer that it returns
a non-string for the case when the string is an array index. As another
drive-by, extract StringSet into a separate header.
Bug: v8:10729
Change-Id: I9c990fb2d74d1fe222920408670974a70e969bca
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2339104
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#69270}
2020-08-06 10:59:55 +00:00
|
|
|
#include "src/init/v8.h"
|
2020-04-25 19:00:16 +00:00
|
|
|
#include "src/libsampler/sampler.h"
|
2019-05-20 09:15:06 +00:00
|
|
|
#include "src/logging/log.h"
|
2019-05-23 08:51:46 +00:00
|
|
|
#include "src/objects/objects-inl.h"
|
2022-07-18 10:54:58 +00:00
|
|
|
#include "src/profiler/cpu-profiler.h"
|
2016-06-22 16:43:46 +00:00
|
|
|
#include "src/profiler/profiler-listener.h"
|
2020-10-01 07:07:48 +00:00
|
|
|
#include "src/profiler/symbolizer.h"
|
2019-05-23 13:27:57 +00:00
|
|
|
#include "src/utils/utils.h"
|
2014-06-03 08:12:43 +00:00
|
|
|
#include "test/cctest/cctest.h"
|
2019-08-19 22:58:21 +00:00
|
|
|
#include "test/cctest/heap/heap-utils.h"
|
2014-06-03 08:12:43 +00:00
|
|
|
#include "test/cctest/profiler-extension.h"
|
2020-11-18 11:19:56 +00:00
|
|
|
#include "test/common/flag-utils.h"
|
2016-06-15 09:59:00 +00:00
|
|
|
|
2019-08-05 14:57:31 +00:00
|
|
|
#ifdef V8_USE_PERFETTO
|
2019-09-06 13:12:37 +00:00
|
|
|
#include "protos/perfetto/trace/trace.pb.h"
|
2019-08-05 14:57:31 +00:00
|
|
|
#endif
|
|
|
|
|
2017-09-30 13:19:52 +00:00
|
|
|
namespace v8 {
namespace internal {
namespace test_cpu_profiler {

// Helper methods
static v8::Local<v8::Function> GetFunction(v8::Local<v8::Context> env,
                                           const char* name) {
  return v8::Local<v8::Function>::Cast(
      env->Global()->Get(env, v8_str(name)).ToLocalChecked());
}

static size_t offset(const char* src, const char* substring) {
  const char* it = strstr(src, substring);
  CHECK(it);
  return static_cast<size_t>(it - src);
}

template <typename A, typename B>
static int dist(A a, B b) {
  return abs(static_cast<int>(a) - static_cast<int>(b));
}

static const char* reason(const i::DeoptimizeReason reason) {
  return i::DeoptimizeReasonToString(reason);
}

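// Checks that the profiler event processor thread can be started and
// stopped cleanly when the sampling pipeline is wired up by hand.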
TEST(StartStop) {
  i::Isolate* isolate = CcTest::i_isolate();
  CodeEntryStorage storage;
  CpuProfilesCollection profiles(isolate);
  ProfilerCodeObserver code_observer(isolate, storage);
  Symbolizer symbolizer(code_observer.code_map());
  std::unique_ptr<ProfilerEventsProcessor> processor(
      new SamplingEventsProcessor(
          isolate, &symbolizer, &code_observer, &profiles,
          v8::base::TimeDelta::FromMicroseconds(100), true));
  CHECK(processor->Start());
  processor->StopSynchronously();
}

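// Enqueues a synthetic tick sample with up to three stack frames, as if the
// sampler had interrupted code running at |frame1|.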
static void EnqueueTickSampleEvent(ProfilerEventsProcessor* proc,
                                   i::Address frame1,
                                   i::Address frame2 = kNullAddress,
                                   i::Address frame3 = kNullAddress) {
  v8::internal::TickSample sample;
  sample.pc = reinterpret_cast<void*>(frame1);
  sample.tos = reinterpret_cast<void*>(frame1);
  sample.frames_count = 0;
  if (frame2 != kNullAddress) {
    sample.stack[0] = reinterpret_cast<void*>(frame2);
    sample.frames_count = 1;
  }
  if (frame3 != kNullAddress) {
    sample.stack[1] = reinterpret_cast<void*>(frame3);
    sample.frames_count = 2;
  }
  sample.timestamp = base::TimeTicks::Now();
  proc->AddSample(sample);
}

namespace {

class TestSetup {
 public:
  TestSetup() : old_flag_prof_browser_mode_(i::FLAG_prof_browser_mode) {
    i::FLAG_prof_browser_mode = false;
  }

  ~TestSetup() { i::FLAG_prof_browser_mode = old_flag_prof_browser_mode_; }

 private:
  bool old_flag_prof_browser_mode_;
};

}  // namespace

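// Compiles and runs a generated function ("function_1", "function_2", ...)
// and returns its AbstractCode, for use as a code object in tests.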
i::AbstractCode CreateCode(i::Isolate* isolate, LocalContext* env) {
  static int counter = 0;
  base::EmbeddedVector<char, 256> script;
  base::EmbeddedVector<char, 32> name;

  base::SNPrintF(name, "function_%d", ++counter);
  const char* name_start = name.begin();
  base::SNPrintF(script,
                 "function %s() {\n"
                 "var counter = 0;\n"
                 "for (var i = 0; i < %d; ++i) counter += i;\n"
                 "return '%s_' + counter;\n"
                 "}\n"
                 "%s();\n",
                 name_start, counter, name_start, name_start);
  CompileRun(script.begin());

  i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(
      v8::Utils::OpenHandle(*GetFunction(env->local(), name_start)));
  return fun->abstract_code(isolate);
}

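// Checks that code creation and code move events are reflected in the
// symbolizer's code map under the expected entry names.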
TEST(CodeEvents) {
  CcTest::InitializeVM();
  LocalContext env;
  i::Isolate* isolate = CcTest::i_isolate();
  i::Factory* factory = isolate->factory();
  TestSetup test_setup;

  i::HandleScope scope(isolate);

  i::Handle<i::AbstractCode> aaa_code(CreateCode(isolate, &env), isolate);
  i::Handle<i::AbstractCode> comment_code(CreateCode(isolate, &env), isolate);
  i::Handle<i::AbstractCode> comment2_code(CreateCode(isolate, &env), isolate);
  i::Handle<i::AbstractCode> moved_code(CreateCode(isolate, &env), isolate);

  CodeEntryStorage storage;
  CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
  ProfilerCodeObserver code_observer(isolate, storage);
  Symbolizer* symbolizer = new Symbolizer(code_observer.code_map());
  ProfilerEventsProcessor* processor = new SamplingEventsProcessor(
      isolate, symbolizer, &code_observer, profiles,
      v8::base::TimeDelta::FromMicroseconds(100), true);
  CHECK(processor->Start());
  ProfilerListener profiler_listener(isolate, processor,
                                     *code_observer.code_entries(),
                                     *code_observer.weak_code_registry());
  isolate->v8_file_logger()->AddLogEventListener(&profiler_listener);

  // Enqueue code creation events.
  const char* aaa_str = "aaa";
  i::Handle<i::String> aaa_name = factory->NewStringFromAsciiChecked(aaa_str);
  profiler_listener.CodeCreateEvent(i::LogEventListener::CodeTag::kFunction,
                                    aaa_code, aaa_name);
  profiler_listener.CodeCreateEvent(i::LogEventListener::CodeTag::kBuiltin,
                                    comment_code, "comment");
  profiler_listener.CodeCreateEvent(i::LogEventListener::CodeTag::kBuiltin,
                                    comment2_code, "comment2");
  profiler_listener.CodeMoveEvent(*comment2_code, *moved_code);

  // Enqueue a tick event to enable code events processing.
  EnqueueTickSampleEvent(processor, aaa_code->InstructionStart());

  isolate->v8_file_logger()->RemoveLogEventListener(&profiler_listener);
  processor->StopSynchronously();

  // Check the state of the symbolizer.
  CodeEntry* aaa =
      symbolizer->code_map()->FindEntry(aaa_code->InstructionStart());
  CHECK(aaa);
  CHECK_EQ(0, strcmp(aaa_str, aaa->name()));

  CodeEntry* comment =
      symbolizer->code_map()->FindEntry(comment_code->InstructionStart());
  CHECK(comment);
  CHECK_EQ(0, strcmp("comment", comment->name()));

  CHECK(!symbolizer->code_map()->FindEntry(comment2_code->InstructionStart()));

  CodeEntry* comment2 =
      symbolizer->code_map()->FindEntry(moved_code->InstructionStart());
  CHECK(comment2);
  CHECK_EQ(0, strcmp("comment2", comment2->name()));
}

template <typename T>
static int CompareProfileNodes(const T* p1, const T* p2) {
  return strcmp((*p1)->entry()->name(), (*p2)->entry()->name());
}

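// Feeds three synthetic ticks through the processor and verifies the shape
// of the resulting top-down call tree (bbb -> ccc -> ddd).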
TEST(TickEvents) {
  TestSetup test_setup;
  LocalContext env;
  i::Isolate* isolate = CcTest::i_isolate();
  i::HandleScope scope(isolate);

  i::Handle<i::AbstractCode> frame1_code(CreateCode(isolate, &env), isolate);
  i::Handle<i::AbstractCode> frame2_code(CreateCode(isolate, &env), isolate);
  i::Handle<i::AbstractCode> frame3_code(CreateCode(isolate, &env), isolate);

  CodeEntryStorage storage;
  CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
  ProfilerCodeObserver* code_observer =
      new ProfilerCodeObserver(isolate, storage);
  Symbolizer* symbolizer = new Symbolizer(code_observer->code_map());
  ProfilerEventsProcessor* processor = new SamplingEventsProcessor(
      CcTest::i_isolate(), symbolizer, code_observer, profiles,
      v8::base::TimeDelta::FromMicroseconds(100), true);
  CpuProfiler profiler(isolate, kDebugNaming, kLazyLogging, profiles,
                       symbolizer, processor, code_observer);
  ProfilerId id = profiles->StartProfiling().id;
  CHECK(processor->Start());
  ProfilerListener profiler_listener(isolate, processor,
                                     *code_observer->code_entries(),
                                     *code_observer->weak_code_registry());
  isolate->v8_file_logger()->AddLogEventListener(&profiler_listener);

  profiler_listener.CodeCreateEvent(i::LogEventListener::CodeTag::kBuiltin,
                                    frame1_code, "bbb");
  profiler_listener.CodeCreateEvent(i::LogEventListener::CodeTag::kStub,
                                    frame2_code, "ccc");
  profiler_listener.CodeCreateEvent(i::LogEventListener::CodeTag::kBuiltin,
                                    frame3_code, "ddd");

  EnqueueTickSampleEvent(processor, frame1_code->raw_instruction_start());
  EnqueueTickSampleEvent(processor,
                         frame2_code->raw_instruction_start() +
                             frame2_code->raw_instruction_size() / 2,
                         frame1_code->raw_instruction_start() +
                             frame1_code->raw_instruction_size() / 2);
  EnqueueTickSampleEvent(processor, frame3_code->raw_instruction_end() - 1,
                         frame2_code->raw_instruction_end() - 1,
                         frame1_code->raw_instruction_end() - 1);

  isolate->v8_file_logger()->RemoveLogEventListener(&profiler_listener);
  processor->StopSynchronously();
  CpuProfile* profile = profiles->StopProfiling(id);
  CHECK(profile);

  // Check call trees.
  const std::vector<ProfileNode*>* top_down_root_children =
      profile->top_down()->root()->children();
  CHECK_EQ(1, top_down_root_children->size());
  CHECK_EQ(0, strcmp("bbb", top_down_root_children->back()->entry()->name()));
  const std::vector<ProfileNode*>* top_down_bbb_children =
      top_down_root_children->back()->children();
  CHECK_EQ(1, top_down_bbb_children->size());
  CHECK_EQ(0, strcmp("ccc", top_down_bbb_children->back()->entry()->name()));
  const std::vector<ProfileNode*>* top_down_stub_children =
      top_down_bbb_children->back()->children();
  CHECK_EQ(1, top_down_stub_children->size());
  CHECK_EQ(0, strcmp("ddd", top_down_stub_children->back()->entry()->name()));
  const std::vector<ProfileNode*>* top_down_ddd_children =
      top_down_stub_children->back()->children();
  CHECK(top_down_ddd_children->empty());
}

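// With lazy logging, the code map is expected to be emptied once profiling
// stops, and code created between profiles should not be logged.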
TEST(CodeMapClearedBetweenProfilesWithLazyLogging) {
  TestSetup test_setup;
  LocalContext env;
  i::Isolate* isolate = CcTest::i_isolate();
  i::HandleScope scope(isolate);

  // This gets logged when the profiler starts up and scans the heap.
  i::Handle<i::AbstractCode> code1(CreateCode(isolate, &env), isolate);

  CpuProfiler profiler(isolate, kDebugNaming, kLazyLogging);
  profiler.StartProfiling("");

  CpuProfile* profile = profiler.StopProfiling("");
  CHECK(profile);

  // Check that the code map is empty.
  CodeMap* code_map = profiler.code_map_for_test();
  CHECK_EQ(code_map->size(), 0);

  profiler.DeleteProfile(profile);

  // Create code between profiles. This should not be logged yet.
  i::Handle<i::AbstractCode> code2(CreateCode(isolate, &env), isolate);

  CHECK(!code_map->FindEntry(code2->InstructionStart()));
}

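// With eager logging, code map entries are expected to survive profile
// deletion so later profiles can still symbolize them.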
TEST(CodeMapNotClearedBetweenProfilesWithEagerLogging) {
  TestSetup test_setup;
  LocalContext env;
  i::Isolate* isolate = CcTest::i_isolate();
  i::HandleScope scope(isolate);

  // This gets logged when the profiler starts up and scans the heap.
  i::Handle<i::AbstractCode> code1(CreateCode(isolate, &env), isolate);

  CpuProfiler profiler(isolate, kDebugNaming, kEagerLogging);
  profiler.StartProfiling("");

  CpuProfile* profile = profiler.StopProfiling("");
  CHECK(profile);

  // Check that our code is still in the code map.
  CodeMap* code_map = profiler.code_map_for_test();
  CodeEntry* code1_entry = code_map->FindEntry(code1->InstructionStart());
  CHECK(code1_entry);
  CHECK_EQ(0, strcmp("function_1", code1_entry->name()));

  profiler.DeleteProfile(profile);

  // We should still have an entry in kEagerLogging mode.
  code1_entry = code_map->FindEntry(code1->InstructionStart());
  CHECK(code1_entry);
  CHECK_EQ(0, strcmp("function_1", code1_entry->name()));

  // Create code between profiles. This should be logged too.
  i::Handle<i::AbstractCode> code2(CreateCode(isolate, &env), isolate);
  CHECK(code_map->FindEntry(code2->InstructionStart()));

  profiler.StartProfiling("");
  CpuProfile* profile2 = profiler.StopProfiling("");
  CHECK(profile2);

  // Check that we still have code map entries for both code objects.
  code1_entry = code_map->FindEntry(code1->InstructionStart());
  CHECK(code1_entry);
  CHECK_EQ(0, strcmp("function_1", code1_entry->name()));
  CodeEntry* code2_entry = code_map->FindEntry(code2->InstructionStart());
  CHECK(code2_entry);
  CHECK_EQ(0, strcmp("function_2", code2_entry->name()));

  profiler.DeleteProfile(profile2);

  // Check that we still have code map entries for both code objects, even
  // after the last profile is deleted.
  code1_entry = code_map->FindEntry(code1->InstructionStart());
  CHECK(code1_entry);
  CHECK_EQ(0, strcmp("function_1", code1_entry->name()));
  code2_entry = code_map->FindEntry(code2->InstructionStart());
  CHECK(code2_entry);
  CHECK_EQ(0, strcmp("function_2", code2_entry->name()));
}

// http://crbug/51594
// This test must not crash.
TEST(CrashIfStoppingLastNonExistentProfile) {
  CcTest::InitializeVM();
  TestSetup test_setup;
  std::unique_ptr<CpuProfiler> profiler(new CpuProfiler(CcTest::i_isolate()));
  profiler->StartProfiling("1");
  profiler->StopProfiling("2");
  profiler->StartProfiling("1");
  profiler->StopProfiling("");
}

// http://code.google.com/p/v8/issues/detail?id=1398
// Long stacks (exceeding max frames limit) must not be erased.
TEST(Issue1398) {
  TestSetup test_setup;
  LocalContext env;
  i::Isolate* isolate = CcTest::i_isolate();
  i::HandleScope scope(isolate);

  i::Handle<i::AbstractCode> code(CreateCode(isolate, &env), isolate);

  CodeEntryStorage storage;
  CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
  ProfilerCodeObserver* code_observer =
      new ProfilerCodeObserver(isolate, storage);
  Symbolizer* symbolizer = new Symbolizer(code_observer->code_map());
  ProfilerEventsProcessor* processor = new SamplingEventsProcessor(
      CcTest::i_isolate(), symbolizer, code_observer, profiles,
      v8::base::TimeDelta::FromMicroseconds(100), true);
  CpuProfiler profiler(isolate, kDebugNaming, kLazyLogging, profiles,
                       symbolizer, processor, code_observer);
  ProfilerId id = profiles->StartProfiling("").id;
  CHECK(processor->Start());
  ProfilerListener profiler_listener(isolate, processor,
                                     *code_observer->code_entries(),
                                     *code_observer->weak_code_registry());

  profiler_listener.CodeCreateEvent(i::LogEventListener::CodeTag::kBuiltin,
                                    code, "bbb");

  v8::internal::TickSample sample;
  sample.pc = reinterpret_cast<void*>(code->InstructionStart());
  sample.tos = nullptr;
  sample.frames_count = TickSample::kMaxFramesCount;
  for (unsigned i = 0; i < sample.frames_count; ++i) {
    sample.stack[i] = reinterpret_cast<void*>(code->InstructionStart());
  }
  sample.timestamp = base::TimeTicks::Now();
  processor->AddSample(sample);

  processor->StopSynchronously();
  CpuProfile* profile = profiles->StopProfiling(id);
  CHECK(profile);

  unsigned actual_depth = 0;
  const ProfileNode* node = profile->top_down()->root();
  while (!node->children()->empty()) {
    node = node->children()->back();
    ++actual_depth;
  }

  CHECK_EQ(1 + TickSample::kMaxFramesCount, actual_depth);  // +1 for PC.
}

TEST(DeleteAllCpuProfiles) {
  CcTest::InitializeVM();
  TestSetup test_setup;
  std::unique_ptr<CpuProfiler> profiler(new CpuProfiler(CcTest::i_isolate()));
  CHECK_EQ(0, profiler->GetProfilesCount());
  profiler->DeleteAllProfiles();
  CHECK_EQ(0, profiler->GetProfilesCount());

  profiler->StartProfiling("1");
  profiler->StopProfiling("1");
  CHECK_EQ(1, profiler->GetProfilesCount());
  profiler->DeleteAllProfiles();
  CHECK_EQ(0, profiler->GetProfilesCount());
  profiler->StartProfiling("1");
  profiler->StartProfiling("2");
  profiler->StopProfiling("2");
  profiler->StopProfiling("1");
  CHECK_EQ(2, profiler->GetProfilesCount());
  profiler->DeleteAllProfiles();
  CHECK_EQ(0, profiler->GetProfilesCount());

  // Test profiling cancellation by the 'delete' command.
  profiler->StartProfiling("1");
  profiler->StartProfiling("2");
  CHECK_EQ(0, profiler->GetProfilesCount());
  profiler->DeleteAllProfiles();
  CHECK_EQ(0, profiler->GetProfilesCount());
}

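// Returns true if |v8profile| is present in the profiler's current list of
// profiles.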
static bool FindCpuProfile(v8::CpuProfiler* v8profiler,
                           const v8::CpuProfile* v8profile) {
  i::CpuProfiler* profiler = reinterpret_cast<i::CpuProfiler*>(v8profiler);
  const i::CpuProfile* profile =
      reinterpret_cast<const i::CpuProfile*>(v8profile);
  int length = profiler->GetProfilesCount();
  for (int i = 0; i < length; i++) {
    if (profile == profiler->GetProfile(i)) return true;
  }
  return false;
}

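// Checks that individually deleted profiles are removed from the profiler's
// list while the remaining profiles stay intact.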
TEST(DeleteCpuProfile) {
  LocalContext env;
  v8::HandleScope scope(env->GetIsolate());
  v8::CpuProfiler* cpu_profiler = v8::CpuProfiler::New(env->GetIsolate());
  i::CpuProfiler* iprofiler = reinterpret_cast<i::CpuProfiler*>(cpu_profiler);

  CHECK_EQ(0, iprofiler->GetProfilesCount());
  v8::Local<v8::String> name1 = v8_str("1");
  cpu_profiler->StartProfiling(name1);
  v8::CpuProfile* p1 = cpu_profiler->StopProfiling(name1);
  CHECK(p1);
  CHECK_EQ(1, iprofiler->GetProfilesCount());
  CHECK(FindCpuProfile(cpu_profiler, p1));
  p1->Delete();
  CHECK_EQ(0, iprofiler->GetProfilesCount());

  v8::Local<v8::String> name2 = v8_str("2");
  cpu_profiler->StartProfiling(name2);
  v8::CpuProfile* p2 = cpu_profiler->StopProfiling(name2);
  CHECK(p2);
  CHECK_EQ(1, iprofiler->GetProfilesCount());
  CHECK(FindCpuProfile(cpu_profiler, p2));
  v8::Local<v8::String> name3 = v8_str("3");
  cpu_profiler->StartProfiling(name3);
  v8::CpuProfile* p3 = cpu_profiler->StopProfiling(name3);
  CHECK(p3);
  CHECK_EQ(2, iprofiler->GetProfilesCount());
  CHECK_NE(p2, p3);
  CHECK(FindCpuProfile(cpu_profiler, p3));
  CHECK(FindCpuProfile(cpu_profiler, p2));
  p2->Delete();
  CHECK_EQ(1, iprofiler->GetProfilesCount());
  CHECK(!FindCpuProfile(cpu_profiler, p2));
  CHECK(FindCpuProfile(cpu_profiler, p3));
  p3->Delete();
  CHECK_EQ(0, iprofiler->GetProfilesCount());
  cpu_profiler->Dispose();
}

TEST(ProfileStartEndTime) {
  LocalContext env;
  v8::HandleScope scope(env->GetIsolate());
  v8::CpuProfiler* cpu_profiler = v8::CpuProfiler::New(env->GetIsolate());

  v8::Local<v8::String> profile_name = v8_str("test");
  cpu_profiler->StartProfiling(profile_name);
  const v8::CpuProfile* profile = cpu_profiler->StopProfiling(profile_name);
  CHECK(profile->GetStartTime() <= profile->GetEndTime());
  cpu_profiler->Dispose();
}

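// RAII helper that owns a v8::CpuProfiler and runs a function under it until
// the sampler has collected a requested number of samples.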
class ProfilerHelper {
 public:
  explicit ProfilerHelper(
      const v8::Local<v8::Context>& context,
      v8::CpuProfilingLoggingMode logging_mode = kLazyLogging)
      : context_(context),
        profiler_(v8::CpuProfiler::New(context->GetIsolate(), kDebugNaming,
                                       logging_mode)) {
    i::ProfilerExtension::set_profiler(profiler_);
  }
  ~ProfilerHelper() {
    i::ProfilerExtension::set_profiler(static_cast<CpuProfiler*>(nullptr));
    profiler_->Dispose();
  }

  using ProfilingMode = v8::CpuProfilingMode;

  v8::CpuProfile* Run(
      v8::Local<v8::Function> function, v8::Local<v8::Value> argv[], int argc,
      unsigned min_js_samples = 0, unsigned min_external_samples = 0,
      ProfilingMode mode = ProfilingMode::kLeafNodeLineNumbers,
      unsigned max_samples = v8::CpuProfilingOptions::kNoSampleLimit,
      v8::Local<v8::Context> context = v8::Local<v8::Context>());

  v8::CpuProfiler* profiler() { return profiler_; }

 private:
  v8::Local<v8::Context> context_;
  v8::CpuProfiler* profiler_;
};

v8::CpuProfile* ProfilerHelper::Run(v8::Local<v8::Function> function,
                                    v8::Local<v8::Value> argv[], int argc,
                                    unsigned min_js_samples,
                                    unsigned min_external_samples,
                                    ProfilingMode mode, unsigned max_samples,
                                    v8::Local<v8::Context> context) {
  v8::Local<v8::String> profile_name = v8_str("my_profile");

  profiler_->SetSamplingInterval(20);
  profiler_->StartProfiling(profile_name, {mode, max_samples, 0, context});

  v8::internal::CpuProfiler* iprofiler =
      reinterpret_cast<v8::internal::CpuProfiler*>(profiler_);
  v8::sampler::Sampler* sampler =
      reinterpret_cast<i::SamplingEventsProcessor*>(iprofiler->processor())
          ->sampler();
  sampler->StartCountingSamples();

  do {
    function->Call(context_, context_->Global(), argc, argv).ToLocalChecked();
  } while (sampler->js_sample_count() < min_js_samples ||
           sampler->external_sample_count() < min_external_samples);

  v8::CpuProfile* profile = profiler_->StopProfiling(profile_name);

  CHECK(profile);
  // Dump collected profile to have a better diagnostic in case of failure.
  reinterpret_cast<i::CpuProfile*>(profile)->Print();

  return profile;
}

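// Recursively sums the hit counts of |node| and all of its descendants.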
static unsigned TotalHitCount(const v8::CpuProfileNode* node) {
  unsigned hit_count = node->GetHitCount();
  for (int i = 0, count = node->GetChildrenCount(); i < count; ++i)
    hit_count += TotalHitCount(node->GetChild(i));
  return hit_count;
}

static unsigned TotalHitCount(const v8::CpuProfileNode* node,
                              const std::string& name) {
  if (name.compare(node->GetFunctionNameStr()) == 0) return TotalHitCount(node);
  unsigned hit_count = 0;
  for (int i = 0, count = node->GetChildrenCount(); i < count; ++i)
    hit_count += TotalHitCount(node->GetChild(i), name);
  return hit_count;
}

static const v8::CpuProfileNode* FindChild(v8::Local<v8::Context> context,
                                           const v8::CpuProfileNode* node,
                                           const char* name) {
  int count = node->GetChildrenCount();
  v8::Local<v8::String> name_handle = v8_str(name);
  for (int i = 0; i < count; i++) {
    const v8::CpuProfileNode* child = node->GetChild(i);
    if (name_handle->Equals(context, child->GetFunctionName()).FromJust()) {
      return child;
    }
  }
  return nullptr;
}

static const v8::CpuProfileNode* FindChild(const v8::CpuProfileNode* node,
                                           const char* name) {
  for (int i = 0, count = node->GetChildrenCount(); i < count; ++i) {
    const v8::CpuProfileNode* child = node->GetChild(i);
    if (strcmp(child->GetFunctionNameStr(), name) == 0) {
      return child;
    }
  }
  return nullptr;
}

static const v8::CpuProfileNode* GetChild(v8::Local<v8::Context> context,
                                          const v8::CpuProfileNode* node,
                                          const char* name) {
  const v8::CpuProfileNode* result = FindChild(context, node, name);
  if (!result) FATAL("Failed to GetChild: %s", name);
  return result;
}

static void CheckSimpleBranch(v8::Local<v8::Context> context,
                              const v8::CpuProfileNode* node,
                              const char* names[], int length) {
  for (int i = 0; i < length; i++) {
    const char* name = names[i];
    node = GetChild(context, node, name);
  }
}

static const ProfileNode* GetSimpleBranch(v8::Local<v8::Context> context,
                                          v8::CpuProfile* profile,
                                          const char* names[], int length) {
  const v8::CpuProfileNode* node = profile->GetTopDownRoot();
  for (int i = 0; i < length; i++) {
    node = GetChild(context, node, names[i]);
  }
  return reinterpret_cast<const ProfileNode*>(node);
}

struct NameLinePair {
  const char* name;
  int line_number;
};

static const v8::CpuProfileNode* FindChild(const v8::CpuProfileNode* node,
                                           NameLinePair pair) {
  for (int i = 0, count = node->GetChildrenCount(); i < count; ++i) {
    const v8::CpuProfileNode* child = node->GetChild(i);
    // The name and line number must match, or if the requested line number was
    // -1, then match any function of the same name.
    if (strcmp(child->GetFunctionNameStr(), pair.name) == 0 &&
        (child->GetLineNumber() == pair.line_number ||
         pair.line_number == -1)) {
      return child;
    }
  }
  return nullptr;
}

static const v8::CpuProfileNode* GetChild(const v8::CpuProfileNode* node,
                                          NameLinePair pair) {
  const v8::CpuProfileNode* result = FindChild(node, pair);
  if (!result) FATAL("Failed to GetChild: %s:%d", pair.name, pair.line_number);
  return result;
}

static void CheckBranch(const v8::CpuProfileNode* node, NameLinePair path[],
                        int length) {
  for (int i = 0; i < length; i++) {
    NameLinePair pair = path[i];
    node = GetChild(node, pair);
  }
}

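// JS test program: start() repeatedly calls foo(), which fans out to bar(),
// baz(), and delay(); every function is kept unoptimized so the sampled
// stacks stay predictable.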
static const char* cpu_profiler_test_source =
    "%NeverOptimizeFunction(loop);\n"
    "%NeverOptimizeFunction(delay);\n"
    "%NeverOptimizeFunction(bar);\n"
    "%NeverOptimizeFunction(baz);\n"
    "%NeverOptimizeFunction(foo);\n"
    "%NeverOptimizeFunction(start);\n"
    "function loop(timeout) {\n"
    "  this.mmm = 0;\n"
    "  var start = Date.now();\n"
    "  do {\n"
    "    var n = 1000;\n"
    "    while(n > 1) {\n"
    "      n--;\n"
    "      this.mmm += n * n * n;\n"
    "    }\n"
    "  } while (Date.now() - start < timeout);\n"
    "}\n"
    "function delay() { loop(10); }\n"
    "function bar() { delay(); }\n"
    "function baz() { delay(); }\n"
    "function foo() {\n"
    "  delay();\n"
    "  bar();\n"
    "  delay();\n"
    "  baz();\n"
    "}\n"
    "function start(duration) {\n"
    "  var start = Date.now();\n"
    "  do {\n"
    "    foo();\n"
    "  } while (Date.now() - start < duration);\n"
    "}\n";

// Check that the profile tree for the script above will look like the
// following:
//
// [Top down]:
//  1062     0   (root) [-1]
//  1054     0    start [-1]
//  1054     1      foo [-1]
//   265     0        baz [-1]
//   265     1          delay [-1]
//   264   264            loop [-1]
//   525     3        delay [-1]
//   522   522          loop [-1]
//   263     0        bar [-1]
//   263     1          delay [-1]
//   262   262            loop [-1]
//     2     2    (program) [-1]
//     6     6    (garbage collector) [-1]
TEST(CollectCpuProfile) {
  // Skip test if concurrent sparkplug is enabled. The test becomes flaky,
  // since it requires a precise trace.
  if (i::FLAG_concurrent_sparkplug) return;

  i::FLAG_allow_natives_syntax = true;
  LocalContext env;
  v8::HandleScope scope(env->GetIsolate());

  CompileRun(cpu_profiler_test_source);
  v8::Local<v8::Function> function = GetFunction(env.local(), "start");

  int32_t profiling_interval_ms = 200;
  v8::Local<v8::Value> args[] = {
      v8::Integer::New(env->GetIsolate(), profiling_interval_ms)};
  ProfilerHelper helper(env.local());
  v8::CpuProfile* profile = helper.Run(function, args, arraysize(args), 1000);

  const v8::CpuProfileNode* root = profile->GetTopDownRoot();
  const v8::CpuProfileNode* start_node = GetChild(env.local(), root, "start");
  const v8::CpuProfileNode* foo_node = GetChild(env.local(), start_node, "foo");

  const char* bar_branch[] = {"bar", "delay", "loop"};
  CheckSimpleBranch(env.local(), foo_node, bar_branch, arraysize(bar_branch));
  const char* baz_branch[] = {"baz", "delay", "loop"};
  CheckSimpleBranch(env.local(), foo_node, baz_branch, arraysize(baz_branch));
  const char* delay_branch[] = {"delay", "loop"};
  CheckSimpleBranch(env.local(), foo_node, delay_branch,
                    arraysize(delay_branch));

  profile->Delete();
}

TEST(CollectCpuProfileCallerLineNumbers) {
|
2021-11-19 11:00:42 +00:00
|
|
|
// Skip test if concurrent sparkplug is enabled. The test becomes flaky,
|
|
|
|
// since it requires a precise trace.
|
|
|
|
if (i::FLAG_concurrent_sparkplug) return;
|
|
|
|
|
2018-05-23 08:26:10 +00:00
|
|
|
i::FLAG_allow_natives_syntax = true;
|
|
|
|
LocalContext env;
|
|
|
|
v8::HandleScope scope(env->GetIsolate());
|
|
|
|
|
|
|
|
CompileRun(cpu_profiler_test_source);
|
|
|
|
v8::Local<v8::Function> function = GetFunction(env.local(), "start");
|
|
|
|
|
|
|
|
int32_t profiling_interval_ms = 200;
|
|
|
|
v8::Local<v8::Value> args[] = {
|
|
|
|
v8::Integer::New(env->GetIsolate(), profiling_interval_ms)};
|
|
|
|
ProfilerHelper helper(env.local());
|
2019-05-22 00:06:41 +00:00
|
|
|
helper.Run(function, args, arraysize(args), 1000, 0,
|
|
|
|
v8::CpuProfilingMode::kCallerLineNumbers, 0);
|
2018-05-23 08:26:10 +00:00
|
|
|
v8::CpuProfile* profile =
|
2019-05-22 00:06:41 +00:00
|
|
|
helper.Run(function, args, arraysize(args), 1000, 0,
|
|
|
|
v8::CpuProfilingMode::kCallerLineNumbers, 0);
|
2018-05-23 08:26:10 +00:00
|
|
|
|
|
|
|
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
|
|
|
|
const v8::CpuProfileNode* start_node = GetChild(root, {"start", 27});
|
|
|
|
const v8::CpuProfileNode* foo_node = GetChild(start_node, {"foo", 30});
|
|
|
|
|
|
|
|
NameLinePair bar_branch[] = {{"bar", 23}, {"delay", 19}, {"loop", 18}};
|
|
|
|
CheckBranch(foo_node, bar_branch, arraysize(bar_branch));
|
|
|
|
NameLinePair baz_branch[] = {{"baz", 25}, {"delay", 20}, {"loop", 18}};
|
|
|
|
CheckBranch(foo_node, baz_branch, arraysize(baz_branch));
|
|
|
|
NameLinePair delay_at22_branch[] = {{"delay", 22}, {"loop", 18}};
|
|
|
|
CheckBranch(foo_node, delay_at22_branch, arraysize(delay_at22_branch));
|
|
|
|
NameLinePair delay_at24_branch[] = {{"delay", 24}, {"loop", 18}};
|
|
|
|
CheckBranch(foo_node, delay_at24_branch, arraysize(delay_at24_branch));
|
|
|
|
|
|
|
|
profile->Delete();
|
|
|
|
}

static const char* hot_deopt_no_frame_entry_test_source =
    "%NeverOptimizeFunction(foo);\n"
    "%NeverOptimizeFunction(start);\n"
    "function foo(a, b) {\n"
    "  return a + b;\n"
    "}\n"
    "function start(timeout) {\n"
    "  var start = Date.now();\n"
    "  do {\n"
    "    for (var i = 1; i < 1000; ++i) foo(1, i);\n"
    "    var duration = Date.now() - start;\n"
    "  } while (duration < timeout);\n"
    "  return duration;\n"
    "}\n";

// Check that the profile tree for the script above will look like the
// following:
//
// [Top down]:
//  1062     0  (root) [-1]
//  1054     0    start [-1]
//  1054     1      foo [-1]
//     2     2  (program) [-1]
//     6     6  (garbage collector) [-1]
//
// The test checks that no frame-pointer (FP) ranges are present in a
// deoptimized function. If 'foo' has no ranges, the samples falling into its
// prologue will miss the 'start' function on the stack, so 'foo' will be
// attached to the (root).
TEST(HotDeoptNoFrameEntry) {
  i::FLAG_allow_natives_syntax = true;
  LocalContext env;
  v8::HandleScope scope(env->GetIsolate());

  CompileRun(hot_deopt_no_frame_entry_test_source);
  v8::Local<v8::Function> function = GetFunction(env.local(), "start");

  int32_t profiling_interval_ms = 200;
  v8::Local<v8::Value> args[] = {
      v8::Integer::New(env->GetIsolate(), profiling_interval_ms)};
  ProfilerHelper helper(env.local());
  v8::CpuProfile* profile = helper.Run(function, args, arraysize(args), 1000);
  function->Call(env.local(), env->Global(), arraysize(args), args)
      .ToLocalChecked();

  const v8::CpuProfileNode* root = profile->GetTopDownRoot();
  const v8::CpuProfileNode* start_node = GetChild(env.local(), root, "start");
  GetChild(env.local(), start_node, "foo");

  profile->Delete();
}
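
// Checks that individual samples are exposed on the profile and that their
// timestamps are monotonically non-decreasing within the interval
// [GetStartTime(), GetEndTime()].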
TEST(CollectCpuProfileSamples) {
  i::FLAG_allow_natives_syntax = true;
  LocalContext env;
  v8::HandleScope scope(env->GetIsolate());

  CompileRun(cpu_profiler_test_source);
  v8::Local<v8::Function> function = GetFunction(env.local(), "start");

  int32_t profiling_interval_ms = 200;
  v8::Local<v8::Value> args[] = {
      v8::Integer::New(env->GetIsolate(), profiling_interval_ms)};
  ProfilerHelper helper(env.local());
  v8::CpuProfile* profile =
      helper.Run(function, args, arraysize(args), 1000, 0);

  CHECK_LE(200, profile->GetSamplesCount());
  uint64_t end_time = profile->GetEndTime();
  uint64_t current_time = profile->GetStartTime();
  CHECK_LE(current_time, end_time);
  for (int i = 0; i < profile->GetSamplesCount(); i++) {
    CHECK(profile->GetSample(i));
    uint64_t timestamp = profile->GetSampleTimestamp(i);
    CHECK_LE(current_time, timestamp);
    CHECK_LE(timestamp, end_time);
    current_time = timestamp;
  }

  profile->Delete();
}

static const char* cpu_profiler_test_source2 =
    "%NeverOptimizeFunction(loop);\n"
    "%NeverOptimizeFunction(delay);\n"
    "%NeverOptimizeFunction(start);\n"
    "function loop() {}\n"
    "function delay() { loop(); }\n"
    "function start(duration) {\n"
    "  var start = Date.now();\n"
    "  do {\n"
    "    for (var i = 0; i < 10000; ++i) delay();\n"
    "  } while (Date.now() - start < duration);\n"
    "}";

// Check that the profile tree doesn't contain unexpected traces:
//  - 'loop' can be called only by 'delay'
//  - 'delay' may be called only by 'start'
// The profile will look like the following:
//
// [Top down]:
//  135     0   (root) [-1] #1
//  121    72    start [-1] #3
//   49    33      delay [-1] #4
//   16    16        loop [-1] #5
//   14    14    (program) [-1] #2
TEST(SampleWhenFrameIsNotSetup) {
  i::FLAG_allow_natives_syntax = true;
  LocalContext env;
  v8::HandleScope scope(env->GetIsolate());

  CompileRun(cpu_profiler_test_source2);
  v8::Local<v8::Function> function = GetFunction(env.local(), "start");

  int32_t duration_ms = 100;
  v8::Local<v8::Value> args[] = {
      v8::Integer::New(env->GetIsolate(), duration_ms)};
  ProfilerHelper helper(env.local());
  v8::CpuProfile* profile = helper.Run(function, args, arraysize(args), 1000);

  const v8::CpuProfileNode* root = profile->GetTopDownRoot();
  const v8::CpuProfileNode* start_node = GetChild(env.local(), root, "start");
  const v8::CpuProfileNode* delay_node =
      GetChild(env.local(), start_node, "delay");
  GetChild(env.local(), delay_node, "loop");

  profile->Delete();
}

static const char* native_accessor_test_source =
    "function start(count) {\n"
    "  for (var i = 0; i < count; i++) {\n"
    "    var o = instance.foo;\n"
    "    instance.foo = o + 1;\n"
    "  }\n"
    "}\n";

class TestApiCallbacks {
 public:
  explicit TestApiCallbacks(int min_duration_ms)
      : min_duration_ms_(min_duration_ms), is_warming_up_(false) {}

  static void Getter(v8::Local<v8::String> name,
                     const v8::PropertyCallbackInfo<v8::Value>& info) {
    TestApiCallbacks* data = FromInfo(info);
    data->Wait();
  }

  static void Setter(v8::Local<v8::String> name, v8::Local<v8::Value> value,
                     const v8::PropertyCallbackInfo<void>& info) {
    TestApiCallbacks* data = FromInfo(info);
    data->Wait();
  }

  static void Callback(const v8::FunctionCallbackInfo<v8::Value>& info) {
    TestApiCallbacks* data = FromInfo(info);
    data->Wait();
  }

  void set_warming_up(bool value) { is_warming_up_ = value; }

 private:
  void Wait() {
    if (is_warming_up_) return;
    v8::Platform* platform = v8::internal::V8::GetCurrentPlatform();
    double start = platform->CurrentClockTimeMillis();
    double duration = 0;
    while (duration < min_duration_ms_) {
      v8::base::OS::Sleep(v8::base::TimeDelta::FromMilliseconds(1));
      duration = platform->CurrentClockTimeMillis() - start;
    }
  }

  template <typename T>
  static TestApiCallbacks* FromInfo(const T& info) {
    void* data = v8::External::Cast(*info.Data())->Value();
    return reinterpret_cast<TestApiCallbacks*>(data);
  }

  int min_duration_ms_;
  bool is_warming_up_;
};
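
// Note: unless warming up, each callback above busy-waits for at least
// min_duration_ms_ milliseconds, so the sampler reliably observes ticks
// inside the native getter, setter, or method under test.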

// Test that native accessors are properly reported in the CPU profile.
// This test checks the case when the long-running accessors are called
// only once and the optimizer doesn't have a chance to change the invocation
// code.
TEST(NativeAccessorUninitializedIC) {
  LocalContext env;
  v8::Isolate* isolate = env->GetIsolate();
  v8::HandleScope scope(isolate);

  v8::Local<v8::FunctionTemplate> func_template =
      v8::FunctionTemplate::New(isolate);
  v8::Local<v8::ObjectTemplate> instance_template =
      func_template->InstanceTemplate();

  TestApiCallbacks accessors(100);
  v8::Local<v8::External> data = v8::External::New(isolate, &accessors);
  instance_template->SetAccessor(v8_str("foo"), &TestApiCallbacks::Getter,
                                 &TestApiCallbacks::Setter, data);
  v8::Local<v8::Function> func =
      func_template->GetFunction(env.local()).ToLocalChecked();
  v8::Local<v8::Object> instance =
      func->NewInstance(env.local()).ToLocalChecked();
  env->Global()->Set(env.local(), v8_str("instance"), instance).FromJust();

  CompileRun(native_accessor_test_source);
  v8::Local<v8::Function> function = GetFunction(env.local(), "start");

  ProfilerHelper helper(env.local());
  int32_t repeat_count = 1;
  v8::Local<v8::Value> args[] = {v8::Integer::New(isolate, repeat_count)};
  v8::CpuProfile* profile = helper.Run(function, args, arraysize(args), 0, 100);

  const v8::CpuProfileNode* root = profile->GetTopDownRoot();
  const v8::CpuProfileNode* start_node = GetChild(env.local(), root, "start");
  GetChild(env.local(), start_node, "get foo");
  GetChild(env.local(), start_node, "set foo");

  profile->Delete();
}

// Test that native accessors are properly reported in the CPU profile.
// This test makes sure that the accessors are called enough times to become
// hot and to trigger optimizations.
TEST(NativeAccessorMonomorphicIC) {
  LocalContext env;
  v8::Isolate* isolate = env->GetIsolate();
  v8::HandleScope scope(isolate);

  v8::Local<v8::FunctionTemplate> func_template =
      v8::FunctionTemplate::New(isolate);
  v8::Local<v8::ObjectTemplate> instance_template =
      func_template->InstanceTemplate();

  TestApiCallbacks accessors(1);
  v8::Local<v8::External> data = v8::External::New(isolate, &accessors);
  instance_template->SetAccessor(v8_str("foo"), &TestApiCallbacks::Getter,
                                 &TestApiCallbacks::Setter, data);
  v8::Local<v8::Function> func =
      func_template->GetFunction(env.local()).ToLocalChecked();
  v8::Local<v8::Object> instance =
      func->NewInstance(env.local()).ToLocalChecked();
  env->Global()->Set(env.local(), v8_str("instance"), instance).FromJust();

  CompileRun(native_accessor_test_source);
  v8::Local<v8::Function> function = GetFunction(env.local(), "start");

  {
    // Make sure accessor ICs are in monomorphic state before starting
    // profiling.
    accessors.set_warming_up(true);
    int32_t warm_up_iterations = 3;
    v8::Local<v8::Value> args[] = {
        v8::Integer::New(isolate, warm_up_iterations)};
    function->Call(env.local(), env->Global(), arraysize(args), args)
        .ToLocalChecked();
    accessors.set_warming_up(false);
  }

  int32_t repeat_count = 100;
  v8::Local<v8::Value> args[] = {v8::Integer::New(isolate, repeat_count)};
  ProfilerHelper helper(env.local());
  v8::CpuProfile* profile = helper.Run(function, args, arraysize(args), 0, 100);

  const v8::CpuProfileNode* root = profile->GetTopDownRoot();
  const v8::CpuProfileNode* start_node = GetChild(env.local(), root, "start");
  GetChild(env.local(), start_node, "get foo");
  GetChild(env.local(), start_node, "set foo");

  profile->Delete();
}

static const char* native_method_test_source =
    "function start(count) {\n"
    "  for (var i = 0; i < count; i++) {\n"
    "    instance.fooMethod();\n"
    "  }\n"
    "}\n";
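
// Test that a native method installed on an instance's prototype is properly
// reported in the CPU profile: 'fooMethod' must show up as a child of 'start'
// even when it is called only once, so the call IC is still uninitialized.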
TEST(NativeMethodUninitializedIC) {
  LocalContext env;
  v8::Isolate* isolate = env->GetIsolate();
  v8::HandleScope scope(isolate);

  TestApiCallbacks callbacks(100);
  v8::Local<v8::External> data = v8::External::New(isolate, &callbacks);

  v8::Local<v8::FunctionTemplate> func_template =
      v8::FunctionTemplate::New(isolate);
  func_template->SetClassName(v8_str("Test_InstanceConstructor"));
  v8::Local<v8::ObjectTemplate> proto_template =
      func_template->PrototypeTemplate();
  v8::Local<v8::Signature> signature =
      v8::Signature::New(isolate, func_template);
  proto_template->Set(
      isolate, "fooMethod",
      v8::FunctionTemplate::New(isolate, &TestApiCallbacks::Callback, data,
                                signature, 0));

  v8::Local<v8::Function> func =
      func_template->GetFunction(env.local()).ToLocalChecked();
  v8::Local<v8::Object> instance =
      func->NewInstance(env.local()).ToLocalChecked();
  env->Global()->Set(env.local(), v8_str("instance"), instance).FromJust();

  CompileRun(native_method_test_source);
  v8::Local<v8::Function> function = GetFunction(env.local(), "start");

  ProfilerHelper helper(env.local());
  int32_t repeat_count = 1;
  v8::Local<v8::Value> args[] = {v8::Integer::New(isolate, repeat_count)};
  v8::CpuProfile* profile = helper.Run(function, args, arraysize(args), 0, 100);

  const v8::CpuProfileNode* root = profile->GetTopDownRoot();
  const v8::CpuProfileNode* start_node = GetChild(env.local(), root, "start");
  GetChild(env.local(), start_node, "fooMethod");

  profile->Delete();
}

TEST(NativeMethodMonomorphicIC) {
  LocalContext env;
  v8::Isolate* isolate = env->GetIsolate();
  v8::HandleScope scope(isolate);

  TestApiCallbacks callbacks(1);
  v8::Local<v8::External> data = v8::External::New(isolate, &callbacks);

  v8::Local<v8::FunctionTemplate> func_template =
      v8::FunctionTemplate::New(isolate);
  func_template->SetClassName(v8_str("Test_InstanceConstructor"));
  v8::Local<v8::ObjectTemplate> proto_template =
      func_template->PrototypeTemplate();
  v8::Local<v8::Signature> signature =
      v8::Signature::New(isolate, func_template);
  proto_template->Set(
      isolate, "fooMethod",
      v8::FunctionTemplate::New(isolate, &TestApiCallbacks::Callback, data,
                                signature, 0));

  v8::Local<v8::Function> func =
      func_template->GetFunction(env.local()).ToLocalChecked();
  v8::Local<v8::Object> instance =
      func->NewInstance(env.local()).ToLocalChecked();
  env->Global()->Set(env.local(), v8_str("instance"), instance).FromJust();

  CompileRun(native_method_test_source);
  v8::Local<v8::Function> function = GetFunction(env.local(), "start");
  {
    // Make sure method ICs are in monomorphic state before starting
    // profiling.
    callbacks.set_warming_up(true);
    int32_t warm_up_iterations = 3;
    v8::Local<v8::Value> args[] = {
        v8::Integer::New(isolate, warm_up_iterations)};
    function->Call(env.local(), env->Global(), arraysize(args), args)
        .ToLocalChecked();
    callbacks.set_warming_up(false);
  }

  ProfilerHelper helper(env.local());
  int32_t repeat_count = 100;
  v8::Local<v8::Value> args[] = {v8::Integer::New(isolate, repeat_count)};
  v8::CpuProfile* profile = helper.Run(function, args, arraysize(args), 0, 200);

  const v8::CpuProfileNode* root = profile->GetTopDownRoot();
  const v8::CpuProfileNode* start_node = GetChild(env.local(), root, "start");
  GetChild(env.local(), start_node, "fooMethod");

  profile->Delete();
}

static const char* bound_function_test_source =
    "function foo() {\n"
    "  startProfiling('my_profile');\n"
    "}\n"
    "function start() {\n"
    "  var callback = foo.bind(this);\n"
    "  callback();\n"
    "}";
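
// Checks that a function invoked through Function.prototype.bind is
// attributed to its target: 'foo' must appear as a child of 'start'.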
TEST(BoundFunctionCall) {
  v8::HandleScope scope(CcTest::isolate());
  v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
  v8::Context::Scope context_scope(env);

  CompileRun(bound_function_test_source);
  v8::Local<v8::Function> function = GetFunction(env, "start");

  ProfilerHelper helper(env);
  v8::CpuProfile* profile = helper.Run(function, nullptr, 0);

  const v8::CpuProfileNode* root = profile->GetTopDownRoot();

  const v8::CpuProfileNode* start_node = GetChild(env, root, "start");
  GetChild(env, start_node, "foo");

  profile->Delete();
}

// This test checks the distribution of samples across source lines.
static void TickLines(bool optimize) {
#ifndef V8_LITE_MODE
  FLAG_turbofan = optimize;
#ifdef V8_ENABLE_MAGLEV
  // TODO(v8:7700): Also test maglev here.
  FLAG_maglev = false;
#endif  // V8_ENABLE_MAGLEV
#endif  // V8_LITE_MODE
  CcTest::InitializeVM();
  LocalContext env;
  i::FLAG_allow_natives_syntax = true;
  i::Isolate* isolate = CcTest::i_isolate();
  i::Factory* factory = isolate->factory();
  i::HandleScope scope(isolate);

  base::EmbeddedVector<char, 512> script;
  base::EmbeddedVector<char, 64> prepare_opt;
  base::EmbeddedVector<char, 64> optimize_call;

  const char* func_name = "func";
  if (optimize) {
    base::SNPrintF(prepare_opt, "%%PrepareFunctionForOptimization(%s);\n",
                   func_name);
    base::SNPrintF(optimize_call, "%%OptimizeFunctionOnNextCall(%s);\n",
                   func_name);
  } else {
    prepare_opt[0] = '\0';
    optimize_call[0] = '\0';
  }
  base::SNPrintF(script,
                 "function %s() {\n"
                 "  var n = 0;\n"
                 "  var m = 100*100;\n"
                 "  while (m > 1) {\n"
                 "    m--;\n"
                 "    n += m * m * m;\n"
                 "  }\n"
                 "}\n"
                 "%s"
                 "%s();\n"
                 "%s"
                 "%s();\n",
                 func_name, prepare_opt.begin(), func_name,
                 optimize_call.begin(), func_name);

  CompileRun(script.begin());

  i::Handle<i::JSFunction> func = i::Handle<i::JSFunction>::cast(
      v8::Utils::OpenHandle(*GetFunction(env.local(), func_name)));
  CHECK(!func->shared().is_null());
  CHECK(!func->shared().abstract_code(isolate).is_null());
  CHECK(!optimize || func->HasAttachedOptimizedCode() ||
        !CcTest::i_isolate()->use_optimizer());
  i::Handle<i::AbstractCode> code(func->abstract_code(isolate), isolate);
  CHECK(!code->is_null());
  i::Address code_address = code->raw_instruction_start();
  CHECK_NE(code_address, kNullAddress);

  CodeEntryStorage storage;
  CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
  ProfilerCodeObserver* code_observer =
      new ProfilerCodeObserver(isolate, storage);
  Symbolizer* symbolizer = new Symbolizer(code_observer->code_map());
  ProfilerEventsProcessor* processor = new SamplingEventsProcessor(
      CcTest::i_isolate(), symbolizer, code_observer, profiles,
      v8::base::TimeDelta::FromMicroseconds(100), true);
  CpuProfiler profiler(isolate, kDebugNaming, kLazyLogging, profiles,
                       symbolizer, processor, code_observer);
  ProfilerId id = profiles->StartProfiling().id;
  // TODO(delphick): Stop using the CpuProfiler internals here: This forces
  // LogCompiledFunctions so that source positions are collected everywhere.
  // This would normally happen automatically with CpuProfiler::StartProfiling
  // but doesn't because it's constructed with a symbolizer and a processor.
  isolate->v8_file_logger()->LogCompiledFunctions();
  CHECK(processor->Start());
  ProfilerListener profiler_listener(isolate, processor,
                                     *code_observer->code_entries(),
                                     *code_observer->weak_code_registry());

  // Enqueue code creation events.
  i::Handle<i::String> str = factory->NewStringFromAsciiChecked(func_name);
  int line = 1;
  int column = 1;
  profiler_listener.CodeCreateEvent(i::LogEventListener::CodeTag::kFunction,
                                    code, handle(func->shared(), isolate), str,
                                    line, column);

  // Enqueue a tick event to enable code events processing.
  EnqueueTickSampleEvent(processor, code_address);

  processor->StopSynchronously();

  CpuProfile* profile = profiles->StopProfiling(id);
  CHECK(profile);

  // Check the state of the symbolizer.
  CodeEntry* func_entry = symbolizer->code_map()->FindEntry(code_address);
  CHECK(func_entry);
  CHECK_EQ(0, strcmp(func_name, func_entry->name()));
  const i::SourcePositionTable* line_info = func_entry->line_info();
  CHECK(line_info);
  CHECK_NE(v8::CpuProfileNode::kNoLineNumberInfo,
           line_info->GetSourceLineNumber(100));

  // Check the hit source lines using V8 public APIs.
  const i::ProfileTree* tree = profile->top_down();
  ProfileNode* root = tree->root();
  CHECK(root);
  ProfileNode* func_node = root->FindChild(func_entry);
  CHECK(func_node);

  // Add 10 faked ticks to source line #5.
  int hit_line = 5;
  int hit_count = 10;
  for (int i = 0; i < hit_count; i++) func_node->IncrementLineTicks(hit_line);

  unsigned int line_count = func_node->GetHitLineCount();
  CHECK_EQ(2u, line_count);  // Expect two hit source lines - #1 and #5.
  base::ScopedVector<v8::CpuProfileNode::LineTick> entries(line_count);
  CHECK(func_node->GetLineTicks(&entries[0], line_count));
  int value = 0;
  for (int i = 0; i < entries.length(); i++)
    if (entries[i].line == hit_line) {
      value = entries[i].hit_count;
      break;
    }
  CHECK_EQ(hit_count, value);
}
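
// Run the line-ticks check above both for unoptimized code and for code
// optimized with %OptimizeFunctionOnNextCall.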
TEST(TickLinesBaseline) { TickLines(false); }

TEST(TickLinesOptimized) { TickLines(true); }

static const char* call_function_test_source =
    "%NeverOptimizeFunction(bar);\n"
    "%NeverOptimizeFunction(start);\n"
    "function bar(n) {\n"
    "  var s = 0;\n"
    "  for (var i = 0; i < n; i++) s += i * i * i;\n"
    "  return s;\n"
    "}\n"
    "function start(duration) {\n"
    "  var start = Date.now();\n"
    "  do {\n"
    "    for (var i = 0; i < 100; ++i)\n"
    "      bar.call(this, 1000);\n"
    "  } while (Date.now() - start < duration);\n"
    "}";

// Test that if we sample the thread while it is inside the FunctionCall
// builtin, its caller frame will be '(unresolved function)' as we have no
// reliable way to resolve it.
//
// [Top down]:
//    96     0   (root) [-1] #1
//     1     1    (garbage collector) [-1] #4
//     5     0    (unresolved function) [-1] #5
//     5     5      call [-1] #6
//    71    70    start [-1] #3
//     1     1      bar [-1] #7
//    19    19    (program) [-1] #2
TEST(FunctionCallSample) {
  // Skip test if concurrent sparkplug is enabled. The test becomes flaky,
  // since it requires a precise trace.
  if (i::FLAG_concurrent_sparkplug) return;

  i::FLAG_allow_natives_syntax = true;
  LocalContext env;
  v8::HandleScope scope(env->GetIsolate());

  // Collect garbage that might have been generated while installing
  // extensions.
  CcTest::CollectAllGarbage();

  CompileRun(call_function_test_source);
  v8::Local<v8::Function> function = GetFunction(env.local(), "start");

  ProfilerHelper helper(env.local());
  int32_t duration_ms = 100;
  v8::Local<v8::Value> args[] = {
      v8::Integer::New(env->GetIsolate(), duration_ms)};
  v8::CpuProfile* profile = helper.Run(function, args, arraysize(args), 1000);

  const v8::CpuProfileNode* root = profile->GetTopDownRoot();
  const v8::CpuProfileNode* start_node = GetChild(env.local(), root, "start");
  GetChild(env.local(), start_node, "bar");

  const v8::CpuProfileNode* unresolved_node =
      FindChild(env.local(), root, i::CodeEntry::kUnresolvedFunctionName);
  CHECK(!unresolved_node || GetChild(env.local(), unresolved_node, "call"));

  profile->Delete();
}

static const char* function_apply_test_source =
    "%NeverOptimizeFunction(bar);\n"
    "%NeverOptimizeFunction(test);\n"
    "%NeverOptimizeFunction(start);\n"
    "function bar(n) {\n"
    "  var s = 0;\n"
    "  for (var i = 0; i < n; i++) s += i * i * i;\n"
    "  return s;\n"
    "}\n"
    "function test() {\n"
    "  bar.apply(this, [1000]);\n"
    "}\n"
    "function start(duration) {\n"
    "  var start = Date.now();\n"
    "  do {\n"
    "    for (var i = 0; i < 100; ++i) test();\n"
    "  } while (Date.now() - start < duration);\n"
    "}";
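
// As in FunctionCallSample above, samples taken inside the
// Function.prototype.apply builtin may be attributed to an
// '(unresolved function)' node, expected here as a child of 'start'.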
// [Top down]:
//    94     0   (root) [-1] #0 1
//     2     2    (garbage collector) [-1] #0 7
//    82    49    start [-1] #16 3
//     1     0      (unresolved function) [-1] #0 8
//     1     1        apply [-1] #0 9
//    32    21      test [-1] #16 4
//     2     2        bar [-1] #16 6
//    10    10    (program) [-1] #0 2
TEST(FunctionApplySample) {
  // Skip test if concurrent sparkplug is enabled. The test becomes flaky,
  // since it requires a precise trace.
  if (i::FLAG_concurrent_sparkplug) return;

  i::FLAG_allow_natives_syntax = true;
  LocalContext env;
  v8::HandleScope scope(env->GetIsolate());

  CompileRun(function_apply_test_source);
  v8::Local<v8::Function> function = GetFunction(env.local(), "start");

  ProfilerHelper helper(env.local());
  int32_t duration_ms = 100;
  v8::Local<v8::Value> args[] = {
      v8::Integer::New(env->GetIsolate(), duration_ms)};
  v8::CpuProfile* profile = helper.Run(function, args, arraysize(args), 1000);

  const v8::CpuProfileNode* root = profile->GetTopDownRoot();
  const v8::CpuProfileNode* start_node = GetChild(env.local(), root, "start");
  const v8::CpuProfileNode* test_node =
      GetChild(env.local(), start_node, "test");
  GetChild(env.local(), test_node, "bar");

  const v8::CpuProfileNode* unresolved_node =
      FindChild(env.local(), start_node, CodeEntry::kUnresolvedFunctionName);
  CHECK(!unresolved_node || GetChild(env.local(), unresolved_node, "apply"));

  profile->Delete();
}

static const char* cpu_profiler_deep_stack_test_source =
    "function foo(n) {\n"
    "  if (n)\n"
    "    foo(n - 1);\n"
    "  else\n"
    "    collectSample();\n"
    "}\n"
    "function start() {\n"
    "  startProfiling('my_profile');\n"
    "  foo(250);\n"
    "}\n";

// Check a deep stack:
//
// [Top down]:
//    0  (root) 0 #1
//    2    (program) 0 #2
//    0    start 21 #3 no reason
//    0      foo 21 #4 no reason
//    0        foo 21 #5 no reason
//                ....
//    0          foo 21 #254 no reason
TEST(CpuProfileDeepStack) {
  v8::HandleScope scope(CcTest::isolate());
  v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
  v8::Context::Scope context_scope(env);
  ProfilerHelper helper(env);

  CompileRun(cpu_profiler_deep_stack_test_source);
  v8::Local<v8::Function> function = GetFunction(env, "start");

  v8::Local<v8::String> profile_name = v8_str("my_profile");
  function->Call(env, env->Global(), 0, nullptr).ToLocalChecked();
  v8::CpuProfile* profile = helper.profiler()->StopProfiling(profile_name);
  CHECK(profile);
  // Dump collected profile to have a better diagnostic in case of failure.
  reinterpret_cast<i::CpuProfile*>(profile)->Print();

  const v8::CpuProfileNode* root = profile->GetTopDownRoot();
  const v8::CpuProfileNode* node = GetChild(env, root, "start");
  for (int i = 0; i <= 250; ++i) {
    node = GetChild(env, node, "foo");
  }
  CHECK(!FindChild(env, node, "foo"));

  profile->Delete();
}

static const char* js_native_js_test_source =
    "%NeverOptimizeFunction(foo);\n"
    "%NeverOptimizeFunction(bar);\n"
    "%NeverOptimizeFunction(start);\n"
    "function foo(n) {\n"
    "  var s = 0;\n"
    "  for (var i = 0; i < n; i++) s += i * i * i;\n"
    "  return s;\n"
    "}\n"
    "function bar() {\n"
    "  foo(1000);\n"
    "}\n"
    "function start() {\n"
    "  CallJsFunction(bar);\n"
    "}";
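
// A native callback that re-enters JavaScript: it invokes its first argument
// as a function, forwarding the second argument. The tests below use it to
// build mixed JS -> native -> JS stacks.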
static void CallJsFunction(const v8::FunctionCallbackInfo<v8::Value>& info) {
  v8::Local<v8::Function> function = info[0].As<v8::Function>();
  v8::Local<v8::Value> argv[] = {info[1]};
  function
      ->Call(info.GetIsolate()->GetCurrentContext(), info.This(),
             arraysize(argv), argv)
      .ToLocalChecked();
}
|
|
|
|
|
|
|
|
// [Top down]:
//    58     0   (root) #0 1
//     2     2    (program) #0 2
//    56     1    start #16 3
//    55     0      CallJsFunction #0 4
//    55     1        bar #16 5
//    54    54          foo #16 6
TEST(JsNativeJsSample) {
  i::FLAG_allow_natives_syntax = true;
  v8::HandleScope scope(CcTest::isolate());
  v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
  v8::Context::Scope context_scope(env);

  v8::Local<v8::FunctionTemplate> func_template =
      v8::FunctionTemplate::New(env->GetIsolate(), CallJsFunction);
  v8::Local<v8::Function> func =
      func_template->GetFunction(env).ToLocalChecked();
  func->SetName(v8_str("CallJsFunction"));
  env->Global()->Set(env, v8_str("CallJsFunction"), func).FromJust();

  CompileRun(js_native_js_test_source);
  v8::Local<v8::Function> function = GetFunction(env, "start");

  ProfilerHelper helper(env);
  v8::CpuProfile* profile = helper.Run(function, nullptr, 0, 1000);

  const v8::CpuProfileNode* root = profile->GetTopDownRoot();
  const v8::CpuProfileNode* start_node = GetChild(env, root, "start");
  const v8::CpuProfileNode* native_node =
      GetChild(env, start_node, "CallJsFunction");
  const v8::CpuProfileNode* bar_node = GetChild(env, native_node, "bar");
  GetChild(env, bar_node, "foo");

  profile->Delete();
}

static const char* js_native_js_runtime_js_test_source =
    "%NeverOptimizeFunction(foo);\n"
    "%NeverOptimizeFunction(bar);\n"
    "%NeverOptimizeFunction(start);\n"
    "function foo(n) {\n"
    "  var s = 0;\n"
    "  for (var i = 0; i < n; i++) s += i * i * i;\n"
    "  return s;\n"
    "}\n"
    "var bound = foo.bind(this);\n"
    "function bar() {\n"
    "  bound(1000);\n"
    "}\n"
    "function start() {\n"
    "  CallJsFunction(bar);\n"
    "}";

// [Top down]:
//    57     0   (root) #0 1
//    55     1    start #16 3
//    54     0      CallJsFunction #0 4
//    54     3        bar #16 5
//    51    51          foo #16 6
//     2     2    (program) #0 2
TEST(JsNativeJsRuntimeJsSample) {
  i::FLAG_allow_natives_syntax = true;
  v8::HandleScope scope(CcTest::isolate());
  v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
  v8::Context::Scope context_scope(env);

  v8::Local<v8::FunctionTemplate> func_template =
      v8::FunctionTemplate::New(env->GetIsolate(), CallJsFunction);
  v8::Local<v8::Function> func =
      func_template->GetFunction(env).ToLocalChecked();
  func->SetName(v8_str("CallJsFunction"));
  env->Global()->Set(env, v8_str("CallJsFunction"), func).FromJust();

  CompileRun(js_native_js_runtime_js_test_source);
  ProfilerHelper helper(env);
  v8::Local<v8::Function> function = GetFunction(env, "start");
  v8::CpuProfile* profile = helper.Run(function, nullptr, 0, 1000);

  const v8::CpuProfileNode* root = profile->GetTopDownRoot();
  const v8::CpuProfileNode* start_node = GetChild(env, root, "start");
  const v8::CpuProfileNode* native_node =
      GetChild(env, start_node, "CallJsFunction");
  const v8::CpuProfileNode* bar_node = GetChild(env, native_node, "bar");
  GetChild(env, bar_node, "foo");

  profile->Delete();
}

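// A second native entry point: it logs and then forwards to CallJsFunction,
// giving the profile two distinguishable native frames on one stack.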
static void CallJsFunction2(const v8::FunctionCallbackInfo<v8::Value>& info) {
  v8::base::OS::Print("In CallJsFunction2\n");
  CallJsFunction(info);
}

static const char* js_native1_js_native2_js_test_source =
    "%NeverOptimizeFunction(foo);\n"
    "%NeverOptimizeFunction(bar);\n"
    "%NeverOptimizeFunction(start);\n"
    "function foo() {\n"
    "  var s = 0;\n"
    "  for (var i = 0; i < 1000; i++) s += i * i * i;\n"
    "  return s;\n"
    "}\n"
    "function bar() {\n"
    "  CallJsFunction2(foo);\n"
    "}\n"
    "function start() {\n"
    "  CallJsFunction1(bar);\n"
    "}";

// [Top down]:
//    57     0   (root) #0 1
//    55     1    start #16 3
//    54     0      CallJsFunction1 #0 4
//    54     0        bar #16 5
//    54     0          CallJsFunction2 #0 6
//    54    54            foo #16 7
//     2     2    (program) #0 2
TEST(JsNative1JsNative2JsSample) {
  i::FLAG_allow_natives_syntax = true;
  v8::HandleScope scope(CcTest::isolate());
  v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
  v8::Context::Scope context_scope(env);

  v8::Local<v8::Function> func1 =
      v8::FunctionTemplate::New(env->GetIsolate(), CallJsFunction)
          ->GetFunction(env)
          .ToLocalChecked();
  func1->SetName(v8_str("CallJsFunction1"));
  env->Global()->Set(env, v8_str("CallJsFunction1"), func1).FromJust();

  v8::Local<v8::Function> func2 =
      v8::FunctionTemplate::New(env->GetIsolate(), CallJsFunction2)
          ->GetFunction(env)
          .ToLocalChecked();
  func2->SetName(v8_str("CallJsFunction2"));
  env->Global()->Set(env, v8_str("CallJsFunction2"), func2).FromJust();

  CompileRun(js_native1_js_native2_js_test_source);

  ProfilerHelper helper(env);
  v8::Local<v8::Function> function = GetFunction(env, "start");
  v8::CpuProfile* profile = helper.Run(function, nullptr, 0, 1000);

  const v8::CpuProfileNode* root = profile->GetTopDownRoot();
  const v8::CpuProfileNode* start_node = GetChild(env, root, "start");
  const v8::CpuProfileNode* native_node1 =
      GetChild(env, start_node, "CallJsFunction1");
  const v8::CpuProfileNode* bar_node = GetChild(env, native_node1, "bar");
  const v8::CpuProfileNode* native_node2 =
      GetChild(env, bar_node, "CallJsFunction2");
  GetChild(env, native_node2, "foo");

  profile->Delete();
}

static const char* js_force_collect_sample_source =
    "function start() {\n"
    "  CallCollectSample();\n"
    "}";

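// Native binding for v8::CpuProfiler::CollectSample(), which records a single
// sample of the current stack on demand, independent of the sampling timer.
// A minimal embedder-side sketch of the same flow (variable names assumed):
//   profiler->StartProfiling(name);
//   v8::CpuProfiler::CollectSample(isolate);  // force one sample right here
//   v8::CpuProfile* profile = profiler->StopProfiling(name);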
static void CallCollectSample(const v8::FunctionCallbackInfo<v8::Value>& info) {
  v8::CpuProfiler::CollectSample(info.GetIsolate());
}

TEST(CollectSampleAPI) {
  v8::HandleScope scope(CcTest::isolate());
  v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
  v8::Context::Scope context_scope(env);

  v8::Local<v8::FunctionTemplate> func_template =
      v8::FunctionTemplate::New(env->GetIsolate(), CallCollectSample);
  v8::Local<v8::Function> func =
      func_template->GetFunction(env).ToLocalChecked();
  func->SetName(v8_str("CallCollectSample"));
  env->Global()->Set(env, v8_str("CallCollectSample"), func).FromJust();

  CompileRun(js_force_collect_sample_source);
  ProfilerHelper helper(env);
  v8::Local<v8::Function> function = GetFunction(env, "start");
  v8::CpuProfile* profile = helper.Run(function, nullptr, 0, 0);

  const v8::CpuProfileNode* root = profile->GetTopDownRoot();
  const v8::CpuProfileNode* start_node = GetChild(env, root, "start");
  CHECK_LE(1, start_node->GetChildrenCount());
  GetChild(env, start_node, "CallCollectSample");

  profile->Delete();
}

static const char* js_native_js_runtime_multiple_test_source =
    "%NeverOptimizeFunction(foo);\n"
    "%NeverOptimizeFunction(bar);\n"
    "%NeverOptimizeFunction(start);\n"
    "function foo() {\n"
    "  return Math.sin(Math.random());\n"
    "}\n"
    "var bound = foo.bind(this);\n"
    "function bar() {\n"
    "  return bound();\n"
    "}\n"
    "function start() {\n"
    "  startProfiling('my_profile');\n"
    "  var startTime = Date.now();\n"
    "  do {\n"
    "    CallJsFunction(bar);\n"
    "  } while (Date.now() - startTime < 200);\n"
    "}";

// The test checks multiple entrances/exits between JS and native code.
//
// [Top down]:
//    (root) #0 1
//      start #16 3
//        CallJsFunction #0 4
//          bar #16 5
//            foo #16 6
//      (program) #0 2
TEST(JsNativeJsRuntimeJsSampleMultiple) {
  i::FLAG_allow_natives_syntax = true;
  v8::HandleScope scope(CcTest::isolate());
  v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
  v8::Context::Scope context_scope(env);

  v8::Local<v8::FunctionTemplate> func_template =
      v8::FunctionTemplate::New(env->GetIsolate(), CallJsFunction);
  v8::Local<v8::Function> func =
      func_template->GetFunction(env).ToLocalChecked();
  func->SetName(v8_str("CallJsFunction"));
  env->Global()->Set(env, v8_str("CallJsFunction"), func).FromJust();

  CompileRun(js_native_js_runtime_multiple_test_source);

  ProfilerHelper helper(env);
  v8::Local<v8::Function> function = GetFunction(env, "start");
  v8::CpuProfile* profile = helper.Run(function, nullptr, 0, 500, 500);

  const v8::CpuProfileNode* root = profile->GetTopDownRoot();
  const v8::CpuProfileNode* start_node = GetChild(env, root, "start");
  const v8::CpuProfileNode* native_node =
      GetChild(env, start_node, "CallJsFunction");
  const v8::CpuProfileNode* bar_node = GetChild(env, native_node, "bar");
  GetChild(env, bar_node, "foo");

  profile->Delete();
}

static const char* inlining_test_source =
    "var finish = false;\n"
    "function action(n) {\n"
    "  var s = 0;\n"
    "  for (var i = 0; i < n; ++i) s += i*i*i;\n"
    "  if (finish)\n"
    "    startProfiling('my_profile');\n"
    "  return s;\n"
    "}\n"
    "function level3() { return action(100); }\n"
    "function level2() { return level3() * 2; }\n"
    "function level1() { return level2(); }\n"
    "function start() {\n"
    "  var n = 100;\n"
    "  while (--n)\n"
    "    level1();\n"
    "  finish = true;\n"
    "  level1();\n"
    "}"
    "%PrepareFunctionForOptimization(level1);\n"
    "%PrepareFunctionForOptimization(level2);\n"
    "%PrepareFunctionForOptimization(level3);\n"
    "%NeverOptimizeFunction(action);\n"
    "%NeverOptimizeFunction(start);\n"
    "level1();\n"
    "%OptimizeFunctionOnNextCall(level1);\n"
    "%OptimizeFunctionOnNextCall(level2);\n"
    "%OptimizeFunctionOnNextCall(level3);\n";

// The test checks multiple entrances/exits between JS and native code.
//
// [Top down]:
//    (root) #0 1
//      start #16 3
//        level1 #0 4
//          level2 #16 5
//            level3 #16 6
//              action #16 7
//      (program) #0 2
TEST(Inlining) {
  i::FLAG_allow_natives_syntax = true;
  v8::HandleScope scope(CcTest::isolate());
  v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
  v8::Context::Scope context_scope(env);
  ProfilerHelper helper(env);

  CompileRun(inlining_test_source);
  v8::Local<v8::Function> function = GetFunction(env, "start");

  v8::Local<v8::String> profile_name = v8_str("my_profile");
  function->Call(env, env->Global(), 0, nullptr).ToLocalChecked();
  v8::CpuProfile* profile = helper.profiler()->StopProfiling(profile_name);
  CHECK(profile);
  // Dump collected profile to have a better diagnostic in case of failure.
  reinterpret_cast<i::CpuProfile*>(profile)->Print();

  const v8::CpuProfileNode* root = profile->GetTopDownRoot();
  const v8::CpuProfileNode* start_node = GetChild(env, root, "start");
  const v8::CpuProfileNode* level1_node = GetChild(env, start_node, "level1");
  const v8::CpuProfileNode* level2_node = GetChild(env, level1_node, "level2");
  const v8::CpuProfileNode* level3_node = GetChild(env, level2_node, "level3");
  GetChild(env, level3_node, "action");

  profile->Delete();
}

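// Same shape as inlining_test_source, but action() is reached from several
// distinct call sites; level1-level4 are optimized (and expected to be
// inlined), while action itself stays unoptimized.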
static const char* inlining_test_source2 = R"(
|
|
|
|
function action(n) {
|
|
|
|
var s = 0;
|
|
|
|
for (var i = 0; i < n; ++i) s += i*i*i;
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
function level4() {
|
[cpu-profiler] Reduce the size of inlining information
Previously we stored the source position table, which stored a mapping
of pc offsets to line numbers, and the inline_locations, which stored a
mapping of pc offsets to stacks of {CodeEntry, line_number} pairs. This
was slightly wasteful because we had two different tables which were
both keyed on the pc offset and contained some overlapping information.
This CL combines the two tables in a way. The source position table now
maps a pc offset to a pair of {line_number, inlining_id}. If the
inlining_id is valid, then it can be used to look up the inlining stack
which is stored in inline_locations, but is now keyed by inlining_id
rather than pc offset. This also has the nice effect of de-duplicating
inline stacks which we previously duplicated.
The new structure is similar to how this data is stored by the compiler,
except that we convert 'source positions' (char offset in a file) into
line numbers as we go, because we only care about attributing ticks to
a given line.
Also remove the helper RecordInliningInfo() as this is only actually
used to add inline stacks by one caller (where it is now inlined). The
other callers would always bail out or are only called from
test-cpu-profiler.
Remove AddInlineStack and replace it with SetInlineStacks which adds all
of the stacks at once. We need to do it this way because the source pos
table is passed into the constructor of CodeEntry, so we need to create
it before the CodeEntry, but the inline stacks are not (they are part of
rare_data which is not always present), so we need to add them after
construction. Given that we calculate both the source pos table and the
inline stacks before construction, it's just easier to add them all at
once.
Also add a print() method to CodeEntry to make future debugging easier
as I'm constantly rewriting this locally.
Bug: v8:8575, v8:7719, v8:7203
Change-Id: I39324d6ea13d116d5da5d0a0d243cae76a749c79
Reviewed-on: https://chromium-review.googlesource.com/c/1392195
Commit-Queue: Peter Marshall <petermarshall@chromium.org>
Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
Cr-Commit-Position: refs/heads/master@{#58554}
2019-01-04 11:57:50 +00:00
|
|
|
action(100);
|
|
|
|
return action(100);
|
2019-01-02 12:19:06 +00:00
|
|
|
}
|
|
|
|
function level3() {
|
|
|
|
const a = level4();
|
|
|
|
const b = level4();
|
|
|
|
return a + b * 1.1;
|
|
|
|
}
|
|
|
|
function level2() {
|
|
|
|
return level3() * 2;
|
|
|
|
}
|
|
|
|
function level1() {
|
|
|
|
action(1);
|
|
|
|
action(200);
|
|
|
|
action(1);
|
|
|
|
return level2();
|
|
|
|
}
|
|
|
|
function start(n) {
|
|
|
|
while (--n)
|
|
|
|
level1();
|
|
|
|
};
|
2019-04-30 11:04:41 +00:00
|
|
|
%NeverOptimizeFunction(action);
|
|
|
|
%NeverOptimizeFunction(start);
|
|
|
|
%PrepareFunctionForOptimization(level1);
|
|
|
|
%PrepareFunctionForOptimization(level2);
|
|
|
|
%PrepareFunctionForOptimization(level3);
|
|
|
|
%PrepareFunctionForOptimization(level4);
|
|
|
|
level1();
|
|
|
|
level1();
|
|
|
|
%OptimizeFunctionOnNextCall(level1);
|
|
|
|
%OptimizeFunctionOnNextCall(level2);
|
|
|
|
%OptimizeFunctionOnNextCall(level3);
|
|
|
|
%OptimizeFunctionOnNextCall(level4);
|
|
|
|
level1();
|
2019-01-02 12:19:06 +00:00
|
|
|
)";
// [Top down]:
//     0  (root):0 0 #1
//    13    start:34 6 #3
//              bailed out due to 'Optimization is always disabled'
//    19      level1:36 6 #4
//    16        action:29 6 #14
//                  bailed out due to 'Optimization is always disabled'
//  2748        action:30 6 #10
//                  bailed out due to 'Optimization is always disabled'
//    18        action:31 6 #15
//                  bailed out due to 'Optimization is always disabled'
//     0        level2:32 6 #5
//     0          level3:26 6 #6
//    12            level4:22 6 #11
//  1315              action:17 6 #13
//                        bailed out due to 'Optimization is always disabled'
//  1324              action:18 6 #12
//                        bailed out due to 'Optimization is always disabled'
//    16            level4:21 6 #7
//  1268              action:17 6 #9
//                        bailed out due to 'Optimization is always disabled'
//  1322              action:18 6 #8
//                        bailed out due to 'Optimization is always disabled'
//     2  (program):0 0 #2
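//
// Note: with v8::CpuProfilingMode::kCallerLineNumbers a node is keyed by the
// line of its call site in the caller, which is why the same callee (e.g.
// action) appears above as several sibling nodes differing only in line.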
TEST(Inlining2) {
  // Skip test if concurrent sparkplug is enabled. The test becomes flaky,
  // since it requires a precise trace.
  if (FLAG_concurrent_sparkplug) return;

  FLAG_allow_natives_syntax = true;
  v8::Isolate* isolate = CcTest::isolate();
  LocalContext env;
  v8::CpuProfiler::UseDetailedSourcePositionsForProfiling(isolate);
  v8::HandleScope scope(isolate);
  ProfilerHelper helper(env.local());

  CompileRun(inlining_test_source2);
  v8::Local<v8::Function> function = GetFunction(env.local(), "start");

  v8::Local<v8::Value> args[] = {v8::Integer::New(env->GetIsolate(), 20)};
  static const unsigned min_samples = 4000;
  static const unsigned min_ext_samples = 0;
  v8::CpuProfile* profile =
      helper.Run(function, args, arraysize(args), min_samples, min_ext_samples,
                 v8::CpuProfilingMode::kCallerLineNumbers);
  CHECK(profile);

  const v8::CpuProfileNode* root = profile->GetTopDownRoot();
  const v8::CpuProfileNode* start_node = GetChild(env.local(), root, "start");

  NameLinePair l421_a17[] = {{"level1", 27},
                             {"level2", 23},
                             {"level3", 17},
                             {"level4", 12},
                             {"action", 8}};
  CheckBranch(start_node, l421_a17, arraysize(l421_a17));
  NameLinePair l422_a17[] = {{"level1", 27},
                             {"level2", 23},
                             {"level3", 17},
                             {"level4", 13},
                             {"action", 8}};
  CheckBranch(start_node, l422_a17, arraysize(l422_a17));

  NameLinePair l421_a18[] = {{"level1", 27},
                             {"level2", 23},
                             {"level3", 17},
                             {"level4", 12},
                             {"action", 9}};
  CheckBranch(start_node, l421_a18, arraysize(l421_a18));
  NameLinePair l422_a18[] = {{"level1", 27},
                             {"level2", 23},
                             {"level3", 17},
                             {"level4", 13},
                             {"action", 9}};
  CheckBranch(start_node, l422_a18, arraysize(l422_a18));

  NameLinePair action_direct[] = {{"level1", 27}, {"action", 21}};
  CheckBranch(start_node, action_direct, arraysize(action_direct));

  profile->Delete();
}

static const char* cross_script_source_a = R"(
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
%NeverOptimizeFunction(action);
|
|
|
|
function action(n) {
|
|
|
|
var s = 0;
|
|
|
|
for (var i = 0; i < n; ++i) s += i*i*i;
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
function level1() {
|
|
|
|
const a = action(1);
|
|
|
|
const b = action(200);
|
|
|
|
const c = action(1);
|
|
|
|
return a + b + c;
|
|
|
|
}
|
|
|
|
)";
static const char* cross_script_source_b = R"(
|
|
|
|
%PrepareFunctionForOptimization(start);
|
|
|
|
%PrepareFunctionForOptimization(level1);
|
|
|
|
start(1);
|
|
|
|
start(1);
|
|
|
|
%OptimizeFunctionOnNextCall(start);
|
|
|
|
%OptimizeFunctionOnNextCall(level1);
|
|
|
|
start(1);
|
|
|
|
function start(n) {
|
|
|
|
while (--n)
|
|
|
|
level1();
|
|
|
|
};
|
|
|
|
)";
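// Checks that an inlined function keeps the script it was defined in: level1
// and action must be attributed to script_a even though they are inlined into
// start, which lives in script_b.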
TEST(CrossScriptInliningCallerLineNumbers) {
  // Skip test if concurrent sparkplug is enabled. The test becomes flaky,
  // since it requires a precise trace.
  if (i::FLAG_concurrent_sparkplug) return;

  i::FLAG_allow_natives_syntax = true;
  v8::Isolate* isolate = CcTest::isolate();
  LocalContext env;
  v8::CpuProfiler::UseDetailedSourcePositionsForProfiling(isolate);
  v8::HandleScope scope(isolate);
  ProfilerHelper helper(env.local());

  v8::Local<v8::Script> script_a =
      CompileWithOrigin(cross_script_source_a, "script_a", false);
  v8::Local<v8::Script> script_b =
      CompileWithOrigin(cross_script_source_b, "script_b", false);

  script_a->Run(env.local()).ToLocalChecked();
  script_b->Run(env.local()).ToLocalChecked();

  v8::Local<v8::Function> function = GetFunction(env.local(), "start");

  v8::Local<v8::Value> args[] = {v8::Integer::New(env->GetIsolate(), 10)};
  static const unsigned min_samples = 1000;
  static const unsigned min_ext_samples = 0;
  v8::CpuProfile* profile =
      helper.Run(function, args, arraysize(args), min_samples, min_ext_samples,
                 v8::CpuProfilingMode::kCallerLineNumbers);
  CHECK(profile);

  const v8::CpuProfileNode* root = profile->GetTopDownRoot();
  const v8::CpuProfileNode* start_node = GetChild(env.local(), root, "start");
  CHECK_EQ(0, strcmp("script_b", start_node->GetScriptResourceNameStr()));

  NameLinePair l19_a10[] = {{"level1", 11}, {"action", 15}};
  CheckBranch(start_node, l19_a10, arraysize(l19_a10));

  const v8::CpuProfileNode* level1_node =
      GetChild(env.local(), start_node, "level1");
  CHECK_EQ(0, strcmp("script_a", level1_node->GetScriptResourceNameStr()));

  const v8::CpuProfileNode* action_node =
      GetChild(env.local(), level1_node, "action");
  CHECK_EQ(0, strcmp("script_a", action_node->GetScriptResourceNameStr()));

  profile->Delete();
}

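// A deeper variant of the test above: start, level1, level2 and
// level3/action each live in a script of their own, and every inlined frame
// must keep its defining script.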
static const char* cross_script_source_c = R"(
|
|
|
|
function level3() {
|
|
|
|
const a = action(1);
|
|
|
|
const b = action(100);
|
|
|
|
const c = action(1);
|
|
|
|
return a + b + c;
|
|
|
|
}
|
|
|
|
%NeverOptimizeFunction(action);
|
|
|
|
function action(n) {
|
|
|
|
var s = 0;
|
|
|
|
for (var i = 0; i < n; ++i) s += i*i*i;
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
)";
|
|
|
|
|
|
|
|
static const char* cross_script_source_d = R"(
|
|
|
|
function level2() {
|
|
|
|
const p = level3();
|
|
|
|
const q = level3();
|
|
|
|
return p + q;
|
|
|
|
}
|
|
|
|
)";
|
|
|
|
|
|
|
|
static const char* cross_script_source_e = R"(
|
|
|
|
function level1() {
|
|
|
|
return level2() + 1000;
|
|
|
|
}
|
|
|
|
)";
|
|
|
|
|
|
|
|
static const char* cross_script_source_f = R"(
|
|
|
|
%PrepareFunctionForOptimization(start);
|
|
|
|
%PrepareFunctionForOptimization(level1);
|
|
|
|
%PrepareFunctionForOptimization(level2);
|
|
|
|
%PrepareFunctionForOptimization(level3);
|
|
|
|
start(1);
|
|
|
|
start(1);
|
|
|
|
%OptimizeFunctionOnNextCall(start);
|
|
|
|
%OptimizeFunctionOnNextCall(level1);
|
|
|
|
%OptimizeFunctionOnNextCall(level2);
|
|
|
|
%OptimizeFunctionOnNextCall(level3);
|
|
|
|
start(1);
|
|
|
|
function start(n) {
|
|
|
|
while (--n)
|
|
|
|
level1();
|
|
|
|
};
|
|
|
|
)";
TEST(CrossScriptInliningCallerLineNumbers2) {
  // Skip test if concurrent sparkplug is enabled. The test becomes flaky,
  // since it requires a precise trace.
  if (i::FLAG_concurrent_sparkplug) return;

  i::FLAG_allow_natives_syntax = true;
  LocalContext env;
  v8::HandleScope scope(CcTest::isolate());
  ProfilerHelper helper(env.local());

  v8::Local<v8::Script> script_c =
      CompileWithOrigin(cross_script_source_c, "script_c", false);
  v8::Local<v8::Script> script_d =
      CompileWithOrigin(cross_script_source_d, "script_d", false);
  v8::Local<v8::Script> script_e =
      CompileWithOrigin(cross_script_source_e, "script_e", false);
  v8::Local<v8::Script> script_f =
      CompileWithOrigin(cross_script_source_f, "script_f", false);

  script_c->Run(env.local()).ToLocalChecked();
  script_d->Run(env.local()).ToLocalChecked();
  script_e->Run(env.local()).ToLocalChecked();
  script_f->Run(env.local()).ToLocalChecked();

  v8::Local<v8::Function> function = GetFunction(env.local(), "start");

  v8::Local<v8::Value> args[] = {v8::Integer::New(env->GetIsolate(), 10)};
  static const unsigned min_samples = 1000;
  static const unsigned min_ext_samples = 0;
  v8::CpuProfile* profile =
      helper.Run(function, args, arraysize(args), min_samples, min_ext_samples,
                 v8::CpuProfilingMode::kCallerLineNumbers);
  CHECK(profile);

  const v8::CpuProfileNode* root = profile->GetTopDownRoot();
  const v8::CpuProfileNode* start_node = GetChild(env.local(), root, "start");
  CHECK_EQ(0, strcmp("script_f", start_node->GetScriptResourceNameStr()));

  const v8::CpuProfileNode* level1_node =
      GetChild(env.local(), start_node, "level1");
  CHECK_EQ(0, strcmp("script_e", level1_node->GetScriptResourceNameStr()));

  const v8::CpuProfileNode* level2_node =
      GetChild(env.local(), level1_node, "level2");
  CHECK_EQ(0, strcmp("script_d", level2_node->GetScriptResourceNameStr()));

  const v8::CpuProfileNode* level3_node =
      GetChild(env.local(), level2_node, "level3");
  CHECK_EQ(0, strcmp("script_c", level3_node->GetScriptResourceNameStr()));

  const v8::CpuProfileNode* action_node =
      GetChild(env.local(), level3_node, "action");
  CHECK_EQ(0, strcmp("script_c", action_node->GetScriptResourceNameStr()));

  profile->Delete();
}

// [Top down]:
//     0   (root) #0 1
//     2    (program) #0 2
//     3    (idle) #0 3
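// Samples recorded while the embedder has marked the isolate idle via
// SetIdle(true) are attributed to the synthetic (idle) entry instead of
// (program).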
TEST(IdleTime) {
  LocalContext env;
  v8::HandleScope scope(env->GetIsolate());
  v8::CpuProfiler* cpu_profiler = v8::CpuProfiler::New(env->GetIsolate());

  v8::Local<v8::String> profile_name = v8_str("my_profile");
  cpu_profiler->StartProfiling(profile_name);

  i::Isolate* isolate = CcTest::i_isolate();
  i::ProfilerEventsProcessor* processor =
      reinterpret_cast<i::CpuProfiler*>(cpu_profiler)->processor();

  processor->AddCurrentStack(true);
  isolate->SetIdle(true);
  for (int i = 0; i < 3; i++) {
    processor->AddCurrentStack(true);
  }
  isolate->SetIdle(false);
  processor->AddCurrentStack(true);

  v8::CpuProfile* profile = cpu_profiler->StopProfiling(profile_name);
  CHECK(profile);
  // Dump collected profile to have a better diagnostic in case of failure.
  reinterpret_cast<i::CpuProfile*>(profile)->Print();

  const v8::CpuProfileNode* root = profile->GetTopDownRoot();
  const v8::CpuProfileNode* program_node =
      GetChild(env.local(), root, CodeEntry::kProgramEntryName);
  CHECK_EQ(0, program_node->GetChildrenCount());
  CHECK_GE(program_node->GetHitCount(), 2u);

  const v8::CpuProfileNode* idle_node =
      GetChild(env.local(), root, CodeEntry::kIdleEntryName);
  CHECK_EQ(0, idle_node->GetChildrenCount());
  CHECK_GE(idle_node->GetHitCount(), 3u);

  profile->Delete();
  cpu_profiler->Dispose();
}

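// Helper that asserts the name, script name and id, source position, parent
// and source type recorded for a single profile node.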
static void CheckFunctionDetails(v8::Isolate* isolate,
                                 const v8::CpuProfileNode* node,
                                 const char* name, const char* script_name,
                                 bool is_shared_cross_origin, int script_id,
                                 int line, int column,
                                 const v8::CpuProfileNode* parent) {
  v8::Local<v8::Context> context = isolate->GetCurrentContext();
  CHECK(v8_str(name)->Equals(context, node->GetFunctionName()).FromJust());
  CHECK_EQ(0, strcmp(name, node->GetFunctionNameStr()));
  CHECK(v8_str(script_name)
            ->Equals(context, node->GetScriptResourceName())
            .FromJust());
  CHECK_EQ(0, strcmp(script_name, node->GetScriptResourceNameStr()));
  CHECK_EQ(script_id, node->GetScriptId());
  CHECK_EQ(line, node->GetLineNumber());
  CHECK_EQ(column, node->GetColumnNumber());
  CHECK_EQ(parent, node->GetParent());
  CHECK_EQ(v8::CpuProfileNode::kScript, node->GetSourceType());
}

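// Checks that every node of a two-script profile reports the source location
// (script name, script id, line, column) of the function it represents.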
TEST(FunctionDetails) {
  i::FLAG_allow_natives_syntax = true;
  v8::HandleScope scope(CcTest::isolate());
  v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
  v8::Context::Scope context_scope(env);
  ProfilerHelper helper(env);

  v8::Local<v8::Script> script_a = CompileWithOrigin(
      "%NeverOptimizeFunction(foo);\n"
      "%NeverOptimizeFunction(bar);\n"
      " function foo\n() { bar(); }\n"
      " function bar() { startProfiling(); }\n",
      "script_a", false);
  script_a->Run(env).ToLocalChecked();
  v8::Local<v8::Script> script_b = CompileWithOrigin(
      "%NeverOptimizeFunction(baz);"
      "\n\n   function baz() { foo(); }\n"
      "\n\nbaz();\n"
      "stopProfiling();\n",
      "script_b", true);
  script_b->Run(env).ToLocalChecked();
  const v8::CpuProfile* profile = i::ProfilerExtension::last_profile;
  reinterpret_cast<const i::CpuProfile*>(profile)->Print();
  // The tree should look like this:
  //  0  (root):0 3 0 #1
  //  0    :0 0 5 #2 script_b:0
  //  0      baz:3 0 5 #3 script_b:3
  //             bailed out due to 'Optimization is always disabled'
  //  0        foo:4 0 4 #4 script_a:4
  //               bailed out due to 'Optimization is always disabled'
  //  0          bar:5 0 4 #5 script_a:5
  //                 bailed out due to 'Optimization is always disabled'
  //  0            startProfiling:0 2 0 #6
  const v8::CpuProfileNode* root = profile->GetTopDownRoot();
  CHECK_EQ(root->GetParent(), nullptr);
  const v8::CpuProfileNode* script = GetChild(env, root, "");
  CheckFunctionDetails(env->GetIsolate(), script, "", "script_b", true,
                       script_b->GetUnboundScript()->GetId(),
                       v8::CpuProfileNode::kNoLineNumberInfo,
                       CpuProfileNode::kNoColumnNumberInfo, root);
  const v8::CpuProfileNode* baz = GetChild(env, script, "baz");
  CheckFunctionDetails(env->GetIsolate(), baz, "baz", "script_b", true,
                       script_b->GetUnboundScript()->GetId(), 3, 16, script);
  const v8::CpuProfileNode* foo = GetChild(env, baz, "foo");
  CheckFunctionDetails(env->GetIsolate(), foo, "foo", "script_a", false,
                       script_a->GetUnboundScript()->GetId(), 4, 1, baz);
  const v8::CpuProfileNode* bar = GetChild(env, foo, "bar");
  CheckFunctionDetails(env->GetIsolate(), bar, "bar", "script_a", false,
                       script_a->GetUnboundScript()->GetId(), 5, 14, foo);
}

TEST(FunctionDetailsInlining) {
|
2022-04-28 14:22:23 +00:00
|
|
|
if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_turbofan) return;
|
2018-01-03 11:11:11 +00:00
|
|
|
i::FLAG_allow_natives_syntax = true;
|
|
|
|
v8::HandleScope scope(CcTest::isolate());
|
2019-01-16 17:31:37 +00:00
|
|
|
v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
|
2018-01-03 11:11:11 +00:00
|
|
|
v8::Context::Scope context_scope(env);
|
|
|
|
ProfilerHelper helper(env);
|
|
|
|
|
|
|
|
// alpha is in a_script, beta in b_script. beta is
|
|
|
|
// inlined in alpha, but it should be attributed to b_script.
|
|
|
|
|
|
|
|
  v8::Local<v8::Script> script_b = CompileWithOrigin(
      "function beta(k) {\n"
      "  let sum = 2;\n"
      "  for(let i = 0; i < k; i ++) {\n"
      "    sum += i;\n"
      "    sum = sum + 'a';\n"
      "  }\n"
      "  return sum;\n"
      "}\n"
      "\n",
      "script_b", true);

  v8::Local<v8::Script> script_a = CompileWithOrigin(
      "function alpha(p) {\n"
      "  let res = beta(p);\n"
      "  res = res + res;\n"
      "  return res;\n"
      "}\n"
      "let p = 2;\n"
      "\n"
      "\n"
      "// Warm up before profiling or the inlining doesn't happen.\n"
      "%PrepareFunctionForOptimization(alpha);\n"
      "p = alpha(p);\n"
      "p = alpha(p);\n"
      "%OptimizeFunctionOnNextCall(alpha);\n"
      "p = alpha(p);\n"
      "\n"
      "\n"
      "startProfiling();\n"
      "for(let i = 0; i < 10000; i++) {\n"
      "  p = alpha(p);\n"
      "}\n"
      "stopProfiling();\n"
      "\n"
      "\n",
      "script_a", false);

  script_b->Run(env).ToLocalChecked();
  script_a->Run(env).ToLocalChecked();

  const v8::CpuProfile* profile = i::ProfilerExtension::last_profile;
  reinterpret_cast<const i::CpuProfile*>(profile)->Print();
  // The tree should look like this:
  //  0  (root) 0 #1
  //  5    (program) 0 #6
  //  2     14 #2 script_a:0
  //    ;;; deopted at script_id: 14 position: 299 with reason
  //    'Insufficient type feedback for call'.
  //  1      alpha 14 #4 script_a:1
  //  9        beta 13 #5 script_b:0
  //  0     startProfiling 0 #3

  const v8::CpuProfileNode* root = profile->GetTopDownRoot();
  CHECK_EQ(root->GetParent(), nullptr);
  const v8::CpuProfileNode* script = GetChild(env, root, "");
  CheckFunctionDetails(env->GetIsolate(), script, "", "script_a", false,
                       script_a->GetUnboundScript()->GetId(),
                       v8::CpuProfileNode::kNoLineNumberInfo,
                       v8::CpuProfileNode::kNoColumnNumberInfo, root);
  const v8::CpuProfileNode* alpha = FindChild(env, script, "alpha");
  // Return early if profiling didn't sample alpha.
  if (!alpha) return;
  CheckFunctionDetails(env->GetIsolate(), alpha, "alpha", "script_a", false,
                       script_a->GetUnboundScript()->GetId(), 1, 15, script);
  const v8::CpuProfileNode* beta = FindChild(env, alpha, "beta");
  if (!beta) return;
  CheckFunctionDetails(env->GetIsolate(), beta, "beta", "script_b", true,
                       script_b->GetUnboundScript()->GetId(), 1, 14, alpha);
}

static const char* pre_profiling_osr_script = R"(
  const kMinIterationDurationMs = 1;
  function whenPass(pass, optDuration) {
    if (pass == 5) startProfiling();
  }
  function hot(optDuration, deoptDuration) {
    %PrepareFunctionForOptimization(hot);
    for (let pass = 0; pass <= optDuration + deoptDuration; pass++) {
      const startTime = Date.now();
      // Let a few passes go by to ensure we have enough feedback info
      if (pass == 3) %OptimizeOsr();
      // Force deoptimization. %DeoptimizeNow and %DeoptimizeFunction don't
      // deoptimize OSRs.
      if (pass == optDuration) whenPass = () => {};
      whenPass(pass, optDuration);
      while (Date.now() - startTime < kMinIterationDurationMs) {
        for (let j = 0; j < 1000; j++) {
          x = Math.random() * j;
        }
      }
    }
  }
  function notHot(optDuration, deoptDuration) {
    hot(optDuration, deoptDuration);
    stopProfiling()
  }
)";

// Testing profiling of OSR code that was OSR optimized before profiling
// started. Currently the behavior is not quite right so we're currently
// testing a deopt event being sent to the sampling thread for a function
// it knows nothing about. This deopt does mean we start getting samples
// for hot so we expect some samples, just fewer than for notHot.
//
// We should get something like:
//     0  (root):0 3 0 #1
//    12    (garbage collector):0 3 0 #5
//     5    notHot:22 0 4 #2
//    85      hot:5 0 4 #6
//     0      whenPass:2 0 4 #3
//     0        startProfiling:0 2 0 #4
//
// But currently get something like:
//     0  (root):0 3 0 #1
//    12    (garbage collector):0 3 0 #5
//    57    notHot:22 0 4 #2
//    33      hot:5 0 4 #6
//     0      whenPass:2 0 4 #3
//     0        startProfiling:0 2 0 #4

TEST(StartProfilingAfterOsr) {
  i::FLAG_allow_natives_syntax = true;
  v8::HandleScope scope(CcTest::isolate());
  v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
  v8::Context::Scope context_scope(env);
  ProfilerHelper helper(env);
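  // A 100 microsecond sampling interval keeps enough ticks landing in the
  // short optimized and deoptimized phases driven below.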
  helper.profiler()->SetSamplingInterval(100);
  CompileRun(pre_profiling_osr_script);
  v8::Local<v8::Function> function = GetFunction(env, "notHot");

  int32_t profiling_optimized_ms = 120;
  int32_t profiling_deoptimized_ms = 40;
  v8::Local<v8::Value> args[] = {
      v8::Integer::New(env->GetIsolate(), profiling_optimized_ms),
      v8::Integer::New(env->GetIsolate(), profiling_deoptimized_ms)};
  function->Call(env, env->Global(), arraysize(args), args).ToLocalChecked();
  const v8::CpuProfile* profile = i::ProfilerExtension::last_profile;
  CHECK(profile);
  reinterpret_cast<const i::CpuProfile*>(profile)->Print();
  const CpuProfileNode* root = profile->GetTopDownRoot();
  const v8::CpuProfileNode* notHotNode = GetChild(env, root, "notHot");
  const v8::CpuProfileNode* hotNode = GetChild(env, notHotNode, "hot");
  USE(hotNode);
  // If/when OSR sampling is fixed the following CHECK_GT could/should be
  // uncommented and the USE(hotNode) line deleted.
  // CHECK_GT(hotNode->GetHitCount(), notHotNode->GetHitCount());
}

TEST(DontStopOnFinishedProfileDelete) {
  v8::HandleScope scope(CcTest::isolate());
  v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
  v8::Context::Scope context_scope(env);

  v8::CpuProfiler* profiler = v8::CpuProfiler::New(env->GetIsolate());
  i::CpuProfiler* iprofiler = reinterpret_cast<i::CpuProfiler*>(profiler);

  CHECK_EQ(0, iprofiler->GetProfilesCount());
  v8::Local<v8::String> outer = v8_str("outer");
  profiler->StartProfiling(outer);
  CHECK_EQ(0, iprofiler->GetProfilesCount());

  v8::Local<v8::String> inner = v8_str("inner");
  profiler->StartProfiling(inner);
  CHECK_EQ(0, iprofiler->GetProfilesCount());

  v8::CpuProfile* inner_profile = profiler->StopProfiling(inner);
  CHECK(inner_profile);
  CHECK_EQ(1, iprofiler->GetProfilesCount());
  inner_profile->Delete();
  inner_profile = nullptr;
  CHECK_EQ(0, iprofiler->GetProfilesCount());

  v8::CpuProfile* outer_profile = profiler->StopProfiling(outer);
  CHECK(outer_profile);
  CHECK_EQ(1, iprofiler->GetProfilesCount());
  outer_profile->Delete();
  outer_profile = nullptr;
  CHECK_EQ(0, iprofiler->GetProfilesCount());
  profiler->Dispose();
}

const char* GetBranchDeoptReason(v8::Local<v8::Context> context,
                                 i::CpuProfile* iprofile, const char* branch[],
                                 int length) {
  v8::CpuProfile* profile = reinterpret_cast<v8::CpuProfile*>(iprofile);
  const ProfileNode* iopt_function = nullptr;
  iopt_function = GetSimpleBranch(context, profile, branch, length);
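  // The deopt reason can hang off either of the two profile nodes for the
  // function (see the expected profile in CollectDeoptEvents), so fall back
  // to the parent node when the sampled node carries no deopt info.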
  if (iopt_function->deopt_infos().size() == 0) {
    iopt_function = iopt_function->parent();
  }
  CHECK_LE(1U, iopt_function->deopt_infos().size());
  return iopt_function->deopt_infos()[0].deopt_reason;
}

// deopt at top function
TEST(CollectDeoptEvents) {
  if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_turbofan) return;
  i::FLAG_allow_natives_syntax = true;
  v8::HandleScope scope(CcTest::isolate());
  v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
  v8::Context::Scope context_scope(env);
  ProfilerHelper helper(env);
  i::CpuProfiler* iprofiler =
      reinterpret_cast<i::CpuProfiler*>(helper.profiler());

  const char opt_source[] =
      "function opt_function%d(value, depth) {\n"
      "  if (depth) return opt_function%d(value, depth - 1);\n"
      "\n"
      "  return 10 / value;\n"
      "}\n"
      "\n";
  for (int i = 0; i < 3; ++i) {
    base::EmbeddedVector<char, sizeof(opt_source) + 100> buffer;
    base::SNPrintF(buffer, opt_source, i, i);
    v8::Script::Compile(env, v8_str(buffer.begin()))
        .ToLocalChecked()
        ->Run(env)
        .ToLocalChecked();
  }

  const char* source =
      "startProfiling();\n"
      "\n"
      "%PrepareFunctionForOptimization(opt_function0);\n"
      "\n"
      "opt_function0(1, 1);\n"
      "\n"
      "%OptimizeFunctionOnNextCall(opt_function0)\n"
      "\n"
      "opt_function0(1, 1);\n"
      "\n"
      "opt_function0(undefined, 1);\n"
      "\n"
      "%PrepareFunctionForOptimization(opt_function1);\n"
      "\n"
      "opt_function1(1, 1);\n"
      "\n"
      "%OptimizeFunctionOnNextCall(opt_function1)\n"
      "\n"
      "opt_function1(1, 1);\n"
      "\n"
      "opt_function1(NaN, 1);\n"
      "\n"
      "%PrepareFunctionForOptimization(opt_function2);\n"
      "\n"
      "opt_function2(1, 1);\n"
      "\n"
      "%OptimizeFunctionOnNextCall(opt_function2)\n"
      "\n"
      "opt_function2(1, 1);\n"
      "\n"
      "opt_function2(0, 1);\n"
      "\n"
      "stopProfiling();\n"
      "\n";

  v8::Script::Compile(env, v8_str(source))
      .ToLocalChecked()
      ->Run(env)
      .ToLocalChecked();
  i::CpuProfile* iprofile = iprofiler->GetProfile(0);
  iprofile->Print();
  /* The expected profile. Note that the deopt reasons can hang off either of
     the two nodes for each function, depending on the exact timing at
     runtime.
  [Top down]:
      0  (root) 0 #1
     23     32 #2
      1      opt_function2 31 #7
      1        opt_function2 31 #8
                  ;;; deopted at script_id: 31 position: 106 with reason
                  'division by zero'.
      2      opt_function0 29 #3
      4        opt_function0 29 #4
                  ;;; deopted at script_id: 29 position: 108 with reason
                  'not a heap number'.
      0      opt_function1 30 #5
      1        opt_function1 30 #6
                  ;;; deopted at script_id: 30 position: 108 with reason
                  'lost precision or NaN'.
  */

  {
    const char* branch[] = {"", "opt_function0", "opt_function0"};
    const char* deopt_reason =
        GetBranchDeoptReason(env, iprofile, branch, arraysize(branch));
    if (deopt_reason != reason(i::DeoptimizeReason::kNotAHeapNumber) &&
        deopt_reason != reason(i::DeoptimizeReason::kNotASmi)) {
      FATAL("%s", deopt_reason);
    }
  }
  {
    const char* branch[] = {"", "opt_function1", "opt_function1"};
    const char* deopt_reason =
        GetBranchDeoptReason(env, iprofile, branch, arraysize(branch));
    if (deopt_reason != reason(i::DeoptimizeReason::kNaN) &&
        deopt_reason != reason(i::DeoptimizeReason::kLostPrecisionOrNaN) &&
        deopt_reason != reason(i::DeoptimizeReason::kNotASmi)) {
      FATAL("%s", deopt_reason);
    }
  }
  {
    const char* branch[] = {"", "opt_function2", "opt_function2"};
    CHECK_EQ(reason(i::DeoptimizeReason::kDivisionByZero),
             GetBranchDeoptReason(env, iprofile, branch, arraysize(branch)));
  }
  iprofiler->DeleteProfile(iprofile);
}

TEST(SourceLocation) {
  i::FLAG_always_turbofan = true;
  LocalContext env;
  v8::HandleScope scope(CcTest::isolate());

  const char* source =
      "function CompareStatementWithThis() {\n"
      "  if (this === 1) {}\n"
      "}\n"
      "CompareStatementWithThis();\n";

  v8::Script::Compile(env.local(), v8_str(source))
      .ToLocalChecked()
      ->Run(env.local())
      .ToLocalChecked();
}

static const char* inlined_source =
    "function opt_function(left, right) { var k = left*right; return k + 1; "
    "}\n";
// 0.........1.........2.........3.........4....*....5.........6......*..7

// deopt at the first level inlined function
TEST(DeoptAtFirstLevelInlinedSource) {
  if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_turbofan) return;
  i::FLAG_allow_natives_syntax = true;
  v8::HandleScope scope(CcTest::isolate());
  v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
  v8::Context::Scope context_scope(env);
  ProfilerHelper helper(env);
  i::CpuProfiler* iprofiler =
      reinterpret_cast<i::CpuProfiler*>(helper.profiler());

  // 0.........1.........2.........3.........4.........5.........6.........7
  const char* source =
      "function test(left, right) { return opt_function(left, right); }\n"
      "\n"
      "startProfiling();\n"
      "\n"
      "%EnsureFeedbackVectorForFunction(opt_function);\n"
      "%PrepareFunctionForOptimization(test);\n"
      "\n"
      "test(10, 10);\n"
      "\n"
      "%OptimizeFunctionOnNextCall(test)\n"
      "\n"
      "test(10, 10);\n"
      "\n"
      "test(undefined, 1e9);\n"
      "\n"
      "stopProfiling();\n"
      "\n";

  v8::Local<v8::Script> inlined_script = v8_compile(inlined_source);
  inlined_script->Run(env).ToLocalChecked();
  int inlined_script_id = inlined_script->GetUnboundScript()->GetId();

  v8::Local<v8::Script> script = v8_compile(source);
  script->Run(env).ToLocalChecked();
  int script_id = script->GetUnboundScript()->GetId();

  i::CpuProfile* iprofile = iprofiler->GetProfile(0);
  iprofile->Print();
  /* The expected profile output
  [Top down]:
      0  (root) 0 #1
     10     30 #2
      1      test 30 #3
                ;;; deopted at script_id: 29 position: 45 with reason
                'not a heap number'.
                ;;; Inline point: script_id 30 position: 36.
      4        opt_function 29 #4
  */
  v8::CpuProfile* profile = reinterpret_cast<v8::CpuProfile*>(iprofile);

  const char* branch[] = {"", "test"};
  const ProfileNode* itest_node =
      GetSimpleBranch(env, profile, branch, arraysize(branch));
  const std::vector<v8::CpuProfileDeoptInfo>& deopt_infos =
      itest_node->deopt_infos();
  CHECK_EQ(1U, deopt_infos.size());

  const v8::CpuProfileDeoptInfo& info = deopt_infos[0];
  CHECK(reason(i::DeoptimizeReason::kNotASmi) == info.deopt_reason ||
        reason(i::DeoptimizeReason::kNotAHeapNumber) == info.deopt_reason);
  CHECK_EQ(2U, info.stack.size());
  CHECK_EQ(inlined_script_id, info.stack[0].script_id);
  CHECK_LE(dist(offset(inlined_source, "*right"), info.stack[0].position), 1);
  CHECK_EQ(script_id, info.stack[1].script_id);
  CHECK_EQ(offset(source, "opt_function(left,"), info.stack[1].position);

  iprofiler->DeleteProfile(iprofile);
}

// deopt at the second level inlined function
TEST(DeoptAtSecondLevelInlinedSource) {
  if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_turbofan) return;
  i::FLAG_allow_natives_syntax = true;
  v8::HandleScope scope(CcTest::isolate());
  v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
  v8::Context::Scope context_scope(env);
  ProfilerHelper helper(env);
  i::CpuProfiler* iprofiler =
      reinterpret_cast<i::CpuProfiler*>(helper.profiler());

  // 0.........1.........2.........3.........4.........5.........6.........7
  const char* source =
      "function test2(left, right) { return opt_function(left, right); }\n"
      "function test1(left, right) { return test2(left, right); } \n"
      "\n"
      "startProfiling();\n"
      "\n"
      "%EnsureFeedbackVectorForFunction(opt_function);\n"
      "%EnsureFeedbackVectorForFunction(test2);\n"
      "%PrepareFunctionForOptimization(test1);\n"
      "\n"
      "test1(10, 10);\n"
      "\n"
      "%OptimizeFunctionOnNextCall(test1)\n"
      "\n"
      "test1(10, 10);\n"
      "\n"
      "test1(undefined, 1e9);\n"
      "\n"
      "stopProfiling();\n"
      "\n";

  v8::Local<v8::Script> inlined_script = v8_compile(inlined_source);
  inlined_script->Run(env).ToLocalChecked();
  int inlined_script_id = inlined_script->GetUnboundScript()->GetId();

  v8::Local<v8::Script> script = v8_compile(source);
  script->Run(env).ToLocalChecked();
  int script_id = script->GetUnboundScript()->GetId();

  i::CpuProfile* iprofile = iprofiler->GetProfile(0);
  iprofile->Print();
  /* The expected profile output
  [Top down]:
      0  (root) 0 #1
     11     30 #2
      1      test1 30 #3
                ;;; deopted at script_id: 29 position: 45 with reason
                'not a heap number'.
                ;;; Inline point: script_id 30 position: 37.
                ;;; Inline point: script_id 30 position: 103.
      1        test2 30 #4
      3          opt_function 29 #5
  */

  v8::CpuProfile* profile = reinterpret_cast<v8::CpuProfile*>(iprofile);

  const char* branch[] = {"", "test1"};
  const ProfileNode* itest_node =
      GetSimpleBranch(env, profile, branch, arraysize(branch));
  const std::vector<v8::CpuProfileDeoptInfo>& deopt_infos =
      itest_node->deopt_infos();
  CHECK_EQ(1U, deopt_infos.size());

  const v8::CpuProfileDeoptInfo info = deopt_infos[0];
  CHECK(reason(i::DeoptimizeReason::kNotASmi) == info.deopt_reason ||
        reason(i::DeoptimizeReason::kNotAHeapNumber) == info.deopt_reason);
  CHECK_EQ(3U, info.stack.size());
  CHECK_EQ(inlined_script_id, info.stack[0].script_id);
  CHECK_LE(dist(offset(inlined_source, "*right"), info.stack[0].position), 1);
  CHECK_EQ(script_id, info.stack[1].script_id);
  CHECK_EQ(offset(source, "opt_function(left,"), info.stack[1].position);
  CHECK_EQ(offset(source, "test2(left, right);"), info.stack[2].position);

  iprofiler->DeleteProfile(iprofile);
}

// deopt in untracked function
TEST(DeoptUntrackedFunction) {
  if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_turbofan) return;
  i::FLAG_allow_natives_syntax = true;
  v8::HandleScope scope(CcTest::isolate());
  v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
  v8::Context::Scope context_scope(env);
  ProfilerHelper helper(env);
  i::CpuProfiler* iprofiler =
      reinterpret_cast<i::CpuProfiler*>(helper.profiler());

  // 0.........1.........2.........3.........4.........5.........6.........7
  const char* source =
      "function test(left, right) { return opt_function(left, right); }\n"
      "\n"
      "%EnsureFeedbackVectorForFunction(opt_function);"
      "%PrepareFunctionForOptimization(test);\n"
      "\n"
      "test(10, 10);\n"
      "\n"
      "%OptimizeFunctionOnNextCall(test)\n"
      "\n"
      "test(10, 10);\n"
      "\n"
      "startProfiling();\n"  // profiler started after compilation.
      "\n"
      "test(undefined, 10);\n"
      "\n"
      "stopProfiling();\n"
      "\n";

  v8::Local<v8::Script> inlined_script = v8_compile(inlined_source);
  inlined_script->Run(env).ToLocalChecked();

  v8::Local<v8::Script> script = v8_compile(source);
  script->Run(env).ToLocalChecked();

  i::CpuProfile* iprofile = iprofiler->GetProfile(0);
  iprofile->Print();
  v8::CpuProfile* profile = reinterpret_cast<v8::CpuProfile*>(iprofile);

  const char* branch[] = {"", "test"};
  const ProfileNode* itest_node =
      GetSimpleBranch(env, profile, branch, arraysize(branch));
  CHECK_EQ(0U, itest_node->deopt_infos().size());

  iprofiler->DeleteProfile(iprofile);
}

using v8::platform::tracing::TraceBuffer;
using v8::platform::tracing::TraceConfig;
using v8::platform::tracing::TraceObject;

namespace {

#ifdef V8_USE_PERFETTO
class CpuProfilerListener : public platform::tracing::TraceEventListener {
 public:
  void ProcessPacket(const ::perfetto::protos::TracePacket& packet) {
    auto& seq_state = sequence_state_[packet.trusted_packet_sequence_id()];
    if (packet.incremental_state_cleared()) seq_state = SequenceState{};

    if (!packet.has_track_event()) return;

    // Update incremental state.
    if (packet.has_interned_data()) {
      const auto& interned_data = packet.interned_data();
      for (const auto& it : interned_data.event_names()) {
        CHECK_EQ(seq_state.event_names_.find(it.iid()),
                 seq_state.event_names_.end());
        seq_state.event_names_[it.iid()] = it.name();
      }
    }
    const auto& track_event = packet.track_event();
    auto name = seq_state.event_names_[track_event.name_iid()];
    if (name != "Profile" && name != "ProfileChunk") return;

    CHECK_EQ(1, track_event.debug_annotations_size());
    CHECK(track_event.debug_annotations()[0].has_legacy_json_value());
    CHECK(!profile_id_ ||
          track_event.legacy_event().unscoped_id() == profile_id_);
    profile_id_ = track_event.legacy_event().unscoped_id();
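    // Stitch the per-event JSON payloads into a single JSON array;
    // result_json() appends the closing bracket.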
    result_json_ += result_json_.empty() ? "[" : ",\n";
    result_json_ += track_event.debug_annotations()[0].legacy_json_value();
  }

  const std::string& result_json() {
    result_json_ += "]";
    return result_json_;
  }
  void Reset() {
    result_json_.clear();
    profile_id_ = 0;
    sequence_state_.clear();
  }

 private:
  std::string result_json_;
  uint64_t profile_id_ = 0;

  struct SequenceState {
    std::map<uint64_t, std::string> event_names_;
  };
  std::map<uint32_t, SequenceState> sequence_state_;
};

#else

class CpuProfileEventChecker : public v8::platform::tracing::TraceWriter {
 public:
  void AppendTraceEvent(TraceObject* trace_event) override {
    if (trace_event->name() != std::string("Profile") &&
        trace_event->name() != std::string("ProfileChunk"))
      return;
    CHECK(!profile_id_ || trace_event->id() == profile_id_);
    CHECK_EQ(1, trace_event->num_args());
    CHECK_EQ(TRACE_VALUE_TYPE_CONVERTABLE, trace_event->arg_types()[0]);
    profile_id_ = trace_event->id();
    v8::ConvertableToTraceFormat* arg =
        trace_event->arg_convertables()[0].get();
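    // As in the perfetto listener, accumulate the chunk payloads into a
    // JSON array; Flush() appends the closing bracket.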
    result_json_ += result_json_.empty() ? "[" : ",\n";
    arg->AppendAsTraceFormat(&result_json_);
  }
  void Flush() override { result_json_ += "]"; }

  const std::string& result_json() const { return result_json_; }
  void Reset() {
    result_json_.clear();
    profile_id_ = 0;
  }

 private:
  std::string result_json_;
  uint64_t profile_id_ = 0;
};

#endif  // !V8_USE_PERFETTO

}  // namespace

TEST(TracingCpuProfiler) {
  v8::HandleScope scope(CcTest::isolate());
  v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
  v8::Context::Scope context_scope(env);

  auto* tracing_controller =
      static_cast<v8::platform::tracing::TracingController*>(
          i::V8::GetCurrentPlatform()->GetTracingController());

#ifdef V8_USE_PERFETTO
  std::ostringstream perfetto_output;
  tracing_controller->InitializeForPerfetto(&perfetto_output);
  CpuProfilerListener listener;
  tracing_controller->SetTraceEventListenerForTesting(&listener);
#else
  CpuProfileEventChecker* event_checker = new CpuProfileEventChecker();
  TraceBuffer* ring_buffer =
      TraceBuffer::CreateTraceBufferRingBuffer(1, event_checker);
  tracing_controller->Initialize(ring_buffer);
#endif
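
  // Retry with a longer profiled run each time until the trace contains at
  // least one sample that hit a JS frame.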
  bool result = false;
  for (int run_duration = 50; !result; run_duration += 50) {
    TraceConfig* trace_config = new TraceConfig();
    trace_config->AddIncludedCategory(
        TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler"));

    std::string test_code = R"(
        function foo() {
          let s = 0;
          const endTime = Date.now() + )" +
                            std::to_string(run_duration) + R"(
          while (Date.now() < endTime) s += Math.cos(s);
          return s;
        }
        foo();)";

    tracing_controller->StartTracing(trace_config);
    CompileRun(test_code.c_str());
#ifdef V8_USE_PERFETTO
    TrackEvent::Flush();
#endif
    tracing_controller->StopTracing();

#ifdef V8_USE_PERFETTO
    std::string profile_json = listener.result_json();
    listener.Reset();
#else
    std::string profile_json = event_checker->result_json();
    event_checker->Reset();
#endif
    CHECK_LT(0u, profile_json.length());
    printf("Profile JSON: %s\n", profile_json.c_str());
    std::string profile_checker_code = R"(
        function checkProfile(json) {
          const profile_header = json[0];
          if (typeof profile_header['startTime'] !== 'number')
            return false;
          return json.some(event => (event.lines || []).some(line => line)) &&
                 json.filter(e => e.cpuProfile && e.cpuProfile.nodes)
                     .some(e => e.cpuProfile.nodes
                                    .some(n => n.callFrame.codeType == "JS"));
        }
        checkProfile()" + profile_json +
                                       ")";
    result = CompileRunChecked(CcTest::isolate(), profile_checker_code.c_str())
                 ->IsTrue();
  }

#ifndef V8_USE_PERFETTO
  static_cast<v8::platform::tracing::TracingController*>(
      i::V8::GetCurrentPlatform()->GetTracingController())
      ->Initialize(nullptr);
#endif  // !V8_USE_PERFETTO
}

TEST(Issue763073) {
  class AllowNativesSyntax {
   public:
    AllowNativesSyntax()
        : allow_natives_syntax_(i::FLAG_allow_natives_syntax),
          trace_deopt_(i::FLAG_trace_deopt) {
      i::FLAG_allow_natives_syntax = true;
      i::FLAG_trace_deopt = true;
    }

    ~AllowNativesSyntax() {
      i::FLAG_allow_natives_syntax = allow_natives_syntax_;
      i::FLAG_trace_deopt = trace_deopt_;
    }

   private:
    bool allow_natives_syntax_;
    bool trace_deopt_;
  };

  AllowNativesSyntax allow_natives_syntax_scope;
  LocalContext env;
  v8::HandleScope scope(env->GetIsolate());

  CompileRun(
      "function f() { return function g(x) { }; }"
      // Create first closure, optimize it, and deoptimize it.
      "var g = f();"
      "%PrepareFunctionForOptimization(g);\n"
      "g(1);"
      "%OptimizeFunctionOnNextCall(g);"
      "g(1);"
      "%DeoptimizeFunction(g);"
      // Create second closure, and optimize it. This will create another
      // optimized code object and put it in the (shared) type feedback
      // vector.
      "var h = f();"
      "%PrepareFunctionForOptimization(h);\n"
      "h(1);"
      "%OptimizeFunctionOnNextCall(h);"
      "h(1);");

  // Start profiling.
  v8::CpuProfiler* cpu_profiler = v8::CpuProfiler::New(env->GetIsolate());
  v8::Local<v8::String> profile_name = v8_str("test");

  // Here we test that the heap iteration upon profiling start is not
  // confused by having a deoptimized code object for a closure while
  // having a different optimized code object in the type feedback vector.
  cpu_profiler->StartProfiling(profile_name);
  v8::CpuProfile* p = cpu_profiler->StopProfiling(profile_name);
  p->Delete();
  cpu_profiler->Dispose();
}

static const char* js_collect_sample_api_source =
    "%NeverOptimizeFunction(start);\n"
    "function start() {\n"
    "  CallStaticCollectSample();\n"
    "}";

static void CallStaticCollectSample(
    const v8::FunctionCallbackInfo<v8::Value>& info) {
  v8::CpuProfiler::CollectSample(info.GetIsolate());
}

TEST(StaticCollectSampleAPI) {
  i::FLAG_allow_natives_syntax = true;
  LocalContext env;
  v8::HandleScope scope(env->GetIsolate());

  v8::Local<v8::FunctionTemplate> func_template =
      v8::FunctionTemplate::New(env->GetIsolate(), CallStaticCollectSample);
  v8::Local<v8::Function> func =
      func_template->GetFunction(env.local()).ToLocalChecked();
  func->SetName(v8_str("CallStaticCollectSample"));
  env->Global()
      ->Set(env.local(), v8_str("CallStaticCollectSample"), func)
      .FromJust();

  CompileRun(js_collect_sample_api_source);
  v8::Local<v8::Function> function = GetFunction(env.local(), "start");

  ProfilerHelper helper(env.local());
  v8::CpuProfile* profile = helper.Run(function, nullptr, 0, 100);

  const v8::CpuProfileNode* root = profile->GetTopDownRoot();
  const v8::CpuProfileNode* start_node = GetChild(env.local(), root, "start");
  GetChild(env.local(), start_node, "CallStaticCollectSample");

  profile->Delete();
}

TEST(CodeEntriesMemoryLeak) {
  v8::HandleScope scope(CcTest::isolate());
  v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
  v8::Context::Scope context_scope(env);

  std::string source = "function start() {}\n";
  for (int i = 0; i < 1000; ++i) {
    source += "function foo" + std::to_string(i) + "() { return " +
              std::to_string(i) +
              "; }\n"
              "foo" +
              std::to_string(i) + "();\n";
  }
  CompileRun(source.c_str());
  v8::Local<v8::Function> function = GetFunction(env, "start");

  ProfilerHelper helper(env);

  for (int j = 0; j < 100; ++j) {
    v8::CpuProfile* profile = helper.Run(function, nullptr, 0);
    profile->Delete();
  }
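
  // Every profile has been deleted, so the profiler should have released
  // its listener (and with it the accumulated code entries).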
  i::CpuProfiler* profiler =
      reinterpret_cast<i::CpuProfiler*>(helper.profiler());
  CHECK(!profiler->profiler_listener_for_test());
}

TEST(NativeFrameStackTrace) {
  // A test for issue https://crbug.com/768540
  // When a sample lands in a native function which has no EXIT frame, the
  // stack frame iterator used to bail out and produce an empty stack trace.
  // The source code below makes v8 call the
  // v8::internal::StringTable::TryStringToIndexOrLookupExisting native
  // function without producing an EXIT frame.
  v8::HandleScope scope(CcTest::isolate());
  v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
  v8::Context::Scope context_scope(env);

const char* source = R"(
|
|
|
|
function jsFunction() {
|
|
|
|
var s = {};
|
|
|
|
for (var i = 0; i < 1e4; ++i) {
|
|
|
|
for (var j = 0; j < 100; j++) {
|
|
|
|
s['item' + j] = 'alph';
|
|
|
|
}
|
|
|
|
}
|
|
|
|
})";
|
|
|
|
|
|
|
|
CompileRun(source);
|
|
|
|
v8::Local<v8::Function> function = GetFunction(env, "jsFunction");
|
|
|
|
|
|
|
|
ProfilerHelper helper(env);
|
|
|
|
|
2019-05-22 00:06:41 +00:00
|
|
|
  v8::CpuProfile* profile = helper.Run(function, nullptr, 0, 100, 0);

  // Count the fraction of samples landing in 'jsFunction' (valid stack)
  // vs '(program)' (no stack captured).
  const v8::CpuProfileNode* root = profile->GetTopDownRoot();
  const v8::CpuProfileNode* js_function = FindChild(root, "jsFunction");
  const v8::CpuProfileNode* program = FindChild(root, "(program)");
  if (program) {
    unsigned js_function_samples = TotalHitCount(js_function);
    unsigned program_samples = TotalHitCount(program);
    double valid_samples_ratio =
        1. * js_function_samples / (js_function_samples + program_samples);
    i::PrintF("Ratio: %f\n", valid_samples_ratio);
    // TODO(alph): Investigate other causes of dropped frames. The ratio
    // should be close to 99%.
    CHECK_GE(valid_samples_ratio, 0.3);
  }

  profile->Delete();
}

TEST(SourcePositionTable) {
  i::SourcePositionTable info;

  // Newly created tables should return NoLineNumberInfo for any lookup.
  int no_info = v8::CpuProfileNode::kNoLineNumberInfo;
  CHECK_EQ(no_info, info.GetSourceLineNumber(std::numeric_limits<int>::min()));
  CHECK_EQ(no_info, info.GetSourceLineNumber(0));
  CHECK_EQ(SourcePosition::kNotInlined, info.GetInliningId(0));
  CHECK_EQ(no_info, info.GetSourceLineNumber(1));
  CHECK_EQ(no_info, info.GetSourceLineNumber(9));
  CHECK_EQ(no_info, info.GetSourceLineNumber(10));
  CHECK_EQ(no_info, info.GetSourceLineNumber(11));
  CHECK_EQ(no_info, info.GetSourceLineNumber(19));
  CHECK_EQ(no_info, info.GetSourceLineNumber(20));
  CHECK_EQ(no_info, info.GetSourceLineNumber(21));
  CHECK_EQ(no_info, info.GetSourceLineNumber(100));
  CHECK_EQ(SourcePosition::kNotInlined, info.GetInliningId(100));
  CHECK_EQ(no_info, info.GetSourceLineNumber(std::numeric_limits<int>::max()));
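
  // Add two entries: pc offset 10 maps to line 1 and pc offset 20 maps to
  // line 2; the checks below pin down the behavior at and around the
  // boundaries.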
  info.SetPosition(10, 1, SourcePosition::kNotInlined);
  info.SetPosition(20, 2, SourcePosition::kNotInlined);

  // The only valid return values are 1 or 2 - every pc maps to a line
  // number.
CHECK_EQ(1, info.GetSourceLineNumber(std::numeric_limits<int>::min()));
|
|
|
|
CHECK_EQ(1, info.GetSourceLineNumber(0));
|
|
|
|
CHECK_EQ(1, info.GetSourceLineNumber(1));
|
|
|
|
CHECK_EQ(1, info.GetSourceLineNumber(9));
|
|
|
|
CHECK_EQ(1, info.GetSourceLineNumber(10));
|
|
|
|
CHECK_EQ(1, info.GetSourceLineNumber(11));
|
|
|
|
CHECK_EQ(1, info.GetSourceLineNumber(19));
|
2019-01-02 12:19:06 +00:00
|
|
|
CHECK_EQ(1, info.GetSourceLineNumber(20));
|
2018-05-22 09:01:48 +00:00
|
|
|
CHECK_EQ(2, info.GetSourceLineNumber(21));
|
|
|
|
CHECK_EQ(2, info.GetSourceLineNumber(100));
|
|
|
|
CHECK_EQ(2, info.GetSourceLineNumber(std::numeric_limits<int>::max()));
|
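The boundary checks above imply a lower-bound style lookup: a pc maps to the
last entry whose pc_offset is strictly below it, clamped to the first entry.
A minimal sketch, reusing the illustrative PcInfo from earlier (the real
implementation may differ):

#include <algorithm>
#include <iterator>
#include <vector>

// Sketch: find the line for a pc, assuming pc_table is sorted by pc_offset
// and non-empty.
int GetSourceLineNumberSketch(const std::vector<PcInfo>& pc_table, int pc) {
  // First entry with pc_offset >= pc.
  auto it = std::lower_bound(
      pc_table.begin(), pc_table.end(), pc,
      [](const PcInfo& e, int value) { return e.pc_offset < value; });
  // Everything below the first entry clamps to the first line; otherwise
  // the previous entry's line applies.
  if (it == pc_table.begin()) return it->line_number;
  return std::prev(it)->line_number;
}

With entries {10 -> 1, 20 -> 2}, this returns 1 for every pc up to and
including 20 and 2 from 21 onwards, matching the CHECKs above.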
2018-04-11 13:09:15 +00:00
|
|
|
|
2019-01-04 11:57:50 +00:00
|
|
|
CHECK_EQ(SourcePosition::kNotInlined, info.GetInliningId(0));
|
|
|
|
CHECK_EQ(SourcePosition::kNotInlined, info.GetInliningId(100));
|
|
|
|
|
2018-04-11 13:09:15 +00:00
|
|
|
// Test SetPosition behavior.
|
2019-01-04 11:57:50 +00:00
|
|
|
info.SetPosition(25, 3, 0);
|
2018-05-22 09:01:48 +00:00
|
|
|
CHECK_EQ(2, info.GetSourceLineNumber(21));
|
|
|
|
CHECK_EQ(3, info.GetSourceLineNumber(100));
|
|
|
|
CHECK_EQ(3, info.GetSourceLineNumber(std::numeric_limits<int>::max()));
|
2019-01-04 11:57:50 +00:00
|
|
|
|
|
|
|
CHECK_EQ(SourcePosition::kNotInlined, info.GetInliningId(21));
|
|
|
|
CHECK_EQ(0, info.GetInliningId(100));
|
2019-07-29 12:45:30 +00:00
|
|
|
|
|
|
|
// Test that subsequent SetPosition calls with the same pc_offset are ignored.
|
|
|
|
info.SetPosition(25, 4, SourcePosition::kNotInlined);
|
|
|
|
CHECK_EQ(2, info.GetSourceLineNumber(21));
|
|
|
|
CHECK_EQ(3, info.GetSourceLineNumber(100));
|
|
|
|
CHECK_EQ(3, info.GetSourceLineNumber(std::numeric_limits<int>::max()));
|
|
|
|
|
|
|
|
CHECK_EQ(SourcePosition::kNotInlined, info.GetInliningId(21));
|
|
|
|
CHECK_EQ(0, info.GetInliningId(100));
|
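The ignore-duplicates behavior exercised here could be implemented with a
simple guard on insertion. A hedged sketch against the illustrative pc_table_
from earlier, not the actual V8 code:

// Sketch: a later SetPosition call with the same pc_offset as the most
// recently added entry is dropped, so the first mapping wins.
void SetPositionSketch(std::vector<PcInfo>* pc_table, int pc_offset,
                       int line_number, int inlining_id) {
  if (!pc_table->empty() && pc_table->back().pc_offset == pc_offset) return;
  pc_table->push_back({pc_offset, line_number, inlining_id});
}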
2018-04-11 13:09:15 +00:00
|
|
|
}
|
|
|
|
|
2018-04-17 23:29:24 +00:00
|
|
|
TEST(MultipleProfilers) {
|
|
|
|
std::unique_ptr<CpuProfiler> profiler1(new CpuProfiler(CcTest::i_isolate()));
|
|
|
|
std::unique_ptr<CpuProfiler> profiler2(new CpuProfiler(CcTest::i_isolate()));
|
|
|
|
profiler1->StartProfiling("1");
|
|
|
|
profiler2->StartProfiling("2");
|
|
|
|
profiler1->StopProfiling("1");
|
|
|
|
profiler2->StopProfiling("2");
|
|
|
|
}
|
|
|
|
|
2019-02-08 20:18:29 +00:00
|
|
|
// Tests that logged CodeCreateEvent calls do not crash a reused CpuProfiler.
|
|
|
|
// crbug.com/929928
|
|
|
|
TEST(CrashReusedProfiler) {
|
|
|
|
LocalContext env;
|
|
|
|
i::Isolate* isolate = CcTest::i_isolate();
|
|
|
|
i::HandleScope scope(isolate);
|
|
|
|
|
|
|
|
std::unique_ptr<CpuProfiler> profiler(new CpuProfiler(isolate));
|
|
|
|
profiler->StartProfiling("1");
|
|
|
|
profiler->StopProfiling("1");
|
|
|
|
|
|
|
|
profiler->StartProfiling("2");
|
2020-12-17 16:02:56 +00:00
|
|
|
CreateCode(isolate, &env);
|
2019-02-08 20:18:29 +00:00
|
|
|
profiler->StopProfiling("2");
|
|
|
|
}
|
|
|
|
|
2019-02-13 04:34:19 +00:00
|
|
|
// Tests that samples from different profilers on the same isolate do not leak
|
|
|
|
// samples to each other. See crbug.com/v8/8835.
|
|
|
|
TEST(MultipleProfilersSampleIndependently) {
|
|
|
|
LocalContext env;
|
|
|
|
i::Isolate* isolate = CcTest::i_isolate();
|
|
|
|
i::HandleScope scope(isolate);
|
|
|
|
|
|
|
|
// Create two profilers: one slow-ticking one and one fast-ticking one.
|
|
|
|
// Ensure that the slow ticking profiler does not receive samples from the
|
|
|
|
// fast ticking one.
|
|
|
|
std::unique_ptr<CpuProfiler> slow_profiler(
|
|
|
|
new CpuProfiler(CcTest::i_isolate()));
|
|
|
|
slow_profiler->set_sampling_interval(base::TimeDelta::FromSeconds(1));
|
2019-05-22 00:06:41 +00:00
|
|
|
slow_profiler->StartProfiling("1", {kLeafNodeLineNumbers});
|
2019-02-13 04:34:19 +00:00
|
|
|
|
|
|
|
CompileRun(R"(
|
|
|
|
function start() {
|
|
|
|
let val = 1;
|
|
|
|
for (let i = 0; i < 10e3; i++) {
|
|
|
|
val = (val * 2) % 3;
|
|
|
|
}
|
|
|
|
return val;
|
|
|
|
}
|
|
|
|
)");
|
|
|
|
v8::Local<v8::Function> function = GetFunction(env.local(), "start");
|
|
|
|
ProfilerHelper helper(env.local());
|
2019-05-22 00:06:41 +00:00
|
|
|
v8::CpuProfile* profile = helper.Run(function, nullptr, 0, 100, 0);
|
2019-02-13 04:34:19 +00:00
|
|
|
|
|
|
|
auto slow_profile = slow_profiler->StopProfiling("1");
|
|
|
|
CHECK_GT(profile->GetSamplesCount(), slow_profile->samples_count());
|
|
|
|
}
|
|
|
|
|
2018-10-31 14:29:55 +00:00
|
|
|
void ProfileSomeCode(v8::Isolate* isolate) {
|
|
|
|
v8::Isolate::Scope isolate_scope(isolate);
|
|
|
|
v8::HandleScope scope(isolate);
|
|
|
|
LocalContext context(isolate);
|
|
|
|
|
|
|
|
v8::CpuProfiler* profiler = v8::CpuProfiler::New(isolate);
|
|
|
|
|
|
|
|
v8::Local<v8::String> profile_name = v8_str("1");
|
|
|
|
profiler->StartProfiling(profile_name);
|
|
|
|
const char* source = R"(
|
|
|
|
function foo() {
|
2018-11-15 12:46:06 +00:00
|
|
|
var x = 0;
|
2018-11-19 16:29:23 +00:00
|
|
|
for (var i = 0; i < 1e3; i++) {
|
2018-11-15 12:46:06 +00:00
|
|
|
for (var j = 0; j < 1e3; j++) {
|
2018-11-19 16:29:23 +00:00
|
|
|
x = i * j;
|
2018-10-31 14:29:55 +00:00
|
|
|
}
|
|
|
|
}
|
2018-11-15 12:46:06 +00:00
|
|
|
return x;
|
2018-10-31 14:29:55 +00:00
|
|
|
}
|
|
|
|
foo();
|
|
|
|
)";
|
|
|
|
|
|
|
|
CompileRun(source);
|
|
|
|
profiler->StopProfiling(profile_name);
|
|
|
|
profiler->Dispose();
|
|
|
|
}
|
|
|
|
|
|
|
|
class IsolateThread : public v8::base::Thread {
|
|
|
|
public:
|
|
|
|
IsolateThread() : Thread(Options("IsolateThread")) {}
|
|
|
|
|
|
|
|
void Run() override {
|
|
|
|
v8::Isolate::CreateParams create_params;
|
|
|
|
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
|
|
|
|
v8::Isolate* isolate = v8::Isolate::New(create_params);
|
|
|
|
ProfileSomeCode(isolate);
|
|
|
|
isolate->Dispose();
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
// Checking for crashes and TSAN issues with multiple isolates profiling.
|
|
|
|
TEST(MultipleIsolates) {
|
|
|
|
IsolateThread thread1;
|
|
|
|
IsolateThread thread2;
|
|
|
|
|
2019-07-29 13:09:02 +00:00
|
|
|
CHECK(thread1.Start());
|
|
|
|
CHECK(thread2.Start());
|
2018-10-31 14:29:55 +00:00
|
|
|
|
|
|
|
thread1.Join();
|
|
|
|
thread2.Join();
|
|
|
|
}
|
|
|
|
|
[cpu-profiler] Ensure sampled thread has Isolate lock under Windows
While the sampler checked whether the sampled thread held the Isolate lock
(if locks are being used) under Linux, the check was not done under
Windows (or Fuchsia). This meant that in a multi-threaded application
under Windows, thread locking was not checked, making the profiler prone
to seg faults and the like: the profiler would be using isolate->js_entry_sp
to determine the stack to walk, but isolate->js_entry_sp is the stack
pointer for the thread that currently holds the Isolate lock, so if the
sampled thread does not hold the lock, the sampler would be iterating
over the wrong stack, one that might actually be actively changing on
another thread. The fix was to move the lock check into CpuSampler
and Ticker (--prof) so all OSes would do the correct check.
The basic concept is that on all operating systems a CpuProfiler, and
so its corresponding CpuSampler, is tied to a thread. This is not based
on first principles or anything; it's simply the way it works in V8,
though it is a useful conceit, as it makes visualization and
interpretation of profile data much easier.
To collect a sample on a thread associated with a profiler the thread
must be stopped for obvious reasons -- walking the stack of a running
thread is a formula for disaster. The mechanism for stopping a thread
is OS-specific and is done in sample.cc. There are currently three
basic approaches, one for Linux/Unix variants, one for Windows and one
for Fuchsia. The approaches vary as to which thread actually collects
the sample -- under Linux the sample is actually collected on the
(interrupted) sampled thread whereas under Fuchsia/Windows it's on
a separate thread.
However, in a multi-threaded environment (where Locker is used), it's
not sufficient for the sampled thread to be stopped. Because the stack
walk involves looking in the Isolate heap, no other thread can be
messing with the heap while the sample is collected. The only ways to
ensure this would be to either stop all threads whenever collecting a
sample, or to ensure that the thread being sampled holds the Isolate
lock, which prevents other threads from messing with the heap. While
there might be something to be said for the "stop all threads" approach,
the current approach in V8 is to stop only the sampled thread, so in a
multi-threaded environment the profiler must check whether the thread
being sampled holds the Isolate lock.
Since this check must be done independent of which thread the sample
is collected on (it varies from OS to OS), the approach is to save the
thread id of the thread to be profiled/sampled when the CpuSampler is
instantiated (on all OSes it is instantiated on the sampled thread) and
then check that thread id against the Isolate lock holder's thread id
before collecting a sample. If it matches, we know sample.cc has stopped
the sampled thread, one way or another, and we know that no other thread
can mess with the heap (since the stopped thread holds the Isolate lock),
so it's safe to walk the stack and collect data from the heap so the
sample can be taken. If it doesn't match, we can't safely collect the
sample, so we don't.
Bug: v8:10850
Change-Id: Iba6cabcd3e11a19c261c004103e37e806934dc6f
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2411343
Reviewed-by: Peter Marshall <petermarshall@chromium.org>
Commit-Queue: Peter Marshall <petermarshall@chromium.org>
Cr-Commit-Position: refs/heads/master@{#69952}
2020-09-16 12:00:30 +00:00
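A hedged sketch of the check described above. The field and method names
(sampled_thread_id_, IsLockedByThread) are assumptions for illustration and
may not match the real sampler code:

// Sketch only: record the sampled thread's id at construction, and bail
// out of a tick when that thread does not hold the Isolate lock.
class SamplerSketch {
 public:
  explicit SamplerSketch(i::Isolate* isolate)
      : isolate_(isolate),
        sampled_thread_id_(i::ThreadId::Current()) {}  // sampled thread

  void Tick() {
    // If Lockers are in use, only sample when the sampled thread holds
    // the Isolate lock; otherwise js_entry_sp describes another thread's
    // stack and walking it would be unsafe.
    if (v8::Locker::IsActive() &&
        !isolate_->thread_manager()->IsLockedByThread(sampled_thread_id_)) {
      return;  // cannot sample safely; drop this tick
    }
    // ... safe to walk the stack and record the sample ...
  }

 private:
  i::Isolate* isolate_;
  i::ThreadId sampled_thread_id_;
};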
|
|
|
// Varying called function frame sizes increases the chance of something going
|
|
|
|
// wrong if sampling an unlocked frame. We also disable optimization to prevent
|
|
|
|
// inlining so each function call has its own frame.
|
|
|
|
const char* varying_frame_size_script = R"(
|
2021-01-14 14:06:12 +00:00
|
|
|
%NeverOptimizeFunction(maybeYield0);
|
|
|
|
%NeverOptimizeFunction(maybeYield1);
|
|
|
|
%NeverOptimizeFunction(maybeYield2);
|
2020-09-16 12:00:30 +00:00
|
|
|
%NeverOptimizeFunction(bar);
|
|
|
|
%NeverOptimizeFunction(foo);
|
2021-01-14 14:06:12 +00:00
|
|
|
function maybeYield0(n) {
|
2020-09-16 12:00:30 +00:00
|
|
|
YieldIsolate(Math.random() > yieldLimit);
|
|
|
|
}
|
2021-01-14 14:06:12 +00:00
|
|
|
function maybeYield1(n) {
|
|
|
|
YieldIsolate(Math.random() > yieldLimit);
|
|
|
|
}
|
|
|
|
function maybeYield2(n) {
|
|
|
|
YieldIsolate(Math.random() > yieldLimit);
|
|
|
|
}
|
|
|
|
maybeYield = [maybeYield0, maybeYield1, maybeYield2];
|
|
|
|
function bar(threadNumber, a, b, c, d) {
|
|
|
|
maybeYield[threadNumber](Math.random());
|
2020-09-16 12:00:30 +00:00
|
|
|
return a.length + b.length + c.length + d.length;
|
|
|
|
}
|
2021-01-14 14:06:12 +00:00
|
|
|
function foo(timeLimit, yieldProbability, threadNumber) {
|
2020-09-16 12:00:30 +00:00
|
|
|
yieldLimit = 1 - yieldProbability;
|
|
|
|
const startTime = Date.now();
|
|
|
|
for (let i = 0; i < 1e6; i++) {
|
2021-01-14 14:06:12 +00:00
|
|
|
maybeYield[threadNumber](1);
|
|
|
|
bar(threadNumber, "Hickory", "Dickory", "Doc", "Mouse");
|
2020-09-16 12:00:30 +00:00
|
|
|
YieldIsolate(Math.random() > 0.999);
|
|
|
|
if ((Date.now() - startTime) > timeLimit) break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
)";
|
|
|
|
|
|
|
|
class UnlockingThread : public v8::base::Thread {
|
|
|
|
public:
|
2021-01-14 14:06:12 +00:00
|
|
|
explicit UnlockingThread(v8::Local<v8::Context> env, int32_t threadNumber)
|
|
|
|
: Thread(Options("UnlockingThread")),
|
|
|
|
env_(CcTest::isolate(), env),
|
|
|
|
threadNumber_(threadNumber) {}
|
2020-09-16 12:00:30 +00:00
|
|
|
|
|
|
|
void Run() override {
|
|
|
|
v8::Isolate* isolate = CcTest::isolate();
|
|
|
|
v8::Locker locker(isolate);
|
|
|
|
v8::Isolate::Scope isolate_scope(isolate);
|
|
|
|
v8::HandleScope scope(isolate);
|
|
|
|
v8::Local<v8::Context> env = v8::Local<v8::Context>::New(isolate, env_);
|
2021-01-14 14:06:12 +00:00
|
|
|
Profile(env, threadNumber_);
|
2020-09-16 12:00:30 +00:00
|
|
|
}
|
|
|
|
|
2021-01-14 14:06:12 +00:00
|
|
|
static void Profile(v8::Local<v8::Context> env, int32_t threadNumber) {
|
|
|
|
CHECK_LT(threadNumber, maxThreads_);
|
2020-09-16 12:00:30 +00:00
|
|
|
v8::Isolate* isolate = CcTest::isolate();
|
|
|
|
v8::Context::Scope context_scope(env);
|
|
|
|
v8::CpuProfiler* profiler = v8::CpuProfiler::New(isolate);
|
|
|
|
profiler->SetSamplingInterval(200);
|
|
|
|
v8::Local<v8::String> profile_name = v8_str("1");
|
|
|
|
profiler->StartProfiling(profile_name);
|
|
|
|
int32_t time_limit = 200;
|
|
|
|
double yield_probability = 0.001;
|
|
|
|
v8::Local<v8::Value> args[] = {v8::Integer::New(isolate, time_limit),
|
2021-01-14 14:06:12 +00:00
|
|
|
v8::Number::New(isolate, yield_probability),
|
|
|
|
v8::Integer::New(isolate, threadNumber)};
|
2020-09-16 12:00:30 +00:00
|
|
|
v8::Local<v8::Function> function = GetFunction(env, "foo");
|
|
|
|
function->Call(env, env->Global(), arraysize(args), args).ToLocalChecked();
|
2021-01-14 14:06:12 +00:00
|
|
|
const v8::CpuProfile* profile = profiler->StopProfiling(profile_name);
|
|
|
|
const CpuProfileNode* root = profile->GetTopDownRoot();
|
|
|
|
for (int32_t number = 0; number < maxThreads_; number++) {
|
|
|
|
std::string maybeYield = "maybeYield" + std::to_string(number);
|
|
|
|
unsigned hit_count = TotalHitCount(root, maybeYield);
|
|
|
|
if (hit_count) CHECK_EQ(number, threadNumber);
|
|
|
|
}
|
2020-09-16 12:00:30 +00:00
|
|
|
profiler->Dispose();
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
v8::Persistent<v8::Context> env_;
|
2021-01-14 14:06:12 +00:00
|
|
|
int32_t threadNumber_;
|
|
|
|
static const int32_t maxThreads_ = 3;
|
2020-09-16 12:00:30 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
// Checks for crashes when profiling multiple threads on a single Isolate.
|
|
|
|
TEST(MultipleThreadsSingleIsolate) {
|
|
|
|
i::FLAG_allow_natives_syntax = true;
|
|
|
|
v8::Isolate* isolate = CcTest::isolate();
|
|
|
|
v8::Locker locker(isolate);
|
|
|
|
v8::HandleScope scope(isolate);
|
|
|
|
v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
|
|
|
|
v8::Context::Scope context_scope(env);
|
|
|
|
CcTest::AddGlobalFunction(
|
|
|
|
env, "YieldIsolate", [](const v8::FunctionCallbackInfo<v8::Value>& info) {
|
|
|
|
v8::Isolate* isolate = info.GetIsolate();
|
|
|
|
if (!info[0]->IsTrue()) return;
|
|
|
|
v8::Unlocker unlocker(isolate);
|
|
|
|
v8::base::OS::Sleep(v8::base::TimeDelta::FromMilliseconds(1));
|
|
|
|
});
|
|
|
|
|
|
|
|
CompileRun(varying_frame_size_script);
|
2021-01-14 14:06:12 +00:00
|
|
|
UnlockingThread thread1(env, 1);
|
|
|
|
UnlockingThread thread2(env, 2);
|
[cpu-profiler] Ensure sampled thread has Isolate lock under Windows
2020-09-16 12:00:30 +00:00
|
|
|
|
|
|
|
CHECK(thread1.Start());
|
|
|
|
CHECK(thread2.Start());
|
|
|
|
|
|
|
|
// For good measure, profile on our own thread.
|
2021-01-14 14:06:12 +00:00
|
|
|
UnlockingThread::Profile(env, 0);
|
[cpu-profiler] Ensure sampled thread has Isolate lock under Windows
2020-09-16 12:00:30 +00:00
|
|
|
{
|
|
|
|
v8::Unlocker unlocker(isolate);
|
|
|
|
thread1.Join();
|
|
|
|
thread2.Join();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-02-15 19:45:50 +00:00
|
|
|
// Tests that StopProfiling doesn't wait for the next sample tick in order to
|
|
|
|
// stop, but rather exits early before a given wait threshold.
|
|
|
|
TEST(FastStopProfiling) {
|
|
|
|
static const base::TimeDelta kLongInterval = base::TimeDelta::FromSeconds(10);
|
|
|
|
static const base::TimeDelta kWaitThreshold = base::TimeDelta::FromSeconds(5);
|
|
|
|
|
|
|
|
std::unique_ptr<CpuProfiler> profiler(new CpuProfiler(CcTest::i_isolate()));
|
|
|
|
profiler->set_sampling_interval(kLongInterval);
|
2019-05-22 00:06:41 +00:00
|
|
|
profiler->StartProfiling("", {kLeafNodeLineNumbers});
|
2019-02-15 19:45:50 +00:00
|
|
|
|
|
|
|
v8::Platform* platform = v8::internal::V8::GetCurrentPlatform();
|
|
|
|
double start = platform->CurrentClockTimeMillis();
|
|
|
|
profiler->StopProfiling("");
|
|
|
|
double duration = platform->CurrentClockTimeMillis() - start;
|
|
|
|
|
|
|
|
CHECK_LT(duration, kWaitThreshold.InMillisecondsF());
|
|
|
|
}
|
|
|
|
|
2020-10-26 18:12:10 +00:00
|
|
|
// Tests that when current_profiles->size() is greater than the max allowable
|
|
|
|
// number of concurrent profiles (100), we don't allow a new profile to be
|
|
|
|
// started.
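// The admission logic under test can be pictured with a small sketch (a toy
// model, not the real CpuProfilesCollection code; only the status values
// and the limit of 100 come from the test below; assumes <algorithm>,
// <string> and <vector>):
static CpuProfilingStatus TryStartProfileSketch(
    std::vector<std::string>* running_titles, const std::string& title) {
  constexpr int kMaxSimultaneousProfilesSketch = 100;
  // A profile with the same title is already running.
  if (std::find(running_titles->begin(), running_titles->end(), title) !=
      running_titles->end()) {
    return CpuProfilingStatus::kAlreadyStarted;
  }
  // The collection is full; the 101st profile is refused.
  if (static_cast<int>(running_titles->size()) >=
      kMaxSimultaneousProfilesSketch) {
    return CpuProfilingStatus::kErrorTooManyProfilers;
  }
  running_titles->push_back(title);
  return CpuProfilingStatus::kStarted;
}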
|
|
|
|
TEST(MaxSimultaneousProfiles) {
|
|
|
|
LocalContext env;
|
|
|
|
i::Isolate* isolate = CcTest::i_isolate();
|
|
|
|
i::HandleScope scope(isolate);
|
|
|
|
|
|
|
|
v8::CpuProfiler* profiler = v8::CpuProfiler::New(env->GetIsolate());
|
|
|
|
|
|
|
|
// Spin up first profiler. Verify that status is kStarted
|
|
|
|
CpuProfilingStatus firstStatus = profiler->StartProfiling(
|
|
|
|
v8_str("1us"), {v8::CpuProfilingMode::kLeafNodeLineNumbers,
|
|
|
|
v8::CpuProfilingOptions::kNoSampleLimit, 1});
|
|
|
|
|
|
|
|
CHECK_EQ(firstStatus, CpuProfilingStatus::kStarted);
|
|
|
|
|
|
|
|
// Spin up profiler with same title. Verify that status is kAlreadyStarted
|
|
|
|
CpuProfilingStatus startedStatus = profiler->StartProfiling(
|
|
|
|
v8_str("1us"), {v8::CpuProfilingMode::kLeafNodeLineNumbers,
|
|
|
|
v8::CpuProfilingOptions::kNoSampleLimit, 1});
|
|
|
|
|
|
|
|
CHECK_EQ(startedStatus, CpuProfilingStatus::kAlreadyStarted);
|
|
|
|
|
|
|
|
// Spin up 99 more profilers, maxing out CpuProfilesCollection.
|
|
|
|
// Check they all return status of kStarted
|
|
|
|
for (int i = 2; i <= CpuProfilesCollection::kMaxSimultaneousProfiles; i++) {
|
|
|
|
CpuProfilingStatus status =
|
|
|
|
profiler->StartProfiling(v8_str((std::to_string(i) + "us").c_str()),
|
|
|
|
{v8::CpuProfilingMode::kLeafNodeLineNumbers,
|
|
|
|
v8::CpuProfilingOptions::kNoSampleLimit, i});
|
|
|
|
CHECK_EQ(status, CpuProfilingStatus::kStarted);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Spin up 101st profiler. Verify status is kErrorTooManyProfilers
|
|
|
|
CpuProfilingStatus errorStatus = profiler->StartProfiling(
|
|
|
|
v8_str("101us"), {v8::CpuProfilingMode::kLeafNodeLineNumbers,
|
|
|
|
v8::CpuProfilingOptions::kNoSampleLimit, 2});
|
|
|
|
|
|
|
|
CHECK_EQ(errorStatus, CpuProfilingStatus::kErrorTooManyProfilers);
|
|
|
|
|
|
|
|
// Clean up, otherwise the test will crash.
|
|
|
|
for (int i = 1; i <= CpuProfilesCollection::kMaxSimultaneousProfiles + 1;
|
|
|
|
i++) {
|
|
|
|
profiler->StopProfiling(v8_str((std::to_string(i) + "us").c_str()));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-03-14 07:25:55 +00:00
|
|
|
TEST(LowPrecisionSamplingStartStopInternal) {
|
|
|
|
i::Isolate* isolate = CcTest::i_isolate();
|
2021-06-15 22:45:03 +00:00
|
|
|
CodeEntryStorage storage;
|
2019-03-14 07:25:55 +00:00
|
|
|
CpuProfilesCollection profiles(isolate);
|
2021-06-15 22:45:03 +00:00
|
|
|
ProfilerCodeObserver code_observer(isolate, storage);
|
2020-10-01 07:07:48 +00:00
|
|
|
Symbolizer symbolizer(code_observer.code_map());
|
2019-03-14 07:25:55 +00:00
|
|
|
std::unique_ptr<ProfilerEventsProcessor> processor(
|
2020-10-01 07:07:48 +00:00
|
|
|
new SamplingEventsProcessor(
|
|
|
|
isolate, &symbolizer, &code_observer, &profiles,
|
|
|
|
v8::base::TimeDelta::FromMicroseconds(100), false));
|
2019-07-29 13:09:02 +00:00
|
|
|
CHECK(processor->Start());
|
2019-03-14 07:25:55 +00:00
|
|
|
processor->StopSynchronously();
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST(LowPrecisionSamplingStartStopPublic) {
|
|
|
|
LocalContext env;
|
|
|
|
v8::HandleScope scope(env->GetIsolate());
|
|
|
|
v8::CpuProfiler* cpu_profiler = v8::CpuProfiler::New(env->GetIsolate());
|
|
|
|
cpu_profiler->SetUsePreciseSampling(false);
|
|
|
|
v8::Local<v8::String> profile_name = v8_str("");
|
|
|
|
cpu_profiler->StartProfiling(profile_name, true);
|
|
|
|
cpu_profiler->StopProfiling(profile_name);
|
|
|
|
cpu_profiler->Dispose();
|
|
|
|
}
|
|
|
|
|
2019-04-24 20:21:58 +00:00
|
|
|
const char* naming_test_source = R"(
|
|
|
|
(function testAssignmentPropertyNamedFunction() {
|
|
|
|
let object = {};
|
|
|
|
object.propNamed = function () {
|
|
|
|
CallCollectSample();
|
|
|
|
};
|
|
|
|
object.propNamed();
|
|
|
|
})();
|
|
|
|
)";
|
|
|
|
|
|
|
|
TEST(StandardNaming) {
|
|
|
|
LocalContext env;
|
|
|
|
i::Isolate* isolate = CcTest::i_isolate();
|
|
|
|
i::HandleScope scope(isolate);
|
|
|
|
|
|
|
|
v8::Local<v8::FunctionTemplate> func_template =
|
|
|
|
v8::FunctionTemplate::New(env->GetIsolate(), CallCollectSample);
|
|
|
|
v8::Local<v8::Function> func =
|
|
|
|
func_template->GetFunction(env.local()).ToLocalChecked();
|
|
|
|
func->SetName(v8_str("CallCollectSample"));
|
|
|
|
env->Global()->Set(env.local(), v8_str("CallCollectSample"), func).FromJust();
|
|
|
|
|
|
|
|
v8::CpuProfiler* profiler =
|
|
|
|
v8::CpuProfiler::New(env->GetIsolate(), kStandardNaming);
|
|
|
|
|
|
|
|
const auto profile_name = v8_str("");
|
|
|
|
profiler->StartProfiling(profile_name);
|
|
|
|
CompileRun(naming_test_source);
|
|
|
|
auto* profile = profiler->StopProfiling(profile_name);
|
|
|
|
|
|
|
|
auto* root = profile->GetTopDownRoot();
|
|
|
|
auto* toplevel = FindChild(root, "");
|
|
|
|
DCHECK(toplevel);
|
|
|
|
|
|
|
|
auto* prop_assignment_named_test =
|
|
|
|
GetChild(env.local(), toplevel, "testAssignmentPropertyNamedFunction");
|
|
|
|
CHECK(FindChild(prop_assignment_named_test, ""));
|
|
|
|
|
|
|
|
profiler->Dispose();
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST(DebugNaming) {
|
|
|
|
LocalContext env;
|
|
|
|
i::Isolate* isolate = CcTest::i_isolate();
|
|
|
|
i::HandleScope scope(isolate);
|
|
|
|
|
|
|
|
v8::Local<v8::FunctionTemplate> func_template =
|
|
|
|
v8::FunctionTemplate::New(env->GetIsolate(), CallCollectSample);
|
|
|
|
v8::Local<v8::Function> func =
|
|
|
|
func_template->GetFunction(env.local()).ToLocalChecked();
|
|
|
|
func->SetName(v8_str("CallCollectSample"));
|
|
|
|
env->Global()->Set(env.local(), v8_str("CallCollectSample"), func).FromJust();
|
|
|
|
|
|
|
|
v8::CpuProfiler* profiler =
|
|
|
|
v8::CpuProfiler::New(env->GetIsolate(), kDebugNaming);
|
|
|
|
|
|
|
|
const auto profile_name = v8_str("");
|
|
|
|
profiler->StartProfiling(profile_name);
|
|
|
|
CompileRun(naming_test_source);
|
|
|
|
auto* profile = profiler->StopProfiling(profile_name);
|
|
|
|
|
|
|
|
auto* root = profile->GetTopDownRoot();
|
|
|
|
auto* toplevel = FindChild(root, "");
|
|
|
|
DCHECK(toplevel);
|
|
|
|
|
|
|
|
auto* prop_assignment_named_test =
|
|
|
|
GetChild(env.local(), toplevel, "testAssignmentPropertyNamedFunction");
|
|
|
|
CHECK(FindChild(prop_assignment_named_test, "object.propNamed"));
|
|
|
|
|
|
|
|
profiler->Dispose();
|
|
|
|
}
|
|
|
|
|
2019-05-08 19:16:42 +00:00
|
|
|
TEST(SampleLimit) {
|
|
|
|
LocalContext env;
|
|
|
|
i::Isolate* isolate = CcTest::i_isolate();
|
|
|
|
i::HandleScope scope(isolate);
|
|
|
|
|
|
|
|
CompileRun(R"(
|
|
|
|
function start() {
|
|
|
|
let val = 1;
|
|
|
|
for (let i = 0; i < 10e3; i++) {
|
|
|
|
val = (val * 2) % 3;
|
|
|
|
}
|
|
|
|
return val;
|
|
|
|
}
|
|
|
|
)");
|
|
|
|
|
|
|
|
// Take 100 samples of `start`, but set the max samples to 50.
|
|
|
|
v8::Local<v8::Function> function = GetFunction(env.local(), "start");
|
|
|
|
ProfilerHelper helper(env.local());
|
|
|
|
v8::CpuProfile* profile =
|
2019-05-22 00:06:41 +00:00
|
|
|
helper.Run(function, nullptr, 0, 100, 0,
|
2019-05-08 19:16:42 +00:00
|
|
|
v8::CpuProfilingMode::kLeafNodeLineNumbers, 50);
|
|
|
|
|
|
|
|
CHECK_EQ(profile->GetSamplesCount(), 50);
|
|
|
|
}
|
|
|
|
|
2019-05-14 20:24:49 +00:00
|
|
|
// Tests that a CpuProfile instance subsamples from a stream of tick samples
|
|
|
|
// appropriately.
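// A plausible sketch of the subsampling logic exercised below; the member
// names are assumptions, but the observable behavior matches the CHECKs in
// the test: elapsed source-sample time is accumulated and a sample is kept
// once the profile's own interval has been covered.
struct SubsampleSketch {
  base::TimeDelta next_sample_delta_;  // Starts at zero: keep first sample.
  int64_t interval_us_;                // The profile's requested interval.

  bool CheckSubsample(base::TimeDelta source_interval) {
    // A zero source interval (e.g. a manually collected sample) is always
    // kept, irrespective of the profile's interval.
    if (source_interval.IsZero()) return true;
    next_sample_delta_ -= source_interval;
    if (next_sample_delta_ <= base::TimeDelta()) {
      next_sample_delta_ = base::TimeDelta::FromMicroseconds(interval_us_);
      return true;  // Keep this sample.
    }
    return false;  // Drop it; not enough time has elapsed yet.
  }
};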
|
|
|
|
TEST(ProfilerSubsampling) {
|
|
|
|
LocalContext env;
|
|
|
|
i::Isolate* isolate = CcTest::i_isolate();
|
|
|
|
i::HandleScope scope(isolate);
|
|
|
|
|
2021-06-15 22:45:03 +00:00
|
|
|
CodeEntryStorage storage;
|
2019-05-14 20:24:49 +00:00
|
|
|
CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
|
2021-06-15 22:45:03 +00:00
|
|
|
ProfilerCodeObserver* code_observer =
|
|
|
|
new ProfilerCodeObserver(isolate, storage);
|
2020-12-21 21:42:34 +00:00
|
|
|
Symbolizer* symbolizer = new Symbolizer(code_observer->code_map());
|
2019-07-18 23:18:18 +00:00
|
|
|
ProfilerEventsProcessor* processor =
|
2020-12-21 21:42:34 +00:00
|
|
|
new SamplingEventsProcessor(isolate, symbolizer, code_observer, profiles,
|
2019-07-18 23:18:18 +00:00
|
|
|
v8::base::TimeDelta::FromMicroseconds(1),
|
|
|
|
/* use_precise_sampling */ true);
|
2020-10-01 07:07:48 +00:00
|
|
|
CpuProfiler profiler(isolate, kDebugNaming, kLazyLogging, profiles,
|
2020-12-21 21:42:34 +00:00
|
|
|
symbolizer, processor, code_observer);
|
2019-05-14 20:24:49 +00:00
|
|
|
|
|
|
|
// Create a new CpuProfile that wants samples at 8us.
|
2022-04-05 22:45:41 +00:00
|
|
|
CpuProfile profile(&profiler, 1, "",
|
2019-05-22 00:06:41 +00:00
|
|
|
{v8::CpuProfilingMode::kLeafNodeLineNumbers,
|
2019-05-14 20:24:49 +00:00
|
|
|
v8::CpuProfilingOptions::kNoSampleLimit, 8});
|
|
|
|
// Verify that the first sample is always included.
|
|
|
|
CHECK(profile.CheckSubsample(base::TimeDelta::FromMicroseconds(10)));
|
|
|
|
|
|
|
|
// Four 2us samples should result in one 8us sample.
|
|
|
|
CHECK(!profile.CheckSubsample(base::TimeDelta::FromMicroseconds(2)));
|
|
|
|
CHECK(!profile.CheckSubsample(base::TimeDelta::FromMicroseconds(2)));
|
|
|
|
CHECK(!profile.CheckSubsample(base::TimeDelta::FromMicroseconds(2)));
|
|
|
|
CHECK(profile.CheckSubsample(base::TimeDelta::FromMicroseconds(2)));
|
|
|
|
|
|
|
|
// Profiles should expect the source sample interval to change, in which case
|
|
|
|
// they should still take the first sample elapsed after their interval.
|
|
|
|
CHECK(!profile.CheckSubsample(base::TimeDelta::FromMicroseconds(2)));
|
|
|
|
CHECK(!profile.CheckSubsample(base::TimeDelta::FromMicroseconds(2)));
|
|
|
|
CHECK(!profile.CheckSubsample(base::TimeDelta::FromMicroseconds(2)));
|
|
|
|
CHECK(profile.CheckSubsample(base::TimeDelta::FromMicroseconds(4)));
|
|
|
|
|
|
|
|
// Aligned samples (at 8us) are always included.
|
|
|
|
CHECK(profile.CheckSubsample(base::TimeDelta::FromMicroseconds(8)));
|
|
|
|
|
|
|
|
// Samples with a rate of 0 should always be included.
|
|
|
|
CHECK(profile.CheckSubsample(base::TimeDelta::FromMicroseconds(0)));
|
|
|
|
}
|
|
|
|
|
|
|
|
// Tests that the base sampling rate of a CpuProfilesCollection is dynamically
|
|
|
|
// chosen based on the GCD of its child profiles.
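// The dynamic choice can be sketched as a GCD over the child profiles'
// intervals (a simplified model; the real collection tracks profiles, not
// raw integers; std::gcd needs <numeric>):
static base::TimeDelta CommonSamplingIntervalSketch(
    const std::vector<int64_t>& child_intervals_us) {
  int64_t gcd_us = 0;
  for (int64_t us : child_intervals_us) {
    // std::gcd(0, n) == n, so the first profile seeds the result.
    gcd_us = std::gcd(gcd_us, us);
  }
  // With no started profiles this stays zero, i.e. the interval is unset.
  return base::TimeDelta::FromMicroseconds(gcd_us);
}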
|
|
|
|
TEST(DynamicResampling) {
|
|
|
|
LocalContext env;
|
|
|
|
i::Isolate* isolate = CcTest::i_isolate();
|
|
|
|
i::HandleScope scope(isolate);
|
|
|
|
|
2021-06-15 22:45:03 +00:00
|
|
|
CodeEntryStorage storage;
|
2019-05-14 20:24:49 +00:00
|
|
|
CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
|
2021-06-15 22:45:03 +00:00
|
|
|
ProfilerCodeObserver* code_observer =
|
|
|
|
new ProfilerCodeObserver(isolate, storage);
|
2020-12-21 21:42:34 +00:00
|
|
|
Symbolizer* symbolizer = new Symbolizer(code_observer->code_map());
|
2019-07-18 23:18:18 +00:00
|
|
|
ProfilerEventsProcessor* processor =
|
2020-12-21 21:42:34 +00:00
|
|
|
new SamplingEventsProcessor(isolate, symbolizer, code_observer, profiles,
|
2019-07-18 23:18:18 +00:00
|
|
|
v8::base::TimeDelta::FromMicroseconds(1),
|
|
|
|
/* use_precise_sampling */ true);
|
2020-10-01 07:07:48 +00:00
|
|
|
CpuProfiler profiler(isolate, kDebugNaming, kLazyLogging, profiles,
|
2020-12-21 21:42:34 +00:00
|
|
|
symbolizer, processor, code_observer);
|
2019-05-14 20:24:49 +00:00
|
|
|
|
|
|
|
// Set a 1us base sampling rate, dividing all possible intervals.
|
|
|
|
profiler.set_sampling_interval(base::TimeDelta::FromMicroseconds(1));
|
|
|
|
|
|
|
|
// Verify that the sampling interval with no started profilers is unset.
|
|
|
|
CHECK_EQ(profiles->GetCommonSamplingInterval(), base::TimeDelta());
|
|
|
|
|
|
|
|
// Add a 10us profiler, verify that the base sampling interval is as high as
|
|
|
|
// possible (10us).
|
2022-04-05 22:45:41 +00:00
|
|
|
ProfilerId id_10us =
|
|
|
|
profiles
|
|
|
|
->StartProfiling("10us",
|
2019-05-22 00:06:41 +00:00
|
|
|
{v8::CpuProfilingMode::kLeafNodeLineNumbers,
|
2022-04-05 22:45:41 +00:00
|
|
|
v8::CpuProfilingOptions::kNoSampleLimit, 10})
|
|
|
|
.id;
|
2019-05-14 20:24:49 +00:00
|
|
|
CHECK_EQ(profiles->GetCommonSamplingInterval(),
|
|
|
|
base::TimeDelta::FromMicroseconds(10));
|
|
|
|
|
|
|
|
// Add a 5us profiler, verify that the base sampling interval is as high as
|
|
|
|
// possible given a 10us and 5us profiler (5us).
|
2022-04-05 22:45:41 +00:00
|
|
|
ProfilerId id_5us =
|
|
|
|
profiles
|
|
|
|
->StartProfiling("5us", {v8::CpuProfilingMode::kLeafNodeLineNumbers,
|
|
|
|
v8::CpuProfilingOptions::kNoSampleLimit, 5})
|
|
|
|
.id;
|
2019-05-14 20:24:49 +00:00
|
|
|
CHECK_EQ(profiles->GetCommonSamplingInterval(),
|
|
|
|
base::TimeDelta::FromMicroseconds(5));
|
|
|
|
|
|
|
|
// Add a 3us profiler, verify that the base sampling interval is 1us (due to
|
|
|
|
// coprime intervals).
|
2022-04-05 22:45:41 +00:00
|
|
|
ProfilerId id_3us =
|
|
|
|
profiles
|
|
|
|
->StartProfiling("3us", {v8::CpuProfilingMode::kLeafNodeLineNumbers,
|
|
|
|
v8::CpuProfilingOptions::kNoSampleLimit, 3})
|
|
|
|
.id;
|
2019-05-14 20:24:49 +00:00
|
|
|
CHECK_EQ(profiles->GetCommonSamplingInterval(),
|
|
|
|
base::TimeDelta::FromMicroseconds(1));
|
|
|
|
|
|
|
|
// Remove the 5us profiler, verify that the sample interval stays at 1us.
|
2022-04-05 22:45:41 +00:00
|
|
|
profiles->StopProfiling(id_5us);
|
2019-05-14 20:24:49 +00:00
|
|
|
CHECK_EQ(profiles->GetCommonSamplingInterval(),
|
|
|
|
base::TimeDelta::FromMicroseconds(1));
|
|
|
|
|
|
|
|
// Remove the 10us profiler, verify that the sample interval becomes 3us.
|
2022-04-05 22:45:41 +00:00
|
|
|
profiles->StopProfiling(id_10us);
|
2019-05-14 20:24:49 +00:00
|
|
|
CHECK_EQ(profiles->GetCommonSamplingInterval(),
|
|
|
|
base::TimeDelta::FromMicroseconds(3));
|
|
|
|
|
|
|
|
// Remove the 3us profiler, verify that the sample interval becomes unset.
|
2022-04-05 22:45:41 +00:00
|
|
|
profiles->StopProfiling(id_3us);
|
2019-05-14 20:24:49 +00:00
|
|
|
CHECK_EQ(profiles->GetCommonSamplingInterval(), base::TimeDelta());
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensures that when a non-unit base sampling interval is set on the profiler,
|
|
|
|
// that the sampling rate gets snapped to the nearest multiple prior to GCD
|
|
|
|
// computation.
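// The snapping can be sketched as rounding each requested interval up to
// the next multiple of the profiler's base interval before the GCD is
// taken (an illustrative model matching the expectations below; std::max
// needs <algorithm>):
static int64_t SnapToBaseIntervalSketch(int64_t requested_us,
                                        int64_t base_us) {
  // A base of zero forces every profile to a zero interval, the only
  // multiple of zero.
  if (base_us == 0) return 0;
  int64_t multiples = (requested_us + base_us - 1) / base_us;  // Ceiling.
  return std::max<int64_t>(multiples, 1) * base_us;  // At least one base.
}
// With a 7us base: 8us snaps to 14us, 4us snaps to 7us, and an unset (0us)
// request takes the base itself, so GCD(14, 7) == 7 as checked below.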
|
|
|
|
TEST(DynamicResamplingWithBaseInterval) {
|
|
|
|
LocalContext env;
|
|
|
|
i::Isolate* isolate = CcTest::i_isolate();
|
|
|
|
i::HandleScope scope(isolate);
|
|
|
|
|
2021-06-15 22:45:03 +00:00
|
|
|
CodeEntryStorage storage;
|
2019-05-14 20:24:49 +00:00
|
|
|
CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
|
2021-06-15 22:45:03 +00:00
|
|
|
ProfilerCodeObserver* code_observer =
|
|
|
|
new ProfilerCodeObserver(isolate, storage);
|
2020-12-21 21:42:34 +00:00
|
|
|
Symbolizer* symbolizer = new Symbolizer(code_observer->code_map());
|
2019-07-18 23:18:18 +00:00
|
|
|
ProfilerEventsProcessor* processor =
|
2020-12-21 21:42:34 +00:00
|
|
|
new SamplingEventsProcessor(isolate, symbolizer, code_observer, profiles,
|
2019-07-18 23:18:18 +00:00
|
|
|
v8::base::TimeDelta::FromMicroseconds(1),
|
|
|
|
/* use_precise_sampling */ true);
|
2020-10-01 07:07:48 +00:00
|
|
|
CpuProfiler profiler(isolate, kDebugNaming, kLazyLogging, profiles,
|
2020-12-21 21:42:34 +00:00
|
|
|
symbolizer, processor, code_observer);
|
2019-05-14 20:24:49 +00:00
|
|
|
|
|
|
|
profiler.set_sampling_interval(base::TimeDelta::FromMicroseconds(7));
|
|
|
|
|
|
|
|
// Verify that the sampling interval with no started profilers is unset.
|
|
|
|
CHECK_EQ(profiles->GetCommonSamplingInterval(), base::TimeDelta());
|
|
|
|
|
|
|
|
// Add a profiler with an unset sampling interval, verify that the common
|
|
|
|
// sampling interval is equal to the base.
|
2022-04-05 22:45:41 +00:00
|
|
|
ProfilerId unset_id =
|
|
|
|
profiles
|
|
|
|
->StartProfiling("unset", {v8::CpuProfilingMode::kLeafNodeLineNumbers,
|
|
|
|
v8::CpuProfilingOptions::kNoSampleLimit})
|
|
|
|
.id;
|
2019-05-14 20:24:49 +00:00
|
|
|
CHECK_EQ(profiles->GetCommonSamplingInterval(),
|
|
|
|
base::TimeDelta::FromMicroseconds(7));
|
2022-04-05 22:45:41 +00:00
|
|
|
profiles->StopProfiling(unset_id);
|
2019-05-14 20:24:49 +00:00
|
|
|
|
|
|
|
// Adding an 8us sampling interval rounds to a 14us base interval.
|
2022-04-05 22:45:41 +00:00
|
|
|
ProfilerId id_8us =
|
|
|
|
profiles
|
|
|
|
->StartProfiling("8us", {v8::CpuProfilingMode::kLeafNodeLineNumbers,
|
|
|
|
v8::CpuProfilingOptions::kNoSampleLimit, 8})
|
|
|
|
.id;
|
2019-05-14 20:24:49 +00:00
|
|
|
CHECK_EQ(profiles->GetCommonSamplingInterval(),
|
|
|
|
base::TimeDelta::FromMicroseconds(14));
|
|
|
|
|
|
|
|
// Adding a 4us sampling interval should cause a lowering to a 7us interval.
|
2022-04-05 22:45:41 +00:00
|
|
|
ProfilerId id_4us =
|
|
|
|
profiles
|
|
|
|
->StartProfiling("4us", {v8::CpuProfilingMode::kLeafNodeLineNumbers,
|
|
|
|
v8::CpuProfilingOptions::kNoSampleLimit, 4})
|
|
|
|
.id;
|
2019-05-14 20:24:49 +00:00
|
|
|
CHECK_EQ(profiles->GetCommonSamplingInterval(),
|
|
|
|
base::TimeDelta::FromMicroseconds(7));
|
|
|
|
|
|
|
|
// Removing the 4us sampling interval should restore the 14us sampling
|
|
|
|
// interval.
|
2022-04-05 22:45:41 +00:00
|
|
|
profiles->StopProfiling(id_4us);
|
2019-05-14 20:24:49 +00:00
|
|
|
CHECK_EQ(profiles->GetCommonSamplingInterval(),
|
|
|
|
base::TimeDelta::FromMicroseconds(14));
|
|
|
|
|
|
|
|
// Removing the 8us sampling interval should unset the common sampling
|
|
|
|
// interval.
|
2022-04-05 22:45:41 +00:00
|
|
|
profiles->StopProfiling(id_8us);
|
2019-05-14 20:24:49 +00:00
|
|
|
CHECK_EQ(profiles->GetCommonSamplingInterval(), base::TimeDelta());
|
|
|
|
|
|
|
|
// A sampling interval of 0us should enforce all profiles to have a sampling
|
|
|
|
// interval of 0us (the only multiple of 0).
|
|
|
|
profiler.set_sampling_interval(base::TimeDelta::FromMicroseconds(0));
|
2022-04-05 22:45:41 +00:00
|
|
|
ProfilerId id_5us =
|
|
|
|
profiles
|
|
|
|
->StartProfiling("5us", {v8::CpuProfilingMode::kLeafNodeLineNumbers,
|
|
|
|
v8::CpuProfilingOptions::kNoSampleLimit, 5})
|
|
|
|
.id;
|
2019-05-14 20:24:49 +00:00
|
|
|
CHECK_EQ(profiles->GetCommonSamplingInterval(),
|
|
|
|
base::TimeDelta::FromMicroseconds(0));
|
2022-04-05 22:45:41 +00:00
|
|
|
profiles->StopProfiling(id_5us);
|
2019-05-14 20:24:49 +00:00
|
|
|
}
|
|
|
|
|
2019-07-18 23:18:18 +00:00
|
|
|
// Tests that functions compiled after a started profiler is stopped are still
|
|
|
|
// visible when the profiler is started again. (https://crbug.com/v8/9151)
|
|
|
|
TEST(Bug9151StaleCodeEntries) {
|
|
|
|
LocalContext env;
|
|
|
|
v8::HandleScope scope(env->GetIsolate());
|
|
|
|
|
|
|
|
v8::Local<v8::FunctionTemplate> func_template =
|
|
|
|
v8::FunctionTemplate::New(env->GetIsolate(), CallCollectSample);
|
|
|
|
v8::Local<v8::Function> func =
|
|
|
|
func_template->GetFunction(env.local()).ToLocalChecked();
|
|
|
|
func->SetName(v8_str("CallCollectSample"));
|
|
|
|
env->Global()->Set(env.local(), v8_str("CallCollectSample"), func).FromJust();
|
|
|
|
|
|
|
|
v8::CpuProfiler* profiler =
|
|
|
|
v8::CpuProfiler::New(env->GetIsolate(), kDebugNaming, kEagerLogging);
|
|
|
|
v8::Local<v8::String> profile_name = v8_str("");
|
|
|
|
|
|
|
|
// Warm up the profiler to create the initial code map.
|
|
|
|
profiler->StartProfiling(profile_name);
|
|
|
|
profiler->StopProfiling(profile_name);
|
|
|
|
|
|
|
|
// Log a function compilation (executed once to force a compilation).
|
|
|
|
CompileRun(R"(
|
|
|
|
function start() {
|
|
|
|
CallCollectSample();
|
|
|
|
}
|
|
|
|
start();
|
|
|
|
)");
|
|
|
|
|
|
|
|
// Restart the profiler, and execute both the JS function and callback.
|
|
|
|
profiler->StartProfiling(profile_name, true);
|
|
|
|
CompileRun("start();");
|
|
|
|
v8::CpuProfile* profile = profiler->StopProfiling(profile_name);
|
|
|
|
|
|
|
|
auto* root = profile->GetTopDownRoot();
|
|
|
|
auto* toplevel = GetChild(env.local(), root, "");
|
|
|
|
|
|
|
|
auto* start = FindChild(env.local(), toplevel, "start");
|
|
|
|
CHECK(start);
|
|
|
|
|
|
|
|
auto* callback = FindChild(env.local(), start, "CallCollectSample");
|
|
|
|
CHECK(callback);
|
|
|
|
}
|
|
|
|
|
2021-04-15 08:41:48 +00:00
|
|
|
// Tests that functions from other contexts aren't recorded when filtering for
|
|
|
|
// another context.
|
|
|
|
TEST(ContextIsolation) {
|
|
|
|
i::FLAG_allow_natives_syntax = true;
|
|
|
|
LocalContext execution_env;
|
|
|
|
i::HandleScope scope(CcTest::i_isolate());
|
|
|
|
|
|
|
|
// Install CollectSample callback for more deterministic sampling.
|
|
|
|
v8::Local<v8::FunctionTemplate> func_template = v8::FunctionTemplate::New(
|
|
|
|
execution_env.local()->GetIsolate(), CallCollectSample);
|
|
|
|
v8::Local<v8::Function> func =
|
|
|
|
func_template->GetFunction(execution_env.local()).ToLocalChecked();
|
|
|
|
func->SetName(v8_str("CallCollectSample"));
|
|
|
|
execution_env->Global()
|
|
|
|
->Set(execution_env.local(), v8_str("CallCollectSample"), func)
|
|
|
|
.FromJust();
|
|
|
|
|
|
|
|
ProfilerHelper helper(execution_env.local());
|
|
|
|
CompileRun(R"(
|
|
|
|
function optimized() {
|
|
|
|
CallCollectSample();
|
|
|
|
}
|
|
|
|
|
|
|
|
function unoptimized() {
|
|
|
|
CallCollectSample();
|
|
|
|
}
|
|
|
|
|
|
|
|
function start() {
|
|
|
|
// Test optimized functions
|
|
|
|
%PrepareFunctionForOptimization(optimized);
|
|
|
|
optimized();
|
|
|
|
optimized();
|
|
|
|
%OptimizeFunctionOnNextCall(optimized);
|
|
|
|
optimized();
|
|
|
|
|
|
|
|
// Test unoptimized functions
|
|
|
|
%NeverOptimizeFunction(unoptimized);
|
|
|
|
unoptimized();
|
|
|
|
|
|
|
|
// Test callback
|
|
|
|
CallCollectSample();
|
|
|
|
}
|
|
|
|
)");
|
|
|
|
v8::Local<v8::Function> function =
|
|
|
|
GetFunction(execution_env.local(), "start");
|
|
|
|
|
|
|
|
v8::CpuProfile* same_context_profile = helper.Run(
|
|
|
|
function, nullptr, 0, 0, 0, v8::CpuProfilingMode::kLeafNodeLineNumbers,
|
|
|
|
v8::CpuProfilingOptions::kNoSampleLimit, execution_env.local());
|
|
|
|
const v8::CpuProfileNode* root = same_context_profile->GetTopDownRoot();
|
|
|
|
const v8::CpuProfileNode* start_node = FindChild(root, "start");
|
|
|
|
CHECK(start_node);
|
|
|
|
const v8::CpuProfileNode* optimized_node = FindChild(start_node, "optimized");
|
|
|
|
CHECK(optimized_node);
|
|
|
|
const v8::CpuProfileNode* unoptimized_node =
|
|
|
|
FindChild(start_node, "unoptimized");
|
|
|
|
CHECK(unoptimized_node);
|
|
|
|
const v8::CpuProfileNode* callback_node =
|
|
|
|
FindChild(start_node, "CallCollectSample");
|
|
|
|
CHECK(callback_node);
|
|
|
|
|
|
|
|
{
|
|
|
|
LocalContext filter_env;
|
|
|
|
v8::CpuProfile* diff_context_profile = helper.Run(
|
|
|
|
function, nullptr, 0, 0, 0, v8::CpuProfilingMode::kLeafNodeLineNumbers,
|
|
|
|
v8::CpuProfilingOptions::kNoSampleLimit, filter_env.local());
|
|
|
|
const v8::CpuProfileNode* diff_root =
|
|
|
|
diff_context_profile->GetTopDownRoot();
|
|
|
|
// Ensure that no children were recorded (including callbacks, builtins).
|
|
|
|
CHECK(!FindChild(diff_root, "start"));
|
2022-02-23 07:16:43 +00:00
|
|
|
|
|
|
|
CHECK_GT(diff_context_profile->GetSamplesCount(), 0);
|
|
|
|
for (int i = 0; i < diff_context_profile->GetSamplesCount(); i++) {
|
|
|
|
CHECK(diff_context_profile->GetSampleState(i) == StateTag::IDLE ||
|
|
|
|
// GC states do not have a context
|
|
|
|
diff_context_profile->GetSampleState(i) == StateTag::GC ||
|
|
|
|
// the first frame and native code report as external
|
|
|
|
diff_context_profile->GetSampleState(i) == StateTag::EXTERNAL);
|
|
|
|
}
|
2021-04-15 08:41:48 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-12-07 07:28:08 +00:00
|
|
|
void ValidateEmbedderState(v8::CpuProfile* profile,
|
|
|
|
EmbedderStateTag expected_tag) {
|
|
|
|
for (int i = 0; i < profile->GetSamplesCount(); i++) {
|
|
|
|
if (profile->GetSampleState(i) == StateTag::GC) {
|
|
|
|
// Samples captured during a GC do not have an EmbedderState
|
|
|
|
CHECK_EQ(profile->GetSampleEmbedderState(i), EmbedderStateTag::EMPTY);
|
|
|
|
} else {
|
|
|
|
CHECK_EQ(profile->GetSampleEmbedderState(i), expected_tag);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Tests that embedder states from other contexts aren't recorded
|
|
|
|
TEST(EmbedderContextIsolation) {
|
|
|
|
i::FLAG_allow_natives_syntax = true;
|
|
|
|
LocalContext execution_env;
|
|
|
|
i::HandleScope scope(CcTest::i_isolate());
|
|
|
|
|
|
|
|
v8::Isolate* isolate = execution_env.local()->GetIsolate();
|
|
|
|
|
|
|
|
// Install CollectSample callback for more deterministic sampling.
|
|
|
|
v8::Local<v8::FunctionTemplate> func_template =
|
|
|
|
v8::FunctionTemplate::New(isolate, CallCollectSample);
|
|
|
|
v8::Local<v8::Function> func =
|
|
|
|
func_template->GetFunction(execution_env.local()).ToLocalChecked();
|
|
|
|
func->SetName(v8_str("CallCollectSample"));
|
|
|
|
execution_env->Global()
|
|
|
|
->Set(execution_env.local(), v8_str("CallCollectSample"), func)
|
|
|
|
.FromJust();
|
|
|
|
|
|
|
|
v8::Local<v8::Context> diff_context = v8::Context::New(isolate);
|
|
|
|
{
|
|
|
|
CHECK_NULL(CcTest::i_isolate()->current_embedder_state());
|
|
|
|
// Prepare the other embedder state.
|
|
|
|
EmbedderStateScope scope(isolate, diff_context, EmbedderStateTag::OTHER);
|
|
|
|
CHECK_EQ(CcTest::i_isolate()->current_embedder_state()->GetState(),
|
|
|
|
EmbedderStateTag::OTHER);
|
|
|
|
|
|
|
|
ProfilerHelper helper(execution_env.local());
|
|
|
|
CompileRun(R"(
|
|
|
|
function optimized() {
|
|
|
|
CallCollectSample();
|
|
|
|
}
|
|
|
|
|
|
|
|
function unoptimized() {
|
|
|
|
CallCollectSample();
|
|
|
|
}
|
|
|
|
|
|
|
|
function start() {
|
|
|
|
// Test optimized functions
|
|
|
|
%PrepareFunctionForOptimization(optimized);
|
|
|
|
optimized();
|
|
|
|
optimized();
|
|
|
|
%OptimizeFunctionOnNextCall(optimized);
|
|
|
|
optimized();
|
|
|
|
|
|
|
|
// Test unoptimized functions
|
|
|
|
%NeverOptimizeFunction(unoptimized);
|
|
|
|
unoptimized();
|
|
|
|
|
|
|
|
// Test callback
|
|
|
|
CallCollectSample();
|
|
|
|
}
|
|
|
|
)");
|
|
|
|
v8::Local<v8::Function> function =
|
|
|
|
GetFunction(execution_env.local(), "start");
|
|
|
|
|
|
|
|
v8::CpuProfile* profile = helper.Run(
|
|
|
|
function, nullptr, 0, 0, 0, v8::CpuProfilingMode::kLeafNodeLineNumbers,
|
|
|
|
v8::CpuProfilingOptions::kNoSampleLimit, execution_env.local());
|
|
|
|
ValidateEmbedderState(profile, EmbedderStateTag::EMPTY);
|
|
|
|
}
|
|
|
|
CHECK_NULL(CcTest::i_isolate()->current_embedder_state());
|
|
|
|
}
|
|
|
|
|
|
|
|
// Tests that embedder states from the same context are recorded
|
|
|
|
TEST(EmbedderStatePropagate) {
|
|
|
|
i::FLAG_allow_natives_syntax = true;
|
|
|
|
LocalContext execution_env;
|
|
|
|
i::HandleScope scope(CcTest::i_isolate());
|
|
|
|
|
|
|
|
v8::Isolate* isolate = execution_env.local()->GetIsolate();
|
|
|
|
|
|
|
|
// Install CollectSample callback for more deterministic sampling.
|
|
|
|
v8::Local<v8::FunctionTemplate> func_template =
|
|
|
|
v8::FunctionTemplate::New(isolate, CallCollectSample);
|
|
|
|
v8::Local<v8::Function> func =
|
|
|
|
func_template->GetFunction(execution_env.local()).ToLocalChecked();
|
|
|
|
func->SetName(v8_str("CallCollectSample"));
|
|
|
|
execution_env->Global()
|
|
|
|
->Set(execution_env.local(), v8_str("CallCollectSample"), func)
|
|
|
|
.FromJust();
|
|
|
|
|
|
|
|
{
|
|
|
|
// Prepare the embedder state.
|
|
|
|
EmbedderState embedderState(isolate, execution_env.local(),
|
|
|
|
EmbedderStateTag::OTHER);
|
|
|
|
CHECK_EQ(CcTest::i_isolate()->current_embedder_state(), &embedderState);
|
|
|
|
|
|
|
|
ProfilerHelper helper(execution_env.local());
|
|
|
|
CompileRun(R"(
|
|
|
|
function optimized() {
|
|
|
|
CallCollectSample();
|
|
|
|
}
|
|
|
|
|
|
|
|
function unoptimized() {
|
|
|
|
CallCollectSample();
|
|
|
|
}
|
|
|
|
|
|
|
|
function start() {
|
|
|
|
// Test optimized functions
|
|
|
|
%PrepareFunctionForOptimization(optimized);
|
|
|
|
optimized();
|
|
|
|
optimized();
|
|
|
|
%OptimizeFunctionOnNextCall(optimized);
|
|
|
|
optimized();
|
|
|
|
|
|
|
|
// Test unoptimized functions
|
|
|
|
%NeverOptimizeFunction(unoptimized);
|
|
|
|
unoptimized();
|
|
|
|
|
|
|
|
// Test callback
|
|
|
|
CallCollectSample();
|
|
|
|
}
|
|
|
|
)");
|
|
|
|
v8::Local<v8::Function> function =
|
|
|
|
GetFunction(execution_env.local(), "start");
|
|
|
|
|
|
|
|
v8::CpuProfile* profile = helper.Run(
|
|
|
|
function, nullptr, 0, 0, 0, v8::CpuProfilingMode::kLeafNodeLineNumbers,
|
|
|
|
v8::CpuProfilingOptions::kNoSampleLimit, execution_env.local());
|
|
|
|
ValidateEmbedderState(profile, EmbedderStateTag::OTHER);
|
|
|
|
}
|
|
|
|
CHECK_NULL(CcTest::i_isolate()->current_embedder_state());
|
|
|
|
}
|
|
|
|
|
|
|
|
// Tests that embedder states from the same context are recorded
|
|
|
|
// even after a native context move
|
|
|
|
TEST(EmbedderStatePropagateNativeContextMove) {
|
|
|
|
// Reusing context addresses will cause this test to fail.
|
|
|
|
if (i::FLAG_gc_global || i::FLAG_stress_compaction ||
|
|
|
|
i::FLAG_stress_incremental_marking || i::FLAG_enable_third_party_heap) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
i::FLAG_allow_natives_syntax = true;
|
|
|
|
i::FLAG_manual_evacuation_candidates_selection = true;
|
|
|
|
LocalContext execution_env;
|
|
|
|
i::HandleScope scope(CcTest::i_isolate());
|
|
|
|
|
|
|
|
v8::Isolate* isolate = execution_env.local()->GetIsolate();
|
|
|
|
|
|
|
|
// Install CollectSample callback for more deterministic sampling.
|
|
|
|
v8::Local<v8::FunctionTemplate> func_template =
|
|
|
|
v8::FunctionTemplate::New(isolate, CallCollectSample);
|
|
|
|
v8::Local<v8::Function> func =
|
|
|
|
func_template->GetFunction(execution_env.local()).ToLocalChecked();
|
|
|
|
func->SetName(v8_str("CallCollectSample"));
|
|
|
|
execution_env->Global()
|
|
|
|
->Set(execution_env.local(), v8_str("CallCollectSample"), func)
|
|
|
|
.FromJust();
|
|
|
|
|
|
|
|
{
|
|
|
|
// Prepare the embedder state.
|
|
|
|
EmbedderState embedderState(isolate, execution_env.local(),
|
|
|
|
EmbedderStateTag::OTHER);
|
|
|
|
CHECK_EQ(CcTest::i_isolate()->current_embedder_state(), &embedderState);
|
|
|
|
|
|
|
|
i::Address initial_address =
|
|
|
|
CcTest::i_isolate()->current_embedder_state()->native_context_address();
|
|
|
|
|
|
|
|
// Install a function that triggers the native context to be moved.
|
|
|
|
v8::Local<v8::FunctionTemplate> move_func_template =
|
|
|
|
v8::FunctionTemplate::New(
|
|
|
|
execution_env.local()->GetIsolate(),
|
|
|
|
[](const v8::FunctionCallbackInfo<v8::Value>& info) {
|
|
|
|
i::Isolate* isolate =
|
|
|
|
reinterpret_cast<i::Isolate*>(info.GetIsolate());
|
|
|
|
i::heap::ForceEvacuationCandidate(
|
|
|
|
i::Page::FromHeapObject(isolate->raw_native_context()));
|
|
|
|
CcTest::CollectAllGarbage();
|
|
|
|
});
|
|
|
|
v8::Local<v8::Function> move_func =
|
|
|
|
move_func_template->GetFunction(execution_env.local()).ToLocalChecked();
|
|
|
|
move_func->SetName(v8_str("ForceNativeContextMove"));
|
|
|
|
execution_env->Global()
|
|
|
|
->Set(execution_env.local(), v8_str("ForceNativeContextMove"),
|
|
|
|
move_func)
|
|
|
|
.FromJust();
|
|
|
|
|
|
|
|
ProfilerHelper helper(execution_env.local());
|
|
|
|
CompileRun(R"(
|
|
|
|
function start() {
|
|
|
|
ForceNativeContextMove();
|
|
|
|
CallCollectSample();
|
|
|
|
}
|
|
|
|
)");
|
|
|
|
v8::Local<v8::Function> function =
|
|
|
|
GetFunction(execution_env.local(), "start");
|
|
|
|
|
|
|
|
v8::CpuProfile* profile = helper.Run(
|
|
|
|
function, nullptr, 0, 0, 0, v8::CpuProfilingMode::kLeafNodeLineNumbers,
|
|
|
|
v8::CpuProfilingOptions::kNoSampleLimit, execution_env.local());
|
|
|
|
ValidateEmbedderState(profile, EmbedderStateTag::OTHER);
|
|
|
|
|
|
|
|
i::Address new_address =
|
|
|
|
CcTest::i_isolate()->current_embedder_state()->native_context_address();
|
|
|
|
CHECK_NE(initial_address, new_address);
|
|
|
|
}
|
|
|
|
CHECK_NULL(CcTest::i_isolate()->current_embedder_state());
|
|
|
|
}
|
|
|
|
|
2021-04-15 08:41:48 +00:00
|
|
|
// Tests that when a native context that's being filtered is moved, we continue
|
|
|
|
// to track its execution.
|
|
|
|
TEST(ContextFilterMovedNativeContext) {
|
2021-05-06 08:49:22 +00:00
|
|
|
if (i::FLAG_enable_third_party_heap) return;
|
2021-04-15 08:41:48 +00:00
|
|
|
i::FLAG_allow_natives_syntax = true;
|
|
|
|
i::FLAG_manual_evacuation_candidates_selection = true;
|
|
|
|
LocalContext env;
|
|
|
|
i::HandleScope scope(CcTest::i_isolate());
|
|
|
|
|
|
|
|
{
|
|
|
|
// Install CollectSample callback for more deterministic sampling.
|
|
|
|
v8::Local<v8::FunctionTemplate> sample_func_template =
|
|
|
|
v8::FunctionTemplate::New(env.local()->GetIsolate(), CallCollectSample);
|
|
|
|
v8::Local<v8::Function> sample_func =
|
|
|
|
sample_func_template->GetFunction(env.local()).ToLocalChecked();
|
|
|
|
sample_func->SetName(v8_str("CallCollectSample"));
|
|
|
|
env->Global()
|
|
|
|
->Set(env.local(), v8_str("CallCollectSample"), sample_func)
|
|
|
|
.FromJust();
|
|
|
|
|
|
|
|
// Install a function that triggers the native context to be moved.
|
|
|
|
v8::Local<v8::FunctionTemplate> move_func_template =
|
|
|
|
v8::FunctionTemplate::New(
|
|
|
|
env.local()->GetIsolate(),
|
|
|
|
[](const v8::FunctionCallbackInfo<v8::Value>& info) {
|
|
|
|
i::Isolate* isolate =
|
|
|
|
reinterpret_cast<i::Isolate*>(info.GetIsolate());
|
|
|
|
i::heap::ForceEvacuationCandidate(
|
|
|
|
i::Page::FromHeapObject(isolate->raw_native_context()));
|
|
|
|
CcTest::CollectAllGarbage();
|
|
|
|
});
|
|
|
|
v8::Local<v8::Function> move_func =
|
|
|
|
move_func_template->GetFunction(env.local()).ToLocalChecked();
|
|
|
|
move_func->SetName(v8_str("ForceNativeContextMove"));
|
|
|
|
env->Global()
|
|
|
|
->Set(env.local(), v8_str("ForceNativeContextMove"), move_func)
|
|
|
|
.FromJust();
|
|
|
|
|
|
|
|
ProfilerHelper helper(env.local());
|
|
|
|
CompileRun(R"(
|
|
|
|
function start() {
|
|
|
|
ForceNativeContextMove();
|
|
|
|
CallCollectSample();
|
|
|
|
}
|
|
|
|
)");
|
|
|
|
v8::Local<v8::Function> function = GetFunction(env.local(), "start");
|
|
|
|
|
|
|
|
v8::CpuProfile* profile = helper.Run(
|
|
|
|
function, nullptr, 0, 0, 0, v8::CpuProfilingMode::kLeafNodeLineNumbers,
|
|
|
|
v8::CpuProfilingOptions::kNoSampleLimit, env.local());
|
|
|
|
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
|
|
|
|
const v8::CpuProfileNode* start_node = FindChild(root, "start");
|
|
|
|
CHECK(start_node);
|
|
|
|
|
|
|
|
// Verify that after moving the native context, CallCollectSample is still
|
|
|
|
// recorded.
|
|
|
|
const v8::CpuProfileNode* callback_node =
|
|
|
|
FindChild(start_node, "CallCollectSample");
|
|
|
|
CHECK(callback_node);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-02-19 09:24:16 +00:00
|
|
|
enum class EntryCountMode { kAll, kOnlyInlined };
|
|
|
|
|
|
|
|
// Count the number of unique source positions.
|
|
|
|
int GetSourcePositionEntryCount(i::Isolate* isolate, const char* source,
|
|
|
|
EntryCountMode mode = EntryCountMode::kAll) {
|
|
|
|
std::unordered_set<int64_t> raw_position_set;
|
2018-11-09 10:52:50 +00:00
|
|
|
i::Handle<i::JSFunction> function = i::Handle<i::JSFunction>::cast(
|
|
|
|
v8::Utils::OpenHandle(*CompileRun(source)));
|
2020-08-11 09:24:47 +00:00
|
|
|
if (function->ActiveTierIsIgnition()) return -1;
|
2021-12-13 14:41:58 +00:00
|
|
|
i::Handle<i::Code> code(i::FromCodeT(function->code()), isolate);
|
2018-11-09 10:52:50 +00:00
|
|
|
i::SourcePositionTableIterator iterator(
|
|
|
|
ByteArray::cast(code->source_position_table()));
|
2019-02-19 09:24:16 +00:00
|
|
|
|
2018-11-09 10:52:50 +00:00
|
|
|
while (!iterator.done()) {
|
2019-02-19 09:24:16 +00:00
|
|
|
if (mode == EntryCountMode::kAll ||
|
|
|
|
iterator.source_position().isInlined()) {
|
|
|
|
raw_position_set.insert(iterator.source_position().raw());
|
|
|
|
}
|
2018-11-09 10:52:50 +00:00
|
|
|
iterator.Advance();
|
|
|
|
}
|
2019-02-19 09:24:16 +00:00
|
|
|
return static_cast<int>(raw_position_set.size());
|
2018-11-09 10:52:50 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
UNINITIALIZED_TEST(DetailedSourcePositionAPI) {
|
|
|
|
i::FLAG_detailed_line_info = false;
|
|
|
|
i::FLAG_allow_natives_syntax = true;
|
|
|
|
v8::Isolate::CreateParams create_params;
|
|
|
|
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
|
|
|
|
v8::Isolate* isolate = v8::Isolate::New(create_params);
|
|
|
|
|
|
|
|
const char* source =
|
|
|
|
"function fib(i) {"
|
|
|
|
" if (i <= 1) return 1; "
|
|
|
|
" return fib(i - 1) +"
|
|
|
|
" fib(i - 2);"
|
|
|
|
"}"
|
2019-04-30 11:04:41 +00:00
|
|
|
"%PrepareFunctionForOptimization(fib);\n"
|
2018-11-09 10:52:50 +00:00
|
|
|
"fib(5);"
|
|
|
|
"%OptimizeFunctionOnNextCall(fib);"
|
|
|
|
"fib(5);"
|
|
|
|
"fib";
|
|
|
|
{
|
|
|
|
v8::Isolate::Scope isolate_scope(isolate);
|
|
|
|
v8::HandleScope handle_scope(isolate);
|
|
|
|
v8::Local<v8::Context> context = v8::Context::New(isolate);
|
|
|
|
v8::Context::Scope context_scope(context);
|
|
|
|
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
|
|
|
|
|
|
|
|
CHECK(!i_isolate->NeedsDetailedOptimizedCodeLineInfo());
|
|
|
|
|
|
|
|
int non_detailed_positions = GetSourcePositionEntryCount(i_isolate, source);
|
|
|
|
|
|
|
|
v8::CpuProfiler::UseDetailedSourcePositionsForProfiling(isolate);
|
|
|
|
CHECK(i_isolate->NeedsDetailedOptimizedCodeLineInfo());
|
|
|
|
|
|
|
|
int detailed_positions = GetSourcePositionEntryCount(i_isolate, source);
|
|
|
|
|
|
|
|
CHECK((non_detailed_positions == -1 && detailed_positions == -1) ||
|
|
|
|
non_detailed_positions < detailed_positions);
|
|
|
|
}
|
|
|
|
|
|
|
|
isolate->Dispose();
|
|
|
|
}
|
|
|
|
|
2019-02-19 09:24:16 +00:00
|
|
|
UNINITIALIZED_TEST(DetailedSourcePositionAPI_Inlining) {
|
|
|
|
i::FLAG_detailed_line_info = false;
|
2019-02-20 11:45:50 +00:00
|
|
|
i::FLAG_turbo_inlining = true;
|
2019-02-19 09:24:16 +00:00
|
|
|
i::FLAG_stress_inline = true;
|
2022-04-28 14:22:23 +00:00
|
|
|
i::FLAG_always_turbofan = false;
|
2019-02-19 09:24:16 +00:00
|
|
|
i::FLAG_allow_natives_syntax = true;
|
|
|
|
v8::Isolate::CreateParams create_params;
|
|
|
|
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
|
|
|
|
v8::Isolate* isolate = v8::Isolate::New(create_params);
|
|
|
|
|
|
|
|
const char* source = R"(
|
|
|
|
function foo(x) {
|
|
|
|
return bar(x) + 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
function bar(x) {
|
|
|
|
var y = 1;
|
|
|
|
for (var i = 0; i < x; ++i) {
|
|
|
|
y = y * x;
|
|
|
|
}
|
|
|
|
return x;
|
|
|
|
}
|
|
|
|
|
2019-05-08 15:54:26 +00:00
|
|
|
%EnsureFeedbackVectorForFunction(bar);
|
2019-04-30 11:04:41 +00:00
|
|
|
%PrepareFunctionForOptimization(foo);
|
2019-02-19 09:24:16 +00:00
|
|
|
foo(5);
|
|
|
|
%OptimizeFunctionOnNextCall(foo);
|
|
|
|
foo(5);
|
|
|
|
foo;
|
|
|
|
)";
|
|
|
|
|
|
|
|
{
|
|
|
|
v8::Isolate::Scope isolate_scope(isolate);
|
|
|
|
v8::HandleScope handle_scope(isolate);
|
|
|
|
v8::Local<v8::Context> context = v8::Context::New(isolate);
|
|
|
|
v8::Context::Scope context_scope(context);
|
|
|
|
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
|
|
|
|
|
|
|
|
CHECK(!i_isolate->NeedsDetailedOptimizedCodeLineInfo());
|
|
|
|
|
|
|
|
int non_detailed_positions =
|
|
|
|
GetSourcePositionEntryCount(i_isolate, source, EntryCountMode::kAll);
|
|
|
|
int non_detailed_inlined_positions = GetSourcePositionEntryCount(
|
|
|
|
i_isolate, source, EntryCountMode::kOnlyInlined);
|
|
|
|
|
|
|
|
v8::CpuProfiler::UseDetailedSourcePositionsForProfiling(isolate);
|
|
|
|
CHECK(i_isolate->NeedsDetailedOptimizedCodeLineInfo());
|
|
|
|
|
|
|
|
int detailed_positions =
|
|
|
|
GetSourcePositionEntryCount(i_isolate, source, EntryCountMode::kAll);
|
|
|
|
int detailed_inlined_positions = GetSourcePositionEntryCount(
|
|
|
|
i_isolate, source, EntryCountMode::kOnlyInlined);
|
|
|
|
|
|
|
|
if (non_detailed_positions == -1) {
|
|
|
|
CHECK_EQ(non_detailed_positions, detailed_positions);
|
|
|
|
} else {
|
|
|
|
CHECK_LT(non_detailed_positions, detailed_positions);
|
|
|
|
CHECK_LT(non_detailed_inlined_positions, detailed_inlined_positions);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
isolate->Dispose();
|
|
|
|
}
|
|
|
|
|
2020-11-18 11:19:56 +00:00
|
|
|
namespace {
|
|
|
|
|
|
|
|
struct FastApiReceiver {
|
2021-05-12 08:54:52 +00:00
|
|
|
static void FastCallback(v8::Local<v8::Object> receiver, int argument,
|
2021-02-10 08:35:18 +00:00
|
|
|
v8::FastApiCallbackOptions& options) {
|
|
|
|
// TODO(mslekova): The fallback is not used by the test. Replace this
|
|
|
|
// with a CHECK.
|
2021-05-12 08:54:52 +00:00
|
|
|
if (!IsValidUnwrapObject(*receiver)) {
|
2022-01-23 20:59:53 +00:00
|
|
|
options.fallback = true;
|
2020-11-18 11:19:56 +00:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
FastApiReceiver* receiver_ptr =
|
2021-05-12 08:54:52 +00:00
|
|
|
GetInternalField<FastApiReceiver>(*receiver);
|
2020-11-18 11:19:56 +00:00
|
|
|
|
|
|
|
receiver_ptr->result_ |= ApiCheckerResult::kFastCalled;
|
|
|
|
|
|
|
|
// Artificially slow down the callback with a predictable amount of time.
|
|
|
|
// This ensures the test has a relatively stable run time on various
|
|
|
|
// platforms and protects it from flakiness.
|
|
|
|
v8::base::OS::Sleep(v8::base::TimeDelta::FromMilliseconds(100));
|
|
|
|
}
|
|
|
|
|
|
|
|
static void SlowCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
|
|
|
|
v8::Object* receiver_obj = v8::Object::Cast(*info.Holder());
|
|
|
|
if (!IsValidUnwrapObject(receiver_obj)) {
|
2021-04-15 08:10:06 +00:00
|
|
|
info.GetIsolate()->ThrowError("Called with a non-object.");
|
2020-11-18 11:19:56 +00:00
|
|
|
return;
|
|
|
|
}
|
2021-04-20 08:48:41 +00:00
|
|
|
FastApiReceiver* receiver = GetInternalField<FastApiReceiver>(receiver_obj);
|
2020-11-18 11:19:56 +00:00
|
|
|
|
|
|
|
receiver->result_ |= ApiCheckerResult::kSlowCalled;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool DidCallFast() const { return (result_ & ApiCheckerResult::kFastCalled); }
|
|
|
|
bool DidCallSlow() const { return (result_ & ApiCheckerResult::kSlowCalled); }
|
|
|
|
|
|
|
|
ApiCheckerResultFlags result_ = ApiCheckerResult::kNotCalled;
|
|
|
|
};
|
|
|
|
|
|
|
|
} // namespace
|
|
|
|
|
|
|
|
v8::Local<v8::Function> CreateApiCode(LocalContext* env) {
|
|
|
|
const char* foo_name = "foo";
|
|
|
|
const char* script =
|
|
|
|
"function foo(arg) {"
|
|
|
|
" for (let i = 0; i < arg; ++i) { receiver.api_func(i); }"
|
|
|
|
"}"
|
|
|
|
"%PrepareFunctionForOptimization(foo);"
|
|
|
|
"foo(42); foo(42);"
|
|
|
|
"%OptimizeFunctionOnNextCall(foo);";
|
|
|
|
CompileRun(script);
|
|
|
|
|
|
|
|
return GetFunction(env->local(), foo_name);
|
|
|
|
}
|
|
|
|
|
2022-04-05 22:45:41 +00:00
|
|
|
TEST(CanStartStopProfilerWithTitlesAndIds) {
|
|
|
|
TestSetup test_setup;
|
|
|
|
LocalContext env;
|
|
|
|
i::Isolate* isolate = CcTest::i_isolate();
|
|
|
|
i::HandleScope scope(isolate);
|
|
|
|
|
|
|
|
CpuProfiler profiler(isolate, kDebugNaming, kLazyLogging);
|
|
|
|
ProfilerId anonymous_id_1 = profiler.StartProfiling().id;
|
|
|
|
ProfilerId title_id = profiler.StartProfiling("title").id;
|
|
|
|
ProfilerId anonymous_id_2 = profiler.StartProfiling().id;
|
|
|
|
|
|
|
|
CHECK_NE(anonymous_id_1, title_id);
|
|
|
|
CHECK_NE(anonymous_id_1, anonymous_id_2);
|
|
|
|
CHECK_NE(anonymous_id_2, title_id);
|
|
|
|
|
|
|
|
CpuProfile* profile_with_title = profiler.StopProfiling("title");
|
|
|
|
CHECK(profile_with_title);
|
|
|
|
CHECK_EQ(title_id, profile_with_title->id());
|
|
|
|
|
|
|
|
CpuProfile* profile_with_id = profiler.StopProfiling(anonymous_id_1);
|
|
|
|
CHECK(profile_with_id);
|
|
|
|
CHECK_EQ(anonymous_id_1, profile_with_id->id());
|
|
|
|
|
|
|
|
CpuProfile* profile_with_id_2 = profiler.StopProfiling(anonymous_id_2);
|
|
|
|
CHECK(profile_with_id_2);
|
|
|
|
CHECK_EQ(anonymous_id_2, profile_with_id_2->id());
|
|
|
|
}
|
|
|
|
|
2020-11-18 11:19:56 +00:00
|
|
|
TEST(FastApiCPUProfiler) {
|
|
|
|
#if !defined(V8_LITE_MODE) && !defined(USE_SIMULATOR)
|
2020-11-19 13:05:55 +00:00
|
|
|
// None of the following configurations include JSCallReducer.
|
2020-11-18 11:19:56 +00:00
|
|
|
if (i::FLAG_jitless) return;
|
|
|
|
|
2022-04-28 14:22:23 +00:00
|
|
|
FLAG_SCOPE(turbofan);
|
2021-06-30 14:17:08 +00:00
|
|
|
FLAG_SCOPE(turbo_fast_api_calls);
|
|
|
|
FLAG_SCOPE(allow_natives_syntax);
|
2022-04-28 14:22:23 +00:00
|
|
|
// Disable --always_turbofan, otherwise we won't have generated the necessary
|
2020-11-18 11:19:56 +00:00
|
|
|
// feedback to go down the "best optimization" path for the fast call.
|
2022-04-28 14:22:23 +00:00
|
|
|
FLAG_VALUE_SCOPE(always_turbofan, false);
|
2021-06-30 14:17:08 +00:00
|
|
|
FLAG_VALUE_SCOPE(prof_browser_mode, false);
|
2020-11-18 11:19:56 +00:00
|
|
|
|
|
|
|
CcTest::InitializeVM();
|
|
|
|
LocalContext env;
|
|
|
|
v8::Isolate* isolate = CcTest::isolate();
|
|
|
|
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
|
|
|
|
i_isolate->set_embedder_wrapper_type_index(kV8WrapperTypeIndex);
|
|
|
|
i_isolate->set_embedder_wrapper_object_index(kV8WrapperObjectIndex);
|
|
|
|
|
|
|
|
i::HandleScope scope(i_isolate);
|
|
|
|
|
|
|
|
// Set up the fast call.
|
|
|
|
FastApiReceiver receiver;
|
|
|
|
|
|
|
|
v8::TryCatch try_catch(isolate);
|
|
|
|
|
2021-01-20 17:45:29 +00:00
|
|
|
v8::CFunction c_func = v8::CFunction::Make(FastApiReceiver::FastCallback);
|
2020-11-18 11:19:56 +00:00
|
|
|
|
|
|
|
Local<v8::FunctionTemplate> receiver_templ = v8::FunctionTemplate::New(
|
|
|
|
isolate, FastApiReceiver::SlowCallback, v8::Local<v8::Value>(),
|
2021-12-03 15:15:28 +00:00
|
|
|
v8::Local<v8::Signature>(), 1, v8::ConstructorBehavior::kThrow,
|
2020-11-18 11:19:56 +00:00
|
|
|
v8::SideEffectType::kHasSideEffect, &c_func);
|
|
|
|
|
|
|
|
v8::Local<v8::ObjectTemplate> object_template =
|
|
|
|
v8::ObjectTemplate::New(isolate);
|
|
|
|
object_template->SetInternalFieldCount(kV8WrapperObjectIndex + 1);
|
|
|
|
const char* api_func_str = "api_func";
|
|
|
|
object_template->Set(isolate, api_func_str, receiver_templ);
|
|
|
|
|
|
|
|
v8::Local<v8::Object> object =
|
|
|
|
object_template->NewInstance(env.local()).ToLocalChecked();
|
|
|
|
object->SetAlignedPointerInInternalField(kV8WrapperObjectIndex,
|
|
|
|
reinterpret_cast<void*>(&receiver));
|
|
|
|
|
|
|
|
int num_runs_arg = 100;
|
|
|
|
env->Global()->Set(env.local(), v8_str("receiver"), object).Check();
|
|
|
|
|
|
|
|
// Prepare the code.
|
|
|
|
v8::Local<v8::Function> function = CreateApiCode(&env);
|
|
|
|
|
|
|
|
// Set up and start the CPU profiler.
|
|
|
|
v8::Local<v8::Value> args[] = {
|
|
|
|
v8::Integer::New(env->GetIsolate(), num_runs_arg)};
|
2021-07-12 21:32:54 +00:00
|
|
|
ProfilerHelper helper(env.local(), kEagerLogging);
|
2020-11-18 11:19:56 +00:00
|
|
|
// TODO(mslekova): We could tweak the following count to reduce test
|
|
|
|
// runtime, while still keeping the test stable.
|
|
|
|
unsigned external_samples = 1000;
|
|
|
|
v8::CpuProfile* profile =
|
|
|
|
helper.Run(function, args, arraysize(args), 0, external_samples);
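  // ProfilerHelper::Run keeps invoking the function until the profile has
  // accumulated the requested number of samples, here at least 1000 samples
  // of EXTERNAL type (see the comment before the CHECK_GE below).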

  // Check that both the fast and the slow callbacks were executed.
  CHECK(receiver.DidCallFast());
  CHECK(receiver.DidCallSlow());
  CHECK(!try_catch.HasCaught());

  // Check that the generated profile has the expected structure.
  const v8::CpuProfileNode* root = profile->GetTopDownRoot();
  const v8::CpuProfileNode* foo_node = GetChild(env.local(), root, "foo");
  const v8::CpuProfileNode* api_func_node =
      GetChild(env.local(), foo_node, api_func_str);
  CHECK_NOT_NULL(api_func_node);
  CHECK_EQ(api_func_node->GetSourceType(), CpuProfileNode::kCallback);

  // Check that the CodeEntry is the expected one, i.e. the fast callback.
  CodeEntry* code_entry =
      reinterpret_cast<const ProfileNode*>(api_func_node)->entry();
  CodeMap* code_map = reinterpret_cast<CpuProfile*>(profile)
                          ->cpu_profiler()
                          ->code_map_for_test();
  CodeEntry* expected_code_entry =
      code_map->FindEntry(reinterpret_cast<Address>(c_func.GetAddress()));
  CHECK_EQ(code_entry, expected_code_entry);

  int foo_ticks = foo_node->GetHitCount();
  int api_func_ticks = api_func_node->GetHitCount();
  // Check that at least 80% of the samples in foo hit the fast callback.
  CHECK_LE(foo_ticks, api_func_ticks * 0.2);
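  // (foo_ticks <= 0.2 * api_func_ticks implies
  //  api_func_ticks / (foo_ticks + api_func_ticks) >= 1 / 1.2, i.e. ~83%,
  //  which satisfies the 80% requirement above.)
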
  // The constant in the following CHECK comes from expecting at least 1000
  // samples of EXTERNAL type (see external_samples above). Since the fast
  // callback is the only source of such samples, its node should get close
  // to 1000 ticks. Because the CPU profiler is nondeterministic, some slack
  // is allowed; otherwise the bound could be 1000 instead of 800.
  CHECK_GE(api_func_ticks, 800);

  profile->Delete();
#endif  // !defined(V8_LITE_MODE) && !defined(USE_SIMULATOR)
}

TEST(BytecodeFlushEventsEagerLogging) {
#ifndef V8_LITE_MODE
  FLAG_turbofan = false;
  FLAG_always_turbofan = false;
  i::FLAG_optimize_for_size = false;
#endif  // V8_LITE_MODE
#if ENABLE_SPARKPLUG
  FLAG_always_sparkplug = false;
#endif  // ENABLE_SPARKPLUG
  i::FLAG_flush_bytecode = true;
  i::FLAG_allow_natives_syntax = true;
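  // With TurboFan and Sparkplug disabled above, "foo" below only ever exists
  // as bytecode, so bytecode flushing is the code-removal path under test.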

  TestSetup test_setup;
  ManualGCScope manual_gc_scope;

  CcTest::InitializeVM();
  v8::Isolate* isolate = CcTest::isolate();
  Isolate* i_isolate = CcTest::i_isolate();
  Factory* factory = i_isolate->factory();

  CpuProfiler profiler(i_isolate, kDebugNaming, kEagerLogging);
  CodeMap* code_map = profiler.code_map_for_test();

  {
    v8::HandleScope scope(isolate);
    v8::Context::New(isolate)->Enter();
    const char* source =
        "function foo() {"
        "  var x = 42;"
        "  var y = 42;"
        "  var z = x + y;"
        "};"
        "foo()";
    Handle<String> foo_name = factory->InternalizeUtf8String("foo");

    // This compile will add the code to the compilation cache.
    {
      v8::HandleScope inner_scope(isolate);
      CompileRun(source);
    }

    // Check that the function is compiled.
    Handle<Object> func_value =
        Object::GetProperty(i_isolate, i_isolate->global_object(), foo_name)
            .ToHandleChecked();
    CHECK(func_value->IsJSFunction());
    Handle<JSFunction> function = Handle<JSFunction>::cast(func_value);
    CHECK(function->shared().is_compiled());

    i::BytecodeArray compiled_data =
        function->shared().GetBytecodeArray(i_isolate);
    i::Address bytecode_start = compiled_data.GetFirstBytecodeAddress();

    CHECK(code_map->FindEntry(bytecode_start));
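    // With kEagerLogging the profiler records code-creation events even while
    // no profile is active, so the bytecode's address is already in the
    // CodeMap at this point.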

    // The code will survive at least two GCs.
    CcTest::CollectAllGarbage();
    CcTest::CollectAllGarbage();
    CHECK(function->shared().is_compiled());

    // Simulate several GCs that use full marking.
    const int kAgingThreshold = 6;
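    // Each full GC ages the bytecode; once "foo" has gone unexecuted for
    // enough GC cycles, its bytecode is considered old and gets flushed.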
    for (int i = 0; i < kAgingThreshold; i++) {
      CcTest::CollectAllGarbage();
    }

    // foo should no longer be in the compilation cache.
    CHECK(!function->shared().is_compiled());
    CHECK(!function->is_compiled());

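    // Its entry should have been evicted from the profiler's CodeMap as well.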
    CHECK(!code_map->FindEntry(bytecode_start));
  }
}

// Ensure that unused code entries are removed after GC with eager logging.
TEST(ClearUnusedWithEagerLogging) {
  ManualGCScope manual_gc;
  TestSetup test_setup;
  i::Isolate* isolate = CcTest::i_isolate();
  i::HandleScope scope(isolate);

  CodeEntryStorage storage;
  CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
  ProfilerCodeObserver* code_observer =
      new ProfilerCodeObserver(isolate, storage);

  CpuProfiler profiler(isolate, kDebugNaming, kEagerLogging, profiles, nullptr,
                       nullptr, code_observer);
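  // With kEagerLogging the profiler starts observing code objects right away,
  // without waiting for a profile to be started.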

  CodeMap* code_map = profiler.code_map_for_test();
  size_t initial_size = code_map->size();
  size_t profiler_size = profiler.GetEstimatedMemoryUsage();

  {
    // Create and run a new script and function, generating 2 code objects.
    // Do this in a new context, so that some_func isn't retained by the
    // context's global object past this scope.
    i::HandleScope inner_scope(isolate);
    LocalContext env;
    CompileRun(
        "function some_func() {}"
        "some_func();");
    CHECK_GT(code_map->size(), initial_size);
    CHECK_GT(profiler.GetEstimatedMemoryUsage(), profiler_size);
    CHECK_GT(profiler.GetAllProfilersMemorySize(isolate), profiler_size);
  }

  // Clear the compilation cache so that it no longer retains references to
  // the script and function compiled above.
  isolate->compilation_cache()->Clear();

  CcTest::CollectAllGarbage();

  // Verify that the CodeMap is back to its initial size after the GC, i.e.
  // that the entries for the now-unreferenced code objects were removed.
  CHECK_EQ(code_map->size(), initial_size);
  CHECK_EQ(profiler.GetEstimatedMemoryUsage(), profiler_size);
  CHECK_EQ(profiler.GetAllProfilersMemorySize(isolate), profiler_size);
}

// Ensure that ProfilerCodeObserver doesn't compute an estimated size while
// profiling is active, since that could race with the profiler thread.
TEST(SkipEstimatedSizeWhenActiveProfiling) {
  ManualGCScope manual_gc;
  TestSetup test_setup;
  i::Isolate* isolate = CcTest::i_isolate();
  i::HandleScope scope(isolate);

  CodeEntryStorage storage;
  CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
  CpuProfiler profiler(isolate, kDebugNaming, kEagerLogging, profiles, nullptr,
                       nullptr, new ProfilerCodeObserver(isolate, storage));

  CHECK_GT(profiler.GetAllProfilersMemorySize(isolate), 0);
  CHECK_GT(profiler.GetEstimatedMemoryUsage(), 0);

  profiler.StartProfiling("");
  CHECK_EQ(profiler.GetAllProfilersMemorySize(isolate), 0);
  CHECK_EQ(profiler.GetEstimatedMemoryUsage(), 0);
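  // While profiling is active, the size queries return 0 rather than walking
  // data structures that the profiling thread may be mutating concurrently.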

  profiler.StopProfiling("");

  CHECK_GT(profiler.GetAllProfilersMemorySize(isolate), 0);
  CHECK_GT(profiler.GetEstimatedMemoryUsage(), 0);
}

}  // namespace test_cpu_profiler
}  // namespace internal
}  // namespace v8