v8/test/cctest/test-code-pages.cc
Leszek Swirski 787bec0964 [sparkplug] Remove BaselineData, use Code directly
Remove the BaselineData intermediate structure for baseline code, and
write the baseline Code object into the SharedFunctionInfo directly. We
still need a pointer to the BytecodeArray/InterpreterData, so re-use the
Code object's deoptimization data slot for this (baseline code doesn't
have deoptimization data).

A consequence of this is that the BytecodeArray pointer becomes
immutable while there is baseline code. This means that we cannot
install a debug BytecodeArray while baseline code is active (we have to
flush the baseline code first), and we cannot tier up code that has a
debug BytecodeArray to baseline.
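
For illustration, the layout change can be sketched as follows. This is a
minimal standalone sketch; the Fake* types are invented stand-ins for this
example, not V8's actual classes:

#include <cassert>

// Sketch only: invented types illustrating the layout change.
struct FakeBytecodeArray {};

struct FakeCode {
  // Baseline code never has deoptimization data, so the slot that would
  // hold it can carry the bytecode pointer instead.
  FakeBytecodeArray* deopt_data_slot = nullptr;
};

struct FakeSharedFunctionInfo {
  // Before: a BaselineData wrapper holding {code, bytecode}.
  // After:  the baseline Code object is stored directly.
  FakeCode* baseline_code = nullptr;

  FakeBytecodeArray* bytecode() const {
    // While baseline code is installed, the bytecode is read out of the
    // reused slot and is effectively immutable -- hence the flush-first
    // rule for installing a debug BytecodeArray.
    return baseline_code->deopt_data_slot;
  }
};

int main() {
  FakeBytecodeArray bytecode;
  FakeCode baseline{&bytecode};
  FakeSharedFunctionInfo sfi{&baseline};
  assert(sfi.bytecode() == &bytecode);
}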

Change-Id: I53b93ec4d4c64b833603d7992f246982fcd97596
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3118548
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Auto-Submit: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Camillo Bruni <cbruni@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/main@{#76675}
2021-09-06 11:52:21 +00:00


// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "include/v8-function.h"
#include "src/api/api-inl.h"
#include "src/codegen/code-desc.h"
#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/handles/handles-inl.h"
#include "src/heap/factory.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/spaces.h"
#include "src/libsampler/sampler.h"
#include "test/cctest/cctest.h"

namespace v8 {
namespace internal {
namespace test_code_pages {

// We have three levels of support which have different behaviors to test.
// 1 - Have code range. ARM64 and x64
// 2 - Have code pages. ARM32 only
// 3 - Nothing - This feature does not work on other platforms.
#if defined(V8_TARGET_ARCH_ARM)
static const bool kHaveCodePages = true;
#else
static const bool kHaveCodePages = false;
#endif  // defined(V8_TARGET_ARCH_ARM)

static const char* foo_source = R"(
  function foo%d(a, b) {
    let x = a * b;
    let y = x ^ b;
    let z = y / a;
    return x + y - z;
  };
  %%PrepareFunctionForOptimization(foo%d);
  foo%d(1, 2);
  foo%d(1, 2);
  %%OptimizeFunctionOnNextCall(foo%d);
  foo%d(1, 2);
)";
std::string getFooCode(int n) {
  constexpr size_t kMaxSize = 512;
  char foo_replaced[kMaxSize];
  CHECK_LE(n, 999999);
  snprintf(foo_replaced, kMaxSize, foo_source, n, n, n, n, n, n);
  return std::string(foo_replaced);
}
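
// Helpers for querying the output of GetCodePages()/CopyCodePages(). The
// "exact page" variants match a MemoryRange by its start address (and
// optionally its size); the "contains" variants accept any page that fully
// covers the given span.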
namespace {

bool PagesHasExactPage(std::vector<MemoryRange>* pages, Address search_page) {
  void* addr = reinterpret_cast<void*>(search_page);
  auto it =
      std::find_if(pages->begin(), pages->end(),
                   [addr](const MemoryRange& r) { return r.start == addr; });
  return it != pages->end();
}

bool PagesHasExactPage(std::vector<MemoryRange>* pages, Address search_page,
                       size_t size) {
  void* addr = reinterpret_cast<void*>(search_page);
  auto it = std::find_if(pages->begin(), pages->end(),
                         [addr, size](const MemoryRange& r) {
                           return r.start == addr && r.length_in_bytes == size;
                         });
  return it != pages->end();
}

bool PagesContainsRange(std::vector<MemoryRange>* pages, Address search_address,
                        size_t size) {
  byte* addr = reinterpret_cast<byte*>(search_address);
  auto it =
      std::find_if(pages->begin(), pages->end(), [=](const MemoryRange& r) {
        const byte* page_start = reinterpret_cast<const byte*>(r.start);
        const byte* page_end = page_start + r.length_in_bytes;
        return addr >= page_start && (addr + size) <= page_end;
      });
  return it != pages->end();
}

bool PagesContainsAddress(std::vector<MemoryRange>* pages,
                          Address search_address) {
  return PagesContainsRange(pages, search_address, 0);
}

}  // namespace

TEST(CodeRangeCorrectContents) {
  LocalContext env;
  v8::Isolate* isolate = env->GetIsolate();
  Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
  if (!i_isolate->RequiresCodeRange()) return;

  std::vector<MemoryRange>* pages = i_isolate->GetCodePages();

  const base::AddressRegion& code_region = i_isolate->heap()->code_region();
  CHECK(!code_region.is_empty());
  // We should only have the code range and the embedded code range.
  CHECK_EQ(2, pages->size());
  CHECK(PagesHasExactPage(pages, code_region.begin(), code_region.size()));
  CHECK(PagesHasExactPage(
      pages, reinterpret_cast<Address>(i_isolate->CurrentEmbeddedBlobCode()),
      i_isolate->CurrentEmbeddedBlobCodeSize()));
  if (i_isolate->is_short_builtin_calls_enabled()) {
    // In this case embedded blob code must be included via code_region.
    CHECK(PagesContainsRange(
        pages, reinterpret_cast<Address>(i_isolate->embedded_blob_code()),
        i_isolate->embedded_blob_code_size()));
  } else {
    CHECK(PagesHasExactPage(
        pages, reinterpret_cast<Address>(i_isolate->embedded_blob_code()),
        i_isolate->embedded_blob_code_size()));
  }
}

TEST(CodePagesCorrectContents) {
  if (!kHaveCodePages) return;

  LocalContext env;
  v8::Isolate* isolate = env->GetIsolate();
  Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);

  std::vector<MemoryRange>* pages = i_isolate->GetCodePages();
  // There might be other pages already.
  CHECK_GE(pages->size(), 1);

  const base::AddressRegion& code_region = i_isolate->heap()->code_region();
  CHECK(code_region.is_empty());

  // We should have the embedded code range even when there is no regular code
  // range.
  CHECK(PagesHasExactPage(
      pages, reinterpret_cast<Address>(i_isolate->embedded_blob_code()),
      i_isolate->embedded_blob_code_size()));
}

TEST(OptimizedCodeWithCodeRange) {
  FLAG_allow_natives_syntax = true;
  LocalContext env;
  v8::Isolate* isolate = env->GetIsolate();
  Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
  if (!i_isolate->RequiresCodeRange()) return;

  HandleScope scope(i_isolate);

  std::string foo_str = getFooCode(1);
  CompileRun(foo_str.c_str());
  v8::Local<v8::Function> local_foo = v8::Local<v8::Function>::Cast(
      env.local()
          ->Global()
          ->Get(env.local(), v8_str("foo1"))
          .ToLocalChecked());
  Handle<JSFunction> foo =
      Handle<JSFunction>::cast(v8::Utils::OpenHandle(*local_foo));

  AbstractCode abstract_code = foo->abstract_code(i_isolate);
  // We don't produce optimized code when run with --no-opt.
  if (!abstract_code.IsCode() && FLAG_opt == false) return;
  CHECK(abstract_code.IsCode());

  Code foo_code = abstract_code.GetCode();
  CHECK(i_isolate->heap()->InSpace(foo_code, CODE_SPACE));

  std::vector<MemoryRange>* pages = i_isolate->GetCodePages();
  CHECK(PagesContainsAddress(pages, foo_code.address()));
}

TEST(OptimizedCodeWithCodePages) {
  if (!kHaveCodePages) return;
  // We don't want incremental marking to start which could cause the code to
  // not be collected on the CollectGarbage() call.
  ManualGCScope manual_gc_scope;
  FLAG_allow_natives_syntax = true;
  LocalContext env;
  v8::Isolate* isolate = env->GetIsolate();
  Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);

  const void* created_page = nullptr;
  int num_foos_created = 0;

  {
    HandleScope scope(i_isolate);

    size_t num_code_pages = 0;
    size_t initial_num_code_pages = 0;

    // Keep generating new code until a new code page is added to the list.
    for (int n = 0; n < 999999; n++) {
      // Compile and optimize the code and get a reference to it.
      std::string foo_str = getFooCode(n);
      char foo_name[10];
      snprintf(foo_name, sizeof(foo_name), "foo%d", n);
      CompileRun(foo_str.c_str());
      v8::Local<v8::Function> local_foo =
          v8::Local<v8::Function>::Cast(env.local()
                                            ->Global()
                                            ->Get(env.local(), v8_str(foo_name))
                                            .ToLocalChecked());
      Handle<JSFunction> foo =
          Handle<JSFunction>::cast(v8::Utils::OpenHandle(*local_foo));

      // If there is baseline code, check that it's only due to
      // --always-sparkplug (if this check fails, we'll have to re-think this
      // test).
      if (foo->shared().HasBaselineCode()) {
        CHECK(FLAG_always_sparkplug);
        return;
      }
      AbstractCode abstract_code = foo->abstract_code(i_isolate);
      // We don't produce optimized code when run with --no-opt.
      if (!abstract_code.IsCode() && FLAG_opt == false) return;
      CHECK(abstract_code.IsCode());
      Code foo_code = abstract_code.GetCode();

      CHECK(i_isolate->heap()->InSpace(foo_code, CODE_SPACE));

      // Check that the generated code ended up in one of the code pages
      // returned by GetCodePages().
      byte* foo_code_ptr = reinterpret_cast<byte*>(foo_code.address());
      std::vector<MemoryRange>* pages = i_isolate->GetCodePages();

      // Wait until after we have created the first function to take the
      // initial number of pages so that this test isn't brittle to irrelevant
      // implementation details.
      if (n == 0) {
        initial_num_code_pages = pages->size();
      }
      num_code_pages = pages->size();

      // Check that the code object was allocated on one of the pages returned
      // by GetCodePages().
      auto it = std::find_if(
          pages->begin(), pages->end(), [foo_code_ptr](const MemoryRange& r) {
            const byte* page_start = reinterpret_cast<const byte*>(r.start);
            const byte* page_end = page_start + r.length_in_bytes;
            return foo_code_ptr >= page_start && foo_code_ptr < page_end;
          });
      CHECK_NE(it, pages->end());

      // Store the page that was created just for our functions - we expect it
      // to be removed later.
      if (num_code_pages > initial_num_code_pages) {
        created_page = it->start;
        num_foos_created = n + 1;
        break;
      }
    }
    CHECK_NOT_NULL(created_page);
  }

  // Now delete all our foos and force a GC and check that the page is removed
  // from the list.
  {
    HandleScope scope(i_isolate);
    for (int n = 0; n < num_foos_created; n++) {
      char foo_name[10];
      snprintf(foo_name, sizeof(foo_name), "foo%d", n);
      env.local()
          ->Global()
          ->Set(env.local(), v8_str(foo_name), Undefined(isolate))
          .Check();
    }
  }
  CcTest::CollectGarbage(CODE_SPACE);

  std::vector<MemoryRange>* pages = i_isolate->GetCodePages();
  auto it = std::find_if(
      pages->begin(), pages->end(),
      [created_page](const MemoryRange& r) { return r.start == created_page; });
  CHECK_EQ(it, pages->end());
}

TEST(LargeCodeObject) {
  // We don't want incremental marking to start which could cause the code to
  // not be collected on the CollectGarbage() call.
  ManualGCScope manual_gc_scope;
  LocalContext env;
  v8::Isolate* isolate = env->GetIsolate();
  Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
  if (!i_isolate->RequiresCodeRange() && !kHaveCodePages) return;

  // Create a big function that ends up in CODE_LO_SPACE.
  const int instruction_size = Page::kPageSize + 1;
  CHECK_GT(instruction_size, MemoryChunkLayout::MaxRegularCodeObjectSize());
  std::unique_ptr<byte[]> instructions(new byte[instruction_size]);

  CodeDesc desc;
  desc.buffer = instructions.get();
  desc.buffer_size = instruction_size;
  desc.instr_size = instruction_size;
  desc.reloc_size = 0;
  desc.constant_pool_size = 0;
  desc.unwinding_info = nullptr;
  desc.unwinding_info_size = 0;
  desc.origin = nullptr;
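  // Because instr_size exceeds MaxRegularCodeObjectSize(), CodeBuilder has to
  // place the resulting Code object in CODE_LO_SPACE.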
  Address stale_code_address;

  {
    HandleScope scope(i_isolate);
    Handle<Code> foo_code =
        Factory::CodeBuilder(i_isolate, desc, CodeKind::WASM_FUNCTION).Build();
    CHECK(i_isolate->heap()->InSpace(*foo_code, CODE_LO_SPACE));

    std::vector<MemoryRange>* pages = i_isolate->GetCodePages();

    if (i_isolate->RequiresCodeRange()) {
      CHECK(PagesContainsAddress(pages, foo_code->address()));
    } else {
      CHECK(PagesHasExactPage(pages, foo_code->address()));
    }

    stale_code_address = foo_code->address();
  }

  // Delete the large code object.
  CcTest::CollectGarbage(CODE_LO_SPACE);
  CHECK(!i_isolate->heap()->InSpaceSlow(stale_code_address, CODE_LO_SPACE));

  // Check that it was removed from CodePages.
  std::vector<MemoryRange>* pages = i_isolate->GetCodePages();
  CHECK(!PagesHasExactPage(pages, stale_code_address));
}

static constexpr size_t kBufSize = v8::Isolate::kMinCodePagesBufferSize;
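
// SignalSender delivers the sampler signal (or suspends the target thread,
// depending on the platform) and calls CopyCodePages() from inside the
// interrupted context. Exercising that path is the point of these tests:
// SampleStack() touches only the pre-published buffer and a semaphore, and
// CopyCodePages() is intended to be usable from a signal handler.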
class SignalSender : public sampler::Sampler {
 public:
  explicit SignalSender(v8::Isolate* isolate) : sampler::Sampler(isolate) {}

  // Called during the signal/thread suspension.
  void SampleStack(const v8::RegisterState& regs) override {
    MemoryRange* code_pages_copy = code_pages_copy_.load();
    CHECK_NOT_NULL(code_pages_copy);
    size_t num_pages = isolate_->CopyCodePages(kBufSize, code_pages_copy);
    CHECK_LE(num_pages, kBufSize);
    sample_semaphore_.Signal();
  }

  // Called on the sampling thread to trigger a sample. Blocks until the
  // sample is finished.
  void SampleIntoVector(MemoryRange output_buffer[]) {
    code_pages_copy_.store(output_buffer);
    DoSample();
    sample_semaphore_.Wait();
    code_pages_copy_.store(nullptr);
  }

 private:
  base::Semaphore sample_semaphore_{0};
  std::atomic<MemoryRange*> code_pages_copy_{nullptr};
};

class SamplingThread : public base::Thread {
 public:
  explicit SamplingThread(SignalSender* signal_sender)
      : base::Thread(base::Thread::Options("SamplingThread")),
        signal_sender_(signal_sender) {}

  // Blocks until a sample is taken.
  void TriggerSample() { signal_sender_->SampleIntoVector(code_pages_copy_); }

  void Run() override {
    while (running_.load()) {
      TriggerSample();
    }
  }

  // Called from the main thread. Blocks until a sample is taken. Not
  // thread-safe so do not call while this thread is running.
  static std::vector<MemoryRange> DoSynchronousSample(v8::Isolate* isolate) {
    MemoryRange code_pages_copy[kBufSize];
    size_t num_pages = isolate->CopyCodePages(kBufSize, code_pages_copy);
    DCHECK_LE(num_pages, kBufSize);
    return std::vector<MemoryRange>{code_pages_copy,
                                    &code_pages_copy[num_pages]};
  }

  void Stop() { running_.store(false); }

 private:
  std::atomic_bool running_{true};
  SignalSender* signal_sender_;
  MemoryRange code_pages_copy_[kBufSize];
};
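
// The tests below use SignalSender and SamplingThread to call CopyCodePages()
// repeatedly from the signal/suspension context while the main thread
// allocates and collects large code objects, so that threading issues in the
// code-pages bookkeeping would show up as failures here.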
TEST(LargeCodeObjectWithSignalHandler) {
  // We don't want incremental marking to start which could cause the code to
  // not be collected on the CollectGarbage() call.
  ManualGCScope manual_gc_scope;
  LocalContext env;
  v8::Isolate* isolate = env->GetIsolate();
  Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
  if (!i_isolate->RequiresCodeRange() && !kHaveCodePages) return;

  // Create a big function that ends up in CODE_LO_SPACE.
  const int instruction_size = Page::kPageSize + 1;
  CHECK_GT(instruction_size, MemoryChunkLayout::MaxRegularCodeObjectSize());
  std::unique_ptr<byte[]> instructions(new byte[instruction_size]);

  CodeDesc desc;
  desc.buffer = instructions.get();
  desc.buffer_size = instruction_size;
  desc.instr_size = instruction_size;
  desc.reloc_size = 0;
  desc.constant_pool_size = 0;
  desc.unwinding_info = nullptr;
  desc.unwinding_info_size = 0;
  desc.origin = nullptr;

  Address stale_code_address;

  SignalSender signal_sender(isolate);
  signal_sender.Start();
  // Take an initial sample.
  std::vector<MemoryRange> initial_pages =
      SamplingThread::DoSynchronousSample(isolate);
  SamplingThread sampling_thread(&signal_sender);
  sampling_thread.StartSynchronously();

  {
    HandleScope scope(i_isolate);
    Handle<Code> foo_code =
        Factory::CodeBuilder(i_isolate, desc, CodeKind::WASM_FUNCTION).Build();
    CHECK(i_isolate->heap()->InSpace(*foo_code, CODE_LO_SPACE));

    // Do a synchronous sample to ensure that we capture the state with the
    // extra code page.
    sampling_thread.Stop();
    sampling_thread.Join();

    // Check that the page was added.
    std::vector<MemoryRange> pages =
        SamplingThread::DoSynchronousSample(isolate);
    if (i_isolate->RequiresCodeRange()) {
      CHECK(PagesContainsAddress(&pages, foo_code->address()));
    } else {
      CHECK(PagesHasExactPage(&pages, foo_code->address()));
    }

    stale_code_address = foo_code->address();
  }

  // Start async sampling again to detect threading issues.
  sampling_thread.StartSynchronously();

  // Delete the large code object.
  CcTest::CollectGarbage(CODE_LO_SPACE);
  CHECK(!i_isolate->heap()->InSpaceSlow(stale_code_address, CODE_LO_SPACE));

  sampling_thread.Stop();
  sampling_thread.Join();

  std::vector<MemoryRange> pages =
      SamplingThread::DoSynchronousSample(isolate);
  CHECK(!PagesHasExactPage(&pages, stale_code_address));

  signal_sender.Stop();
}

TEST(Sorted) {
  // We don't want incremental marking to start which could cause the code to
  // not be collected on the CollectGarbage() call.
  ManualGCScope manual_gc_scope;
  LocalContext env;
  v8::Isolate* isolate = env->GetIsolate();
  Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
  if (!i_isolate->RequiresCodeRange() && !kHaveCodePages) return;

  // Create a big function that ends up in CODE_LO_SPACE.
  const int instruction_size = Page::kPageSize + 1;
  CHECK_GT(instruction_size, MemoryChunkLayout::MaxRegularCodeObjectSize());
  std::unique_ptr<byte[]> instructions(new byte[instruction_size]);

  CodeDesc desc;
  desc.buffer = instructions.get();
  desc.buffer_size = instruction_size;
  desc.instr_size = instruction_size;
  desc.reloc_size = 0;
  desc.constant_pool_size = 0;
  desc.unwinding_info = nullptr;
  desc.unwinding_info_size = 0;
  desc.origin = nullptr;

  // Take an initial sample.
  std::vector<MemoryRange> initial_pages =
      SamplingThread::DoSynchronousSample(isolate);
  size_t initial_num_pages = initial_pages.size();

  auto compare = [](const MemoryRange& a, const MemoryRange& b) {
    return a.start < b.start;
  };
  {
    HandleScope outer_scope(i_isolate);
    Handle<Code> code1, code3;
    Address code2_address;

    code1 =
        Factory::CodeBuilder(i_isolate, desc, CodeKind::WASM_FUNCTION).Build();
    CHECK(i_isolate->heap()->InSpace(*code1, CODE_LO_SPACE));

    {
      HandleScope scope(i_isolate);

      // Create three large code objects; we'll delete the middle one and
      // check that everything stays sorted.
      Handle<Code> code2 =
          Factory::CodeBuilder(i_isolate, desc, CodeKind::WASM_FUNCTION)
              .Build();
      CHECK(i_isolate->heap()->InSpace(*code2, CODE_LO_SPACE));
      code3 = Factory::CodeBuilder(i_isolate, desc, CodeKind::WASM_FUNCTION)
                  .Build();
      CHECK(i_isolate->heap()->InSpace(*code3, CODE_LO_SPACE));
      code2_address = code2->address();

      CHECK(i_isolate->heap()->InSpaceSlow(code1->address(), CODE_LO_SPACE));
      CHECK(i_isolate->heap()->InSpaceSlow(code2->address(), CODE_LO_SPACE));
      CHECK(i_isolate->heap()->InSpaceSlow(code3->address(), CODE_LO_SPACE));

      // Check that the pages were added.
      std::vector<MemoryRange> pages =
          SamplingThread::DoSynchronousSample(isolate);
      if (i_isolate->RequiresCodeRange()) {
        CHECK_EQ(pages.size(), initial_num_pages);
      } else {
        CHECK_EQ(pages.size(), initial_num_pages + 3);
      }

      CHECK(std::is_sorted(pages.begin(), pages.end(), compare));

      code3 = scope.CloseAndEscape(code3);
    }
    CHECK(i_isolate->heap()->InSpaceSlow(code1->address(), CODE_LO_SPACE));
    CHECK(i_isolate->heap()->InSpaceSlow(code2_address, CODE_LO_SPACE));
    CHECK(i_isolate->heap()->InSpaceSlow(code3->address(), CODE_LO_SPACE));

    // Delete code2.
    CcTest::CollectGarbage(CODE_LO_SPACE);

    CHECK(i_isolate->heap()->InSpaceSlow(code1->address(), CODE_LO_SPACE));
    CHECK(!i_isolate->heap()->InSpaceSlow(code2_address, CODE_LO_SPACE));
    CHECK(i_isolate->heap()->InSpaceSlow(code3->address(), CODE_LO_SPACE));

    std::vector<MemoryRange> pages =
        SamplingThread::DoSynchronousSample(isolate);
    if (i_isolate->RequiresCodeRange()) {
      CHECK_EQ(pages.size(), initial_num_pages);
    } else {
      CHECK_EQ(pages.size(), initial_num_pages + 2);
    }
    CHECK(std::is_sorted(pages.begin(), pages.end(), compare));
  }
}

}  // namespace test_code_pages
}  // namespace internal
}  // namespace v8