[shared-struct, api] Support shared isolates in API

Currently, the ability to create shared isolates is partially exposed to
the API. Instead of fully exposing it, this CL makes shared isolate and
shared heap handling transparent to the embedder.

If a flag that requires the shared heap is true (currently
--shared-string-table and --harmony-struct), the first isolate created
in the process will create and attach to a process-wide shared isolate.
Subsequent isolates will attach to that shared isolate. When that first
isolate is deleted, the shared isolate is also deleted.
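
An illustrative embedder-side sketch of the resulting behavior (assumed
usage, not part of this change; the isolate names are hypothetical):

  #include <memory>
  #include "include/libplatform/libplatform.h"
  #include "include/v8.h"

  int main() {
    // Setting a flag that requires the shared heap is all the embedder
    // does; no CreateParams field is needed anymore.
    v8::V8::SetFlagsFromString("--shared-string-table");
    std::unique_ptr<v8::Platform> platform =
        v8::platform::NewDefaultPlatform();
    v8::V8::InitializePlatform(platform.get());
    v8::V8::Initialize();

    v8::Isolate::CreateParams params;
    params.array_buffer_allocator =
        v8::ArrayBuffer::Allocator::NewDefaultAllocator();
    // The first isolate in the process creates and owns the shared isolate.
    v8::Isolate* isolate1 = v8::Isolate::New(params);
    // Subsequent isolates transparently attach to the same shared isolate;
    // experimental_attach_to_shared_isolate is no longer set.
    v8::Isolate* isolate2 = v8::Isolate::New(params);

    // Dispose clients before the owning first isolate; disposing isolate1
    // also deletes the process-wide shared isolate.
    isolate2->Dispose();
    isolate1->Dispose();
    v8::V8::Dispose();
    v8::V8::DisposePlatform();
    return 0;
  }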

Bug: v8:12547
Change-Id: Idaf2947bc354066c44f2d10243e10162b1b7e4d6
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3848825
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Owners-Override: Shu-yu Guo <syg@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Commit-Queue: Shu-yu Guo <syg@chromium.org>
Reviewed-by: Camillo Bruni <cbruni@chromium.org>
Cr-Commit-Position: refs/heads/main@{#82756}
Shu-yu Guo, 2022-08-26 15:07:04 -07:00, committed by V8 LUCI CQ
commit 31e17fe62d, parent 8ff03afee0
21 changed files with 616 additions and 665 deletions

View File

@ -294,12 +294,6 @@ class V8_EXPORT Isolate {
*/
FatalErrorCallback fatal_error_callback = nullptr;
OOMErrorCallback oom_error_callback = nullptr;
/**
* The following parameter is experimental and may change significantly.
* This is currently for internal testing.
*/
Isolate* experimental_attach_to_shared_isolate = nullptr;
};
/**

View File

@ -8699,11 +8699,6 @@ void Isolate::Initialize(Isolate* v8_isolate,
i_isolate->stack_guard()->SetStackLimit(limit);
}
if (params.experimental_attach_to_shared_isolate != nullptr) {
i_isolate->set_shared_isolate(reinterpret_cast<i::Isolate*>(
params.experimental_attach_to_shared_isolate));
}
// TODO(v8:2487): Once we got rid of Isolate::Current(), we can remove this.
Isolate::Scope isolate_scope(v8_isolate);
if (i_isolate->snapshot_blob() == nullptr) {

View File

@ -116,6 +116,14 @@ namespace internal {
#define COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL false
#endif
#if defined(V8_SHARED_RO_HEAP) && \
(!defined(V8_COMPRESS_POINTERS) || \
defined(V8_COMPRESS_POINTERS_IN_SHARED_CAGE))
#define V8_CAN_CREATE_SHARED_HEAP_BOOL true
#else
#define V8_CAN_CREATE_SHARED_HEAP_BOOL false
#endif
#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
#define V8_SANDBOXED_EXTERNAL_POINTERS_BOOL true
#else

View File

@ -484,7 +484,6 @@ std::atomic<int> Shell::unhandled_promise_rejections_{0};
Global<Context> Shell::evaluation_context_;
ArrayBuffer::Allocator* Shell::array_buffer_allocator;
Isolate* Shell::shared_isolate = nullptr;
bool check_d8_flag_contradictions = true;
ShellOptions Shell::options;
base::OnceType Shell::quit_once_ = V8_ONCE_INIT;
@ -3699,9 +3698,6 @@ void Shell::WriteLcovData(v8::Isolate* isolate, const char* file) {
void Shell::OnExit(v8::Isolate* isolate, bool dispose) {
platform::NotifyIsolateShutdown(g_default_platform, isolate);
isolate->Dispose();
if (shared_isolate) {
i::Isolate::Delete(reinterpret_cast<i::Isolate*>(shared_isolate));
}
// Simulate errors before disposing V8, as that resets flags (via
// FlagList::ResetAllFlags()), but error simulation reads the random seed.
@ -4274,7 +4270,6 @@ SourceGroup::IsolateThread::IsolateThread(SourceGroup* group)
void SourceGroup::ExecuteInThread() {
Isolate::CreateParams create_params;
create_params.array_buffer_allocator = Shell::array_buffer_allocator;
create_params.experimental_attach_to_shared_isolate = Shell::shared_isolate;
Isolate* isolate = Isolate::New(create_params);
Shell::SetWaitUntilDone(isolate, false);
D8Console console(isolate);
@ -4513,7 +4508,6 @@ void Worker::ProcessMessages() {
void Worker::ExecuteInThread() {
Isolate::CreateParams create_params;
create_params.array_buffer_allocator = Shell::array_buffer_allocator;
create_params.experimental_attach_to_shared_isolate = Shell::shared_isolate;
isolate_ = Isolate::New(create_params);
task_runner_ = g_default_platform->GetForegroundTaskRunner(isolate_);
@ -5272,10 +5266,10 @@ class Serializer : public ValueSerializer::Delegate {
// isolate. No code ever runs in the shared Isolate, so locking it does not
// contend with long-running tasks.
{
DCHECK_EQ(reinterpret_cast<i::Isolate*>(isolate)->shared_isolate(),
reinterpret_cast<i::Isolate*>(Shell::shared_isolate));
v8::Locker locker(Shell::shared_isolate);
data_->shared_values_.emplace_back(Shell::shared_isolate, shared_value);
Isolate* shared_isolate = reinterpret_cast<Isolate*>(
reinterpret_cast<i::Isolate*>(isolate)->shared_isolate());
v8::Locker locker(shared_isolate);
data_->shared_values_.emplace_back(shared_isolate, shared_value);
}
return Just<uint32_t>(static_cast<uint32_t>(index));
}
@ -5347,9 +5341,10 @@ class Serializer : public ValueSerializer::Delegate {
size_t current_memory_usage_;
};
void SerializationData::ClearSharedValuesUnderLockIfNeeded() {
void SerializationData::ClearSharedValuesUnderLockIfNeeded(
Isolate* shared_isolate) {
if (shared_values_.empty()) return;
v8::Locker locker(Shell::shared_isolate);
v8::Locker locker(shared_isolate);
shared_values_.clear();
}
@ -5363,9 +5358,9 @@ class Deserializer : public ValueDeserializer::Delegate {
}
~Deserializer() {
DCHECK_EQ(reinterpret_cast<i::Isolate*>(isolate_)->shared_isolate(),
reinterpret_cast<i::Isolate*>(Shell::shared_isolate));
data_->ClearSharedValuesUnderLockIfNeeded();
Isolate* shared_isolate = reinterpret_cast<Isolate*>(
reinterpret_cast<i::Isolate*>(isolate_)->shared_isolate());
data_->ClearSharedValuesUnderLockIfNeeded(shared_isolate);
}
Deserializer(const Deserializer&) = delete;
@ -5503,10 +5498,6 @@ void Shell::WaitForRunningWorkers(const i::ParkedScope& parked) {
namespace {
bool HasFlagThatRequiresSharedIsolate() {
return i::FLAG_shared_string_table || i::FLAG_harmony_struct;
}
#ifdef V8_OS_POSIX
void d8_sigterm_handler(int signal, siginfo_t* info, void* context) {
// Dump stacktraces when terminating d8 instances with SIGTERM.
@ -5698,17 +5689,6 @@ int Shell::Main(int argc, char* argv[]) {
}
#endif // V8_ENABLE_WEBASSEMBLY
if (HasFlagThatRequiresSharedIsolate()) {
Isolate::CreateParams shared_create_params;
shared_create_params.constraints.ConfigureDefaults(
base::SysInfo::AmountOfPhysicalMemory(),
base::SysInfo::AmountOfVirtualMemory());
shared_create_params.array_buffer_allocator = Shell::array_buffer_allocator;
shared_isolate =
reinterpret_cast<Isolate*>(i::Isolate::NewShared(shared_create_params));
create_params.experimental_attach_to_shared_isolate = shared_isolate;
}
Isolate* isolate = Isolate::New(create_params);
{
@ -5776,8 +5756,6 @@ int Shell::Main(int argc, char* argv[]) {
// First run to produce the cache
Isolate::CreateParams create_params2;
create_params2.array_buffer_allocator = Shell::array_buffer_allocator;
create_params2.experimental_attach_to_shared_isolate =
Shell::shared_isolate;
// Use a different hash seed.
i::FLAG_hash_seed = i::FLAG_hash_seed ^ 1337;
Isolate* isolate2 = Isolate::New(create_params2);

View File

@ -153,7 +153,7 @@ class SerializationData {
return shared_values_;
}
void ClearSharedValuesUnderLockIfNeeded();
void ClearSharedValuesUnderLockIfNeeded(Isolate* shared_isolate);
private:
struct DataDeleter {
@ -689,7 +689,6 @@ class Shell : public i::AllStatic {
static const char* kPrompt;
static ShellOptions options;
static ArrayBuffer::Allocator* array_buffer_allocator;
static Isolate* shared_isolate;
static void SetWaitUntilDone(Isolate* isolate, bool value);
static void NotifyStartStreamingTask(Isolate* isolate);

View File

@ -499,6 +499,10 @@ size_t Isolate::HashIsolateForEmbeddedBlob() {
return hash;
}
base::LazyMutex Isolate::process_wide_shared_isolate_mutex_ =
LAZY_MUTEX_INITIALIZER;
Isolate* Isolate::process_wide_shared_isolate_{nullptr};
base::Thread::LocalStorageKey Isolate::isolate_key_;
base::Thread::LocalStorageKey Isolate::per_isolate_thread_data_key_;
std::atomic<bool> Isolate::isolate_key_created_{false};
@ -3244,14 +3248,75 @@ class TracingAccountingAllocator : public AccountingAllocator {
std::atomic<size_t> Isolate::non_disposed_isolates_;
#endif // DEBUG
// static
Isolate* Isolate::New() { return Isolate::Allocate(false); }
namespace {
bool HasFlagThatRequiresSharedHeap() {
return i::FLAG_shared_string_table || i::FLAG_harmony_struct;
}
} // namespace
// static
Isolate* Isolate::NewShared(const v8::Isolate::CreateParams& params) {
DCHECK(ReadOnlyHeap::IsReadOnlySpaceShared());
Isolate* isolate = Isolate::Allocate(true);
v8::Isolate::Initialize(reinterpret_cast<v8::Isolate*>(isolate), params);
Isolate* Isolate::GetProcessWideSharedIsolate(bool* created_shared_isolate) {
if (!V8_CAN_CREATE_SHARED_HEAP_BOOL) {
DCHECK(HasFlagThatRequiresSharedHeap());
FATAL(
"Build configuration does not support creating shared heap. The RO "
"heap must be shared, and pointer compression must either be off or "
"use a shared cage. V8 is compiled with RO heap %s and pointers %s.",
V8_SHARED_RO_HEAP_BOOL ? "SHARED" : "NOT SHARED",
!COMPRESS_POINTERS_BOOL ? "NOT COMPRESSED"
: (COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL
? "COMPRESSED IN SHARED CAGE"
: "COMPRESSED IN PER-ISOLATE CAGE"));
}
base::MutexGuard guard(process_wide_shared_isolate_mutex_.Pointer());
if (process_wide_shared_isolate_ == nullptr) {
process_wide_shared_isolate_ = Allocate(true);
// TODO(v8:12547): Make shared heap constraints programmatically
// configurable and tailored for the shared heap.
v8::Isolate::CreateParams params;
size_t initial_shared_heap_size =
static_cast<size_t>(FLAG_initial_shared_heap_size) * MB;
size_t max_shared_heap_size =
static_cast<size_t>(FLAG_max_shared_heap_size) * MB;
if (initial_shared_heap_size != 0 && max_shared_heap_size != 0) {
params.constraints.ConfigureDefaultsFromHeapSize(initial_shared_heap_size,
max_shared_heap_size);
} else {
params.constraints.ConfigureDefaults(
base::SysInfo::AmountOfPhysicalMemory(),
base::SysInfo::AmountOfVirtualMemory());
}
params.array_buffer_allocator =
v8::ArrayBuffer::Allocator::NewDefaultAllocator();
v8::Isolate::Initialize(
reinterpret_cast<v8::Isolate*>(process_wide_shared_isolate_), params);
*created_shared_isolate = true;
} else {
*created_shared_isolate = false;
}
return process_wide_shared_isolate_;
}
// static
void Isolate::DeleteProcessWideSharedIsolate() {
base::MutexGuard guard(process_wide_shared_isolate_mutex_.Pointer());
DCHECK_NOT_NULL(process_wide_shared_isolate_);
delete process_wide_shared_isolate_->array_buffer_allocator();
Delete(process_wide_shared_isolate_);
process_wide_shared_isolate_ = nullptr;
}
// static
Isolate* Isolate::New() {
Isolate* isolate = Allocate(false);
if (HasFlagThatRequiresSharedHeap()) {
// The Isolate that creates the shared Isolate, which is usually the main
// thread Isolate, owns the lifetime of the shared heap.
bool created;
isolate->set_shared_isolate(GetProcessWideSharedIsolate(&created));
isolate->owns_shared_isolate_ = created;
}
return isolate;
}
@ -3295,6 +3360,9 @@ void Isolate::Delete(Isolate* isolate) {
SetIsolateThreadLocals(isolate, nullptr);
isolate->set_thread_id(ThreadId::Current());
bool owns_shared_isolate = isolate->owns_shared_isolate_;
Isolate* maybe_shared_isolate = isolate->shared_isolate_;
isolate->Deinit();
#ifdef DEBUG
@ -3302,7 +3370,7 @@ void Isolate::Delete(Isolate* isolate) {
#endif // DEBUG
// Take ownership of the IsolateAllocator to ensure the Isolate memory will
// be available during Isolate descructor call.
// be available during Isolate destructor call.
std::unique_ptr<IsolateAllocator> isolate_allocator =
std::move(isolate->isolate_allocator_);
isolate->~Isolate();
@ -3311,6 +3379,14 @@ void Isolate::Delete(Isolate* isolate) {
// Restore the previous current isolate.
SetIsolateThreadLocals(saved_isolate, saved_data);
// The first isolate, which is usually the main thread isolate, owns the
// lifetime of the shared isolate.
if (owns_shared_isolate) {
DCHECK_NOT_NULL(maybe_shared_isolate);
USE(maybe_shared_isolate);
DeleteProcessWideSharedIsolate();
}
}
void Isolate::SetUpFromReadOnlyArtifacts(
@ -3362,19 +3438,7 @@ Isolate::Isolate(std::unique_ptr<i::IsolateAllocator> isolate_allocator,
handle_scope_data_.Initialize();
// A shared Isolate is used to support JavaScript shared memory features
// across Isolates. These features require all of the following to hold in the
// build configuration:
//
// 1. The RO space is shared, so e.g. immortal RO maps can be shared across
// Isolates.
// 2. HeapObjects are shareable across Isolates, which requires either
// pointers to be uncompressed (!COMPRESS_POINTER_BOOL), or that there is a
// single virtual memory reservation shared by all Isolates in the process
// for compressing pointers (COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL).
CHECK_IMPLIES(is_shared_, V8_SHARED_RO_HEAP_BOOL &&
(!COMPRESS_POINTERS_BOOL ||
COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL));
CHECK_IMPLIES(is_shared_, V8_CAN_CREATE_SHARED_HEAP_BOOL);
#define ISOLATE_INIT_EXECUTE(type, name, initial_value) \
name##_ = (initial_value);
@ -3498,7 +3562,7 @@ void Isolate::Deinit() {
}
// All client isolates should already be detached.
if (is_shared()) global_safepoint()->AssertNoClients();
if (is_shared()) global_safepoint()->AssertNoClientsOnTearDown();
if (FLAG_print_deopt_stress) {
PrintF(stdout, "=== Stress deopt counter: %u\n", stress_deopt_count_);

View File

@ -628,9 +628,6 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// new operator.
static Isolate* New();
// Creates a new shared Isolate object.
static Isolate* NewShared(const v8::Isolate::CreateParams& params);
// Deletes Isolate object. Must be used instead of delete operator.
// Destroys the non-default isolates.
// Sets default isolate into "has_been_disposed" state rather then destroying,
@ -2023,6 +2020,8 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
bool is_shared);
~Isolate();
static Isolate* Allocate(bool is_shared);
bool Init(SnapshotData* startup_snapshot_data,
SnapshotData* read_only_snapshot_data,
SnapshotData* shared_heap_snapshot_data, bool can_rehash);
@ -2032,10 +2031,6 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
void InitializeCodeRanges();
void AddCodeMemoryRange(MemoryRange range);
// Common method to create an Isolate used by Isolate::New() and
// Isolate::NewShared().
static Isolate* Allocate(bool is_shared);
static void RemoveContextIdCallback(const v8::WeakCallbackInfo<void>& data);
void FireCallCompletedCallbackInternal(MicrotaskQueue* microtask_queue);
@ -2082,6 +2077,18 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
EntryStackItem* previous_item;
};
// When a feature flag that requires the shared heap is passed, a shared
// isolate is created to hold the shared allocations. The shared isolate is
// created by the first isolate to be created in the process, which is
// considered the main isolate and owns the lifetime of the shared
// isolate. The main isolate deletes the shared isolate when it itself is
// deleted.
static base::LazyMutex process_wide_shared_isolate_mutex_;
static Isolate* process_wide_shared_isolate_;
static Isolate* GetProcessWideSharedIsolate(bool* created_shared_isolate);
static void DeleteProcessWideSharedIsolate();
static base::Thread::LocalStorageKey per_isolate_thread_data_key_;
static base::Thread::LocalStorageKey isolate_key_;
static std::atomic<bool> isolate_key_created_;
@ -2273,10 +2280,15 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// favor memory over runtime performance.
bool memory_savings_mode_active_ = false;
// Indicates wether the isolate owns shareable data.
// Indicates whether the isolate owns shareable data.
// Only false for client isolates attached to a shared isolate.
bool owns_shareable_data_ = true;
// True if this isolate is attached to a shared isolate, and this isolate is
// the main isolate in the process and owns the lifetime of the shared
// isolate.
bool owns_shared_isolate_ = false;
bool log_object_relocation_ = false;
#ifdef V8_EXTERNAL_CODE_SPACE
@ -2444,6 +2456,8 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// Stores the shared isolate for this client isolate. nullptr for shared
// isolates or when no shared isolate is used.
//
// When non-null, it is identical to process_wide_shared_isolate_.
Isolate* shared_isolate_ = nullptr;
#ifdef V8_COMPRESS_POINTERS
@ -2495,6 +2509,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
friend class heap::HeapTester;
friend class GlobalSafepoint;
friend class TestSerializer;
friend class SharedHeapNoClientsTest;
};
#undef FIELD_ACCESSOR

View File

@ -719,6 +719,14 @@ DEFINE_BOOL(
// forwarding table.
DEFINE_NEG_IMPLICATION(shared_string_table, always_use_string_forwarding_table)
DEFINE_SIZE_T(initial_shared_heap_size, 0,
"initial size of the shared heap (in Mbytes); "
"other heap size flags (e.g. initial_heap_size) take precedence")
DEFINE_SIZE_T(
max_shared_heap_size, 0,
"max size of the shared heap (in Mbytes); "
"other heap size flags (e.g. max_heap_size) take precedence")
DEFINE_BOOL(write_code_using_rwx, true,
"flip permissions to rwx to write page instead of rw")
DEFINE_NEG_IMPLICATION(jitless, write_code_using_rwx)
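
For illustration only (assumed usage, not part of this change), the two
shared heap size flags added above are ordinary V8 flags, so an embedder
can set them together with a shared-heap feature flag before creating the
first isolate; the sizes below are arbitrary:

  // Sketch; sizes are in MB and chosen arbitrarily. V8's flag parser
  // treats '-' and '_' in flag names interchangeably.
  v8::V8::SetFlagsFromString(
      "--shared-string-table --initial-shared-heap-size=64 "
      "--max-shared-heap-size=256");

When both flags are left at their default of 0, the shared heap falls back
to ConfigureDefaults based on the machine's physical and virtual memory
(see GetProcessWideSharedIsolate earlier in this change).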

View File

@ -335,7 +335,14 @@ void GlobalSafepoint::RemoveClient(Isolate* client) {
client->shared_isolate_ = nullptr;
}
void GlobalSafepoint::AssertNoClients() { DCHECK_NULL(clients_head_); }
void GlobalSafepoint::AssertNoClientsOnTearDown() {
DCHECK_WITH_MSG(
clients_head_ == nullptr,
"Shared heap must not have clients at teardown. The first isolate that "
"is created (in a process that has no isolates) owns the lifetime of the "
"shared heap and is considered the main isolate. The main isolate must "
"outlive all other isolates.");
}
void GlobalSafepoint::EnterGlobalSafepointScope(Isolate* initiator) {
// Safepoints need to be initiated on some main thread.
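
A minimal sketch (assumed embedder code, not part of this change; the
helper name ShutdownIsolates is hypothetical) of a teardown order that
satisfies AssertNoClientsOnTearDown above:

  #include <vector>
  #include "include/v8-isolate.h"

  // Disposing a client detaches it from the shared heap; disposing the
  // owning first isolate last tears down the process-wide shared isolate.
  void ShutdownIsolates(v8::Isolate* first_isolate,
                        const std::vector<v8::Isolate*>& client_isolates) {
    for (v8::Isolate* client : client_isolates) client->Dispose();
    first_isolate->Dispose();
  }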

View File

@ -178,7 +178,7 @@ class GlobalSafepoint final {
}
}
void AssertNoClients();
void AssertNoClientsOnTearDown();
void AssertActive() { clients_mutex_.AssertHeld(); }

View File

@ -149,7 +149,6 @@ v8_source_set("cctest_sources") {
"heap/test-iterators.cc",
"heap/test-mark-compact.cc",
"heap/test-memory-measurement.cc",
"heap/test-shared-heap.cc",
"heap/test-spaces.cc",
"heap/test-unmapper.cc",
"heap/test-weak-references.cc",

View File

@ -1,326 +0,0 @@
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "include/v8-array-buffer.h"
#include "include/v8-initialization.h"
#include "include/v8-isolate.h"
#include "src/common/globals.h"
#include "src/handles/handles-inl.h"
#include "src/heap/heap.h"
#include "src/heap/read-only-spaces.h"
#include "src/objects/fixed-array-inl.h"
#include "src/objects/fixed-array.h"
#include "src/objects/heap-object.h"
#include "test/cctest/cctest.h"
namespace v8 {
namespace internal {
namespace {
const int kNumIterations = 2000;
template <typename Callback>
void SetupClientIsolateAndRunCallback(Isolate* shared_isolate,
Callback callback) {
std::unique_ptr<v8::ArrayBuffer::Allocator> allocator(
v8::ArrayBuffer::Allocator::NewDefaultAllocator());
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = allocator.get();
create_params.experimental_attach_to_shared_isolate =
reinterpret_cast<v8::Isolate*>(shared_isolate);
v8::Isolate* client_isolate = v8::Isolate::New(create_params);
Isolate* i_client_isolate = reinterpret_cast<Isolate*>(client_isolate);
callback(client_isolate, i_client_isolate);
client_isolate->Dispose();
}
class SharedOldSpaceAllocationThread final : public v8::base::Thread {
public:
explicit SharedOldSpaceAllocationThread(Isolate* shared)
: v8::base::Thread(
base::Thread::Options("SharedOldSpaceAllocationThread")),
shared_(shared) {}
void Run() override {
SetupClientIsolateAndRunCallback(shared_, [](v8::Isolate* client_isolate,
Isolate* i_client_isolate) {
HandleScope scope(i_client_isolate);
for (int i = 0; i < kNumIterations; i++) {
i_client_isolate->factory()->NewFixedArray(10,
AllocationType::kSharedOld);
}
CcTest::CollectGarbage(OLD_SPACE, i_client_isolate);
v8::platform::PumpMessageLoop(i::V8::GetCurrentPlatform(),
client_isolate);
});
}
Isolate* shared_;
};
} // namespace
UNINITIALIZED_TEST(ConcurrentAllocationInSharedOldSpace) {
if (!ReadOnlyHeap::IsReadOnlySpaceShared()) return;
std::unique_ptr<v8::ArrayBuffer::Allocator> allocator(
v8::ArrayBuffer::Allocator::NewDefaultAllocator());
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = allocator.get();
Isolate* shared_isolate = Isolate::NewShared(create_params);
std::vector<std::unique_ptr<SharedOldSpaceAllocationThread>> threads;
const int kThreads = 4;
for (int i = 0; i < kThreads; i++) {
auto thread =
std::make_unique<SharedOldSpaceAllocationThread>(shared_isolate);
CHECK(thread->Start());
threads.push_back(std::move(thread));
}
for (auto& thread : threads) {
thread->Join();
}
Isolate::Delete(shared_isolate);
}
namespace {
class SharedLargeOldSpaceAllocationThread final : public v8::base::Thread {
public:
explicit SharedLargeOldSpaceAllocationThread(Isolate* shared)
: v8::base::Thread(
base::Thread::Options("SharedOldSpaceAllocationThread")),
shared_(shared) {}
void Run() override {
SetupClientIsolateAndRunCallback(
shared_, [](v8::Isolate* client_isolate, Isolate* i_client_isolate) {
HandleScope scope(i_client_isolate);
const int kNumIterations = 50;
for (int i = 0; i < kNumIterations; i++) {
HandleScope scope(i_client_isolate);
Handle<FixedArray> fixed_array =
i_client_isolate->factory()->NewFixedArray(
kMaxRegularHeapObjectSize / kTaggedSize,
AllocationType::kSharedOld);
CHECK(MemoryChunk::FromHeapObject(*fixed_array)->IsLargePage());
}
CcTest::CollectGarbage(OLD_SPACE, i_client_isolate);
v8::platform::PumpMessageLoop(i::V8::GetCurrentPlatform(),
client_isolate);
});
}
Isolate* shared_;
};
} // namespace
UNINITIALIZED_TEST(ConcurrentAllocationInSharedLargeOldSpace) {
if (!ReadOnlyHeap::IsReadOnlySpaceShared()) return;
std::unique_ptr<v8::ArrayBuffer::Allocator> allocator(
v8::ArrayBuffer::Allocator::NewDefaultAllocator());
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = allocator.get();
Isolate* shared_isolate = Isolate::NewShared(create_params);
std::vector<std::unique_ptr<SharedLargeOldSpaceAllocationThread>> threads;
const int kThreads = 4;
for (int i = 0; i < kThreads; i++) {
auto thread =
std::make_unique<SharedLargeOldSpaceAllocationThread>(shared_isolate);
CHECK(thread->Start());
threads.push_back(std::move(thread));
}
for (auto& thread : threads) {
thread->Join();
}
Isolate::Delete(shared_isolate);
}
namespace {
class SharedMapSpaceAllocationThread final : public v8::base::Thread {
public:
explicit SharedMapSpaceAllocationThread(Isolate* shared)
: v8::base::Thread(
base::Thread::Options("SharedMapSpaceAllocationThread")),
shared_(shared) {}
void Run() override {
SetupClientIsolateAndRunCallback(
shared_, [](v8::Isolate* client_isolate, Isolate* i_client_isolate) {
HandleScope scope(i_client_isolate);
for (int i = 0; i < kNumIterations; i++) {
i_client_isolate->factory()->NewMap(
NATIVE_CONTEXT_TYPE, kVariableSizeSentinel,
TERMINAL_FAST_ELEMENTS_KIND, 0, AllocationType::kSharedMap);
}
CcTest::CollectGarbage(OLD_SPACE, i_client_isolate);
v8::platform::PumpMessageLoop(i::V8::GetCurrentPlatform(),
client_isolate);
});
}
Isolate* shared_;
};
} // namespace
UNINITIALIZED_TEST(ConcurrentAllocationInSharedMapSpace) {
if (!ReadOnlyHeap::IsReadOnlySpaceShared()) return;
std::unique_ptr<v8::ArrayBuffer::Allocator> allocator(
v8::ArrayBuffer::Allocator::NewDefaultAllocator());
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = allocator.get();
Isolate* shared_isolate = Isolate::NewShared(create_params);
std::vector<std::unique_ptr<SharedMapSpaceAllocationThread>> threads;
const int kThreads = 4;
for (int i = 0; i < kThreads; i++) {
auto thread =
std::make_unique<SharedMapSpaceAllocationThread>(shared_isolate);
CHECK(thread->Start());
threads.push_back(std::move(thread));
}
for (auto& thread : threads) {
thread->Join();
}
Isolate::Delete(shared_isolate);
}
UNINITIALIZED_TEST(SharedCollectionWithoutClients) {
if (!ReadOnlyHeap::IsReadOnlySpaceShared()) return;
std::unique_ptr<v8::ArrayBuffer::Allocator> allocator(
v8::ArrayBuffer::Allocator::NewDefaultAllocator());
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = allocator.get();
Isolate* shared_isolate = Isolate::NewShared(create_params);
DCHECK_NULL(shared_isolate->heap()->new_space());
DCHECK_NULL(shared_isolate->heap()->new_lo_space());
CcTest::CollectGarbage(OLD_SPACE, shared_isolate);
Isolate::Delete(shared_isolate);
}
void AllocateInSharedHeap(Isolate* shared_isolate, int iterations = 100) {
SetupClientIsolateAndRunCallback(
shared_isolate,
[iterations](v8::Isolate* client_isolate, Isolate* i_client_isolate) {
HandleScope outer_scope(i_client_isolate);
std::vector<Handle<FixedArray>> arrays_in_handles;
const int kKeptAliveInHandle = 1000;
const int kKeptAliveInHeap = 100;
Handle<FixedArray> arrays_in_heap =
i_client_isolate->factory()->NewFixedArray(kKeptAliveInHeap,
AllocationType::kYoung);
for (int i = 0; i < kNumIterations * iterations; i++) {
HandleScope scope(i_client_isolate);
Handle<FixedArray> array = i_client_isolate->factory()->NewFixedArray(
100, AllocationType::kSharedOld);
if (i < kKeptAliveInHandle) {
// Keep some of those arrays alive across GCs through handles.
arrays_in_handles.push_back(scope.CloseAndEscape(array));
}
if (i < kKeptAliveInHeap) {
// Keep some of those arrays alive across GCs through client heap
// references.
arrays_in_heap->set(i, *array);
}
i_client_isolate->factory()->NewFixedArray(100,
AllocationType::kYoung);
}
for (Handle<FixedArray> array : arrays_in_handles) {
CHECK_EQ(array->length(), 100);
}
for (int i = 0; i < kKeptAliveInHeap; i++) {
FixedArray array = FixedArray::cast(arrays_in_heap->get(i));
CHECK_EQ(array.length(), 100);
}
});
}
UNINITIALIZED_TEST(SharedCollectionWithOneClient) {
FLAG_max_old_space_size = 8;
if (!ReadOnlyHeap::IsReadOnlySpaceShared()) return;
std::unique_ptr<v8::ArrayBuffer::Allocator> allocator(
v8::ArrayBuffer::Allocator::NewDefaultAllocator());
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = allocator.get();
Isolate* shared_isolate = Isolate::NewShared(create_params);
AllocateInSharedHeap(shared_isolate);
Isolate::Delete(shared_isolate);
}
namespace {
class SharedFixedArrayAllocationThread final : public v8::base::Thread {
public:
explicit SharedFixedArrayAllocationThread(Isolate* shared)
: v8::base::Thread(
base::Thread::Options("SharedFixedArrayAllocationThread")),
shared_(shared) {}
void Run() override { AllocateInSharedHeap(shared_, 5); }
Isolate* shared_;
};
} // namespace
UNINITIALIZED_TEST(SharedCollectionWithMultipleClients) {
FLAG_max_old_space_size = 8;
if (!ReadOnlyHeap::IsReadOnlySpaceShared()) return;
std::unique_ptr<v8::ArrayBuffer::Allocator> allocator(
v8::ArrayBuffer::Allocator::NewDefaultAllocator());
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = allocator.get();
Isolate* shared_isolate = Isolate::NewShared(create_params);
std::vector<std::unique_ptr<SharedFixedArrayAllocationThread>> threads;
const int kThreads = 4;
for (int i = 0; i < kThreads; i++) {
auto thread =
std::make_unique<SharedFixedArrayAllocationThread>(shared_isolate);
CHECK(thread->Start());
threads.push_back(std::move(thread));
}
for (auto& thread : threads) {
thread->Join();
}
Isolate::Delete(shared_isolate);
}
} // namespace internal
} // namespace v8

View File

@ -102,22 +102,6 @@ class TestSerializer {
return v8_isolate;
}
static v8::Isolate* NewIsolateFromBlob(const StartupBlobs& blobs) {
const bool kIsShared = false;
return NewIsolateFromBlob(blobs, kIsShared, nullptr);
}
static v8::Isolate* NewSharedIsolateFromBlob(const StartupBlobs& blobs) {
const bool kIsShared = true;
return NewIsolateFromBlob(blobs, kIsShared, nullptr);
}
static v8::Isolate* NewClientIsolateFromBlob(const StartupBlobs& blobs,
v8::Isolate* shared_isolate) {
const bool kIsShared = false;
return NewIsolateFromBlob(blobs, kIsShared, shared_isolate);
}
// Wraps v8::Isolate::New, but with a test isolate under the hood.
// Allows flexibility to bootstrap with or without snapshot even when
// the production Isolate class has one or the other behavior baked in.
@ -131,11 +115,57 @@ class TestSerializer {
return v8_isolate;
}
static v8::Isolate* NewIsolateFromBlob(const StartupBlobs& blobs) {
SnapshotData startup_snapshot(blobs.startup);
SnapshotData read_only_snapshot(blobs.read_only);
SnapshotData shared_space_snapshot(blobs.shared_space);
const bool kEnableSerializer = false;
const bool kGenerateHeap = false;
const bool kIsShared = false;
v8::Isolate* v8_isolate =
NewIsolate(kEnableSerializer, kGenerateHeap, kIsShared);
v8::Isolate::Scope isolate_scope(v8_isolate);
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
isolate->Init(&startup_snapshot, &read_only_snapshot,
&shared_space_snapshot, false);
return v8_isolate;
}
static void InitializeProcessWideSharedIsolateFromBlob(
const StartupBlobs& blobs) {
base::MutexGuard guard(
i::Isolate::process_wide_shared_isolate_mutex_.Pointer());
CHECK_NULL(i::Isolate::process_wide_shared_isolate_);
SnapshotData startup_snapshot(blobs.startup);
SnapshotData read_only_snapshot(blobs.read_only);
SnapshotData shared_space_snapshot(blobs.shared_space);
const bool kEnableSerializer = false;
const bool kGenerateHeap = false;
const bool kIsShared = true;
v8::Isolate* v8_isolate =
NewIsolate(kEnableSerializer, kGenerateHeap, kIsShared);
v8::Isolate::Scope isolate_scope(v8_isolate);
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
isolate->Init(&startup_snapshot, &read_only_snapshot,
&shared_space_snapshot, false);
i::Isolate::process_wide_shared_isolate_ = isolate;
}
static void DeleteProcessWideSharedIsolate() {
i::Isolate::DeleteProcessWideSharedIsolate();
}
private:
// Creates an Isolate instance configured for testing.
static v8::Isolate* NewIsolate(bool with_serializer, bool generate_heap,
bool is_shared) {
i::Isolate* isolate = i::Isolate::Allocate(is_shared);
i::Isolate* isolate;
if (is_shared) {
isolate = i::Isolate::Allocate(true);
} else {
isolate = i::Isolate::New();
}
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
if (with_serializer) isolate->enable_serializer();
@ -144,28 +174,6 @@ class TestSerializer {
return v8_isolate;
}
static v8::Isolate* NewIsolateFromBlob(const StartupBlobs& blobs,
bool is_shared,
v8::Isolate* shared_isolate) {
SnapshotData startup_snapshot(blobs.startup);
SnapshotData read_only_snapshot(blobs.read_only);
SnapshotData shared_space_snapshot(blobs.shared_space);
const bool kEnableSerializer = false;
const bool kGenerateHeap = false;
CHECK_IMPLIES(is_shared, !shared_isolate);
v8::Isolate* v8_isolate =
NewIsolate(kEnableSerializer, kGenerateHeap, is_shared);
v8::Isolate::Scope isolate_scope(v8_isolate);
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
if (shared_isolate) {
CHECK(!is_shared);
isolate->set_shared_isolate(reinterpret_cast<Isolate*>(shared_isolate));
}
isolate->Init(&startup_snapshot, &read_only_snapshot,
&shared_space_snapshot, false);
return v8_isolate;
}
};
static base::Vector<const byte> WritePayload(
@ -5083,8 +5091,12 @@ UNINITIALIZED_TEST(SharedStrings) {
// Test that deserializing with --shared-string-table deserializes into the
// shared Isolate.
if (!ReadOnlyHeap::IsReadOnlySpaceShared()) return;
if (!COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL) return;
if (!V8_CAN_CREATE_SHARED_HEAP_BOOL) return;
// Make all the flags that require a shared heap false before creating the
// isolate to serialize.
FLAG_shared_string_table = false;
FLAG_harmony_struct = false;
v8::Isolate* isolate_to_serialize = TestSerializer::NewIsolateInitialized();
StartupBlobs blobs = Serialize(isolate_to_serialize);
@ -5092,11 +5104,9 @@ UNINITIALIZED_TEST(SharedStrings) {
FLAG_shared_string_table = true;
v8::Isolate* shared_isolate = TestSerializer::NewSharedIsolateFromBlob(blobs);
v8::Isolate* isolate1 =
TestSerializer::NewClientIsolateFromBlob(blobs, shared_isolate);
v8::Isolate* isolate2 =
TestSerializer::NewClientIsolateFromBlob(blobs, shared_isolate);
TestSerializer::InitializeProcessWideSharedIsolateFromBlob(blobs);
v8::Isolate* isolate1 = TestSerializer::NewIsolateFromBlob(blobs);
v8::Isolate* isolate2 = TestSerializer::NewIsolateFromBlob(blobs);
Isolate* i_isolate1 = reinterpret_cast<Isolate*>(isolate1);
Isolate* i_isolate2 = reinterpret_cast<Isolate*>(isolate2);
@ -5112,7 +5122,7 @@ UNINITIALIZED_TEST(SharedStrings) {
isolate1->Dispose();
}
isolate2->Dispose();
Isolate::Delete(reinterpret_cast<Isolate*>(shared_isolate));
TestSerializer::DeleteProcessWideSharedIsolate();
blobs.Dispose();
FreeCurrentEmbeddedBlob();

View File

@ -31,21 +31,21 @@ struct V8_NODISCARD IsolateWrapper {
// test shared string behavior. Because both are considered running, when
// disposing these Isolates, one must be parked to not cause a deadlock in the
// shared heap verification that happens on client Isolate disposal.
struct V8_NODISCARD IsolatePairWrapper {
IsolatePairWrapper(v8::Isolate* isolate1, v8::Isolate* isolate2)
: isolate1(isolate1), isolate2(isolate2) {}
struct V8_NODISCARD IsolateParkOnDisposeWrapper {
IsolateParkOnDisposeWrapper(v8::Isolate* isolate,
v8::Isolate* isolate_to_park)
: isolate(isolate), isolate_to_park(isolate_to_park) {}
~IsolatePairWrapper() {
~IsolateParkOnDisposeWrapper() {
{
i::ParkedScope parked(
reinterpret_cast<Isolate*>(isolate1)->main_thread_local_isolate());
isolate2->Dispose();
i::ParkedScope parked(reinterpret_cast<Isolate*>(isolate_to_park)
->main_thread_local_isolate());
isolate->Dispose();
}
isolate1->Dispose();
}
v8::Isolate* const isolate1;
v8::Isolate* const isolate2;
v8::Isolate* const isolate;
v8::Isolate* const isolate_to_park;
};
class MultiClientIsolateTest {
@ -55,30 +55,28 @@ class MultiClientIsolateTest {
v8::ArrayBuffer::Allocator::NewDefaultAllocator());
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = allocator.get();
shared_isolate_ =
reinterpret_cast<v8::Isolate*>(Isolate::NewShared(create_params));
main_isolate_ = v8::Isolate::New(create_params);
}
~MultiClientIsolateTest() { Isolate::Delete(i_shared_isolate()); }
~MultiClientIsolateTest() { main_isolate_->Dispose(); }
v8::Isolate* shared_isolate() const { return shared_isolate_; }
v8::Isolate* main_isolate() const { return main_isolate_; }
Isolate* i_shared_isolate() const {
return reinterpret_cast<Isolate*>(shared_isolate_);
Isolate* i_main_isolate() const {
return reinterpret_cast<Isolate*>(main_isolate_);
}
v8::Isolate* NewClientIsolate() {
CHECK_NOT_NULL(shared_isolate_);
CHECK_NOT_NULL(main_isolate_);
std::unique_ptr<v8::ArrayBuffer::Allocator> allocator(
v8::ArrayBuffer::Allocator::NewDefaultAllocator());
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = allocator.get();
create_params.experimental_attach_to_shared_isolate = shared_isolate_;
return v8::Isolate::New(create_params);
}
private:
v8::Isolate* shared_isolate_;
v8::Isolate* main_isolate_;
};
UNINITIALIZED_TEST(InPlaceInternalizableStringsAreShared) {
@ -89,9 +87,7 @@ UNINITIALIZED_TEST(InPlaceInternalizableStringsAreShared) {
FLAG_shared_string_table = true;
MultiClientIsolateTest test;
IsolateWrapper isolate1_wrapper(test.NewClientIsolate());
v8::Isolate* isolate1 = isolate1_wrapper.isolate;
Isolate* i_isolate1 = reinterpret_cast<Isolate*>(isolate1);
Isolate* i_isolate1 = test.i_main_isolate();
Factory* factory1 = i_isolate1->factory();
HandleScope handle_scope(i_isolate1);
@ -138,13 +134,11 @@ UNINITIALIZED_TEST(InPlaceInternalization) {
FLAG_shared_string_table = true;
MultiClientIsolateTest test;
IsolatePairWrapper isolates_wrapper(test.NewClientIsolate(),
test.NewClientIsolate());
v8::Isolate* isolate1 = isolates_wrapper.isolate1;
v8::Isolate* isolate2 = isolates_wrapper.isolate2;
Isolate* i_isolate1 = reinterpret_cast<Isolate*>(isolate1);
IsolateParkOnDisposeWrapper isolate_wrapper(test.NewClientIsolate(),
test.main_isolate());
Isolate* i_isolate1 = test.i_main_isolate();
Factory* factory1 = i_isolate1->factory();
Isolate* i_isolate2 = reinterpret_cast<Isolate*>(isolate2);
Isolate* i_isolate2 = reinterpret_cast<Isolate*>(isolate_wrapper.isolate);
Factory* factory2 = i_isolate2->factory();
HandleScope scope1(i_isolate1);
@ -206,13 +200,11 @@ UNINITIALIZED_TEST(YoungInternalization) {
FLAG_shared_string_table = true;
MultiClientIsolateTest test;
IsolatePairWrapper isolates_wrapper(test.NewClientIsolate(),
test.NewClientIsolate());
v8::Isolate* isolate1 = isolates_wrapper.isolate1;
v8::Isolate* isolate2 = isolates_wrapper.isolate2;
Isolate* i_isolate1 = reinterpret_cast<Isolate*>(isolate1);
IsolateParkOnDisposeWrapper isolate_wrapper(test.NewClientIsolate(),
test.main_isolate());
Isolate* i_isolate1 = test.i_main_isolate();
Factory* factory1 = i_isolate1->factory();
Isolate* i_isolate2 = reinterpret_cast<Isolate*>(isolate2);
Isolate* i_isolate2 = reinterpret_cast<Isolate*>(isolate_wrapper.isolate);
Factory* factory2 = i_isolate2->factory();
HandleScope scope1(i_isolate1);
@ -280,8 +272,7 @@ class ConcurrentStringThreadBase : public v8::base::Thread {
virtual void Teardown() {}
void Run() override {
IsolateWrapper isolate_wrapper(test_->NewClientIsolate());
isolate = isolate_wrapper.isolate;
i_isolate = reinterpret_cast<Isolate*>(isolate);
i_isolate = reinterpret_cast<Isolate*>(isolate_wrapper.isolate);
Setup();
@ -301,7 +292,6 @@ class ConcurrentStringThreadBase : public v8::base::Thread {
Teardown();
isolate = nullptr;
i_isolate = nullptr;
}
@ -313,7 +303,6 @@ class ConcurrentStringThreadBase : public v8::base::Thread {
protected:
using base::Thread::Join;
v8::Isolate* isolate;
Isolate* i_isolate;
MultiClientIsolateTest* test_;
Handle<FixedArray> shared_strings_;
@ -393,14 +382,11 @@ void TestConcurrentInternalization(TestHitOrMiss hit_or_miss) {
FLAG_shared_string_table = true;
MultiClientIsolateTest test;
constexpr int kThreads = 4;
constexpr int kStrings = 4096;
IsolateWrapper isolate_wrapper(test.NewClientIsolate());
v8::Isolate* isolate = isolate_wrapper.isolate;
Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
MultiClientIsolateTest test;
Isolate* i_isolate = test.i_main_isolate();
Factory* factory = i_isolate->factory();
HandleScope scope(i_isolate);
@ -479,15 +465,12 @@ UNINITIALIZED_TEST(ConcurrentStringTableLookup) {
FLAG_shared_string_table = true;
MultiClientIsolateTest test;
constexpr int kTotalThreads = 4;
constexpr int kInternalizationThreads = 1;
constexpr int kStrings = 4096;
IsolateWrapper isolate_wrapper(test.NewClientIsolate());
v8::Isolate* isolate = isolate_wrapper.isolate;
Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
MultiClientIsolateTest test;
Isolate* i_isolate = test.i_main_isolate();
Factory* factory = i_isolate->factory();
HandleScope scope(i_isolate);
@ -557,9 +540,7 @@ UNINITIALIZED_TEST(StringShare) {
FLAG_shared_string_table = true;
MultiClientIsolateTest test;
IsolateWrapper isolate_wrapper(test.NewClientIsolate());
v8::Isolate* isolate = isolate_wrapper.isolate;
Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
Isolate* i_isolate = test.i_main_isolate();
Factory* factory = i_isolate->factory();
HandleScope scope(i_isolate);
@ -684,9 +665,7 @@ UNINITIALIZED_TEST(PromotionMarkCompact) {
FLAG_manual_evacuation_candidates_selection = true;
MultiClientIsolateTest test;
IsolateWrapper isolate_wrapper(test.NewClientIsolate());
v8::Isolate* isolate = isolate_wrapper.isolate;
Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
Isolate* i_isolate = test.i_main_isolate();
Factory* factory = i_isolate->factory();
Heap* heap = i_isolate->heap();
// Heap* shared_heap = test.i_shared_isolate()->heap();
@ -730,9 +709,7 @@ UNINITIALIZED_TEST(PromotionScavenge) {
FLAG_shared_string_table = true;
MultiClientIsolateTest test;
IsolateWrapper isolate_wrapper(test.NewClientIsolate());
v8::Isolate* isolate = isolate_wrapper.isolate;
Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
Isolate* i_isolate = test.i_main_isolate();
Factory* factory = i_isolate->factory();
Heap* heap = i_isolate->heap();
// Heap* shared_heap = test.i_shared_isolate()->heap();
@ -771,9 +748,7 @@ UNINITIALIZED_TEST(PromotionScavengeOldToShared) {
FLAG_shared_string_table = true;
MultiClientIsolateTest test;
IsolateWrapper isolate_wrapper(test.NewClientIsolate());
v8::Isolate* isolate = isolate_wrapper.isolate;
Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
Isolate* i_isolate = test.i_main_isolate();
Factory* factory = i_isolate->factory();
Heap* heap = i_isolate->heap();
ManualGCScope manual_gc(i_isolate);
@ -824,9 +799,7 @@ UNINITIALIZED_TEST(PromotionMarkCompactOldToShared) {
FLAG_manual_evacuation_candidates_selection = true;
MultiClientIsolateTest test;
IsolateWrapper isolate_wrapper(test.NewClientIsolate());
v8::Isolate* isolate = isolate_wrapper.isolate;
Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
Isolate* i_isolate = test.i_main_isolate();
Factory* factory = i_isolate->factory();
Heap* heap = i_isolate->heap();
ManualGCScope manual_gc(i_isolate);
@ -871,13 +844,10 @@ UNINITIALIZED_TEST(SharedStringsTransitionDuringGC) {
FLAG_shared_string_table = true;
MultiClientIsolateTest test;
constexpr int kStrings = 4096;
IsolateWrapper isolate_wrapper(test.NewClientIsolate());
v8::Isolate* isolate = isolate_wrapper.isolate;
Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
MultiClientIsolateTest test;
Isolate* i_isolate = test.i_main_isolate();
Factory* factory = i_isolate->factory();
HandleScope scope(i_isolate);

View File

@ -413,6 +413,7 @@ v8_source_set("unittests_sources") {
"heap/persistent-handles-unittest.cc",
"heap/progressbar-unittest.cc",
"heap/safepoint-unittest.cc",
"heap/shared-heap-unittest.cc",
"heap/slot-set-unittest.cc",
"heap/spaces-unittest.cc",
"heap/unmapper-unittest.cc",

View File

@ -11,27 +11,29 @@
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
#if V8_CAN_CREATE_SHARED_HEAP_BOOL
namespace v8 {
namespace internal {
using GlobalSafepointTest = TestWithSharedIsolate;
using GlobalSafepointTest = TestJSSharedMemoryWithNativeContext;
namespace {
class ClientIsolateWithContextWrapper {
class IsolateWithContextWrapper {
public:
explicit ClientIsolateWithContextWrapper(v8::Isolate* shared_isolate)
: client_isolate_wrapper_(kNoCounters, kClientIsolate, shared_isolate),
isolate_scope_(client_isolate_wrapper_.isolate()),
handle_scope_(client_isolate_wrapper_.isolate()),
context_(v8::Context::New(client_isolate_wrapper_.isolate())),
explicit IsolateWithContextWrapper()
: isolate_wrapper_(kNoCounters),
isolate_scope_(isolate_wrapper_.isolate()),
handle_scope_(isolate_wrapper_.isolate()),
context_(v8::Context::New(isolate_wrapper_.isolate())),
context_scope_(context_) {}
v8::Isolate* v8_isolate() const { return client_isolate_wrapper_.isolate(); }
v8::Isolate* v8_isolate() const { return isolate_wrapper_.isolate(); }
Isolate* isolate() const { return reinterpret_cast<Isolate*>(v8_isolate()); }
private:
IsolateWrapper client_isolate_wrapper_;
IsolateWrapper isolate_wrapper_;
v8::Isolate::Scope isolate_scope_;
v8::HandleScope handle_scope_;
v8::Local<v8::Context> context_;
@ -53,19 +55,17 @@ class ParkingThread : public v8::base::Thread {
class InfiniteLooperThread final : public ParkingThread {
public:
InfiniteLooperThread(v8::Isolate* shared_isolate,
ParkingSemaphore* sema_ready,
InfiniteLooperThread(ParkingSemaphore* sema_ready,
ParkingSemaphore* sema_execute_start,
ParkingSemaphore* sema_execute_complete)
: ParkingThread(Options("InfiniteLooperThread")),
shared_isolate_(shared_isolate),
sema_ready_(sema_ready),
sema_execute_start_(sema_execute_start),
sema_execute_complete_(sema_execute_complete) {}
void Run() override {
ClientIsolateWithContextWrapper client_isolate_wrapper(shared_isolate_);
v8::Isolate* v8_isolate = client_isolate_wrapper.v8_isolate();
IsolateWithContextWrapper isolate_wrapper;
v8::Isolate* v8_isolate = isolate_wrapper.v8_isolate();
v8::Isolate::Scope isolate_scope(v8_isolate);
v8::HandleScope scope(v8_isolate);
@ -78,7 +78,7 @@ class InfiniteLooperThread final : public ParkingThread {
sema_ready_->Signal();
sema_execute_start_->ParkedWait(
client_isolate_wrapper.isolate()->main_thread_local_isolate());
isolate_wrapper.isolate()->main_thread_local_isolate());
USE(script->Run(context));
@ -86,7 +86,6 @@ class InfiniteLooperThread final : public ParkingThread {
}
private:
v8::Isolate* shared_isolate_;
ParkingSemaphore* sema_ready_;
ParkingSemaphore* sema_execute_start_;
ParkingSemaphore* sema_execute_complete_;
@ -95,26 +94,21 @@ class InfiniteLooperThread final : public ParkingThread {
} // namespace
TEST_F(GlobalSafepointTest, Interrupt) {
if (!IsJSSharedMemorySupported()) return;
v8::Isolate* shared_isolate = v8_isolate();
ClientIsolateWithContextWrapper client_isolate_wrapper(shared_isolate);
constexpr int kThreads = 4;
Isolate* isolate = client_isolate_wrapper.isolate();
Isolate* i_main_isolate = i_isolate();
ParkingSemaphore sema_ready(0);
ParkingSemaphore sema_execute_start(0);
ParkingSemaphore sema_execute_complete(0);
std::vector<std::unique_ptr<InfiniteLooperThread>> threads;
for (int i = 0; i < kThreads; i++) {
auto thread = std::make_unique<InfiniteLooperThread>(
shared_isolate, &sema_ready, &sema_execute_start,
&sema_execute_complete);
&sema_ready, &sema_execute_start, &sema_execute_complete);
CHECK(thread->Start());
threads.push_back(std::move(thread));
}
LocalIsolate* local_isolate = isolate->main_thread_local_isolate();
LocalIsolate* local_isolate = i_main_isolate->main_thread_local_isolate();
for (int i = 0; i < kThreads; i++) {
sema_ready.ParkedWait(local_isolate);
}
@ -130,10 +124,9 @@ TEST_F(GlobalSafepointTest, Interrupt) {
// looping. Otherwise the safepoint may be reached during allocation, such
// as of FeedbackVectors, and we wouldn't be testing the interrupt check.
base::OS::Sleep(base::TimeDelta::FromMilliseconds(500));
GlobalSafepointScope global_safepoint(isolate);
reinterpret_cast<Isolate*>(shared_isolate)
->global_safepoint()
->IterateClientIsolates([](Isolate* client) {
GlobalSafepointScope global_safepoint(i_main_isolate);
i_main_isolate->shared_isolate()->global_safepoint()->IterateClientIsolates(
[](Isolate* client) {
client->stack_guard()->RequestTerminateExecution();
});
}
@ -150,3 +143,5 @@ TEST_F(GlobalSafepointTest, Interrupt) {
} // namespace internal
} // namespace v8
#endif // V8_CAN_CREATE_SHARED_HEAP_BOOL

View File

@ -140,6 +140,11 @@ using TestWithHeapInternalsAndContext = //
WithContextMixin< //
TestWithHeapInternals>;
inline void CollectGarbage(i::AllocationSpace space, v8::Isolate* isolate) {
reinterpret_cast<i::Isolate*>(isolate)->heap()->CollectGarbage(
space, i::GarbageCollectionReason::kTesting);
}
inline void FullGC(v8::Isolate* isolate) {
reinterpret_cast<i::Isolate*>(isolate)->heap()->CollectAllGarbage(
i::Heap::kNoGCFlags, i::GarbageCollectionReason::kTesting);

View File

@ -0,0 +1,269 @@
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/base/platform/platform.h"
#include "src/heap/heap.h"
#include "src/heap/parked-scope.h"
#include "test/unittests/heap/heap-utils.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
#if V8_CAN_CREATE_SHARED_HEAP_BOOL
namespace v8 {
namespace internal {
using SharedHeapTest = TestJSSharedMemoryWithIsolate;
class SharedHeapNoClientsTest : public TestJSSharedMemoryWithPlatform {
public:
SharedHeapNoClientsTest() {
bool created;
shared_isolate_ = Isolate::GetProcessWideSharedIsolate(&created);
CHECK(created);
}
~SharedHeapNoClientsTest() override {
Isolate::DeleteProcessWideSharedIsolate();
}
v8::Isolate* shared_isolate() {
return reinterpret_cast<v8::Isolate*>(i_shared_isolate());
}
Isolate* i_shared_isolate() { return shared_isolate_; }
private:
Isolate* shared_isolate_;
};
namespace {
const int kNumIterations = 2000;
template <typename Callback>
void SetupClientIsolateAndRunCallback(Callback callback) {
IsolateWrapper isolate_wrapper(kNoCounters);
v8::Isolate* client_isolate = isolate_wrapper.isolate();
Isolate* i_client_isolate = reinterpret_cast<Isolate*>(client_isolate);
callback(client_isolate, i_client_isolate);
}
class SharedOldSpaceAllocationThread final : public ParkingThread {
public:
SharedOldSpaceAllocationThread()
: ParkingThread(base::Thread::Options("SharedOldSpaceAllocationThread")) {
}
void Run() override {
SetupClientIsolateAndRunCallback(
[](v8::Isolate* client_isolate, Isolate* i_client_isolate) {
HandleScope scope(i_client_isolate);
for (int i = 0; i < kNumIterations; i++) {
i_client_isolate->factory()->NewFixedArray(
10, AllocationType::kSharedOld);
}
CollectGarbage(OLD_SPACE, client_isolate);
v8::platform::PumpMessageLoop(i::V8::GetCurrentPlatform(),
client_isolate);
});
}
};
} // namespace
TEST_F(SharedHeapTest, ConcurrentAllocationInSharedOldSpace) {
std::vector<std::unique_ptr<SharedOldSpaceAllocationThread>> threads;
const int kThreads = 4;
ParkedScope parked(i_isolate()->main_thread_local_isolate());
for (int i = 0; i < kThreads; i++) {
auto thread = std::make_unique<SharedOldSpaceAllocationThread>();
CHECK(thread->Start());
threads.push_back(std::move(thread));
}
for (auto& thread : threads) {
thread->ParkedJoin(parked);
}
}
namespace {
class SharedLargeOldSpaceAllocationThread final : public ParkingThread {
public:
SharedLargeOldSpaceAllocationThread()
: ParkingThread(base::Thread::Options("SharedOldSpaceAllocationThread")) {
}
void Run() override {
SetupClientIsolateAndRunCallback(
[](v8::Isolate* client_isolate, Isolate* i_client_isolate) {
HandleScope scope(i_client_isolate);
const int kNumIterations = 50;
for (int i = 0; i < kNumIterations; i++) {
HandleScope scope(i_client_isolate);
Handle<FixedArray> fixed_array =
i_client_isolate->factory()->NewFixedArray(
kMaxRegularHeapObjectSize / kTaggedSize,
AllocationType::kSharedOld);
CHECK(MemoryChunk::FromHeapObject(*fixed_array)->IsLargePage());
}
CollectGarbage(OLD_SPACE, client_isolate);
v8::platform::PumpMessageLoop(i::V8::GetCurrentPlatform(),
client_isolate);
});
}
};
} // namespace
TEST_F(SharedHeapTest, ConcurrentAllocationInSharedLargeOldSpace) {
std::vector<std::unique_ptr<SharedLargeOldSpaceAllocationThread>> threads;
const int kThreads = 4;
ParkedScope parked(i_isolate()->main_thread_local_isolate());
for (int i = 0; i < kThreads; i++) {
auto thread = std::make_unique<SharedLargeOldSpaceAllocationThread>();
CHECK(thread->Start());
threads.push_back(std::move(thread));
}
for (auto& thread : threads) {
thread->ParkedJoin(parked);
}
}
namespace {
class SharedMapSpaceAllocationThread final : public ParkingThread {
public:
SharedMapSpaceAllocationThread()
: ParkingThread(base::Thread::Options("SharedMapSpaceAllocationThread")) {
}
void Run() override {
SetupClientIsolateAndRunCallback(
[](v8::Isolate* client_isolate, Isolate* i_client_isolate) {
HandleScope scope(i_client_isolate);
for (int i = 0; i < kNumIterations; i++) {
i_client_isolate->factory()->NewMap(
NATIVE_CONTEXT_TYPE, kVariableSizeSentinel,
TERMINAL_FAST_ELEMENTS_KIND, 0, AllocationType::kSharedMap);
}
CollectGarbage(OLD_SPACE, client_isolate);
v8::platform::PumpMessageLoop(i::V8::GetCurrentPlatform(),
client_isolate);
});
}
};
} // namespace
TEST_F(SharedHeapTest, ConcurrentAllocationInSharedMapSpace) {
std::vector<std::unique_ptr<SharedMapSpaceAllocationThread>> threads;
const int kThreads = 4;
ParkedScope parked(i_isolate()->main_thread_local_isolate());
for (int i = 0; i < kThreads; i++) {
auto thread = std::make_unique<SharedMapSpaceAllocationThread>();
CHECK(thread->Start());
threads.push_back(std::move(thread));
}
for (auto& thread : threads) {
thread->ParkedJoin(parked);
}
}
TEST_F(SharedHeapNoClientsTest, SharedCollectionWithoutClients) {
DCHECK_NULL(i_shared_isolate()->heap()->new_space());
DCHECK_NULL(i_shared_isolate()->heap()->new_lo_space());
::v8::internal::CollectGarbage(OLD_SPACE, shared_isolate());
}
void AllocateInSharedHeap(int iterations = 100) {
SetupClientIsolateAndRunCallback([iterations](v8::Isolate* client_isolate,
Isolate* i_client_isolate) {
HandleScope outer_scope(i_client_isolate);
std::vector<Handle<FixedArray>> arrays_in_handles;
const int kKeptAliveInHandle = 1000;
const int kKeptAliveInHeap = 100;
Handle<FixedArray> arrays_in_heap =
i_client_isolate->factory()->NewFixedArray(kKeptAliveInHeap,
AllocationType::kYoung);
for (int i = 0; i < kNumIterations * iterations; i++) {
HandleScope scope(i_client_isolate);
Handle<FixedArray> array = i_client_isolate->factory()->NewFixedArray(
100, AllocationType::kSharedOld);
if (i < kKeptAliveInHandle) {
// Keep some of those arrays alive across GCs through handles.
arrays_in_handles.push_back(scope.CloseAndEscape(array));
}
if (i < kKeptAliveInHeap) {
// Keep some of those arrays alive across GCs through client heap
// references.
arrays_in_heap->set(i, *array);
}
i_client_isolate->factory()->NewFixedArray(100, AllocationType::kYoung);
}
for (Handle<FixedArray> array : arrays_in_handles) {
CHECK_EQ(array->length(), 100);
}
for (int i = 0; i < kKeptAliveInHeap; i++) {
FixedArray array = FixedArray::cast(arrays_in_heap->get(i));
CHECK_EQ(array.length(), 100);
}
});
}
TEST_F(SharedHeapTest, SharedCollectionWithOneClient) {
FLAG_max_old_space_size = 8;
ParkedScope parked(i_isolate()->main_thread_local_isolate());
AllocateInSharedHeap();
}
namespace {
class SharedFixedArrayAllocationThread final : public ParkingThread {
public:
SharedFixedArrayAllocationThread()
: ParkingThread(
base::Thread::Options("SharedFixedArrayAllocationThread")) {}
void Run() override { AllocateInSharedHeap(5); }
};
} // namespace
TEST_F(SharedHeapTest, SharedCollectionWithMultipleClients) {
FLAG_max_old_space_size = 8;
std::vector<std::unique_ptr<SharedFixedArrayAllocationThread>> threads;
const int kThreads = 4;
ParkedScope parked(i_isolate()->main_thread_local_isolate());
for (int i = 0; i < kThreads; i++) {
auto thread = std::make_unique<SharedFixedArrayAllocationThread>();
CHECK(thread->Start());
threads.push_back(std::move(thread));
}
for (auto& thread : threads) {
thread->ParkedJoin(parked);
}
}
} // namespace internal
} // namespace v8
#endif // V8_CAN_CREATE_SHARED_HEAP_BOOL

View File

@ -9,63 +9,50 @@
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
#if V8_CAN_CREATE_SHARED_HEAP_BOOL
namespace v8 {
namespace internal {
using JSAtomicsMutexTest = TestWithSharedIsolate;
using JSAtomicsConditionTest = TestWithSharedIsolate;
using JSAtomicsMutexTest = TestJSSharedMemoryWithNativeContext;
using JSAtomicsConditionTest = TestJSSharedMemoryWithNativeContext;
namespace {
class ClientIsolateWithContextWrapper final {
class IsolateWithContextWrapper final {
public:
explicit ClientIsolateWithContextWrapper(v8::Isolate* shared_isolate)
: client_isolate_wrapper_(kNoCounters, kClientIsolate, shared_isolate),
isolate_scope_(client_isolate_wrapper_.isolate()),
handle_scope_(client_isolate_wrapper_.isolate()),
context_(v8::Context::New(client_isolate_wrapper_.isolate())),
IsolateWithContextWrapper()
: isolate_wrapper_(kNoCounters),
isolate_scope_(isolate_wrapper_.isolate()),
handle_scope_(isolate_wrapper_.isolate()),
context_(v8::Context::New(isolate_wrapper_.isolate())),
context_scope_(context_) {}
v8::Isolate* v8_isolate() const { return client_isolate_wrapper_.isolate(); }
v8::Isolate* v8_isolate() const { return isolate_wrapper_.isolate(); }
Isolate* isolate() const { return reinterpret_cast<Isolate*>(v8_isolate()); }
private:
IsolateWrapper client_isolate_wrapper_;
IsolateWrapper isolate_wrapper_;
v8::Isolate::Scope isolate_scope_;
v8::HandleScope handle_scope_;
v8::Local<v8::Context> context_;
v8::Context::Scope context_scope_;
};
class ParkingThread : public v8::base::Thread {
public:
explicit ParkingThread(const Options& options) : v8::base::Thread(options) {}
void ParkedJoin(const ParkedScope& scope) {
USE(scope);
Join();
}
private:
using base::Thread::Join;
};
class LockingThread final : public ParkingThread {
public:
LockingThread(v8::Isolate* shared_isolate, Handle<JSAtomicsMutex> mutex,
ParkingSemaphore* sema_ready,
LockingThread(Handle<JSAtomicsMutex> mutex, ParkingSemaphore* sema_ready,
ParkingSemaphore* sema_execute_start,
ParkingSemaphore* sema_execute_complete)
: ParkingThread(Options("LockingThread")),
shared_isolate_(shared_isolate),
mutex_(mutex),
sema_ready_(sema_ready),
sema_execute_start_(sema_execute_start),
sema_execute_complete_(sema_execute_complete) {}
void Run() override {
ClientIsolateWithContextWrapper client_isolate_wrapper(shared_isolate_);
Isolate* isolate = client_isolate_wrapper.isolate();
IsolateWithContextWrapper isolate_wrapper;
Isolate* isolate = isolate_wrapper.isolate();
sema_ready_->Signal();
sema_execute_start_->ParkedWait(isolate->main_thread_local_isolate());
@@ -81,7 +68,6 @@ class LockingThread final : public ParkingThread {
}
private:
v8::Isolate* shared_isolate_;
Handle<JSAtomicsMutex> mutex_;
ParkingSemaphore* sema_ready_;
ParkingSemaphore* sema_execute_start_;
@@ -91,31 +77,24 @@ class LockingThread final : public ParkingThread {
} // namespace
TEST_F(JSAtomicsMutexTest, Contention) {
if (!IsJSSharedMemorySupported()) return;
FLAG_harmony_struct = true;
v8::Isolate* shared_isolate = v8_isolate();
ClientIsolateWithContextWrapper client_isolate_wrapper(shared_isolate);
constexpr int kThreads = 32;
Isolate* isolate = client_isolate_wrapper.isolate();
Isolate* i_main_isolate = i_isolate();
Handle<JSAtomicsMutex> contended_mutex =
isolate->factory()->NewJSAtomicsMutex();
i_main_isolate->factory()->NewJSAtomicsMutex();
ParkingSemaphore sema_ready(0);
ParkingSemaphore sema_execute_start(0);
ParkingSemaphore sema_execute_complete(0);
std::vector<std::unique_ptr<LockingThread>> threads;
for (int i = 0; i < kThreads; i++) {
auto thread = std::make_unique<LockingThread>(
shared_isolate, contended_mutex, &sema_ready, &sema_execute_start,
&sema_execute_complete);
auto thread = std::make_unique<LockingThread>(contended_mutex, &sema_ready,
&sema_execute_start,
&sema_execute_complete);
CHECK(thread->Start());
threads.push_back(std::move(thread));
}
LocalIsolate* local_isolate = isolate->main_thread_local_isolate();
LocalIsolate* local_isolate = i_main_isolate->main_thread_local_isolate();
for (int i = 0; i < kThreads; i++) {
sema_ready.ParkedWait(local_isolate);
}
@@ -135,14 +114,12 @@ TEST_F(JSAtomicsMutexTest, Contention) {
namespace {
class WaitOnConditionThread final : public ParkingThread {
public:
WaitOnConditionThread(v8::Isolate* shared_isolate,
Handle<JSAtomicsMutex> mutex,
WaitOnConditionThread(Handle<JSAtomicsMutex> mutex,
Handle<JSAtomicsCondition> condition,
uint32_t* waiting_threads_count,
ParkingSemaphore* sema_ready,
ParkingSemaphore* sema_execute_complete)
: ParkingThread(Options("WaitOnConditionThread")),
shared_isolate_(shared_isolate),
mutex_(mutex),
condition_(condition),
waiting_threads_count_(waiting_threads_count),
@@ -150,8 +127,8 @@ class WaitOnConditionThread final : public ParkingThread {
sema_execute_complete_(sema_execute_complete) {}
void Run() override {
ClientIsolateWithContextWrapper client_isolate_wrapper(shared_isolate_);
Isolate* isolate = client_isolate_wrapper.isolate();
IsolateWithContextWrapper isolate_wrapper;
Isolate* isolate = isolate_wrapper.isolate();
sema_ready_->Signal();
@@ -171,7 +148,6 @@ class WaitOnConditionThread final : public ParkingThread {
bool keep_waiting = true;
private:
v8::Isolate* shared_isolate_;
Handle<JSAtomicsMutex> mutex_;
Handle<JSAtomicsCondition> condition_;
uint32_t* waiting_threads_count_;
@@ -181,19 +157,12 @@ class WaitOnConditionThread final : public ParkingThread {
} // namespace
TEST_F(JSAtomicsConditionTest, NotifyAll) {
if (!IsJSSharedMemorySupported()) return;
FLAG_harmony_struct = true;
v8::Isolate* shared_isolate = v8_isolate();
ClientIsolateWithContextWrapper client_isolate_wrapper(shared_isolate);
Isolate* client_isolate = client_isolate_wrapper.isolate();
constexpr uint32_t kThreads = 32;
Handle<JSAtomicsMutex> mutex = client_isolate->factory()->NewJSAtomicsMutex();
Isolate* i_main_isolate = i_isolate();
Handle<JSAtomicsMutex> mutex = i_main_isolate->factory()->NewJSAtomicsMutex();
Handle<JSAtomicsCondition> condition =
client_isolate->factory()->NewJSAtomicsCondition();
i_main_isolate->factory()->NewJSAtomicsCondition();
uint32_t waiting_threads_count = 0;
ParkingSemaphore sema_ready(0);
@@ -201,20 +170,20 @@ TEST_F(JSAtomicsConditionTest, NotifyAll) {
std::vector<std::unique_ptr<WaitOnConditionThread>> threads;
for (uint32_t i = 0; i < kThreads; i++) {
auto thread = std::make_unique<WaitOnConditionThread>(
shared_isolate, mutex, condition, &waiting_threads_count, &sema_ready,
mutex, condition, &waiting_threads_count, &sema_ready,
&sema_execute_complete);
CHECK(thread->Start());
threads.push_back(std::move(thread));
}
LocalIsolate* local_isolate = client_isolate->main_thread_local_isolate();
LocalIsolate* local_isolate = i_main_isolate->main_thread_local_isolate();
for (uint32_t i = 0; i < kThreads; i++) {
sema_ready.ParkedWait(local_isolate);
}
// Wait until all threads are waiting on the condition.
for (;;) {
JSAtomicsMutex::LockGuard lock_guard(client_isolate, mutex);
JSAtomicsMutex::LockGuard lock_guard(i_main_isolate, mutex);
uint32_t count = waiting_threads_count;
if (count == kThreads) break;
}
@@ -224,7 +193,7 @@ TEST_F(JSAtomicsConditionTest, NotifyAll) {
threads[i]->keep_waiting = false;
}
EXPECT_EQ(kThreads,
condition->Notify(client_isolate, JSAtomicsCondition::kAllWaiters));
condition->Notify(i_main_isolate, JSAtomicsCondition::kAllWaiters));
for (uint32_t i = 0; i < kThreads; i++) {
sema_execute_complete.ParkedWait(local_isolate);
@@ -241,3 +210,5 @@ TEST_F(JSAtomicsConditionTest, NotifyAll) {
} // namespace internal
} // namespace v8
#endif // V8_CAN_CREATE_SHARED_HEAP_BOOL

View File

@@ -22,9 +22,7 @@ namespace {
CounterMap* kCurrentCounterMap = nullptr;
} // namespace
IsolateWrapper::IsolateWrapper(CountersMode counters_mode,
IsolateSharedMode shared_mode,
v8::Isolate* shared_isolate_if_client)
IsolateWrapper::IsolateWrapper(CountersMode counters_mode)
: array_buffer_allocator_(
v8::ArrayBuffer::Allocator::NewDefaultAllocator()) {
CHECK_NULL(kCurrentCounterMap);
@@ -48,17 +46,7 @@ IsolateWrapper::IsolateWrapper(CountersMode counters_mode,
};
}
if (shared_mode == kSharedIsolate) {
isolate_ = reinterpret_cast<v8::Isolate*>(
internal::Isolate::NewShared(create_params));
} else {
if (shared_mode == kClientIsolate) {
CHECK_NOT_NULL(shared_isolate_if_client);
create_params.experimental_attach_to_shared_isolate =
shared_isolate_if_client;
}
isolate_ = v8::Isolate::New(create_params);
}
isolate_ = v8::Isolate::New(create_params);
CHECK_NOT_NULL(isolate());
}
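With the shared-mode branches removed, IsolateWrapper is reduced to a plain v8::Isolate::New() call. For orientation, a minimal standalone sketch of the same embedder-visible sequence follows; it is not part of this diff, the --harmony-struct flag simply mirrors what WithJSSharedMemoryFeatureFlagsMixin sets, and all other names are the stock v8.h / libplatform API.
// Hypothetical standalone sketch (not from this diff).
#include <memory>
#include "libplatform/libplatform.h"
#include "v8.h"
int main() {
  // Mirrors WithJSSharedMemoryFeatureFlagsMixin; assumption: this is how an
  // embedder would opt in via the command-line flag.
  v8::V8::SetFlagsFromString("--harmony-struct");
  std::unique_ptr<v8::Platform> platform = v8::platform::NewDefaultPlatform();
  v8::V8::InitializePlatform(platform.get());
  v8::V8::Initialize();
  v8::Isolate::CreateParams create_params;
  create_params.array_buffer_allocator =
      v8::ArrayBuffer::Allocator::NewDefaultAllocator();
  // No shared-isolate parameter anywhere; Isolate::New() is all that is needed.
  v8::Isolate* isolate = v8::Isolate::New(create_params);
  // ... use the isolate ...
  isolate->Dispose();
  delete create_params.array_buffer_allocator;
  v8::V8::Dispose();
  v8::V8::DisposePlatform();
  return 0;
}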

View File

@@ -19,6 +19,7 @@
#include "src/base/macros.h"
#include "src/base/utils/random-number-generator.h"
#include "src/handles/handles.h"
#include "src/heap/parked-scope.h"
#include "src/objects/objects-inl.h"
#include "src/objects/objects.h"
#include "src/zone/accounting-allocator.h"
@@ -44,7 +45,7 @@ class WithDefaultPlatformMixin : public TMixin {
v8::V8::Initialize();
}
~WithDefaultPlatformMixin() {
virtual ~WithDefaultPlatformMixin() {
CHECK_NOT_NULL(platform_.get());
v8::V8::Dispose();
v8::V8::DisposePlatform();
@@ -56,21 +57,23 @@ class WithDefaultPlatformMixin : public TMixin {
std::unique_ptr<v8::Platform> platform_;
};
template <typename TMixin>
class WithJSSharedMemoryFeatureFlagsMixin : public TMixin {
public:
WithJSSharedMemoryFeatureFlagsMixin() { i::FLAG_harmony_struct = true; }
};
using CounterMap = std::map<std::string, int>;
enum CountersMode { kNoCounters, kEnableCounters };
enum IsolateSharedMode { kStandaloneIsolate, kSharedIsolate, kClientIsolate };
// RAII-like Isolate instance wrapper.
//
// It is the caller's responsibility to ensure that the shared Isolate outlives
// all client Isolates.
class IsolateWrapper final {
public:
IsolateWrapper(CountersMode counters_mode,
IsolateSharedMode shared_mode = kStandaloneIsolate,
v8::Isolate* shared_isolate_if_client = nullptr);
explicit IsolateWrapper(CountersMode counters_mode);
~IsolateWrapper();
IsolateWrapper(const IsolateWrapper&) = delete;
@@ -90,7 +93,7 @@ class IsolateWrapper final {
template <typename TMixin, CountersMode kCountersMode = kNoCounters>
class WithIsolateMixin : public TMixin {
public:
WithIsolateMixin() : isolate_wrapper_(kCountersMode, kStandaloneIsolate) {}
WithIsolateMixin() : isolate_wrapper_(kCountersMode) {}
v8::Isolate* v8_isolate() const { return isolate_wrapper_.isolate(); }
@@ -98,37 +101,6 @@ class WithIsolateMixin : public TMixin {
v8::IsolateWrapper isolate_wrapper_;
};
// Warning: This is not a drop-in replacement for WithIsolateMixin!
//
// Users of WithMaybeSharedIsolateMixin, including TEST_F tests and classes that
// mix this class in, must explicitly check IsJSSharedMemorySupported() before
// calling v8_isolate(). Creating shared Isolates is not supported on all build
// configurations.
template <typename TMixin, CountersMode kCountersMode = kNoCounters>
class WithMaybeSharedIsolateMixin : public TMixin {
public:
WithMaybeSharedIsolateMixin() {
if (IsJSSharedMemorySupported()) {
isolate_wrapper_.emplace(kCountersMode, kSharedIsolate);
}
}
bool IsJSSharedMemorySupported() const {
DCHECK_IMPLIES(
internal::ReadOnlyHeap::IsReadOnlySpaceShared(),
!COMPRESS_POINTERS_BOOL || COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL);
return internal::ReadOnlyHeap::IsReadOnlySpaceShared();
}
v8::Isolate* v8_isolate() const {
DCHECK(IsJSSharedMemorySupported());
return isolate_wrapper_->isolate();
}
private:
base::Optional<v8::IsolateWrapper> isolate_wrapper_;
};
template <typename TMixin>
class WithIsolateScopeMixin : public TMixin {
public:
@@ -440,10 +412,25 @@ using TestWithNativeContextAndZone = //
WithDefaultPlatformMixin< //
::testing::Test>>>>>>;
using TestWithSharedIsolate = //
WithMaybeSharedIsolateMixin< //
WithDefaultPlatformMixin<::testing::Test>, //
kNoCounters>;
using TestJSSharedMemoryWithPlatform = //
WithDefaultPlatformMixin< //
WithJSSharedMemoryFeatureFlagsMixin< //
::testing::Test>>;
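// Note: WithJSSharedMemoryFeatureFlagsMixin is the base-most mixin here, so
// its constructor sets FLAG_harmony_struct before WithDefaultPlatformMixin's
// constructor runs v8::V8::Initialize().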
// Using this will FATAL when !V8_CAN_CREATE_SHARED_HEAP_BOOL
using TestJSSharedMemoryWithIsolate = //
WithInternalIsolateMixin< //
WithIsolateScopeMixin< //
WithIsolateMixin< //
TestJSSharedMemoryWithPlatform>>>;
// Using this will FATAL when !V8_CAN_CREATE_SHARED_HEAP_BOOL
using TestJSSharedMemoryWithNativeContext = //
WithInternalIsolateMixin< //
WithContextMixin< //
WithIsolateScopeMixin< //
WithIsolateMixin< //
TestJSSharedMemoryWithPlatform>>>>;
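For reference, a test written against these aliases follows the same shape as the JSAtomics tests earlier in this CL. The sketch below is hypothetical (suite and test names are invented) and uses only factory calls that already appear in this change.
// Hypothetical usage sketch; not part of this diff.
using HypotheticalJSSharedMemoryTest = TestJSSharedMemoryWithNativeContext;
TEST_F(HypotheticalJSSharedMemoryTest, CreatesSharedPrimitives) {
  Isolate* isolate = i_isolate();
  HandleScope handle_scope(isolate);
  Handle<JSAtomicsMutex> mutex = isolate->factory()->NewJSAtomicsMutex();
  Handle<JSAtomicsCondition> condition =
      isolate->factory()->NewJSAtomicsCondition();
  CHECK(!mutex.is_null());
  CHECK(!condition.is_null());
}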
class V8_NODISCARD SaveFlags {
public:
@@ -546,6 +533,20 @@ Handle<FeedbackVector> NewFeedbackVector(Isolate* isolate, Spec* spec) {
return FeedbackVector::New(isolate, shared, closure_feedback_cell_array,
&is_compiled_scope);
}
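// A base::Thread whose plain Join() is private; callers go through
// ParkedJoin() and must already hold a ParkedScope, so the join blocks while
// the joining thread is parked.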
class ParkingThread : public v8::base::Thread {
public:
explicit ParkingThread(const Options& options) : v8::base::Thread(options) {}
void ParkedJoin(const ParkedScope& scope) {
USE(scope);
Join();
}
private:
using v8::base::Thread::Join;
};
#ifdef V8_CC_GNU
#if V8_HOST_ARCH_X64