Revert "[heap] Attach to shared isolate after setting up main thread"

This reverts commit 929b83fb7b.

Reason for revert: https://ci.chromium.org/ui/p/v8/builders/ci/V8%20Linux64%20UBSan/18725/overview

Original change's description:
> [heap] Attach to shared isolate after setting up main thread
>
> Attach to the shared isolate after the main thread was set up. Otherwise
> it could happen that a shared GC initiated from another isolate might
> see no threads are running and performs the safepoint operation in the
> middle of isolate deserialization.
>
> We use DisallowSafepoints to check that the isolate doesn't join a
> global safepoint before deserialization is complete. DisallowSafepoints
> used to prevent only invocations of Safepoint() but was updated to
> also prevent Park() and Unpark() invocations. Each state change could
> cause the thread to reach a safepoint, which would allow a shared GC
> to run.
>
> We now also DCHECK that every isolate has at least one local heap and
> that shared collections aren't started before deserialization is
> complete.
>
> Bug: v8:11708
> Change-Id: Iba3fb59dd951d5ee4fc9934158062287302fc279
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3221157
> Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
> Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
> Reviewed-by: Shu-yu Guo <syg@chromium.org>
> Reviewed-by: Jakob Gruber <jgruber@chromium.org>
> Cr-Commit-Position: refs/heads/main@{#77424}

Bug: v8:11708
Change-Id: I0633150b6b40b297a335a39bf1a087ca93592e04
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3225937
Reviewed-by: Nico Hartmann <nicohartmann@chromium.org>
Bot-Commit: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Owners-Override: Nico Hartmann <nicohartmann@chromium.org>
Commit-Queue: Nico Hartmann <nicohartmann@chromium.org>
Cr-Commit-Position: refs/heads/main@{#77425}
This commit is contained in:
Nico Hartmann 2021-10-18 08:44:08 +00:00 committed by V8 LUCI CQ
parent 929b83fb7b
commit e784bf6180
9 changed files with 77 additions and 129 deletions

View File

@ -8481,7 +8481,7 @@ void Isolate::Initialize(Isolate* isolate,
}
if (params.experimental_attach_to_shared_isolate != nullptr) {
i_isolate->set_shared_isolate(reinterpret_cast<i::Isolate*>(
i_isolate->AttachToSharedIsolate(reinterpret_cast<i::Isolate*>(
params.experimental_attach_to_shared_isolate));
}

View File

@ -3125,7 +3125,6 @@ bool Isolate::LogObjectRelocation() {
void Isolate::Deinit() {
TRACE_ISOLATE(deinit);
DisallowHeapAllocation no_allocation;
tracing_cpu_profiler_.reset();
if (FLAG_stress_sampling_allocation_profiler > 0) {
@ -3211,7 +3210,9 @@ void Isolate::Deinit() {
main_thread_local_isolate_->heap()->FreeLinearAllocationArea();
DetachFromSharedIsolate();
if (shared_isolate_) {
DetachFromSharedIsolate();
}
heap_.TearDown();
@ -3658,6 +3659,12 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
date_cache_ = new DateCache();
heap_profiler_ = new HeapProfiler(heap());
interpreter_ = new interpreter::Interpreter(this);
if (OwnsStringTable()) {
string_table_ = std::make_shared<StringTable>(this);
} else {
DCHECK_NOT_NULL(shared_isolate_);
string_table_ = shared_isolate_->string_table_;
}
bigint_processor_ = bigint::Processor::New(new BigIntPlatform(this));
compiler_dispatcher_ = new LazyCompileDispatcher(
@ -3677,33 +3684,12 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
stack_guard()->InitThread(lock);
}
// Create LocalIsolate/LocalHeap for the main thread and set state to Running.
main_thread_local_isolate_.reset(new LocalIsolate(this, ThreadKind::kMain));
main_thread_local_heap()->Unpark();
// The main thread LocalHeap needs to be set up when attaching to the shared
// isolate. Otherwise a global safepoint would find an isolate without
// LocalHeaps and not wait until this thread is ready for a GC.
AttachToSharedIsolate();
// We need to ensure that we do not let a shared GC run before this isolate is
// fully set up.
DisallowSafepoints no_shared_gc;
// SetUp the object heap.
DCHECK(!heap_.HasBeenSetUp());
heap_.SetUp(main_thread_local_heap());
heap_.SetUp();
ReadOnlyHeap::SetUp(this, read_only_snapshot_data, can_rehash);
heap_.SetUpSpaces();
if (OwnsStringTable()) {
string_table_ = std::make_shared<StringTable>(this);
} else {
// Only refer to shared string table after attaching to the shared isolate.
DCHECK_NOT_NULL(shared_isolate());
string_table_ = shared_isolate()->string_table_;
}
if (V8_SHORT_BUILTIN_CALLS_BOOL && FLAG_short_builtin_calls) {
// Check if the system has more than 4GB of physical memory by comparing the
// old space size with respective threshold value.
@ -3728,6 +3714,14 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
}
}
// Create LocalIsolate/LocalHeap for the main thread and set state to Running.
main_thread_local_isolate_.reset(new LocalIsolate(this, ThreadKind::kMain));
main_thread_local_heap()->Unpark();
heap_.InitializeMainThreadLocalHeap(main_thread_local_heap());
if (shared_isolate_) heap()->InitSharedSpaces();
isolate_data_.external_reference_table()->Init(this);
#if V8_ENABLE_WEBASSEMBLY
@ -5169,30 +5163,19 @@ Address Isolate::store_to_stack_count_address(const char* function_name) {
return reinterpret_cast<Address>(&map[name].second);
}
void Isolate::AttachToSharedIsolate() {
DCHECK(!attached_to_shared_isolate_);
if (shared_isolate_) {
DCHECK(shared_isolate_->is_shared());
shared_isolate_->AppendAsClientIsolate(this);
}
#if DEBUG
attached_to_shared_isolate_ = true;
#endif // DEBUG
void Isolate::AttachToSharedIsolate(Isolate* shared) {
DCHECK(shared->is_shared());
DCHECK_NULL(shared_isolate_);
DCHECK(!heap_.HasBeenSetUp());
shared->AppendAsClientIsolate(this);
shared_isolate_ = shared;
}
void Isolate::DetachFromSharedIsolate() {
DCHECK(attached_to_shared_isolate_);
if (shared_isolate_) {
shared_isolate_->RemoveAsClientIsolate(this);
shared_isolate_ = nullptr;
}
#if DEBUG
attached_to_shared_isolate_ = false;
#endif // DEBUG
DCHECK_NOT_NULL(shared_isolate_);
shared_isolate_->RemoveAsClientIsolate(this);
shared_isolate_ = nullptr;
heap()->DeinitSharedSpaces();
}
void Isolate::AppendAsClientIsolate(Isolate* client) {

View File

@ -1822,18 +1822,11 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
using IsDebugActive = HasAsyncEventDelegate::Next<bool, 1>;
};
bool is_shared() const { return is_shared_; }
Isolate* shared_isolate() const {
DCHECK(attached_to_shared_isolate_);
return shared_isolate_;
}
bool is_shared() { return is_shared_; }
Isolate* shared_isolate() { return shared_isolate_; }
void set_shared_isolate(Isolate* shared_isolate) {
DCHECK(shared_isolate->is_shared());
DCHECK_NULL(shared_isolate_);
DCHECK(!attached_to_shared_isolate_);
shared_isolate_ = shared_isolate;
}
void AttachToSharedIsolate(Isolate* shared);
void DetachFromSharedIsolate();
bool HasClientIsolates() const { return client_isolate_head_; }
@ -1969,12 +1962,6 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// Returns the Exception sentinel.
Object ThrowInternal(Object exception, MessageLocation* location);
// These methods add/remove the isolate to/from the list of clients in the
// shared isolate. Isolates in the client list need to participate in a global
// safepoint.
void AttachToSharedIsolate();
void DetachFromSharedIsolate();
// Methods for appending and removing to/from client isolates list.
void AppendAsClientIsolate(Isolate* client);
void RemoveAsClientIsolate(Isolate* client);
@ -2270,13 +2257,6 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// isolates or when no shared isolate is used.
Isolate* shared_isolate_ = nullptr;
#if DEBUG
// Set to true once during isolate initialization right when attaching to the
// shared isolate. If there was no shared isolate given it will still be set
// to true. After this point invocations of shared_isolate() are valid.
bool attached_to_shared_isolate_ = false;
#endif // DEBUG
// A shared isolate will use these two fields to track all its client
// isolates.
base::Mutex client_isolate_mutex_;

View File

@ -2240,7 +2240,6 @@ size_t Heap::PerformGarbageCollection(
}
void Heap::CollectSharedGarbage(GarbageCollectionReason gc_reason) {
CHECK(deserialization_complete());
DCHECK(!IsShared());
DCHECK_NOT_NULL(isolate()->shared_isolate());
@ -2267,7 +2266,6 @@ void Heap::PerformSharedGarbageCollection(Isolate* initiator,
: IsolateSafepoint::StopMainThread::kYes;
client_heap->safepoint()->EnterSafepointScope(stop_main_thread);
DCHECK(client_heap->deserialization_complete());
client_heap->shared_old_allocator_->FreeLinearAllocationArea();
client_heap->shared_map_allocator_->FreeLinearAllocationArea();
@ -5505,10 +5503,7 @@ HeapObject Heap::AllocateRawWithRetryOrFailSlowPath(
FatalProcessOutOfMemory("CALL_AND_RETRY_LAST");
}
void Heap::SetUp(LocalHeap* main_thread_local_heap) {
DCHECK_NULL(main_thread_local_heap_);
main_thread_local_heap_ = main_thread_local_heap;
void Heap::SetUp() {
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
allocation_timeout_ = NextAllocationTimeout();
#endif
@ -5701,18 +5696,11 @@ void Heap::SetUpSpaces() {
}
write_protect_code_memory_ = FLAG_write_protect_code_memory;
}
if (isolate()->shared_isolate()) {
shared_old_space_ = isolate()->shared_isolate()->heap()->old_space();
shared_old_allocator_.reset(
new ConcurrentAllocator(main_thread_local_heap(), shared_old_space_));
shared_map_space_ = isolate()->shared_isolate()->heap()->map_space();
shared_map_allocator_.reset(
new ConcurrentAllocator(main_thread_local_heap(), shared_map_space_));
}
main_thread_local_heap()->SetUpAllocators();
void Heap::InitializeMainThreadLocalHeap(LocalHeap* main_thread_local_heap) {
DCHECK_NULL(main_thread_local_heap_);
main_thread_local_heap_ = main_thread_local_heap;
}
void Heap::InitializeHashSeed() {
@ -5995,12 +5983,6 @@ void Heap::TearDown() {
allocation_sites_to_pretenure_.reset();
shared_old_space_ = nullptr;
shared_old_allocator_.reset();
shared_map_space_ = nullptr;
shared_map_allocator_.reset();
for (int i = FIRST_MUTABLE_SPACE; i <= LAST_MUTABLE_SPACE; i++) {
delete space_[i];
space_[i] = nullptr;
@ -6022,6 +6004,24 @@ void Heap::TearDown() {
memory_allocator_.reset();
}
void Heap::InitSharedSpaces() {
shared_old_space_ = isolate()->shared_isolate()->heap()->old_space();
shared_old_allocator_.reset(
new ConcurrentAllocator(main_thread_local_heap(), shared_old_space_));
shared_map_space_ = isolate()->shared_isolate()->heap()->map_space();
shared_map_allocator_.reset(
new ConcurrentAllocator(main_thread_local_heap(), shared_map_space_));
}
void Heap::DeinitSharedSpaces() {
shared_old_space_ = nullptr;
shared_old_allocator_.reset();
shared_map_space_ = nullptr;
shared_map_allocator_.reset();
}
void Heap::AddGCPrologueCallback(v8::Isolate::GCCallbackWithData callback,
GCType gc_type, void* data) {
DCHECK_NOT_NULL(callback);

View File

@ -840,7 +840,7 @@ class Heap {
void ConfigureHeapDefault();
// Prepares the heap, setting up for deserialization.
void SetUp(LocalHeap* main_thread_local_heap);
void SetUp();
// Sets read-only heap and space.
void SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap);
@ -872,6 +872,12 @@ class Heap {
// Returns whether SetUp has been called.
bool HasBeenSetUp() const;
// Initializes shared spaces.
void InitSharedSpaces();
// Removes shared spaces again.
void DeinitSharedSpaces();
// ===========================================================================
// Getters for spaces. =======================================================
// ===========================================================================

View File

@ -13,7 +13,6 @@
#include "src/execution/isolate.h"
#include "src/handles/local-handles.h"
#include "src/heap/collection-barrier.h"
#include "src/heap/concurrent-allocator.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap-write-barrier.h"
@ -53,7 +52,8 @@ LocalHeap::LocalHeap(Heap* heap, ThreadKind kind,
next_(nullptr),
handles_(new LocalHandles),
persistent_handles_(std::move(persistent_handles)),
marking_barrier_(new MarkingBarrier(this)) {
marking_barrier_(new MarkingBarrier(this)),
old_space_allocator_(this, heap->old_space()) {
heap_->safepoint()->AddLocalHeap(this, [this] {
if (!is_main_thread()) {
WriteBarrier::SetForThread(marking_barrier_.get());
@ -69,8 +69,6 @@ LocalHeap::LocalHeap(Heap* heap, ThreadKind kind,
}
DCHECK_NULL(current_local_heap);
if (!is_main_thread()) current_local_heap = this;
SetUpAllocators();
}
LocalHeap::~LocalHeap() {
@ -78,7 +76,7 @@ LocalHeap::~LocalHeap() {
EnsureParkedBeforeDestruction();
heap_->safepoint()->RemoveLocalHeap(this, [this] {
old_space_allocator_->FreeLinearAllocationArea();
old_space_allocator_.FreeLinearAllocationArea();
if (!is_main_thread()) {
marking_barrier_->Publish();
@ -94,19 +92,6 @@ LocalHeap::~LocalHeap() {
DCHECK(gc_epilogue_callbacks_.empty());
}
void LocalHeap::SetUpAllocators() {
DCHECK_NULL(old_space_allocator_);
if (!heap_->old_space()) {
DCHECK(is_main_thread());
DCHECK(!heap_->deserialization_complete());
return;
}
old_space_allocator_ =
std::make_unique<ConcurrentAllocator>(this, heap_->old_space());
}
void LocalHeap::EnsurePersistentHandles() {
if (!persistent_handles_) {
persistent_handles_.reset(
@ -240,19 +225,19 @@ void LocalHeap::SafepointSlowPath() {
}
void LocalHeap::FreeLinearAllocationArea() {
old_space_allocator_->FreeLinearAllocationArea();
old_space_allocator_.FreeLinearAllocationArea();
}
void LocalHeap::MakeLinearAllocationAreaIterable() {
old_space_allocator_->MakeLinearAllocationAreaIterable();
old_space_allocator_.MakeLinearAllocationAreaIterable();
}
void LocalHeap::MarkLinearAllocationAreaBlack() {
old_space_allocator_->MarkLinearAllocationAreaBlack();
old_space_allocator_.MarkLinearAllocationAreaBlack();
}
void LocalHeap::UnmarkLinearAllocationArea() {
old_space_allocator_->UnmarkLinearAllocationArea();
old_space_allocator_.UnmarkLinearAllocationArea();
}
bool LocalHeap::TryPerformCollection() {

View File

@ -93,9 +93,7 @@ class V8_EXPORT_PRIVATE LocalHeap {
Heap* AsHeap() { return heap(); }
MarkingBarrier* marking_barrier() { return marking_barrier_.get(); }
ConcurrentAllocator* old_space_allocator() {
return old_space_allocator_.get();
}
ConcurrentAllocator* old_space_allocator() { return &old_space_allocator_; }
// Mark/Unmark linear allocation areas black. Used for black allocation.
void MarkLinearAllocationAreaBlack();
@ -249,7 +247,7 @@ class V8_EXPORT_PRIVATE LocalHeap {
AllocationAlignment alignment);
void Park() {
DCHECK(AllowSafepoints::IsAllowed());
DCHECK(AllowGarbageCollection::IsAllowed());
ThreadState expected = ThreadState::Running();
if (!state_.CompareExchangeWeak(expected, ThreadState::Parked())) {
ParkSlowPath();
@ -257,7 +255,7 @@ class V8_EXPORT_PRIVATE LocalHeap {
}
void Unpark() {
DCHECK(AllowSafepoints::IsAllowed());
DCHECK(AllowGarbageCollection::IsAllowed());
ThreadState expected = ThreadState::Parked();
if (!state_.CompareExchangeWeak(expected, ThreadState::Running())) {
UnparkSlowPath();
@ -273,8 +271,6 @@ class V8_EXPORT_PRIVATE LocalHeap {
void InvokeGCEpilogueCallbacksInSafepoint();
void SetUpAllocators();
Heap* heap_;
bool is_main_thread_;
@ -292,7 +288,7 @@ class V8_EXPORT_PRIVATE LocalHeap {
std::vector<std::pair<GCEpilogueCallback*, void*>> gc_epilogue_callbacks_;
std::unique_ptr<ConcurrentAllocator> old_space_allocator_;
ConcurrentAllocator old_space_allocator_;
friend class CollectionBarrier;
friend class ConcurrentAllocator;

View File

@ -39,9 +39,6 @@ void IsolateSafepoint::EnterSafepointScope(StopMainThread stop_main_thread) {
int running = 0;
// There needs to be at least one LocalHeap for the main thread.
DCHECK_NOT_NULL(local_heaps_head_);
for (LocalHeap* local_heap = local_heaps_head_; local_heap;
local_heap = local_heap->next_) {
if (local_heap->is_main_thread() &&

View File

@ -159,14 +159,15 @@ class TestSerializer {
SnapshotData shared_space_snapshot(blobs.shared_space);
const bool kEnableSerializer = false;
const bool kGenerateHeap = false;
CHECK_IMPLIES(is_shared, !shared_isolate);
if (is_shared) CHECK_NULL(shared_isolate);
v8::Isolate* v8_isolate =
NewIsolate(kEnableSerializer, kGenerateHeap, is_shared);
v8::Isolate::Scope isolate_scope(v8_isolate);
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
if (shared_isolate) {
CHECK(!is_shared);
isolate->set_shared_isolate(reinterpret_cast<Isolate*>(shared_isolate));
isolate->AttachToSharedIsolate(
reinterpret_cast<Isolate*>(shared_isolate));
}
isolate->Init(&startup_snapshot, &read_only_snapshot,
&shared_space_snapshot, false);