Revert "[heap] Support multiple clients in shared GC"
This reverts commit 90a9d6cb13.
Reason for revert: Seems to make some tests fail flakily. Revert for now until this is fixed.
Original change's description:
> [heap] Support multiple clients in shared GC
>
> Add support for safepointing multiple isolates as described in the
> design doc (link is below). A safepoint across multiple isolates is
> considered a global safepoint to distinguish it from regular safepoints.
>
> The basic idea behind the implementation is that we reach a
> safepoint for each client. What's new is that now also main threads
> need to participate in the safepointing protocol and need to give up
> control in time. The slow paths of Park(), Unpark() and Safepoint() on
> the main thread need to be adjusted for this reason as well.
>
> This CL introduces GlobalSafepoint and GlobalSafepointScope to mirror
> IsolateSafepoint and IsolateSafepointScope.
>
> This CL adds the type IgnoreLocalGCRequests, it is used to prevent
> Park() and Unpark() from honoring the request from background threads
> to perform a local GC. This is used heap-internally to not have GCs
> (or even nested GCs) in certain locations. E.g. when initiating a
> safepoint to perform a GC we don't want a "recursive" GC to occur.
>
> Design doc: https://docs.google.com/document/d/1y6C9zAACEr0sBYMIYk3YpXosnkF3Ak4CEuWJu1-3zXs/edit?usp=sharing
>
> Bug: v8:11708
> Change-Id: I5aca8f5f24873279271a53be3bb093fc92a1a1eb
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3009224
> Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
> Reviewed-by: Jakob Gruber <jgruber@chromium.org>
> Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
> Cr-Commit-Position: refs/heads/main@{#77812}
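
For readers skimming the revert: the IgnoreLocalGCRequests scope that the
reverted CL added (and that this revert deletes again) is a small RAII depth
counter. Below is a condensed, self-contained sketch assembled from the
heap.h and heap-inl.h hunks in the diff; the bare Heap struct is a stand-in
for illustration, not the real class:

  #include <cassert>

  struct Heap {
    int ignore_local_gc_requests_depth_ = 0;
    bool ignore_local_gc_requests() const {
      return ignore_local_gc_requests_depth_ > 0;
    }
  };

  class IgnoreLocalGCRequests {
   public:
    explicit IgnoreLocalGCRequests(Heap* heap) : heap_(heap) {
      // Entering the scope suppresses background GC requests; a depth
      // counter (rather than a bool) makes nested scopes safe.
      heap_->ignore_local_gc_requests_depth_++;
    }
    ~IgnoreLocalGCRequests() {
      assert(heap_->ignore_local_gc_requests_depth_ > 0);
      heap_->ignore_local_gc_requests_depth_--;
    }

   private:
    Heap* heap_;
  };

While such a scope is active, the Park()/Unpark() slow paths check
heap()->ignore_local_gc_requests() and skip CollectGarbageForBackground(),
which is how the CL avoided "recursive" GCs when initiating a safepoint.
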
# Not skipping CQ checks because original CL landed > 1 day ago.
Bug: v8:11708
Change-Id: I85fbf896c59492fc571b3bfaa7f9e3ea8a883260
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3275552
Auto-Submit: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Bot-Commit: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/main@{#77845}
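
For context on the protocol being reverted, here is a minimal sketch (not
the V8 API; the names and bodies are simplified stand-ins) of the two-level
safepoint the CL introduced: the shared isolate first locks out client
attach/detach, then brings every client isolate to its own local safepoint,
mirroring GlobalSafepoint::EnterGlobalSafepointScope in the diff below:

  #include <mutex>
  #include <vector>

  // Hypothetical stand-in for a client isolate's per-isolate safepoint.
  struct ClientIsolate {
    void StopThreadsAtSafepoint() { /* arm barrier, wait for all threads */ }
    void ResumeThreads() { /* disarm barrier, wake all threads */ }
  };

  class GlobalSafepointSketch {
   public:
    void EnterGlobalSafepointScope() {
      // No client may attach to or detach from the shared heap mid-GC.
      clients_mutex_.lock();
      for (ClientIsolate* client : clients_) {
        client->StopThreadsAtSafepoint();  // per-isolate safepoint
      }
    }
    void LeaveGlobalSafepointScope() {
      for (ClientIsolate* client : clients_) {
        client->ResumeThreads();
      }
      clients_mutex_.unlock();
    }

   private:
    std::mutex clients_mutex_;
    std::vector<ClientIsolate*> clients_;
  };

The detail the CL layered on top, and the part this revert removes, is that
client main threads also participate: they must give up control and reach
Park()/Safepoint() in time, which is why the slow paths were extended.
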
Parent: c26cdb40b2
Commit: 2f98fb28d6
@@ -56,9 +56,6 @@
 #include "src/handles/global-handles-inl.h"
-#include "src/handles/persistent-handles.h"
 #include "src/heap/heap-inl.h"
 #include "src/heap/heap.h"
-#include "src/heap/local-heap.h"
-#include "src/heap/parked-scope.h"
 #include "src/heap/read-only-heap.h"
 #include "src/heap/safepoint.h"
 #include "src/ic/stub-cache.h"
@@ -3209,18 +3206,7 @@ void Isolate::Deinit() {
   // This stops cancelable tasks (i.e. concurrent marking tasks).
   // Stop concurrent tasks before destroying resources since they might still
   // use those.
-  {
-    IgnoreLocalGCRequests ignore_gc_requests(heap());
-    ParkedScope parked_scope(main_thread_local_heap());
-    cancelable_task_manager()->CancelAndWait();
-  }
-
-  {
-    // This isolate might have to park for a shared GC initiated by another
-    // client isolate before it can actually detach from the shared isolate.
-    AllowGarbageCollection allow_shared_gc;
-    DetachFromSharedIsolate();
-  }
+  cancelable_task_manager()->CancelAndWait();
 
   ReleaseSharedPtrs();
 
@@ -3248,6 +3234,10 @@ void Isolate::Deinit() {
   // updated anymore.
   DumpAndResetStats();
 
+  main_thread_local_isolate_->heap()->FreeLinearAllocationArea();
+
+  DetachFromSharedIsolate();
+
   heap_.TearDown();
 
   main_thread_local_isolate_.reset();
@@ -3728,11 +3718,7 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
 
   // Create LocalIsolate/LocalHeap for the main thread and set state to Running.
   main_thread_local_isolate_.reset(new LocalIsolate(this, ThreadKind::kMain));
-
-  {
-    IgnoreLocalGCRequests ignore_gc_requests(heap());
-    main_thread_local_heap()->Unpark();
-  }
+  main_thread_local_heap()->Unpark();
 
   // The main thread LocalHeap needs to be set up when attaching to the shared
   // isolate. Otherwise a global safepoint would find an isolate without
@@ -22,17 +22,14 @@ bool CollectionBarrier::WasGCRequested() {
   return collection_requested_.load();
 }
 
-bool CollectionBarrier::TryRequestGC() {
+void CollectionBarrier::RequestGC() {
   base::MutexGuard guard(&mutex_);
-  if (shutdown_requested_) return false;
   bool was_already_requested = collection_requested_.exchange(true);
 
   if (!was_already_requested) {
     CHECK(!timer_.IsStarted());
     timer_.Start();
   }
-
-  return true;
 }
 
 class BackgroundCollectionInterruptTask : public CancelableTask {
@@ -62,19 +59,8 @@ void CollectionBarrier::NotifyShutdownRequested() {
 
 void CollectionBarrier::ResumeThreadsAwaitingCollection() {
   base::MutexGuard guard(&mutex_);
-  DCHECK(!timer_.IsStarted());
   collection_requested_.store(false);
   block_for_collection_ = false;
-  collection_performed_ = true;
   cv_wakeup_.NotifyAll();
 }
-
-void CollectionBarrier::CancelCollectionAndResumeThreads() {
-  base::MutexGuard guard(&mutex_);
-  if (timer_.IsStarted()) timer_.Stop();
-  collection_requested_.store(false);
-  block_for_collection_ = false;
-  collection_performed_ = false;
-  cv_wakeup_.NotifyAll();
-}
 
@@ -86,10 +72,6 @@ bool CollectionBarrier::AwaitCollectionBackground(LocalHeap* local_heap) {
     // set before the next GC.
     base::MutexGuard guard(&mutex_);
-    if (shutdown_requested_) return false;
-
-    // Collection was cancelled by the main thread.
-    if (!collection_requested_.load()) return false;
 
     first_thread = !block_for_collection_;
     block_for_collection_ = true;
     CHECK(timer_.IsStarted());
@@ -106,8 +88,7 @@ bool CollectionBarrier::AwaitCollectionBackground(LocalHeap* local_heap) {
     cv_wakeup_.Wait(&mutex_);
   }
 
-  // Collection may have been cancelled while blocking for it.
-  return collection_performed_;
+  return true;
 }
 
 void CollectionBarrier::ActivateStackGuardAndPostTask() {
@@ -27,10 +27,8 @@ class CollectionBarrier {
   // Returns true when collection was requested.
   bool WasGCRequested();
 
-  // Requests a GC from the main thread. Returns whether GC was successfully
-  // requested. Requesting a GC can fail when isolate shutdown was already
-  // initiated.
-  bool TryRequestGC();
+  // Requests a GC from the main thread.
+  void RequestGC();
 
   // Resumes all threads waiting for GC when tear down starts.
   void NotifyShutdownRequested();
@@ -41,11 +39,7 @@ class CollectionBarrier {
   // Resumes threads waiting for collection.
   void ResumeThreadsAwaitingCollection();
 
-  // Cancels collection if one was requested and resumes threads waiting for GC.
-  void CancelCollectionAndResumeThreads();
-
   // This is the method use by background threads to request and wait for GC.
-  // Returns whether a GC was performed.
   bool AwaitCollectionBackground(LocalHeap* local_heap);
 
  private:
@@ -56,21 +50,8 @@ class CollectionBarrier {
   base::Mutex mutex_;
   base::ConditionVariable cv_wakeup_;
   base::ElapsedTimer timer_;
 
-  // Flag that main thread checks whether a GC was requested from the background
-  // thread.
   std::atomic<bool> collection_requested_{false};
-
-  // This flag is used to detect whether to block for the GC. Only set if the
-  // main thread was actually running and is unset when GC resumes background
-  // threads.
   bool block_for_collection_ = false;
-
-  // Set to true when a GC was performed, false in case it was canceled because
-  // the main thread parked itself without running the GC.
-  bool collection_performed_ = false;
-
-  // Will be set as soon as Isolate starts tear down.
-  bool shutdown_requested_ = false;
 };
 
@@ -124,7 +124,7 @@ bool ConcurrentAllocator::EnsureLab(AllocationOrigin origin) {
       local_heap_, kLabSize, kMaxLabSize, kWordAligned, origin);
   if (!result) return false;
 
-  if (IsBlackAllocationEnabled()) {
+  if (local_heap_->heap()->incremental_marking()->black_allocation()) {
     Address top = result->first;
     Address limit = top + result->second;
     Page::FromAllocationAreaAddress(top)->CreateBlackAreaBackground(top, limit);
@@ -149,19 +149,13 @@ AllocationResult ConcurrentAllocator::AllocateOutsideLab(
 
   HeapObject object = HeapObject::FromAddress(result->first);
 
-  if (IsBlackAllocationEnabled()) {
-    owning_heap()->incremental_marking()->MarkBlackBackground(object,
-                                                              object_size);
+  if (local_heap_->heap()->incremental_marking()->black_allocation()) {
+    local_heap_->heap()->incremental_marking()->MarkBlackBackground(
+        object, object_size);
   }
 
   return AllocationResult(object);
 }
 
-bool ConcurrentAllocator::IsBlackAllocationEnabled() const {
-  return owning_heap()->incremental_marking()->black_allocation();
-}
-
-Heap* ConcurrentAllocator::owning_heap() const { return space_->heap(); }
-
 }  // namespace internal
 }  // namespace v8
@@ -63,12 +63,6 @@ class ConcurrentAllocator {
   V8_EXPORT_PRIVATE AllocationResult AllocateOutsideLab(
       int object_size, AllocationAlignment alignment, AllocationOrigin origin);
 
-  bool IsBlackAllocationEnabled() const;
-
-  // Returns the Heap of space_. This might differ from the LocalHeap's Heap for
-  // shared spaces.
-  Heap* owning_heap() const;
-
   LocalHeap* const local_heap_;
   PagedSpace* const space_;
   LocalAllocationBuffer lab_;
@@ -563,12 +563,11 @@ void GCTracer::Print() const {
   Output(
       "[%d:%p] "
       "%8.0f ms: "
-      "%s%s%s %.1f (%.1f) -> %.1f (%.1f) MB, "
+      "%s%s %.1f (%.1f) -> %.1f (%.1f) MB, "
       "%.1f / %.1f ms %s (average mu = %.3f, current mu = %.3f) %s %s\n",
       base::OS::GetCurrentProcessId(),
       reinterpret_cast<void*>(heap_->isolate()),
-      heap_->isolate()->time_millis_since_init(),
-      heap_->IsShared() ? "Shared " : "", current_.TypeName(false),
+      heap_->isolate()->time_millis_since_init(), current_.TypeName(false),
       current_.reduce_memory ? " (reduce)" : "",
       static_cast<double>(current_.start_object_size) / MB,
       static_cast<double>(current_.start_memory_size) / MB,
@@ -867,16 +867,6 @@ CodePageMemoryModificationScope::~CodePageMemoryModificationScope() {
   }
 }
 
-IgnoreLocalGCRequests::IgnoreLocalGCRequests(Heap* heap) : heap_(heap) {
-  DCHECK_EQ(ThreadId::Current(), heap_->isolate()->thread_id());
-  heap_->ignore_local_gc_requests_depth_++;
-}
-
-IgnoreLocalGCRequests::~IgnoreLocalGCRequests() {
-  DCHECK_GT(heap_->ignore_local_gc_requests_depth_, 0);
-  heap_->ignore_local_gc_requests_depth_--;
-}
-
 }  // namespace internal
 }  // namespace v8
 
@@ -67,7 +67,6 @@
 #include "src/heap/objects-visiting-inl.h"
 #include "src/heap/objects-visiting.h"
 #include "src/heap/paged-spaces-inl.h"
-#include "src/heap/parked-scope.h"
 #include "src/heap/read-only-heap.h"
 #include "src/heap/remembered-set.h"
 #include "src/heap/safepoint.h"
@@ -1924,7 +1923,6 @@ void Heap::StartIncrementalMarking(int gc_flags,
     CppHeap::From(cpp_heap())->FinishSweepingIfRunning();
   }
 
-  IgnoreLocalGCRequests ignore_gc_requests(this);
   SafepointScope safepoint(this);
 
 #ifdef DEBUG
@@ -2158,13 +2156,7 @@ size_t Heap::PerformGarbageCollection(
 
   TRACE_GC_EPOCH(tracer(), CollectorScopeId(collector), ThreadKind::kMain);
 
-  base::Optional<SafepointScope> safepoint_scope;
-
-  {
-    AllowGarbageCollection allow_shared_gc;
-    IgnoreLocalGCRequests ignore_gc_requests(this);
-    safepoint_scope.emplace(this);
-  }
+  SafepointScope safepoint_scope(this);
 
   collection_barrier_->StopTimeToCollectionTimer();
 
@@ -5482,16 +5474,14 @@ HeapObject Heap::AllocateRawWithRetryOrFailSlowPath(
   isolate()->counters()->gc_last_resort_from_handles()->Increment();
   if (IsSharedAllocationType(allocation)) {
     CollectSharedGarbage(GarbageCollectionReason::kLastResort);
-
-    AlwaysAllocateScope scope(isolate()->shared_isolate()->heap());
-    alloc = AllocateRaw(size, allocation, origin, alignment);
   } else {
     CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
-
+  }
+
+  {
     AlwaysAllocateScope scope(this);
     alloc = AllocateRaw(size, allocation, origin, alignment);
   }
 
   if (alloc.To(&result)) {
     DCHECK(result != ReadOnlyRoots(this).exception());
     return result;
@@ -5885,21 +5875,11 @@ void Heap::StartTearDown() {
   // threads finish.
   collection_barrier_->NotifyShutdownRequested();
 
-  // Main thread isn't going to allocate anymore.
-  main_thread_local_heap()->FreeLinearAllocationArea();
-
-  if (isolate()->shared_isolate()) {
-    // Free LABs before detaching from the shared isolate.
-    shared_old_allocator_->FreeLinearAllocationArea();
-    shared_map_allocator_->FreeLinearAllocationArea();
-  }
-
 #ifdef VERIFY_HEAP
   // {StartTearDown} is called fairly early during Isolate teardown, so it's
   // a good time to run heap verification (if requested), before starting to
   // tear down parts of the Isolate.
   if (FLAG_verify_heap) {
-    IgnoreLocalGCRequests ignore_gc_requests(this);
     SafepointScope scope(this);
     Verify();
   }
@@ -687,10 +687,6 @@ class Heap {
   bool IsTearingDown() const { return gc_state() == TEAR_DOWN; }
   bool force_oom() const { return force_oom_; }
 
-  bool ignore_local_gc_requests() const {
-    return ignore_local_gc_requests_depth_ > 0;
-  }
-
   inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }
 
   bool IsGCWithoutStack() const;
@@ -2450,8 +2446,6 @@ class Heap {
 
   std::unique_ptr<CollectionBarrier> collection_barrier_;
 
-  int ignore_local_gc_requests_depth_ = 0;
-
   int gc_callbacks_depth_ = 0;
 
   bool deserialization_complete_ = false;
@@ -2508,7 +2502,6 @@ class Heap {
   friend class GCTracer;
   friend class HeapObjectIterator;
   friend class ScavengeTaskObserver;
-  friend class IgnoreLocalGCRequests;
   friend class IncrementalMarking;
   friend class IncrementalMarkingJob;
   friend class LargeObjectSpace;
@@ -2667,15 +2660,6 @@ class V8_NODISCARD CodePageMemoryModificationScope {
   DISALLOW_GARBAGE_COLLECTION(no_heap_allocation_)
 };
 
-class V8_NODISCARD IgnoreLocalGCRequests {
- public:
-  explicit inline IgnoreLocalGCRequests(Heap* heap);
-  inline ~IgnoreLocalGCRequests();
-
- private:
-  Heap* heap_;
-};
-
 // Visitor class to verify interior pointers in spaces that do not contain
 // or care about intergenerational references. All heap object pointers have to
 // point into the heap to a location that has a map pointer at its first word.
@@ -8,7 +8,6 @@
 #include <memory>
 
 #include "src/base/logging.h"
-#include "src/base/optional.h"
 #include "src/base/platform/mutex.h"
 #include "src/common/globals.h"
 #include "src/execution/isolate.h"
@@ -18,7 +17,6 @@
 #include "src/heap/gc-tracer.h"
 #include "src/heap/heap-inl.h"
 #include "src/heap/heap-write-barrier.h"
 #include "src/heap/heap.h"
 #include "src/heap/local-heap-inl.h"
 #include "src/heap/marking-barrier.h"
-#include "src/heap/parked-scope.h"
@@ -175,42 +173,13 @@ void LocalHeap::ParkSlowPath() {
     DCHECK(current_state.IsRunning());
 
     if (is_main_thread()) {
-      DCHECK(current_state.IsSafepointRequested() ||
-             current_state.IsCollectionRequested());
-
-      if (current_state.IsSafepointRequested()) {
-        ThreadState old_state = state_.SetParked();
-        heap_->safepoint()->NotifyPark();
-        if (old_state.IsCollectionRequested())
-          heap_->collection_barrier_->CancelCollectionAndResumeThreads();
-        return;
-      }
-
-      if (current_state.IsCollectionRequested()) {
-        if (!heap()->ignore_local_gc_requests()) {
-          heap_->CollectGarbageForBackground(this);
-          continue;
-        }
-
-        DCHECK(!current_state.IsSafepointRequested());
-
-        if (state_.CompareExchangeStrong(current_state,
-                                         current_state.SetParked())) {
-          heap_->collection_barrier_->CancelCollectionAndResumeThreads();
-          return;
-        } else {
-          continue;
-        }
-      }
+      DCHECK(current_state.IsCollectionRequested());
+      heap_->CollectGarbageForBackground(this);
     } else {
       DCHECK(current_state.IsSafepointRequested());
       DCHECK(!current_state.IsCollectionRequested());
-
-      ThreadState old_state = state_.SetParked();
-      CHECK(old_state.IsRunning());
-      CHECK(old_state.IsSafepointRequested());
-      CHECK(!old_state.IsCollectionRequested());
+      CHECK(state_.CompareExchangeStrong(current_state,
+                                         current_state.SetParked()));
       heap_->safepoint()->NotifyPark();
       return;
     }
@@ -227,107 +196,54 @@ void LocalHeap::UnparkSlowPath() {
     DCHECK(current_state.IsParked());
 
     if (is_main_thread()) {
-      DCHECK(current_state.IsSafepointRequested() ||
-             current_state.IsCollectionRequested());
-
-      if (current_state.IsSafepointRequested()) {
-        SleepInUnpark();
-        continue;
-      }
-
-      if (current_state.IsCollectionRequested()) {
-        DCHECK(!current_state.IsSafepointRequested());
-
-        if (!state_.CompareExchangeStrong(current_state,
-                                          current_state.SetRunning()))
-          continue;
-
-        if (!heap()->ignore_local_gc_requests()) {
-          heap_->CollectGarbageForBackground(this);
-        }
-
-        return;
-      }
+      DCHECK(current_state.IsCollectionRequested());
+      CHECK(state_.CompareExchangeStrong(current_state,
+                                         current_state.SetRunning()));
+      heap_->CollectGarbageForBackground(this);
+      return;
     } else {
       DCHECK(current_state.IsSafepointRequested());
       DCHECK(!current_state.IsCollectionRequested());
 
-      SleepInUnpark();
+      TRACE_GC1(heap_->tracer(), GCTracer::Scope::BACKGROUND_UNPARK,
                ThreadKind::kBackground);
+      heap_->safepoint()->WaitInUnpark();
     }
   }
 }
 
-void LocalHeap::SleepInUnpark() {
-  GCTracer::Scope::ScopeId scope_id;
-  ThreadKind thread_kind;
-
-  if (is_main_thread()) {
-    scope_id = GCTracer::Scope::UNPARK;
-    thread_kind = ThreadKind::kMain;
-  } else {
-    scope_id = GCTracer::Scope::BACKGROUND_UNPARK;
-    thread_kind = ThreadKind::kBackground;
-  }
-
-  TRACE_GC1(heap_->tracer(), scope_id, thread_kind);
-  heap_->safepoint()->WaitInUnpark();
-}
-
 void LocalHeap::EnsureParkedBeforeDestruction() {
   DCHECK_IMPLIES(!is_main_thread(), IsParked());
 }
 
 void LocalHeap::SafepointSlowPath() {
+#ifdef DEBUG
   ThreadState current_state = state_.load_relaxed();
   DCHECK(current_state.IsRunning());
+#endif
 
   if (is_main_thread()) {
-    DCHECK(current_state.IsSafepointRequested() ||
-           current_state.IsCollectionRequested());
-
-    if (current_state.IsSafepointRequested()) {
-      SleepInSafepoint();
-    }
-
-    if (current_state.IsCollectionRequested()) {
-      heap_->CollectGarbageForBackground(this);
-    }
+    DCHECK(current_state.IsCollectionRequested());
+    heap_->CollectGarbageForBackground(this);
  } else {
     DCHECK(current_state.IsSafepointRequested());
     DCHECK(!current_state.IsCollectionRequested());
 
-    SleepInSafepoint();
+    TRACE_GC1(heap_->tracer(), GCTracer::Scope::BACKGROUND_SAFEPOINT,
              ThreadKind::kBackground);
+
+    // Parking the running thread here is an optimization. We do not need to
+    // wake this thread up to reach the next safepoint.
+    ThreadState old_state = state_.SetParked();
+    CHECK(old_state.IsRunning());
+    CHECK(old_state.IsSafepointRequested());
+    CHECK(!old_state.IsCollectionRequested());
+
+    heap_->safepoint()->WaitInSafepoint();
+
+    Unpark();
  }
 }
 
-void LocalHeap::SleepInSafepoint() {
-  GCTracer::Scope::ScopeId scope_id;
-  ThreadKind thread_kind;
-
-  if (is_main_thread()) {
-    scope_id = GCTracer::Scope::SAFEPOINT;
-    thread_kind = ThreadKind::kMain;
-  } else {
-    scope_id = GCTracer::Scope::BACKGROUND_SAFEPOINT;
-    thread_kind = ThreadKind::kBackground;
-  }
-
-  TRACE_GC1(heap_->tracer(), scope_id, thread_kind);
-
-  // Parking the running thread here is an optimization. We do not need to
-  // wake this thread up to reach the next safepoint.
-  ThreadState old_state = state_.SetParked();
-  CHECK(old_state.IsRunning());
-  CHECK(old_state.IsSafepointRequested());
-  CHECK_IMPLIES(old_state.IsCollectionRequested(), is_main_thread());
-
-  heap_->safepoint()->WaitInSafepoint();
-
-  base::Optional<IgnoreLocalGCRequests> ignore_gc_requests;
-  if (is_main_thread()) ignore_gc_requests.emplace(heap());
-  Unpark();
-}
-
 void LocalHeap::FreeLinearAllocationArea() {
   old_space_allocator_->FreeLinearAllocationArea();
   code_space_allocator_->FreeLinearAllocationArea();
@@ -354,7 +270,7 @@ bool LocalHeap::TryPerformCollection() {
     return true;
   } else {
     DCHECK(IsRunning());
-    if (!heap_->collection_barrier_->TryRequestGC()) return false;
+    heap_->collection_barrier_->RequestGC();
 
     LocalHeap* main_thread = heap_->main_thread_local_heap();
 
@@ -8,7 +8,6 @@
 #include <atomic>
 #include <memory>
 
 #include "src/base/logging.h"
-#include "src/base/macros.h"
 #include "src/base/platform/condition-variable.h"
 #include "src/base/platform/mutex.h"
@@ -279,8 +278,6 @@ class V8_EXPORT_PRIVATE LocalHeap {
   void UnparkSlowPath();
   void EnsureParkedBeforeDestruction();
   void SafepointSlowPath();
-  void SleepInSafepoint();
-  void SleepInUnpark();
 
   void EnsurePersistentHandles();
 
@@ -311,12 +308,10 @@ class V8_EXPORT_PRIVATE LocalHeap {
 
   friend class CollectionBarrier;
   friend class ConcurrentAllocator;
-  friend class GlobalSafepoint;
   friend class IsolateSafepoint;
   friend class Heap;
   friend class Isolate;
   friend class ParkedScope;
-  friend class SafepointScope;
   friend class UnparkedScope;
 };
 
@@ -44,8 +44,6 @@ class V8_NODISCARD UnparkedScope {
   LocalHeap* const local_heap_;
 };
 
-// Scope that automatically parks the thread while blocking on the given
-// base::Mutex.
 class V8_NODISCARD ParkedMutexGuard {
  public:
   explicit ParkedMutexGuard(LocalIsolate* local_isolate, base::Mutex* mutex)
@@ -7,9 +7,6 @@
 #include <atomic>
 
 #include "src/base/logging.h"
-#include "src/base/optional.h"
-#include "src/base/platform/mutex.h"
-#include "src/common/globals.h"
 #include "src/handles/handles.h"
 #include "src/handles/local-handles.h"
 #include "src/handles/persistent-handles.h"
@@ -19,7 +16,6 @@
 #include "src/heap/local-heap.h"
-#include "src/heap/parked-scope.h"
 #include "src/logging/counters-scopes.h"
 #include "src/objects/objects.h"
 
 namespace v8 {
 namespace internal {
@@ -27,46 +23,22 @@ namespace internal {
 IsolateSafepoint::IsolateSafepoint(Heap* heap)
     : heap_(heap), local_heaps_head_(nullptr), active_safepoint_scopes_(0) {}
 
-void IsolateSafepoint::EnterLocalSafepointScope() {
+void IsolateSafepoint::EnterSafepointScope(StopMainThread stop_main_thread) {
   // Safepoints need to be initiated on the main thread.
   DCHECK_EQ(ThreadId::Current(), heap_->isolate()->thread_id());
   DCHECK_NULL(LocalHeap::Current());
 
-  LockMutex(heap_->isolate()->main_thread_local_heap());
   if (++active_safepoint_scopes_ > 1) return;
 
   TimedHistogramScope timer(
       heap_->isolate()->counters()->gc_time_to_safepoint());
   TRACE_GC(heap_->tracer(), GCTracer::Scope::TIME_TO_SAFEPOINT);
 
-  barrier_.Arm();
-  size_t running = SetSafepointRequestedFlags(IncludeMainThread::kNo);
-  barrier_.WaitUntilRunningThreadsInSafepoint(running);
-}
-
-void IsolateSafepoint::EnterGlobalSafepointScope(Isolate* initiator) {
-  {
-    IgnoreLocalGCRequests ignore_gc_requests(initiator->heap());
-    LockMutex(initiator->main_thread_local_heap());
-  }
-  CHECK_EQ(active_safepoint_scopes_.exchange(1), 0);
+  local_heaps_mutex_.Lock();
 
   barrier_.Arm();
 
-  size_t running =
-      SetSafepointRequestedFlags(IncludeMainThreadUnlessInitiator(initiator));
-  barrier_.WaitUntilRunningThreadsInSafepoint(running);
-}
-
-IsolateSafepoint::IncludeMainThread
-IsolateSafepoint::IncludeMainThreadUnlessInitiator(Isolate* initiator) {
-  const bool is_initiator = heap_->isolate() == initiator;
-  return is_initiator ? IncludeMainThread::kNo : IncludeMainThread::kYes;
-}
-
-size_t IsolateSafepoint::SetSafepointRequestedFlags(
-    IncludeMainThread include_main_thread) {
-  size_t running = 0;
+  int running = 0;
 
   // There needs to be at least one LocalHeap for the main thread.
   DCHECK_NOT_NULL(local_heaps_head_);
@@ -74,7 +46,7 @@ size_t IsolateSafepoint::SetSafepointRequestedFlags(
   for (LocalHeap* local_heap = local_heaps_head_; local_heap;
        local_heap = local_heap->next_) {
     if (local_heap->is_main_thread() &&
-        include_main_thread == IncludeMainThread::kNo) {
+        stop_main_thread == StopMainThread::kNo) {
       continue;
     }
 
@@ -87,42 +59,21 @@ size_t IsolateSafepoint::SetSafepointRequestedFlags(
     CHECK(!old_state.IsSafepointRequested());
   }
 
-  return running;
-}
-
-void IsolateSafepoint::LockMutex(LocalHeap* local_heap) {
-  if (!local_heaps_mutex_.TryLock()) {
-    ParkedScope parked_scope(local_heap);
-    local_heaps_mutex_.Lock();
-  }
-}
-
-void IsolateSafepoint::LeaveGlobalSafepointScope(Isolate* initiator) {
-  local_heaps_mutex_.AssertHeld();
-  CHECK_EQ(active_safepoint_scopes_.exchange(0), 1);
-  ClearSafepointRequestedFlags(IncludeMainThreadUnlessInitiator(initiator));
-  barrier_.Disarm();
-  local_heaps_mutex_.Unlock();
-}
-
-void IsolateSafepoint::LeaveLocalSafepointScope() {
-  local_heaps_mutex_.AssertHeld();
-  DCHECK_GT(active_safepoint_scopes_, 0);
-
-  if (--active_safepoint_scopes_ == 0) {
-    ClearSafepointRequestedFlags(IncludeMainThread::kNo);
-    barrier_.Disarm();
-  }
-
-  local_heaps_mutex_.Unlock();
-}
-
-void IsolateSafepoint::ClearSafepointRequestedFlags(
-    IncludeMainThread include_main_thread) {
+  barrier_.WaitUntilRunningThreadsInSafepoint(running);
+}
+
+void IsolateSafepoint::LeaveSafepointScope(StopMainThread stop_main_thread) {
+  // Safepoints need to be initiated on the main thread.
+  DCHECK_EQ(ThreadId::Current(), heap_->isolate()->thread_id());
+  DCHECK_NULL(LocalHeap::Current());
+
+  DCHECK_GT(active_safepoint_scopes_, 0);
+  if (--active_safepoint_scopes_ > 0) return;
+
   for (LocalHeap* local_heap = local_heaps_head_; local_heap;
        local_heap = local_heap->next_) {
     if (local_heap->is_main_thread() &&
-        include_main_thread == IncludeMainThread::kNo) {
+        stop_main_thread == StopMainThread::kNo) {
       continue;
     }
 
@@ -134,6 +85,10 @@ void IsolateSafepoint::ClearSafepointRequestedFlags(
     CHECK_IMPLIES(old_state.IsCollectionRequested(),
                   local_heap->is_main_thread());
   }
+
+  barrier_.Disarm();
+
+  local_heaps_mutex_.Unlock();
 }
 
 void IsolateSafepoint::WaitInSafepoint() { barrier_.WaitInSafepoint(); }
@@ -158,7 +113,7 @@ void IsolateSafepoint::Barrier::Disarm() {
 }
 
 void IsolateSafepoint::Barrier::WaitUntilRunningThreadsInSafepoint(
-    size_t running) {
+    int running) {
   base::MutexGuard guard(&mutex_);
   DCHECK(IsArmed());
   while (stopped_ < running) {
@@ -193,8 +148,16 @@ void IsolateSafepoint::Barrier::WaitInUnpark() {
   }
 }
 
+SafepointScope::SafepointScope(Heap* heap) : safepoint_(heap->safepoint()) {
+  safepoint_->EnterSafepointScope(IsolateSafepoint::StopMainThread::kNo);
+}
+
+SafepointScope::~SafepointScope() {
+  safepoint_->LeaveSafepointScope(IsolateSafepoint::StopMainThread::kNo);
+}
+
 bool IsolateSafepoint::ContainsLocalHeap(LocalHeap* local_heap) {
-  base::RecursiveMutexGuard guard(&local_heaps_mutex_);
+  base::MutexGuard guard(&local_heaps_mutex_);
   LocalHeap* current = local_heaps_head_;
 
   while (current) {
@@ -206,7 +169,7 @@ bool IsolateSafepoint::ContainsLocalHeap(LocalHeap* local_heap) {
 }
 
 bool IsolateSafepoint::ContainsAnyLocalHeap() {
-  base::RecursiveMutexGuard guard(&local_heaps_mutex_);
+  base::MutexGuard guard(&local_heaps_mutex_);
   return local_heaps_head_ != nullptr;
 }
 
@@ -218,12 +181,6 @@ void IsolateSafepoint::Iterate(RootVisitor* visitor) {
   }
 }
 
-SafepointScope::SafepointScope(Heap* heap) : safepoint_(heap->safepoint()) {
-  safepoint_->EnterLocalSafepointScope();
-}
-
-SafepointScope::~SafepointScope() { safepoint_->LeaveLocalSafepointScope(); }
-
 GlobalSafepoint::GlobalSafepoint(Isolate* isolate)
     : shared_isolate_(isolate), shared_heap_(isolate->heap()) {}
 
@@ -247,11 +204,7 @@ void GlobalSafepoint::AppendClient(Isolate* client) {
 
 void GlobalSafepoint::RemoveClient(Isolate* client) {
   DCHECK_EQ(client->heap()->gc_state(), Heap::TEAR_DOWN);
-
-  // A shared heap may have already acquired the client mutex to perform a
-  // shared GC. We need to park the Isolate here to allow for a shared GC.
-  IgnoreLocalGCRequests ignore_gc_requests(client->heap());
-  ParkedMutexGuard guard(client->main_thread_local_heap(), &clients_mutex_);
+  base::MutexGuard guard(&clients_mutex_);
 
   if (client->global_safepoint_next_client_isolate_) {
     client->global_safepoint_next_client_isolate_
@@ -275,7 +228,6 @@ void GlobalSafepoint::AssertNoClients() { DCHECK_NULL(clients_head_); }
 
 void GlobalSafepoint::EnterGlobalSafepointScope(Isolate* initiator) {
   if (!clients_mutex_.TryLock()) {
-    IgnoreLocalGCRequests ignore_gc_requests(initiator->heap());
     ParkedScope parked_scope(initiator->main_thread_local_heap());
     clients_mutex_.Lock();
   }
@@ -287,7 +239,9 @@ void GlobalSafepoint::EnterGlobalSafepointScope(Isolate* initiator) {
 
   IterateClientIsolates([this, initiator](Isolate* client) {
     Heap* client_heap = client->heap();
-    client_heap->safepoint()->EnterGlobalSafepointScope(initiator);
+    CHECK_EQ(initiator, client);
+    client_heap->safepoint()->EnterSafepointScope(
+        IsolateSafepoint::StopMainThread::kNo);
 
     USE(this);
     DCHECK_EQ(client->shared_isolate(), shared_isolate_);
@@ -296,9 +250,10 @@ void GlobalSafepoint::EnterGlobalSafepointScope(Isolate* initiator) {
 }
 
 void GlobalSafepoint::LeaveGlobalSafepointScope(Isolate* initiator) {
-  IterateClientIsolates([initiator](Isolate* client) {
+  IterateClientIsolates([](Isolate* client) {
     Heap* client_heap = client->heap();
-    client_heap->safepoint()->LeaveGlobalSafepointScope(initiator);
+    client_heap->safepoint()->LeaveSafepointScope(
+        IsolateSafepoint::StopMainThread::kNo);
   });
 
   clients_mutex_.Unlock();
@@ -7,7 +7,6 @@
 
 #include "src/base/platform/condition-variable.h"
 #include "src/base/platform/mutex.h"
-#include "src/common/globals.h"
 #include "src/handles/persistent-handles.h"
 #include "src/heap/local-heap.h"
 #include "src/objects/visitors.h"
@@ -19,12 +18,21 @@
 class LocalHeap;
 class RootVisitor;
 
-// Used to bring all threads with heap access in an isolate to a safepoint such
-// that e.g. a garbage collection can be performed.
+// Used to bring all threads with heap access to a safepoint such that e.g. a
+// garbage collection can be performed.
 class IsolateSafepoint final {
  public:
   explicit IsolateSafepoint(Heap* heap);
 
+  // Wait until unpark operation is safe again
+  void WaitInUnpark();
+
+  // Enter the safepoint from a running thread
+  void WaitInSafepoint();
+
+  // Running thread reached a safepoint by parking itself.
+  void NotifyPark();
+
   V8_EXPORT_PRIVATE bool ContainsLocalHeap(LocalHeap* local_heap);
   V8_EXPORT_PRIVATE bool ContainsAnyLocalHeap();
 
@@ -50,7 +58,7 @@ class IsolateSafepoint final {
     base::ConditionVariable cv_stopped_;
     bool armed_;
 
-    size_t stopped_ = 0;
+    int stopped_ = 0;
 
     bool IsArmed() { return armed_; }
 
@@ -59,42 +67,23 @@ class IsolateSafepoint final {
 
     void Arm();
     void Disarm();
-    void WaitUntilRunningThreadsInSafepoint(size_t running);
+    void WaitUntilRunningThreadsInSafepoint(int running);
 
     void WaitInSafepoint();
    void WaitInUnpark();
    void NotifyPark();
  };
 
-  enum class IncludeMainThread { kYes, kNo };
-
-  // Wait until unpark operation is safe again.
-  void WaitInUnpark();
-
-  // Enter the safepoint from a running thread.
-  void WaitInSafepoint();
-
-  // Running thread reached a safepoint by parking itself.
-  void NotifyPark();
-
-  void EnterLocalSafepointScope();
-  void EnterGlobalSafepointScope(Isolate* initiator);
-
-  void LeaveLocalSafepointScope();
-  void LeaveGlobalSafepointScope(Isolate* initiator);
-
-  IncludeMainThread IncludeMainThreadUnlessInitiator(Isolate* initiator);
-
-  void LockMutex(LocalHeap* local_heap);
-
-  size_t SetSafepointRequestedFlags(IncludeMainThread include_main_thread);
-  void ClearSafepointRequestedFlags(IncludeMainThread include_main_thread);
+  enum class StopMainThread { kYes, kNo };
+
+  void EnterSafepointScope(StopMainThread stop_main_thread);
+  void LeaveSafepointScope(StopMainThread stop_main_thread);
 
   template <typename Callback>
   void AddLocalHeap(LocalHeap* local_heap, Callback callback) {
     // Safepoint holds this lock in order to stop threads from starting or
     // stopping.
-    base::RecursiveMutexGuard guard(&local_heaps_mutex_);
+    base::MutexGuard guard(&local_heaps_mutex_);
 
     // Additional code protected from safepoint
     callback();
@@ -108,7 +97,7 @@ class IsolateSafepoint final {
 
   template <typename Callback>
   void RemoveLocalHeap(LocalHeap* local_heap, Callback callback) {
-    base::RecursiveMutexGuard guard(&local_heaps_mutex_);
+    base::MutexGuard guard(&local_heaps_mutex_);
 
     // Additional code protected from safepoint
     callback();
@@ -124,12 +113,10 @@ class IsolateSafepoint final {
   Barrier barrier_;
   Heap* heap_;
 
-  // Mutex is used both for safepointing and adding/removing threads. A
-  // RecursiveMutex is needed since we need to support nested SafepointScopes.
-  base::RecursiveMutex local_heaps_mutex_;
+  base::Mutex local_heaps_mutex_;
   LocalHeap* local_heaps_head_;
 
-  std::atomic<int> active_safepoint_scopes_;
+  int active_safepoint_scopes_;
 
   friend class Heap;
   friend class GlobalSafepoint;
@@ -575,7 +575,6 @@
   F(MINOR_MC_MARKING_DEQUE) \
   F(MINOR_MC_RESET_LIVENESS) \
   F(MINOR_MC_SWEEPING) \
-  F(SAFEPOINT) \
   F(SCAVENGER) \
   F(SCAVENGER_COMPLETE_SWEEP_ARRAY_BUFFERS) \
   F(SCAVENGER_FAST_PROMOTE) \
@@ -592,8 +591,7 @@
   F(SCAVENGER_SWEEP_ARRAY_BUFFERS) \
-  F(TIME_TO_GLOBAL_SAFEPOINT) \
   F(TIME_TO_SAFEPOINT) \
-  F(UNMAPPER) \
-  F(UNPARK)
+  F(UNMAPPER)
 
 #define TRACER_BACKGROUND_SCOPES(F) \
   F(BACKGROUND_YOUNG_ARRAY_BUFFER_SWEEP) \
@@ -164,24 +164,22 @@ UNINITIALIZED_TEST(SharedCollectionWithoutClients) {
   Isolate::Delete(shared_isolate);
 }
 
-void AllocateInSharedHeap(Isolate* shared_isolate, int iterations = 100) {
+void AllocateInSharedHeap(Isolate* shared_isolate) {
   SetupClientIsolateAndRunCallback(
       shared_isolate,
-      [iterations](v8::Isolate* client_isolate, Isolate* i_client_isolate) {
+      [](v8::Isolate* client_isolate, Isolate* i_client_isolate) {
         HandleScope scope(i_client_isolate);
         std::vector<Handle<FixedArray>> arrays;
         const int kKeptAliveArrays = 1000;
 
-        for (int i = 0; i < kNumIterations * iterations; i++) {
-          HandleScope scope(i_client_isolate);
+        for (int i = 0; i < kNumIterations * 100; i++) {
+          HandleScope new_scope(i_client_isolate);
           Handle<FixedArray> array = i_client_isolate->factory()->NewFixedArray(
               100, AllocationType::kSharedOld);
           if (i < kKeptAliveArrays) {
             // Keep some of those arrays alive across GCs.
-            arrays.push_back(scope.CloseAndEscape(array));
+            arrays.push_back(new_scope.CloseAndEscape(array));
           }
           i_client_isolate->factory()->NewFixedArray(100,
                                                      AllocationType::kYoung);
         }
 
         for (Handle<FixedArray> array : arrays) {
@@ -205,46 +203,5 @@ UNINITIALIZED_TEST(SharedCollectionWithOneClient) {
   Isolate::Delete(shared_isolate);
 }
 
-namespace {
-class SharedFixedArrayAllocationThread final : public v8::base::Thread {
- public:
-  explicit SharedFixedArrayAllocationThread(Isolate* shared)
-      : v8::base::Thread(
-            base::Thread::Options("SharedFixedArrayAllocationThread")),
-        shared_(shared) {}
-
-  void Run() override { AllocateInSharedHeap(shared_, 5); }
-
-  Isolate* shared_;
-};
-}  // namespace
-
-UNINITIALIZED_TEST(SharedCollectionWithMultipleClients) {
-  FLAG_max_old_space_size = 8;
-  if (!ReadOnlyHeap::IsReadOnlySpaceShared()) return;
-  std::unique_ptr<v8::ArrayBuffer::Allocator> allocator(
-      v8::ArrayBuffer::Allocator::NewDefaultAllocator());
-
-  v8::Isolate::CreateParams create_params;
-  create_params.array_buffer_allocator = allocator.get();
-  Isolate* shared_isolate = Isolate::NewShared(create_params);
-
-  std::vector<std::unique_ptr<SharedFixedArrayAllocationThread>> threads;
-  const int kThreads = 4;
-
-  for (int i = 0; i < kThreads; i++) {
-    auto thread =
-        std::make_unique<SharedFixedArrayAllocationThread>(shared_isolate);
-    CHECK(thread->Start());
-    threads.push_back(std::move(thread));
-  }
-
-  for (auto& thread : threads) {
-    thread->Join();
-  }
-
-  Isolate::Delete(shared_isolate);
-}
-
 }  // namespace internal
 }  // namespace v8