[heap] Perform GC in a shared heap
This CL implements GC in a shared heap. A shared GC is started from an
attached client isolate that fails to allocate a shared object. In order to
perform a shared GC, all other running client isolates need to be stopped
and their roots need to be scanned.

Bug: v8:11708
Change-Id: I45ac50e6b4a1e9270f9e39b69f9b8ee5e6e14134
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2964816
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Mythri Alle <mythria@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#75606}
parent: 7ac3b55a20
commit: 9663bb31d8
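In outline: a client isolate whose shared allocation fails calls
CollectSharedGarbage(), which forwards to the shared isolate's heap. The
shared heap takes the client-isolate mutex, brings every other client to a
safepoint (the initiator is already stopped), flushes the clients' shared
linear allocation areas, runs a full mark-compact whose root scan also walks
all client roots, and then resumes the clients. The standalone sketch below
models only the stop/resume handshake with plain C++17 threads; the names
(Safepoint, Enter, Leave, Check) are illustrative and none of this is V8
code; in the CL the equivalent roles are played by GlobalSafepoint,
EnterSafepointScope/LeaveSafepointScope, and the new StopMainThread
parameter.

  #include <atomic>
  #include <condition_variable>
  #include <iostream>
  #include <mutex>
  #include <thread>
  #include <vector>

  // Toy model of a stop-the-world handshake: the initiator requests a stop,
  // waits for every other participant to park, does its work, then releases
  // them. Participants poll Check() at regular "safepoint checks".
  class Safepoint {
   public:
    void Enter(int others) {
      std::unique_lock<std::mutex> lock(mutex_);
      stop_requested_ = true;
      cv_.wait(lock, [&] { return parked_ == others; });
    }

    void Leave() {
      {
        std::lock_guard<std::mutex> lock(mutex_);
        stop_requested_ = false;
      }
      cv_.notify_all();
    }

    void Check() {
      std::unique_lock<std::mutex> lock(mutex_);
      if (!stop_requested_) return;
      ++parked_;
      cv_.notify_all();  // Wake the initiator waiting in Enter().
      cv_.wait(lock, [&] { return !stop_requested_; });
      --parked_;
    }

   private:
    std::mutex mutex_;
    std::condition_variable cv_;
    bool stop_requested_ = false;
    int parked_ = 0;
  };

  int main() {
    Safepoint safepoint;
    std::atomic<bool> done{false};
    std::vector<std::thread> clients;

    // Two "client isolates" running with periodic safepoint checks.
    for (int i = 0; i < 2; i++) {
      clients.emplace_back([&] {
        while (!done.load()) safepoint.Check();
      });
    }

    // The initiator stops both clients, performs the "shared GC", resumes.
    safepoint.Enter(2);
    std::cout << "all clients parked; shared GC would run here\n";
    safepoint.Leave();

    done.store(true);
    for (auto& t : clients) t.join();
  }

Note that, as in the sketch, the CL's SafepointScope keeps its old behavior
(StopMainThread::kNo); only PerformSharedGarbageCollection passes kYes for
clients other than the initiator, since the initiator's own main thread is
the one driving the GC.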
@@ -841,6 +841,11 @@ inline std::ostream& operator<<(std::ostream& os, AllocationType kind) {
   UNREACHABLE();
 }
 
+inline constexpr bool IsSharedAllocationType(AllocationType kind) {
+  return kind == AllocationType::kSharedOld ||
+         kind == AllocationType::kSharedMap;
+}
+
 // TODO(ishell): review and rename kWordAligned to kTaggedAligned.
 enum AllocationAlignment { kWordAligned, kDoubleAligned, kDoubleUnaligned };
 
@@ -1802,6 +1802,16 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
 
   bool HasClientIsolates() const { return client_isolate_head_; }
 
+  template <typename Callback>
+  void IterateClientIsolates(Callback callback) {
+    for (Isolate* current = client_isolate_head_; current;
+         current = current->next_client_isolate_) {
+      callback(current);
+    }
+  }
+
+  base::Mutex* client_isolate_mutex() { return &client_isolate_mutex_; }
+
  private:
   explicit Isolate(std::unique_ptr<IsolateAllocator> isolate_allocator,
                    bool is_shared);
@@ -2172,9 +2172,6 @@ size_t Heap::PerformGarbageCollection(
 
   SafepointScope safepoint_scope(this);
 
-  // Shared isolates cannot have any clients when running GC at the moment.
-  DCHECK_IMPLIES(IsShared(), !isolate()->HasClientIsolates());
-
   collection_barrier_->StopTimeToCollectionTimer();
 
 #ifdef VERIFY_HEAP
@@ -2262,6 +2259,50 @@ size_t Heap::PerformGarbageCollection(
   return freed_global_handles;
 }
 
+void Heap::CollectSharedGarbage(GarbageCollectionReason gc_reason) {
+  DCHECK(!IsShared());
+  DCHECK_NOT_NULL(isolate()->shared_isolate());
+
+  isolate()->shared_isolate()->heap()->PerformSharedGarbageCollection(
+      isolate(), gc_reason);
+}
+
+void Heap::PerformSharedGarbageCollection(Isolate* initiator,
+                                          GarbageCollectionReason gc_reason) {
+  DCHECK(IsShared());
+  base::MutexGuard guard(isolate()->client_isolate_mutex());
+
+  const char* collector_reason = nullptr;
+  GarbageCollector collector = MARK_COMPACTOR;
+
+  tracer()->Start(collector, gc_reason, collector_reason);
+
+  isolate()->IterateClientIsolates([initiator](Isolate* client) {
+    DCHECK_NOT_NULL(client->shared_isolate());
+    Heap* client_heap = client->heap();
+
+    GlobalSafepoint::StopMainThread stop_main_thread =
+        initiator == client ? GlobalSafepoint::StopMainThread::kNo
+                            : GlobalSafepoint::StopMainThread::kYes;
+
+    client_heap->safepoint()->EnterSafepointScope(stop_main_thread);
+
+    client_heap->shared_old_allocator_->FreeLinearAllocationArea();
+    client_heap->shared_map_allocator_->FreeLinearAllocationArea();
+  });
+
+  PerformGarbageCollection(MARK_COMPACTOR);
+
+  isolate()->IterateClientIsolates([initiator](Isolate* client) {
+    GlobalSafepoint::StopMainThread stop_main_thread =
+        initiator == client ? GlobalSafepoint::StopMainThread::kNo
+                            : GlobalSafepoint::StopMainThread::kYes;
+    client->heap()->safepoint()->LeaveSafepointScope(stop_main_thread);
+  });
+
+  tracer()->Stop(collector);
+}
+
 void Heap::CompleteSweepingYoung(GarbageCollector collector) {
   GCTracer::Scope::ScopeId scope_id;
 
@@ -4322,10 +4363,7 @@ void Heap::Verify() {
   SafepointScope safepoint_scope(this);
   HandleScope scope(isolate());
 
-  MakeLocalHeapLabsIterable();
+  MakeHeapIterable();
 
-  // We have to wait here for the sweeper threads to have an iterable heap.
-  mark_compact_collector()->EnsureSweepingCompleted();
-
   array_buffer_sweeper()->EnsureFinished();
 
@@ -4792,6 +4830,15 @@ void Heap::IterateRoots(RootVisitor* v, base::EnumSet<SkipRoot> options) {
   }
 }
 
+void Heap::IterateRootsIncludingClients(RootVisitor* v,
+                                        base::EnumSet<SkipRoot> options) {
+  IterateRoots(v, options);
+
+  isolate()->IterateClientIsolates([v, options](Isolate* client) {
+    client->heap()->IterateRoots(v, options);
+  });
+}
+
 void Heap::IterateWeakGlobalHandles(RootVisitor* v) {
   isolate_->global_handles()->IterateWeakRoots(v);
 }
@@ -5340,8 +5387,12 @@ HeapObject Heap::AllocateRawWithLightRetrySlowPath(
   }
   // Two GCs before panicking. In newspace will almost always succeed.
   for (int i = 0; i < 2; i++) {
-    CollectGarbage(alloc.RetrySpace(),
-                   GarbageCollectionReason::kAllocationFailure);
+    if (IsSharedAllocationType(allocation)) {
+      CollectSharedGarbage(GarbageCollectionReason::kAllocationFailure);
+    } else {
+      CollectGarbage(alloc.RetrySpace(),
+                     GarbageCollectionReason::kAllocationFailure);
+    }
     alloc = AllocateRaw(size, allocation, origin, alignment);
     if (alloc.To(&result)) {
       DCHECK(result != ReadOnlyRoots(this).exception());
@@ -5360,7 +5411,12 @@ HeapObject Heap::AllocateRawWithRetryOrFailSlowPath(
   if (!result.is_null()) return result;
 
   isolate()->counters()->gc_last_resort_from_handles()->Increment();
-  CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
+  if (IsSharedAllocationType(allocation)) {
+    CollectSharedGarbage(GarbageCollectionReason::kLastResort);
+  } else {
+    CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
+  }
+
   {
     AlwaysAllocateScope scope(this);
     alloc = AllocateRaw(size, allocation, origin, alignment);
@@ -1026,6 +1026,10 @@ class Heap {
       int flags, GarbageCollectionReason gc_reason,
       const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
 
+  // Performs garbage collection operation for the shared heap.
+  V8_EXPORT_PRIVATE void CollectSharedGarbage(
+      GarbageCollectionReason gc_reason);
+
   // Reports and external memory pressure event, either performs a major GC or
   // completes incremental marking in order to free external resources.
   void ReportExternalMemoryPressure();
@@ -1060,6 +1064,9 @@ class Heap {
 
   // Iterates over the strong roots and the weak roots.
   void IterateRoots(RootVisitor* v, base::EnumSet<SkipRoot> options);
+  void IterateRootsIncludingClients(RootVisitor* v,
+                                    base::EnumSet<SkipRoot> options);
+
   // Iterates over entries in the smi roots list. Only interesting to the
   // serializer/deserializer, since GC does not care about smis.
   void IterateSmiRoots(RootVisitor* v);
@@ -1799,6 +1806,10 @@ class Heap {
       GarbageCollector collector,
       const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
 
+  // Performs garbage collection in the shared heap.
+  void PerformSharedGarbageCollection(Isolate* initiator,
+                                      GarbageCollectionReason gc_reason);
+
   inline void UpdateOldSpaceLimits();
 
   bool CreateInitialMaps();
@@ -121,7 +121,8 @@ class MarkingVerifier : public ObjectVisitor, public RootVisitor {
 };
 
 void MarkingVerifier::VerifyRoots() {
-  heap_->IterateRoots(this, base::EnumSet<SkipRoot>{SkipRoot::kWeak});
+  heap_->IterateRootsIncludingClients(this,
+                                      base::EnumSet<SkipRoot>{SkipRoot::kWeak});
 }
 
 void MarkingVerifier::VerifyMarkingOnPage(const Page* page, Address start,
@@ -245,9 +246,10 @@ class FullMarkingVerifier : public MarkingVerifier {
 
  private:
   V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
-    if (!heap_->IsShared() &&
+    if (heap_->IsShared() !=
         BasicMemoryChunk::FromHeapObject(heap_object)->InSharedHeap())
       return;
+
     CHECK(marking_state_->IsBlackOrGrey(heap_object));
   }
 
@@ -305,7 +307,8 @@ class EvacuationVerifier : public ObjectVisitor, public RootVisitor {
 };
 
 void EvacuationVerifier::VerifyRoots() {
-  heap_->IterateRoots(this, base::EnumSet<SkipRoot>{SkipRoot::kWeak});
+  heap_->IterateRootsIncludingClients(this,
+                                      base::EnumSet<SkipRoot>{SkipRoot::kWeak});
 }
 
 void EvacuationVerifier::VerifyEvacuationOnPage(Address start, Address end) {
@@ -356,6 +359,10 @@ class FullEvacuationVerifier : public EvacuationVerifier {
 
  protected:
   V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
+    if (heap_->IsShared() !=
+        BasicMemoryChunk::FromHeapObject(heap_object)->InSharedHeap())
+      return;
+
     CHECK_IMPLIES(Heap::InYoungGeneration(heap_object),
                   Heap::InToPage(heap_object));
     CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(heap_object));
@@ -1001,7 +1008,7 @@ class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
     HeapObject heap_object = HeapObject::cast(object);
     BasicMemoryChunk* target_page =
         BasicMemoryChunk::FromHeapObject(heap_object);
-    if (!is_shared_heap_ && target_page->InSharedHeap()) return;
+    if (is_shared_heap_ != target_page->InSharedHeap()) return;
     collector_->MarkRootObject(root, heap_object);
   }
 
@@ -1629,10 +1636,16 @@ void MarkCompactCollector::MarkRoots(RootVisitor* root_visitor,
                                      ObjectVisitor* custom_root_body_visitor) {
   // Mark the heap roots including global variables, stack variables,
   // etc., and all objects reachable from them.
-  heap()->IterateRoots(root_visitor, base::EnumSet<SkipRoot>{SkipRoot::kWeak});
+  heap()->IterateRootsIncludingClients(
+      root_visitor, base::EnumSet<SkipRoot>{SkipRoot::kWeak});
 
   // Custom marking for top optimized frame.
-  ProcessTopOptimizedFrame(custom_root_body_visitor);
+  ProcessTopOptimizedFrame(custom_root_body_visitor, isolate());
+
+  isolate()->IterateClientIsolates(
+      [this, custom_root_body_visitor](Isolate* client) {
+        ProcessTopOptimizedFrame(custom_root_body_visitor, client);
+      });
 }
 
 void MarkCompactCollector::VisitObject(HeapObject obj) {
@@ -1921,13 +1934,14 @@ void MarkCompactCollector::ProcessEphemeronMarking() {
   CHECK(heap()->local_embedder_heap_tracer()->IsRemoteTracingDone());
 }
 
-void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
-  for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
-       !it.done(); it.Advance()) {
+void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor,
+                                                    Isolate* isolate) {
+  for (StackFrameIterator it(isolate, isolate->thread_local_top()); !it.done();
+       it.Advance()) {
     if (it.frame()->is_unoptimized()) return;
     if (it.frame()->type() == StackFrame::OPTIMIZED) {
       Code code = it.frame()->LookupCode();
-      if (!code.CanDeoptAt(isolate(), it.frame()->pc())) {
+      if (!code.CanDeoptAt(isolate, it.frame()->pc())) {
         Code::BodyDescriptor::IterateBody(code.map(), code, visitor);
       }
       return;
@@ -3984,8 +3998,9 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
     TRACE_GC(heap()->tracer(),
              GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
     // The external string table is updated at the end.
-    heap_->IterateRoots(&updating_visitor, base::EnumSet<SkipRoot>{
-                                               SkipRoot::kExternalStringTable});
+    heap_->IterateRootsIncludingClients(
+        &updating_visitor,
+        base::EnumSet<SkipRoot>{SkipRoot::kExternalStringTable});
   }
 
   {
@@ -626,7 +626,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
   // If the call-site of the top optimized code was not prepared for
   // deoptimization, then treat embedded pointers in the code as strong as
   // otherwise they can die and try to deoptimize the underlying code.
-  void ProcessTopOptimizedFrame(ObjectVisitor* visitor);
+  void ProcessTopOptimizedFrame(ObjectVisitor* visitor, Isolate* isolate);
 
   // Drains the main thread marking work list. Will mark all pending objects
   // if no concurrent threads are running.
@@ -21,7 +21,7 @@ namespace internal {
 GlobalSafepoint::GlobalSafepoint(Heap* heap)
     : heap_(heap), local_heaps_head_(nullptr), active_safepoint_scopes_(0) {}
 
-void GlobalSafepoint::EnterSafepointScope() {
+void GlobalSafepoint::EnterSafepointScope(StopMainThread stop_main_thread) {
   if (++active_safepoint_scopes_ > 1) return;
 
   TimedHistogramScope timer(
@@ -37,10 +37,10 @@ void GlobalSafepoint::EnterSafepointScope() {
 
   for (LocalHeap* local_heap = local_heaps_head_; local_heap;
        local_heap = local_heap->next_) {
-    if (local_heap->is_main_thread()) {
+    if (local_heap->is_main_thread() &&
+        stop_main_thread == StopMainThread::kNo) {
       continue;
     }
-    DCHECK(!local_heap->is_main_thread());
 
     LocalHeap::ThreadState expected = local_heap->state_relaxed();
 
@@ -64,7 +64,7 @@ void GlobalSafepoint::EnterSafepointScope() {
   barrier_.WaitUntilRunningThreadsInSafepoint(running);
 }
 
-void GlobalSafepoint::LeaveSafepointScope() {
+void GlobalSafepoint::LeaveSafepointScope(StopMainThread stop_main_thread) {
   DCHECK_GT(active_safepoint_scopes_, 0);
   if (--active_safepoint_scopes_ > 0) return;
 
@@ -72,7 +72,8 @@ void GlobalSafepoint::LeaveSafepointScope() {
 
   for (LocalHeap* local_heap = local_heaps_head_; local_heap;
        local_heap = local_heap->next_) {
-    if (local_heap->is_main_thread()) {
+    if (local_heap->is_main_thread() &&
+        stop_main_thread == StopMainThread::kNo) {
       continue;
     }
 
@@ -151,10 +152,12 @@ void GlobalSafepoint::Barrier::WaitInUnpark() {
 }
 
 SafepointScope::SafepointScope(Heap* heap) : safepoint_(heap->safepoint()) {
-  safepoint_->EnterSafepointScope();
+  safepoint_->EnterSafepointScope(GlobalSafepoint::StopMainThread::kNo);
 }
 
-SafepointScope::~SafepointScope() { safepoint_->LeaveSafepointScope(); }
+SafepointScope::~SafepointScope() {
+  safepoint_->LeaveSafepointScope(GlobalSafepoint::StopMainThread::kNo);
+}
 
 bool GlobalSafepoint::ContainsLocalHeap(LocalHeap* local_heap) {
   base::MutexGuard guard(&local_heaps_mutex_);
@@ -18,8 +18,8 @@ class Heap;
 class LocalHeap;
 class RootVisitor;
 
-// Used to bring all background threads with heap access to a safepoint such
-// that e.g. a garbage collection can be performed.
+// Used to bring all threads with heap access to a safepoint such that e.g. a
+// garbage collection can be performed.
 class GlobalSafepoint {
  public:
   explicit GlobalSafepoint(Heap* heap);
@@ -74,8 +74,10 @@ class GlobalSafepoint {
     void NotifyPark();
   };
 
-  void EnterSafepointScope();
-  void LeaveSafepointScope();
+  enum class StopMainThread { kYes, kNo };
+
+  void EnterSafepointScope(StopMainThread stop_main_thread);
+  void LeaveSafepointScope(StopMainThread stop_main_thread);
 
   template <typename Callback>
   void AddLocalHeap(LocalHeap* local_heap, Callback callback) {
|
|||||||
|
|
||||||
int active_safepoint_scopes_;
|
int active_safepoint_scopes_;
|
||||||
|
|
||||||
friend class SafepointScope;
|
friend class Heap;
|
||||||
friend class LocalHeap;
|
friend class LocalHeap;
|
||||||
friend class PersistentHandles;
|
friend class PersistentHandles;
|
||||||
|
friend class SafepointScope;
|
||||||
};
|
};
|
||||||
|
|
||||||
class V8_NODISCARD SafepointScope {
|
class V8_NODISCARD SafepointScope {
|
||||||
|
@@ -6,6 +6,8 @@
 #include "src/common/globals.h"
 #include "src/handles/handles-inl.h"
 #include "src/heap/heap.h"
+#include "src/objects/fixed-array-inl.h"
+#include "src/objects/fixed-array.h"
 #include "src/objects/heap-object.h"
 #include "test/cctest/cctest.h"
 
@@ -140,7 +142,7 @@ UNINITIALIZED_TEST(ConcurrentAllocationInSharedMapSpace) {
   Isolate::Delete(shared_isolate);
 }
 
-UNINITIALIZED_TEST(SharedCollection) {
+UNINITIALIZED_TEST(SharedCollectionWithoutClients) {
   std::unique_ptr<v8::ArrayBuffer::Allocator> allocator(
       v8::ArrayBuffer::Allocator::NewDefaultAllocator());
 
@@ -155,5 +157,43 @@ UNINITIALIZED_TEST(SharedCollection) {
   Isolate::Delete(shared_isolate);
 }
 
+void AllocateInSharedSpace(Isolate* shared_isolate) {
+  SetupClientIsolateAndRunCallback(
+      shared_isolate,
+      [](v8::Isolate* client_isolate, Isolate* i_client_isolate) {
+        HandleScope scope(i_client_isolate);
+        std::vector<Handle<FixedArray>> arrays;
+        const int kKeptAliveArrays = 1000;
+
+        for (int i = 0; i < kNumIterations * 100; i++) {
+          HandleScope scope(i_client_isolate);
+          Handle<FixedArray> array = i_client_isolate->factory()->NewFixedArray(
+              100, AllocationType::kSharedOld);
+          if (i < kKeptAliveArrays) {
+            // Keep some of those arrays alive across GCs.
+            arrays.push_back(scope.CloseAndEscape(array));
+          }
+        }
+
+        for (Handle<FixedArray> array : arrays) {
+          CHECK_EQ(array->length(), 100);
+        }
+      });
+}
+
+UNINITIALIZED_TEST(SharedCollectionWithOneClient) {
+  FLAG_max_old_space_size = 8;
+  std::unique_ptr<v8::ArrayBuffer::Allocator> allocator(
+      v8::ArrayBuffer::Allocator::NewDefaultAllocator());
+
+  v8::Isolate::CreateParams create_params;
+  create_params.array_buffer_allocator = allocator.get();
+  Isolate* shared_isolate = Isolate::NewShared(create_params);
+
+  AllocateInSharedSpace(shared_isolate);
+
+  Isolate::Delete(shared_isolate);
+}
+
 }  // namespace internal
 }  // namespace v8