diff --git a/src/heap/memory-allocator.cc b/src/heap/memory-allocator.cc
index 2c9daa3ec4..a3d4f0029e 100644
--- a/src/heap/memory-allocator.cc
+++ b/src/heap/memory-allocator.cc
@@ -154,68 +154,55 @@ void MemoryAllocator::TearDown() {
   data_page_allocator_ = nullptr;
 }
 
-class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {
+class MemoryAllocator::Unmapper::UnmapFreeMemoryJob : public JobTask {
  public:
-  explicit UnmapFreeMemoryTask(Isolate* isolate, Unmapper* unmapper)
-      : CancelableTask(isolate),
-        unmapper_(unmapper),
-        tracer_(isolate->heap()->tracer()) {}
+  explicit UnmapFreeMemoryJob(Isolate* isolate, Unmapper* unmapper)
+      : unmapper_(unmapper), tracer_(isolate->heap()->tracer()) {}
 
- private:
-  void RunInternal() override {
+  void Run(JobDelegate* delegate) override {
     TRACE_BACKGROUND_GC(tracer_,
                         GCTracer::BackgroundScope::BACKGROUND_UNMAPPER);
-    unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
-    unmapper_->active_unmapping_tasks_--;
-    unmapper_->pending_unmapping_tasks_semaphore_.Signal();
+    unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>(
+        delegate);
     if (FLAG_trace_unmapper) {
-      PrintIsolate(unmapper_->heap_->isolate(),
-                   "UnmapFreeMemoryTask Done: id=%" PRIu64 "\n", id());
+      PrintIsolate(unmapper_->heap_->isolate(), "UnmapFreeMemoryTask Done\n");
     }
   }
 
+  size_t GetMaxConcurrency(size_t worker_count) const override {
+    const size_t kTaskPerChunk = 8;
+    return std::min<size_t>(
+        kMaxUnmapperTasks,
+        worker_count +
+            (unmapper_->NumberOfCommittedChunks() + kTaskPerChunk - 1) /
+                kTaskPerChunk);
+  }
+
+ private:
   Unmapper* const unmapper_;
   GCTracer* const tracer_;
-  DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
+  DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryJob);
 };
 
 void MemoryAllocator::Unmapper::FreeQueuedChunks() {
   if (!heap_->IsTearingDown() && FLAG_concurrent_sweeping) {
-    if (!MakeRoomForNewTasks()) {
-      // kMaxUnmapperTasks are already running. Avoid creating any more.
+    if (job_handle_ && job_handle_->IsValid()) {
+      job_handle_->NotifyConcurrencyIncrease();
+    } else {
+      job_handle_ = V8::GetCurrentPlatform()->PostJob(
+          TaskPriority::kUserVisible,
+          std::make_unique<UnmapFreeMemoryJob>(heap_->isolate(), this));
       if (FLAG_trace_unmapper) {
-        PrintIsolate(heap_->isolate(),
-                     "Unmapper::FreeQueuedChunks: reached task limit (%d)\n",
-                     kMaxUnmapperTasks);
+        PrintIsolate(heap_->isolate(), "Unmapper::FreeQueuedChunks: new Job\n");
       }
-      return;
     }
-    auto task = std::make_unique<UnmapFreeMemoryTask>(heap_->isolate(), this);
-    if (FLAG_trace_unmapper) {
-      PrintIsolate(heap_->isolate(),
-                   "Unmapper::FreeQueuedChunks: new task id=%" PRIu64 "\n",
-                   task->id());
-    }
-    DCHECK_LT(pending_unmapping_tasks_, kMaxUnmapperTasks);
-    DCHECK_LE(active_unmapping_tasks_, pending_unmapping_tasks_);
-    DCHECK_GE(active_unmapping_tasks_, 0);
-    active_unmapping_tasks_++;
-    task_ids_[pending_unmapping_tasks_++] = task->id();
-    V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
   } else {
     PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
   }
 }
 
 void MemoryAllocator::Unmapper::CancelAndWaitForPendingTasks() {
-  for (int i = 0; i < pending_unmapping_tasks_; i++) {
-    if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
-        TryAbortResult::kTaskAborted) {
-      pending_unmapping_tasks_semaphore_.Wait();
-    }
-  }
-  pending_unmapping_tasks_ = 0;
-  active_unmapping_tasks_ = 0;
+  if (job_handle_ && job_handle_->IsValid()) job_handle_->Join();
 
   if (FLAG_trace_unmapper) {
     PrintIsolate(
@@ -234,26 +221,18 @@ void MemoryAllocator::Unmapper::EnsureUnmappingCompleted() {
   PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
 }
 
-bool MemoryAllocator::Unmapper::MakeRoomForNewTasks() {
-  DCHECK_LE(pending_unmapping_tasks_, kMaxUnmapperTasks);
-
-  if (active_unmapping_tasks_ == 0 && pending_unmapping_tasks_ > 0) {
-    // All previous unmapping tasks have been run to completion.
-    // Finalize those tasks to make room for new ones.
-    CancelAndWaitForPendingTasks();
-  }
-  return pending_unmapping_tasks_ != kMaxUnmapperTasks;
-}
-
-void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedNonRegularChunks() {
+void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedNonRegularChunks(
+    JobDelegate* delegate) {
   MemoryChunk* chunk = nullptr;
   while ((chunk = GetMemoryChunkSafe<kNonRegular>()) != nullptr) {
     allocator_->PerformFreeMemory(chunk);
+    if (delegate && delegate->ShouldYield()) return;
   }
 }
 
 template <MemoryAllocator::Unmapper::FreeMode mode>
-void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
+void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks(
+    JobDelegate* delegate) {
   MemoryChunk* chunk = nullptr;
   if (FLAG_trace_unmapper) {
     PrintIsolate(
@@ -266,6 +245,7 @@ void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
     bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
     allocator_->PerformFreeMemory(chunk);
     if (pooled) AddMemoryChunkSafe<kPooled>(chunk);
+    if (delegate && delegate->ShouldYield()) return;
   }
   if (mode == MemoryAllocator::Unmapper::FreeMode::kReleasePooled) {
     // The previous loop uncommitted any pages marked as pooled and added them
@@ -273,13 +253,14 @@ void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
     // though.
     while ((chunk = GetMemoryChunkSafe<kPooled>()) != nullptr) {
      allocator_->Free<MemoryAllocator::kAlreadyPooled>(chunk);
+      if (delegate && delegate->ShouldYield()) return;
     }
   }
   PerformFreeMemoryOnQueuedNonRegularChunks();
 }
 
 void MemoryAllocator::Unmapper::TearDown() {
-  CHECK_EQ(0, pending_unmapping_tasks_);
+  CHECK(!job_handle_ || !job_handle_->IsValid());
   PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
   for (int i = 0; i < kNumberOfChunkQueues; i++) {
     DCHECK(chunks_[i].empty());
diff --git a/src/heap/memory-allocator.h b/src/heap/memory-allocator.h
index 7f95c49629..179877e753 100644
--- a/src/heap/memory-allocator.h
+++ b/src/heap/memory-allocator.h
@@ -61,14 +61,10 @@ class MemoryAllocator {
   // chunks.
   class Unmapper {
    public:
-    class UnmapFreeMemoryTask;
+    class UnmapFreeMemoryJob;
 
     Unmapper(Heap* heap, MemoryAllocator* allocator)
-        : heap_(heap),
-          allocator_(allocator),
-          pending_unmapping_tasks_semaphore_(0),
-          pending_unmapping_tasks_(0),
-          active_unmapping_tasks_(0) {
+        : heap_(heap), allocator_(allocator) {
       chunks_[kRegular].reserve(kReservedQueueingSlots);
       chunks_[kPooled].reserve(kReservedQueueingSlots);
     }
@@ -142,18 +138,16 @@ class MemoryAllocator {
     bool MakeRoomForNewTasks();
     template <FreeMode mode>
-    void PerformFreeMemoryOnQueuedChunks();
+    void PerformFreeMemoryOnQueuedChunks(JobDelegate* delegate = nullptr);
 
-    void PerformFreeMemoryOnQueuedNonRegularChunks();
+    void PerformFreeMemoryOnQueuedNonRegularChunks(
+        JobDelegate* delegate = nullptr);
 
     Heap* const heap_;
     MemoryAllocator* const allocator_;
 
     base::Mutex mutex_;
     std::vector<MemoryChunk*> chunks_[kNumberOfChunkQueues];
-    CancelableTaskManager::Id task_ids_[kMaxUnmapperTasks];
-    base::Semaphore pending_unmapping_tasks_semaphore_;
-    intptr_t pending_unmapping_tasks_;
-    std::atomic<intptr_t> active_unmapping_tasks_;
+    std::unique_ptr<v8::JobHandle> job_handle_;
 
     friend class MemoryAllocator;
   };
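Reviewer note: the Jobs API replaces the manual bookkeeping deleted above (task_ids_, the semaphore, and the pending/active counters) with a single job handle. Workers poll JobDelegate::ShouldYield() to stop cooperatively, GetMaxConcurrency() tells the scheduler how many workers are still useful, and JobHandle::Join() subsumes the semaphore wait. Below is a minimal standalone sketch of the same pattern against the public include/v8-platform.h interface; DrainQueueJob, Demo, and the atomic item counter are illustrative stand-ins, not part of this patch.

// Sketch only: DrainQueueJob and Demo are hypothetical, not part of this CL.
#include <algorithm>
#include <atomic>
#include <cstddef>
#include <memory>

#include "include/v8-platform.h"

class DrainQueueJob : public v8::JobTask {
 public:
  explicit DrainQueueJob(std::atomic<size_t>* items) : items_(items) {}

  // Each worker claims one item at a time; ShouldYield() lets the scheduler
  // preempt the worker, replacing the TryAbort()/semaphore handshake that
  // CancelableTask required.
  void Run(v8::JobDelegate* delegate) override {
    while (true) {
      size_t remaining = items_->load();
      if (remaining == 0) return;  // Queue drained; worker exits.
      if (!items_->compare_exchange_strong(remaining, remaining - 1)) {
        continue;  // Lost the race to another worker; retry.
      }
      // ... process the claimed item (e.g. unmap one chunk) here ...
      if (delegate->ShouldYield()) return;  // Re-scheduled later if needed.
    }
  }

  // Re-queried by the scheduler as work is posted or drained, so no manual
  // pending/active task accounting is needed. Capped at 4 to mirror
  // kMaxUnmapperTasks in the patch.
  size_t GetMaxConcurrency(size_t /* worker_count */) const override {
    return std::min<size_t>(4, items_->load());
  }

 private:
  std::atomic<size_t>* const items_;
};

// Posting and joining, mirroring FreeQueuedChunks() and
// CancelAndWaitForPendingTasks() in the patch:
void Demo(v8::Platform* platform, std::atomic<size_t>* items) {
  std::unique_ptr<v8::JobHandle> handle = platform->PostJob(
      v8::TaskPriority::kUserVisible, std::make_unique<DrainQueueJob>(items));
  *items += 16;                         // More work arrived...
  handle->NotifyConcurrencyIncrease();  // ...so allow more workers.
  handle->Join();                       // Replaces the semaphore wait.
}

One design point the sketch makes visible: concurrency is derived from the amount of queued work rather than tracked by hand, which is why the patch can delete MakeRoomForNewTasks() and all of its DCHECK accounting.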