diff --git a/src/heap/concurrent-marking.cc b/src/heap/concurrent-marking.cc
index 7bb816e8f2..0a86eeb1d4 100644
--- a/src/heap/concurrent-marking.cc
+++ b/src/heap/concurrent-marking.cc
@@ -436,13 +436,8 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
                       GCTracer::BackgroundScope::MC_BACKGROUND_MARKING);
   size_t kBytesUntilInterruptCheck = 64 * KB;
   int kObjectsUntilInterrupCheck = 1000;
-  LiveBytesMap* live_bytes = nullptr;
-  {
-    base::LockGuard<base::Mutex> guard(&task_state->lock);
-    live_bytes = &task_state->live_bytes;
-  }
-  ConcurrentMarkingVisitor visitor(shared_, bailout_, live_bytes, weak_objects_,
-                                   task_id);
+  ConcurrentMarkingVisitor visitor(shared_, bailout_, &task_state->live_bytes,
+                                   weak_objects_, task_id);
   double time_ms;
   size_t marked_bytes = 0;
   if (FLAG_trace_concurrent_marking) {
@@ -451,48 +446,42 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
   }
   {
     TimedScope scope(&time_ms);
-    {
-      base::LockGuard<base::Mutex> guard(&task_state->lock);
-      bool done = false;
-      while (!done) {
-        size_t current_marked_bytes = 0;
-        int objects_processed = 0;
-        while (current_marked_bytes < kBytesUntilInterruptCheck &&
-               objects_processed < kObjectsUntilInterrupCheck) {
-          HeapObject* object;
-          if (!shared_->Pop(task_id, &object)) {
-            done = true;
-            break;
-          }
-          objects_processed++;
-          Address new_space_top = heap_->new_space()->original_top();
-          Address new_space_limit = heap_->new_space()->original_limit();
-          Address addr = object->address();
-          if (new_space_top <= addr && addr < new_space_limit) {
-            on_hold_->Push(task_id, object);
-          } else {
-            Map* map = object->synchronized_map();
-            current_marked_bytes += visitor.Visit(map, object);
-          }
-        }
-        marked_bytes += current_marked_bytes;
-        base::AsAtomicWord::Relaxed_Store(&task_state->marked_bytes,
-                                          marked_bytes);
-        if (task_state->interrupt_request.Value()) {
-          TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
-                       "ConcurrentMarking::Run Paused");
-          task_state->interrupt_condition.Wait(&task_state->lock);
-        } else if (task_state->preemption_request.Value()) {
-          TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
-                       "ConcurrentMarking::Run Preempted");
+
+    bool done = false;
+    while (!done) {
+      size_t current_marked_bytes = 0;
+      int objects_processed = 0;
+      while (current_marked_bytes < kBytesUntilInterruptCheck &&
+             objects_processed < kObjectsUntilInterrupCheck) {
+        HeapObject* object;
+        if (!shared_->Pop(task_id, &object)) {
+          done = true;
           break;
         }
+        objects_processed++;
+        Address new_space_top = heap_->new_space()->original_top();
+        Address new_space_limit = heap_->new_space()->original_limit();
+        Address addr = object->address();
+        if (new_space_top <= addr && addr < new_space_limit) {
+          on_hold_->Push(task_id, object);
+        } else {
+          Map* map = object->synchronized_map();
+          current_marked_bytes += visitor.Visit(map, object);
+        }
+      }
+      marked_bytes += current_marked_bytes;
+      base::AsAtomicWord::Relaxed_Store(&task_state->marked_bytes,
+                                        marked_bytes);
+      if (task_state->preemption_request.Value()) {
+        TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+                     "ConcurrentMarking::Run Preempted");
+        break;
       }
-      // The lock is also required to synchronize with worklist update after
-      // young generation GC.
-      bailout_->FlushToGlobal(task_id);
-      on_hold_->FlushToGlobal(task_id);
     }
+    shared_->FlushToGlobal(task_id);
+    bailout_->FlushToGlobal(task_id);
+    on_hold_->FlushToGlobal(task_id);
+
     weak_objects_->weak_cells.FlushToGlobal(task_id);
     weak_objects_->transition_arrays.FlushToGlobal(task_id);
     base::AsAtomicWord::Relaxed_Store(&task_state->marked_bytes, 0);
@@ -534,7 +523,6 @@ void ConcurrentMarking::ScheduleTasks() {
               "Scheduling concurrent marking task %d\n", i);
       }
       task_state_[i].preemption_request.SetValue(false);
-      task_state_[i].interrupt_request.SetValue(false);
       is_pending_[i] = true;
       ++pending_task_count_;
       Task* task = new Task(heap_->isolate(), this, &task_state_[i], i);
@@ -557,10 +545,12 @@ void ConcurrentMarking::RescheduleTasksIfNeeded() {
   }
 }
 
-void ConcurrentMarking::Stop(StopRequest stop_request) {
-  if (!FLAG_concurrent_marking) return;
+bool ConcurrentMarking::Stop(StopRequest stop_request) {
+  if (!FLAG_concurrent_marking) return false;
   base::LockGuard<base::Mutex> guard(&pending_lock_);
 
+  if (pending_task_count_ == 0) return false;
+
   if (stop_request != StopRequest::COMPLETE_TASKS_FOR_TESTING) {
     CancelableTaskManager* task_manager =
         heap_->isolate()->cancelable_task_manager();
@@ -582,6 +572,7 @@ void ConcurrentMarking::Stop(StopRequest stop_request) {
   for (int i = 1; i <= task_count_; i++) {
     DCHECK(!is_pending_[i]);
   }
+  return true;
 }
 
 void ConcurrentMarking::FlushLiveBytes(
@@ -621,25 +612,14 @@ size_t ConcurrentMarking::TotalMarkedBytes() {
 }
 
 ConcurrentMarking::PauseScope::PauseScope(ConcurrentMarking* concurrent_marking)
-    : concurrent_marking_(concurrent_marking) {
-  if (!FLAG_concurrent_marking) return;
-  // Request task_state for all tasks.
-  for (int i = 1; i <= kMaxTasks; i++) {
-    concurrent_marking_->task_state_[i].interrupt_request.SetValue(true);
-  }
-  // Now take a lock to ensure that the tasks are waiting.
-  for (int i = 1; i <= kMaxTasks; i++) {
-    concurrent_marking_->task_state_[i].lock.Lock();
-  }
+    : concurrent_marking_(concurrent_marking),
+      resume_on_exit_(concurrent_marking_->Stop(
+          ConcurrentMarking::StopRequest::PREEMPT_TASKS)) {
+  DCHECK_IMPLIES(resume_on_exit_, FLAG_concurrent_marking);
 }
 
 ConcurrentMarking::PauseScope::~PauseScope() {
-  if (!FLAG_concurrent_marking) return;
-  for (int i = kMaxTasks; i >= 1; i--) {
-    concurrent_marking_->task_state_[i].interrupt_request.SetValue(false);
-    concurrent_marking_->task_state_[i].interrupt_condition.NotifyAll();
-    concurrent_marking_->task_state_[i].lock.Unlock();
-  }
+  if (resume_on_exit_) concurrent_marking_->RescheduleTasksIfNeeded();
 }
 
 }  // namespace internal
diff --git a/src/heap/concurrent-marking.h b/src/heap/concurrent-marking.h
index 22f1633d68..16e9c7ff9d 100644
--- a/src/heap/concurrent-marking.h
+++ b/src/heap/concurrent-marking.h
@@ -30,14 +30,16 @@ using LiveBytesMap =
 class ConcurrentMarking {
  public:
   // When the scope is entered, the concurrent marking tasks
-  // are paused and are not looking at the heap objects.
+  // are preempted and are not looking at the heap objects; concurrent marking
+  // is resumed when the scope is exited.
   class PauseScope {
    public:
     explicit PauseScope(ConcurrentMarking* concurrent_marking);
     ~PauseScope();
 
    private:
-    ConcurrentMarking* concurrent_marking_;
+    ConcurrentMarking* const concurrent_marking_;
+    const bool resume_on_exit_;
   };
 
   enum class StopRequest {
@@ -62,7 +64,10 @@ class ConcurrentMarking {
   // heap should not be moved while these are active (can be stopped safely via
   // Stop() or PauseScope).
   void ScheduleTasks();
-  void Stop(StopRequest stop_request);
+
+  // Stops concurrent marking per |stop_request|'s semantics. Returns true
+  // if concurrent marking was in progress, false otherwise.
+  bool Stop(StopRequest stop_request);
   void RescheduleTasksIfNeeded();
 
   // Flushes the local live bytes into the given marking state.
@@ -81,16 +86,6 @@ class ConcurrentMarking {
     // marker to give up the worker thread.
     base::AtomicValue<bool> preemption_request;
 
-    // When the concurrent marking task has this lock, then objects in the
-    // heap are guaranteed to not move.
-    base::Mutex lock;
-    // The main thread sets this flag to true, when it wants the concurrent
-    // marker to give up the lock.
-    base::AtomicValue<bool> interrupt_request;
-    // The concurrent marker waits on this condition until the request
-    // flag is cleared by the main thread.
-    base::ConditionVariable interrupt_condition;
-
     LiveBytesMap live_bytes;
     size_t marked_bytes = 0;
     char cache_line_padding[64];
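
For readers outside the V8 tree, the control flow this patch lands on can be summarized in a short standalone sketch. Everything below is hypothetical illustration, not V8 code: SketchMarker, its members, and the use of std::thread stand in for V8's ConcurrentMarking, its cancelable-task machinery, and base::AtomicValue; where V8's Stop() blocks on a condition variable until pending_task_count_ drops to zero, the sketch simply joins the worker thread, and it assumes a single client thread driving ScheduleTask()/Stop().

// Minimal sketch (not V8 API) of the preemption pattern the patch adopts:
// the worker polls an atomic flag between bounded chunks of work, and a
// RAII scope stops the worker on entry and reschedules it on exit only if
// a task was actually in flight.
#include <atomic>
#include <chrono>
#include <mutex>
#include <thread>

class SketchMarker {
 public:
  ~SketchMarker() { Stop(); }

  // Starts the background worker if it is not already pending.
  void ScheduleTask() {
    std::lock_guard<std::mutex> guard(pending_lock_);
    if (pending_) return;
    preemption_request_.store(false, std::memory_order_relaxed);
    pending_ = true;
    worker_ = std::thread([this] { Run(); });
  }

  // Mirrors the bool-returning Stop() in the patch: returns true iff a
  // task was in progress when the stop was requested. Assumes a single
  // client thread; the real Stop() is safe against concurrent callers.
  bool Stop() {
    std::unique_lock<std::mutex> guard(pending_lock_);
    if (!pending_) return false;
    preemption_request_.store(true, std::memory_order_relaxed);
    guard.unlock();
    worker_.join();  // V8 waits on a condition variable instead.
    guard.lock();
    pending_ = false;
    return true;
  }

  // Mirrors the new PauseScope: preempt in the constructor, resume in the
  // destructor, but only if the constructor actually stopped something.
  class PauseScope {
   public:
    explicit PauseScope(SketchMarker* marker)
        : marker_(marker), resume_on_exit_(marker->Stop()) {}
    ~PauseScope() {
      if (resume_on_exit_) marker_->ScheduleTask();
    }

   private:
    SketchMarker* const marker_;
    const bool resume_on_exit_;
  };

 private:
  void Run() {
    // Process bounded chunks of work, checking for preemption in between,
    // much like the kBytesUntilInterruptCheck budget in the patch.
    for (int chunk = 0; chunk < 1000; chunk++) {  // 1000 = "worklist drained"
      std::this_thread::sleep_for(std::chrono::microseconds(50));  // "marking"
      if (preemption_request_.load(std::memory_order_relaxed)) break;
    }
    // The real worker flushes its local worklists and live-byte counts to
    // global state here, before giving up the thread.
  }

  std::mutex pending_lock_;
  bool pending_ = false;  // guarded by pending_lock_
  std::atomic<bool> preemption_request_{false};
  std::thread worker_;
};

int main() {
  SketchMarker marker;
  marker.ScheduleTask();
  {
    SketchMarker::PauseScope pause(&marker);  // worker is parked here
    // Safe to move heap objects in the real collector.
  }  // worker rescheduled here, because one was running when we paused
}

The bool return is the crux of the patch: the PauseScope destructor reschedules marking only when its constructor actually preempted a running task, which is what allows the old per-task lock, interrupt_request flag, and interrupt_condition to be deleted.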