[heap]: Convert Sweep to Job
Max concurrency is inferred from the sweeping queue size for OLD_SPACE and
MAP_SPACE. An extra Sweeper::TearDown() call in MarkCompactCollector::TearDown()
is needed to cancel the job.

Change-Id: Iafba7d7d24e8f6e5c5a1d5c0348dea731f0ac224
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2480783
Commit-Queue: Etienne Pierre-Doray <etiennep@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#70767}
commit 795c0b1c7b
parent db80ab3e1b
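The heart of the change is the GetMaxConcurrency() override in the diff below:
the job asks for one worker per started batch of kPagePerTask queued pages (a
ceiling division over the OLD_SPACE and MAP_SPACE sweeping lists), on top of
workers already running, capped at kMaxSweeperTasks. A minimal standalone
sketch of that heuristic; the cap value 3 is an assumption here, since
kMaxSweeperTasks is defined outside the hunks shown:

#include <algorithm>
#include <cstddef>
#include <iostream>

// Sketch of SweeperJob::GetMaxConcurrency(): the desired worker count is
// derived from the pages still queued for OLD_SPACE and MAP_SPACE, at
// kPagePerTask pages per worker, clamped to kMaxSweeperTasks.
size_t GetMaxConcurrency(size_t worker_count, size_t queued_page_count) {
  const size_t kMaxSweeperTasks = 3;  // assumed cap, not shown in this diff
  const size_t kPagePerTask = 2;
  // worker_count keeps already-running workers alive; the ceiling division
  // adds one worker per started batch of kPagePerTask queued pages.
  return std::min<size_t>(
      kMaxSweeperTasks,
      worker_count + (queued_page_count + kPagePerTask - 1) / kPagePerTask);
}

int main() {
  std::cout << GetMaxConcurrency(0, 5) << "\n";  // ceil(5/2) = 3
  std::cout << GetMaxConcurrency(1, 1) << "\n";  // 1 running + 1 batch = 2
  std::cout << GetMaxConcurrency(0, 0) << "\n";  // queue drained -> 0
}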
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -434,6 +434,7 @@ void MarkCompactCollector::TearDown() {
     // Marking barriers of LocalHeaps will be published in their destructors.
     marking_worklists()->Clear();
   }
+  sweeper()->TearDown();
 }
 
 void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
--- a/src/heap/sweeper.cc
+++ b/src/heap/sweeper.cc
@@ -19,12 +19,8 @@ namespace internal {
 Sweeper::Sweeper(Heap* heap, MajorNonAtomicMarkingState* marking_state)
     : heap_(heap),
       marking_state_(marking_state),
-      num_tasks_(0),
-      pending_sweeper_tasks_semaphore_(0),
       incremental_sweeper_pending_(false),
       sweeping_in_progress_(false),
-      num_sweeping_tasks_(0),
-      stop_sweeper_tasks_(false),
       iterability_task_semaphore_(0),
       iterability_in_progress_(false),
       iterability_task_started_(false),
@@ -32,10 +28,10 @@ Sweeper::Sweeper(Heap* heap, MajorNonAtomicMarkingState* marking_state)
 
 Sweeper::PauseOrCompleteScope::PauseOrCompleteScope(Sweeper* sweeper)
     : sweeper_(sweeper) {
-  sweeper_->stop_sweeper_tasks_ = true;
   if (!sweeper_->sweeping_in_progress()) return;
 
-  sweeper_->AbortAndWaitForTasks();
+  if (sweeper_->job_handle_ && sweeper_->job_handle_->IsValid())
+    sweeper_->job_handle_->Cancel();
 
   // Complete sweeping if there's nothing more to do.
   if (sweeper_->IsDoneSweeping()) {
@@ -49,7 +45,6 @@ Sweeper::PauseOrCompleteScope::PauseOrCompleteScope(Sweeper* sweeper)
 }
 
 Sweeper::PauseOrCompleteScope::~PauseOrCompleteScope() {
-  sweeper_->stop_sweeper_tasks_ = false;
   if (!sweeper_->sweeping_in_progress()) return;
 
   sweeper_->StartSweeperTasks();
@@ -78,27 +73,17 @@ Sweeper::FilterSweepingPagesScope::~FilterSweepingPagesScope() {
   // old_space_sweeping_list_ does not need to be cleared as we don't use it.
 }
 
-class Sweeper::SweeperTask final : public CancelableTask {
+class Sweeper::SweeperJob final : public JobTask {
  public:
-  SweeperTask(Isolate* isolate, Sweeper* sweeper,
-              base::Semaphore* pending_sweeper_tasks,
-              std::atomic<intptr_t>* num_sweeping_tasks,
-              AllocationSpace space_to_start)
-      : CancelableTask(isolate),
-        sweeper_(sweeper),
-        pending_sweeper_tasks_(pending_sweeper_tasks),
-        num_sweeping_tasks_(num_sweeping_tasks),
-        space_to_start_(space_to_start),
-        tracer_(isolate->heap()->tracer()) {}
+  SweeperJob(Isolate* isolate, Sweeper* sweeper)
+      : sweeper_(sweeper), tracer_(isolate->heap()->tracer()) {}
 
-  ~SweeperTask() override = default;
+  ~SweeperJob() override = default;
 
- private:
-  void RunInternal() final {
+  void Run(JobDelegate* delegate) final {
     TRACE_BACKGROUND_GC(tracer_,
                         GCTracer::BackgroundScope::MC_BACKGROUND_SWEEPING);
-    DCHECK(IsValidSweepingSpace(space_to_start_));
-    const int offset = space_to_start_ - FIRST_GROWABLE_PAGED_SPACE;
+    const int offset = delegate->GetTaskId();
     for (int i = 0; i < kNumberOfSweepingSpaces; i++) {
       const AllocationSpace space_id = static_cast<AllocationSpace>(
           FIRST_GROWABLE_PAGED_SPACE +
@@ -106,19 +91,24 @@ class Sweeper::SweeperTask final : public CancelableTask {
       // Do not sweep code space concurrently.
       if (space_id == CODE_SPACE) continue;
       DCHECK(IsValidSweepingSpace(space_id));
-      sweeper_->SweepSpaceFromTask(space_id);
+      sweeper_->ConcurrentSweepSpace(space_id, delegate);
     }
-    (*num_sweeping_tasks_)--;
-    pending_sweeper_tasks_->Signal();
   }
 
+  size_t GetMaxConcurrency(size_t worker_count) const override {
+    const size_t kPagePerTask = 2;
+    return std::min<size_t>(
+        kMaxSweeperTasks,
+        worker_count +
+            (sweeper_->ConcurrentSweepingPageCount() + kPagePerTask - 1) /
+                kPagePerTask);
+  }
+
+ private:
   Sweeper* const sweeper_;
-  base::Semaphore* const pending_sweeper_tasks_;
-  std::atomic<intptr_t>* const num_sweeping_tasks_;
-  AllocationSpace space_to_start_;
   GCTracer* const tracer_;
 
-  DISALLOW_COPY_AND_ASSIGN(SweeperTask);
+  DISALLOW_COPY_AND_ASSIGN(SweeperJob);
 };
 
 class Sweeper::IncrementalSweeperTask final : public CancelableTask {
@@ -136,7 +126,7 @@ class Sweeper::IncrementalSweeperTask final : public CancelableTask {
     sweeper_->incremental_sweeper_pending_ = false;
 
     if (sweeper_->sweeping_in_progress()) {
-      if (!sweeper_->SweepSpaceIncrementallyFromTask(CODE_SPACE)) {
+      if (!sweeper_->IncrementalSweepSpace(CODE_SPACE)) {
         sweeper_->ScheduleIncrementalSweepingTask();
       }
     }
@@ -147,8 +137,11 @@ class Sweeper::IncrementalSweeperTask final : public CancelableTask {
   DISALLOW_COPY_AND_ASSIGN(IncrementalSweeperTask);
 };
 
+void Sweeper::TearDown() {
+  if (job_handle_ && job_handle_->IsValid()) job_handle_->Cancel();
+}
+
 void Sweeper::StartSweeping() {
-  CHECK(!stop_sweeper_tasks_);
   sweeping_in_progress_ = true;
   iterability_in_progress_ = true;
   should_reduce_memory_ = heap_->ShouldReduceMemory();
@@ -174,20 +167,12 @@ void Sweeper::StartSweeping() {
 }
 
 void Sweeper::StartSweeperTasks() {
-  DCHECK_EQ(0, num_tasks_);
-  DCHECK_EQ(0, num_sweeping_tasks_);
+  DCHECK(!job_handle_ || !job_handle_->IsValid());
   if (FLAG_concurrent_sweeping && sweeping_in_progress_ &&
       !heap_->delay_sweeper_tasks_for_testing_) {
-    ForAllSweepingSpaces([this](AllocationSpace space) {
-      DCHECK(IsValidSweepingSpace(space));
-      num_sweeping_tasks_++;
-      auto task = std::make_unique<SweeperTask>(
-          heap_->isolate(), this, &pending_sweeper_tasks_semaphore_,
-          &num_sweeping_tasks_, space);
-      DCHECK_LT(num_tasks_, kMaxSweeperTasks);
-      task_ids_[num_tasks_++] = task->id();
-      V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
-    });
+    job_handle_ = V8::GetCurrentPlatform()->PostJob(
+        TaskPriority::kUserVisible,
+        std::make_unique<SweeperJob>(heap_->isolate(), this));
     ScheduleIncrementalSweepingTask();
   }
 }
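All of the task-ID and semaphore bookkeeping that StartSweeperTasks() used to
do collapses into one JobHandle returned by PostJob(). The handle's contract,
as the surrounding hunks use it: Cancel() (Sweeper::TearDown()) stops workers
without draining the queue, while Join() (EnsureCompleted(), below) blocks
until the work is gone. A toy single-worker model of that contract, assuming
simplified semantics rather than V8's real scheduler:

#include <atomic>
#include <cstdio>
#include <thread>

// A toy JobHandle: one worker thread draining a shared counter of "pages".
// Cancel() asks the worker to stop early, as Sweeper::TearDown() does;
// Join() waits until the queue is drained, as EnsureCompleted() does.
class ToyJobHandle {
 public:
  explicit ToyJobHandle(std::atomic<int>* pages) : pages_(pages) {
    worker_ = std::thread([this] {
      while (!canceled_.load()) {
        if (pages_->load() == 0) break;  // queue drained: job completes
        pages_->fetch_sub(1);            // "sweep" one page
      }
    });
  }
  ~ToyJobHandle() { Join(); }
  void Cancel() {  // stop at the next page boundary, without draining
    canceled_.store(true);
    Join();
  }
  void Join() {  // wait for the worker to finish
    if (worker_.joinable()) worker_.join();
  }

 private:
  std::atomic<int>* pages_;
  std::atomic<bool> canceled_{false};
  std::thread worker_;
};

int main() {
  std::atomic<int> pages{1000};
  ToyJobHandle handle(&pages);
  handle.Join();  // the EnsureCompleted() path: block until all pages swept
  std::printf("pages left: %d\n", pages.load());  // 0
}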
@@ -212,22 +197,6 @@ void Sweeper::MergeOldToNewRememberedSetsForSweptPages() {
   });
 }
 
-void Sweeper::AbortAndWaitForTasks() {
-  if (!FLAG_concurrent_sweeping) return;
-
-  for (int i = 0; i < num_tasks_; i++) {
-    if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
-        TryAbortResult::kTaskAborted) {
-      pending_sweeper_tasks_semaphore_.Wait();
-    } else {
-      // Aborted case.
-      num_sweeping_tasks_--;
-    }
-  }
-  num_tasks_ = 0;
-  DCHECK_EQ(0, num_sweeping_tasks_);
-}
-
 void Sweeper::EnsureCompleted() {
   if (!sweeping_in_progress_) return;
 
@@ -238,7 +207,7 @@ void Sweeper::EnsureCompleted() {
   ForAllSweepingSpaces(
       [this](AllocationSpace space) { ParallelSweepSpace(space, 0); });
 
-  AbortAndWaitForTasks();
+  if (job_handle_ && job_handle_->IsValid()) job_handle_->Join();
 
   ForAllSweepingSpaces([this](AllocationSpace space) {
     CHECK(sweeping_list_[GetSweepSpaceIndex(space)].empty());
@@ -265,7 +234,9 @@ void Sweeper::SupportConcurrentSweeping() {
   });
 }
 
-bool Sweeper::AreSweeperTasksRunning() { return num_sweeping_tasks_ != 0; }
+bool Sweeper::AreSweeperTasksRunning() {
+  return job_handle_ && job_handle_->IsValid() && !job_handle_->IsCompleted();
+}
 
 V8_INLINE size_t Sweeper::FreeAndProcessFreedMemory(
     Address free_start, Address free_end, Page* page, Space* space,
@@ -445,9 +416,16 @@ int Sweeper::RawSweep(
       p->owner()->free_list()->GuaranteedAllocatable(max_freed_bytes));
 }
 
-void Sweeper::SweepSpaceFromTask(AllocationSpace identity) {
+size_t Sweeper::ConcurrentSweepingPageCount() {
+  base::MutexGuard guard(&mutex_);
+  return sweeping_list_[GetSweepSpaceIndex(OLD_SPACE)].size() +
+         sweeping_list_[GetSweepSpaceIndex(MAP_SPACE)].size();
+}
+
+void Sweeper::ConcurrentSweepSpace(AllocationSpace identity,
+                                   JobDelegate* delegate) {
   Page* page = nullptr;
-  while (!stop_sweeper_tasks_ &&
+  while (!delegate->ShouldYield() &&
          ((page = GetSweepingPageSafe(identity)) != nullptr)) {
     // Typed slot sets are only recorded on code pages. Code pages
     // are not swept concurrently to the application to ensure W^X.
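The rewritten loop condition is the behavioral core of this hunk: the global
stop_sweeper_tasks_ flag becomes a per-job delegate->ShouldYield() poll between
pages, which is what lets the job system cancel or deprioritize sweeping at
page granularity. A minimal single-threaded model; ToyDelegate stands in for
v8::JobDelegate and is not a real V8 type:

#include <atomic>
#include <cstdio>

// Toy model of the cooperative-yield pattern: the worker re-checks
// ShouldYield() before each page, so a cancellation request takes effect at
// the next page boundary rather than via a shared stop flag.
struct ToyDelegate {
  std::atomic<bool> yield_requested{false};
  bool ShouldYield() const { return yield_requested.load(); }
};

// Same loop shape as Sweeper::ConcurrentSweepSpace(): stop when asked to
// yield or when no page is left in the queue.
int SweepLoop(ToyDelegate* delegate, std::atomic<int>* queued_pages) {
  int swept = 0;
  while (!delegate->ShouldYield() && queued_pages->load() > 0) {
    queued_pages->fetch_sub(1);  // "sweep" one page
    ++swept;
  }
  return swept;
}

int main() {
  ToyDelegate delegate;
  std::atomic<int> pages{8};
  std::printf("swept %d pages\n", SweepLoop(&delegate, &pages));  // swept 8
}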
@@ -457,7 +435,7 @@ void Sweeper::SweepSpaceFromTask(AllocationSpace identity) {
   }
 }
 
-bool Sweeper::SweepSpaceIncrementallyFromTask(AllocationSpace identity) {
+bool Sweeper::IncrementalSweepSpace(AllocationSpace identity) {
   if (Page* page = GetSweepingPageSafe(identity)) {
     ParallelSweepPage(page, identity);
   }
@@ -536,7 +514,7 @@ void Sweeper::AddPage(AllocationSpace space, Page* page,
                       Sweeper::AddPageMode mode) {
   base::MutexGuard guard(&mutex_);
   DCHECK(IsValidSweepingSpace(space));
-  DCHECK(!FLAG_concurrent_sweeping || !AreSweeperTasksRunning());
+  DCHECK(!FLAG_concurrent_sweeping || !job_handle_ || !job_handle_->IsValid());
   if (mode == Sweeper::REGULAR) {
     PrepareToBeSweptPage(space, page);
   } else {
--- a/src/heap/sweeper.h
+++ b/src/heap/sweeper.h
@@ -81,6 +81,8 @@ class Sweeper {
 
   bool sweeping_in_progress() const { return sweeping_in_progress_; }
 
+  void TearDown();
+
   void AddPage(AllocationSpace space, Page* page, AddPageMode mode);
 
   int ParallelSweepSpace(
@@ -123,7 +125,7 @@ class Sweeper {
  private:
   class IncrementalSweeperTask;
   class IterabilityTask;
-  class SweeperTask;
+  class SweeperJob;
 
   static const int kNumberOfSweepingSpaces =
       LAST_GROWABLE_PAGED_SPACE - FIRST_GROWABLE_PAGED_SPACE + 1;
@@ -172,13 +174,13 @@ class Sweeper {
     return is_done;
   }
 
-  void SweepSpaceFromTask(AllocationSpace identity);
+  size_t ConcurrentSweepingPageCount();
+
+  void ConcurrentSweepSpace(AllocationSpace identity, JobDelegate* delegate);
 
   // Sweeps incrementally one page from the given space. Returns true if
   // there are no more pages to sweep in the given space.
-  bool SweepSpaceIncrementallyFromTask(AllocationSpace identity);
-
-  void AbortAndWaitForTasks();
+  bool IncrementalSweepSpace(AllocationSpace identity);
 
   Page* GetSweepingPageSafe(AllocationSpace space);
 
@@ -202,9 +204,7 @@ class Sweeper {
 
   Heap* const heap_;
   MajorNonAtomicMarkingState* marking_state_;
-  int num_tasks_;
-  CancelableTaskManager::Id task_ids_[kNumberOfSweepingSpaces];
-  base::Semaphore pending_sweeper_tasks_semaphore_;
+  std::unique_ptr<JobHandle> job_handle_;
   base::Mutex mutex_;
   SweptList swept_list_[kNumberOfSweepingSpaces];
   SweepingList sweeping_list_[kNumberOfSweepingSpaces];
@@ -212,11 +212,6 @@ class Sweeper {
   // Main thread can finalize sweeping, while background threads allocation slow
   // path checks this flag to see whether it could support concurrent sweeping.
   std::atomic<bool> sweeping_in_progress_;
-  // Counter is actively maintained by the concurrent tasks to avoid querying
-  // the semaphore for maintaining a task counter on the main thread.
-  std::atomic<intptr_t> num_sweeping_tasks_;
-  // Used by PauseOrCompleteScope to signal early bailout to tasks.
-  std::atomic<bool> stop_sweeper_tasks_;
 
   // Pages that are only made iterable but have their free lists ignored.
   IterabilityList iterability_list_;