Reland "Reland "[Heap] ScavengerCollector use Jobs.""

This is a reland of 92f815a80d
Safe to reland as-is with task id lifetime fix in
https://chromium-review.googlesource.com/c/v8/v8/+/2437005

Original change's description:
> Reland "[Heap] ScavengerCollector use Jobs."
>
> This is a reland of 9e8c54f830
> Safe to reland as-is with fix in AcquireTaskId
> https://chromium-review.googlesource.com/c/v8/v8/+/2401964
>
> Additional changes are made in the reland:
> -TRACE_GC is split for background/foreground scopes.
> -New IndexGenerator is used for dynamic work assignment.
>
> Original change's description:
> > [Heap] ScavengerCollector use Jobs.
> >
> > No yielding is necessary since the main thread Join()s.
> >
> > Max concurrency is determined by either
> > remaining_memory_chunks_ or the global pool size
> > (copied_list_ + promotion_list_).
> >
> > Change-Id: Ie30fa86c44d3224b04df5d79569bce126ce7d96b
> > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2354390
> > Commit-Queue: Etienne Pierre-Doray <etiennep@chromium.org>
> > Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> > Cr-Commit-Position: refs/heads/master@{#69746}
>
> Change-Id: Id9d7a5bf3b2337ae4cf1e76770f4b14ebb8ca256
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2399041
> Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> Commit-Queue: Etienne Pierre-Doray <etiennep@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#70135}

Change-Id: Id0451b6eca9a125c7695d251d1a7d813e0664dd3
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2432071
Commit-Queue: Etienne Pierre-Doray <etiennep@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#70238}
Etienne Pierre-doray 2020-09-30 11:41:56 -04:00 committed by Commit Bot
parent b2c4fec576
commit b376a124e2
5 changed files with 232 additions and 128 deletions
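
For context before the per-file diffs: this CL replaces the semaphore-based ItemParallelJob with the v8::JobTask API from include/v8-platform.h. A minimal sketch of that API's shape, assuming only what the diffs below show (SketchJob, RunSketch, and the plain work counter are illustrative, not part of the CL):

#include <atomic>
#include <cstddef>
#include <memory>

#include "include/v8-platform.h"

// Illustrative job: drains a shared work counter. The scheduler re-queries
// GetMaxConcurrency() as work is consumed, so the job self-terminates once
// the counter hits zero.
class SketchJob : public v8::JobTask {
 public:
  explicit SketchJob(std::atomic<size_t>* remaining) : remaining_(remaining) {}

  // Runs concurrently on workers and, via Join(), on the main thread. The CL
  // never calls delegate->ShouldYield() because the main thread Join()s.
  void Run(v8::JobDelegate* delegate) override {
    size_t current = remaining_->load(std::memory_order_relaxed);
    while (current > 0) {
      if (remaining_->compare_exchange_weak(current, current - 1,
                                            std::memory_order_relaxed)) {
        // ... process one unit of work ...
        current = remaining_->load(std::memory_order_relaxed);
      }
      // On failure, compare_exchange_weak reloads `current` for us.
    }
  }

  size_t GetMaxConcurrency(size_t worker_count) const override {
    // The CL's version also folds worker_count and the worklists' global
    // pool sizes into this; see scavenger.cc below.
    return remaining_->load(std::memory_order_relaxed);
  }

 private:
  std::atomic<size_t>* const remaining_;
};

// Usage mirrors the CL: post at kUserBlocking priority, then Join() so the
// main thread participates until all work is done.
void RunSketch(v8::Platform* platform) {
  std::atomic<size_t> remaining{100};
  platform
      ->PostJob(v8::TaskPriority::kUserBlocking,
                std::make_unique<SketchJob>(&remaining))
      ->Join();
}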

BUILD.gn

@@ -2652,6 +2652,7 @@ v8_source_set("v8_base_without_compiler") {
"src/heap/paged-spaces-inl.h",
"src/heap/paged-spaces.cc",
"src/heap/paged-spaces.h",
"src/heap/parallel-work-item.h",
"src/heap/read-only-heap-inl.h",
"src/heap/read-only-heap.cc",
"src/heap/read-only-heap.h",

src/heap/parallel-work-item.h

@@ -0,0 +1,32 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_PARALLEL_WORK_ITEM_H_
#define V8_HEAP_PARALLEL_WORK_ITEM_H_

#include <atomic>

namespace v8 {
namespace internal {

class ParallelWorkItem {
 public:
  ParallelWorkItem() = default;

  bool TryAcquire() {
    // memory_order_relaxed is sufficient as the work item's state itself
    // hasn't been modified since the beginning of its associated job. This is
    // only atomically acquiring the right to work on it.
    return reinterpret_cast<std::atomic<bool>*>(&acquire_)->exchange(
               true, std::memory_order_relaxed) == false;
  }

 private:
  bool acquire_{false};
};

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_PARALLEL_WORK_ITEM_H_
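
The intended usage pattern, mirrored by ConcurrentScavengePages() in scavenger.cc below: work items live in a shared vector and workers race TryAcquire(), so each item is processed exactly once without locks. A hedged sketch (Chunk, ProcessAll, and the plain atomic cursor standing in for V8's IndexGenerator are illustrative):

#include <atomic>
#include <cstddef>
#include <utility>
#include <vector>

#include "src/heap/parallel-work-item.h"

struct Chunk {};  // Stand-in for MemoryChunk.

using WorkVector =
    std::vector<std::pair<v8::internal::ParallelWorkItem, Chunk*>>;

// Each worker grabs a starting index, then walks forward acquiring items. A
// failed TryAcquire() means another worker owns that run, so the worker
// fetches a fresh starting index rather than contending item by item.
void ProcessAll(WorkVector* items, std::atomic<size_t>* cursor) {
  while (true) {
    size_t start = cursor->fetch_add(1, std::memory_order_relaxed);
    if (start >= items->size()) return;  // All indices have been handed out.
    for (size_t i = start; i < items->size(); ++i) {
      auto& work_item = (*items)[i];
      if (!work_item.first.TryAcquire()) break;
      // ... scavenge work_item.second ...
    }
  }
}

The relaxed exchange is enough because, as the comment in TryAcquire() says, the item's payload has not changed since the job started; workers only race for the right to process it.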

src/heap/scavenger-inl.h

@@ -38,6 +38,10 @@ bool Scavenger::PromotionList::View::Pop(struct PromotionListEntry* entry) {
return promotion_list_->Pop(task_id_, entry);
}
void Scavenger::PromotionList::View::FlushToGlobal() {
promotion_list_->FlushToGlobal(task_id_);
}
bool Scavenger::PromotionList::View::IsGlobalPoolEmpty() {
return promotion_list_->IsGlobalPoolEmpty();
}
@@ -78,6 +82,16 @@ bool Scavenger::PromotionList::Pop(int task_id,
return large_object_promotion_list_.Pop(task_id, entry);
}
void Scavenger::PromotionList::FlushToGlobal(int task_id) {
regular_object_promotion_list_.FlushToGlobal(task_id);
large_object_promotion_list_.FlushToGlobal(task_id);
}
size_t Scavenger::PromotionList::GlobalPoolSize() const {
return regular_object_promotion_list_.GlobalPoolSize() +
large_object_promotion_list_.GlobalPoolSize();
}
bool Scavenger::PromotionList::IsGlobalPoolEmpty() {
return regular_object_promotion_list_.IsGlobalPoolEmpty() &&
large_object_promotion_list_.IsGlobalPoolEmpty();
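
The new FlushToGlobal() and GlobalPoolSize() members exist because these worklists are two-level: pushes land in a per-task local segment and only become visible to other tasks, and countable by the job's GetMaxConcurrency(), once flushed to the global pool. A toy model of that visibility rule, assuming nothing about V8's Worklist beyond what this diff shows (ToyWorklist is not V8 code, and the real Worklist is lock-free):

#include <cstddef>
#include <mutex>
#include <utility>
#include <vector>

template <typename T, size_t kSegmentSize>
class ToyWorklist {
 public:
  // Pushes accumulate locally; a full segment is published automatically.
  void Push(std::vector<T>* local_segment, T value) {
    local_segment->push_back(std::move(value));
    if (local_segment->size() >= kSegmentSize) FlushToGlobal(local_segment);
  }

  // Publishes a partially filled segment so other tasks can steal it.
  void FlushToGlobal(std::vector<T>* local_segment) {
    if (local_segment->empty()) return;
    std::lock_guard<std::mutex> guard(mutex_);
    global_pool_.push_back(std::move(*local_segment));
    local_segment->clear();
  }

  // Only flushed segments count; work in local segments is invisible here.
  size_t GlobalPoolSize() const {
    std::lock_guard<std::mutex> guard(mutex_);
    return global_pool_.size();
  }

 private:
  mutable std::mutex mutex_;
  std::vector<std::vector<T>> global_pool_;
};

This is why scavenger.cc below adds scavengers[kMainThreadId]->Flush() after root scavenging: without it, work discovered by the main thread would leave GlobalPoolSize() at zero and GetMaxConcurrency() would under-provision workers.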

src/heap/scavenger.cc

@@ -25,65 +25,6 @@
namespace v8 {
namespace internal {
class PageScavengingItem final : public ItemParallelJob::Item {
public:
explicit PageScavengingItem(MemoryChunk* chunk) : chunk_(chunk) {}
~PageScavengingItem() override = default;
void Process(Scavenger* scavenger) { scavenger->ScavengePage(chunk_); }
private:
MemoryChunk* const chunk_;
};
class ScavengingTask final : public ItemParallelJob::Task {
public:
ScavengingTask(Heap* heap, Scavenger* scavenger, OneshotBarrier* barrier)
: ItemParallelJob::Task(heap->isolate()),
heap_(heap),
scavenger_(scavenger),
barrier_(barrier) {}
void RunInParallel(Runner runner) final {
if (runner == Runner::kForeground) {
TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
ProcessItems();
} else {
TRACE_BACKGROUND_GC(
heap_->tracer(),
GCTracer::BackgroundScope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL);
ProcessItems();
}
}
private:
void ProcessItems() {
double scavenging_time = 0.0;
{
barrier_->Start();
TimedScope scope(&scavenging_time);
PageScavengingItem* item = nullptr;
while ((item = GetItem<PageScavengingItem>()) != nullptr) {
item->Process(scavenger_);
item->MarkFinished();
}
do {
scavenger_->Process(barrier_);
} while (!barrier_->Wait());
scavenger_->Process();
}
if (FLAG_trace_parallel_scavenge) {
PrintIsolate(heap_->isolate(),
"scavenge[%p]: time=%.2f copied=%zu promoted=%zu\n",
static_cast<void*>(this), scavenging_time,
scavenger_->bytes_copied(), scavenger_->bytes_promoted());
}
}
Heap* const heap_;
Scavenger* const scavenger_;
OneshotBarrier* const barrier_;
};
class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
public:
IterateAndScavengePromotedObjectsVisitor(Scavenger* scavenger,
@@ -219,8 +160,81 @@ class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
}
};
ScavengerCollector::JobTask::JobTask(
ScavengerCollector* outer,
std::vector<std::unique_ptr<Scavenger>>* scavengers,
std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> memory_chunks,
Scavenger::CopiedList* copied_list,
Scavenger::PromotionList* promotion_list)
: outer_(outer),
scavengers_(scavengers),
memory_chunks_(std::move(memory_chunks)),
remaining_memory_chunks_(memory_chunks_.size()),
generator_(memory_chunks_.size()),
copied_list_(copied_list),
promotion_list_(promotion_list) {}
void ScavengerCollector::JobTask::Run(JobDelegate* delegate) {
DCHECK_LT(delegate->GetTaskId(), scavengers_->size());
Scavenger* scavenger = (*scavengers_)[delegate->GetTaskId()].get();
if (delegate->IsJoiningThread()) {
TRACE_GC(outer_->heap_->tracer(),
GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
ProcessItems(delegate, scavenger);
} else {
TRACE_BACKGROUND_GC(
outer_->heap_->tracer(),
GCTracer::BackgroundScope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL);
ProcessItems(delegate, scavenger);
}
}
size_t ScavengerCollector::JobTask::GetMaxConcurrency(
size_t worker_count) const {
// We need to account for local segments held by worker_count in addition to
// GlobalPoolSize() of copied_list_ and promotion_list_.
return std::min<size_t>(
scavengers_->size(),
std::max<size_t>(remaining_memory_chunks_.load(std::memory_order_relaxed),
worker_count + copied_list_->GlobalPoolSize() +
promotion_list_->GlobalPoolSize()));
}
void ScavengerCollector::JobTask::ProcessItems(JobDelegate* delegate,
Scavenger* scavenger) {
double scavenging_time = 0.0;
{
TimedScope scope(&scavenging_time);
ConcurrentScavengePages(scavenger);
scavenger->Process(delegate);
}
if (FLAG_trace_parallel_scavenge) {
PrintIsolate(outer_->heap_->isolate(),
"scavenge[%p]: time=%.2f copied=%zu promoted=%zu\n",
static_cast<void*>(this), scavenging_time,
scavenger->bytes_copied(), scavenger->bytes_promoted());
}
}
void ScavengerCollector::JobTask::ConcurrentScavengePages(
Scavenger* scavenger) {
while (remaining_memory_chunks_.load(std::memory_order_relaxed) > 0) {
base::Optional<size_t> index = generator_.GetNext();
if (!index) return;
for (size_t i = *index; i < memory_chunks_.size(); ++i) {
auto& work_item = memory_chunks_[i];
if (!work_item.first.TryAcquire()) break;
scavenger->ScavengePage(work_item.second);
if (remaining_memory_chunks_.fetch_sub(1, std::memory_order_relaxed) <=
1) {
return;
}
}
}
}
ScavengerCollector::ScavengerCollector(Heap* heap)
: isolate_(heap->isolate()), heap_(heap), parallel_scavenge_semaphore_(0) {}
: isolate_(heap->isolate()), heap_(heap) {}
// Remove this crashkey after chromium:1010312 is fixed.
class ScopedFullHeapCrashKey {
@@ -246,23 +260,12 @@ void ScavengerCollector::CollectGarbage() {
}
DCHECK(surviving_new_large_objects_.empty());
ItemParallelJob job(isolate_->cancelable_task_manager(),
&parallel_scavenge_semaphore_);
const int kMainThreadId = 0;
Scavenger* scavengers[kMaxScavengerTasks];
const bool is_logging = isolate_->LogObjectRelocation();
const int num_scavenge_tasks = NumberOfScavengeTasks();
OneshotBarrier barrier(base::TimeDelta::FromMilliseconds(kMaxWaitTimeMs));
std::vector<std::unique_ptr<Scavenger>> scavengers;
Worklist<MemoryChunk*, 64> empty_chunks;
const int num_scavenge_tasks = NumberOfScavengeTasks();
Scavenger::CopiedList copied_list(num_scavenge_tasks);
Scavenger::PromotionList promotion_list(num_scavenge_tasks);
EphemeronTableList ephemeron_table_list(num_scavenge_tasks);
for (int i = 0; i < num_scavenge_tasks; i++) {
scavengers[i] =
new Scavenger(this, heap_, is_logging, &empty_chunks, &copied_list,
&promotion_list, &ephemeron_table_list, i);
job.AddTask(new ScavengingTask(heap_, scavengers[i], &barrier));
}
{
Sweeper* sweeper = heap_->mark_compact_collector()->sweeper();
@@ -289,12 +292,20 @@ void ScavengerCollector::CollectGarbage() {
return !page->ContainsSlots<OLD_TO_NEW>() && !page->sweeping_slot_set();
});
const bool is_logging = isolate_->LogObjectRelocation();
for (int i = 0; i < num_scavenge_tasks; ++i) {
scavengers.emplace_back(
new Scavenger(this, heap_, is_logging, &empty_chunks, &copied_list,
&promotion_list, &ephemeron_table_list, i));
}
std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> memory_chunks;
RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
heap_, [&job](MemoryChunk* chunk) {
job.AddItem(new PageScavengingItem(chunk));
heap_, [&memory_chunks](MemoryChunk* chunk) {
memory_chunks.emplace_back(ParallelWorkItem{}, chunk);
});
RootScavengeVisitor root_scavenge_visitor(scavengers[kMainThreadId]);
RootScavengeVisitor root_scavenge_visitor(scavengers[kMainThreadId].get());
{
// Identify weak unmodified handles. Requires an unmodified graph.
@@ -319,18 +330,24 @@ void ScavengerCollector::CollectGarbage() {
heap_->IterateRoots(&root_scavenge_visitor, options);
isolate_->global_handles()->IterateYoungStrongAndDependentRoots(
&root_scavenge_visitor);
scavengers[kMainThreadId]->Flush();
}
{
// Parallel phase scavenging all copied and promoted objects.
TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
job.Run();
V8::GetCurrentPlatform()
->PostJob(v8::TaskPriority::kUserBlocking,
std::make_unique<JobTask>(this, &scavengers,
std::move(memory_chunks),
&copied_list, &promotion_list))
->Join();
DCHECK(copied_list.IsEmpty());
DCHECK(promotion_list.IsEmpty());
}
if (V8_UNLIKELY(FLAG_scavenge_separate_stack_scanning)) {
IterateStackAndScavenge(&root_scavenge_visitor, scavengers,
num_scavenge_tasks, kMainThreadId);
IterateStackAndScavenge(&root_scavenge_visitor, &scavengers,
kMainThreadId);
DCHECK(copied_list.IsEmpty());
DCHECK(promotion_list.IsEmpty());
}
@@ -357,10 +374,10 @@ void ScavengerCollector::CollectGarbage() {
DCHECK(surviving_new_large_objects_.empty());
for (int i = 0; i < num_scavenge_tasks; i++) {
scavengers[i]->Finalize();
delete scavengers[i];
for (auto& scavenger : scavengers) {
scavenger->Finalize();
}
scavengers.clear();
HandleSurvivingNewLargeObjects();
}
@@ -420,23 +437,24 @@ void ScavengerCollector::CollectGarbage() {
}
void ScavengerCollector::IterateStackAndScavenge(
RootScavengeVisitor* root_scavenge_visitor, Scavenger** scavengers,
int num_scavenge_tasks, int main_thread_id) {
RootScavengeVisitor* root_scavenge_visitor,
std::vector<std::unique_ptr<Scavenger>>* scavengers, int main_thread_id) {
// Scan the stack, scavenge the newly discovered objects, and report
// the survival statistics before and after the stack scanning.
// This code is not intended for production.
TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_STACK_ROOTS);
size_t survived_bytes_before = 0;
for (int i = 0; i < num_scavenge_tasks; i++) {
for (auto& scavenger : *scavengers) {
survived_bytes_before +=
scavengers[i]->bytes_copied() + scavengers[i]->bytes_promoted();
scavenger->bytes_copied() + scavenger->bytes_promoted();
}
heap_->IterateStackRoots(root_scavenge_visitor);
scavengers[main_thread_id]->Process();
(*scavengers)[main_thread_id]->Process();
size_t survived_bytes_after = 0;
for (int i = 0; i < num_scavenge_tasks; i++) {
for (auto& scavenger : *scavengers) {
survived_bytes_after +=
scavengers[i]->bytes_copied() + scavengers[i]->bytes_promoted();
scavenger->bytes_copied() + scavenger->bytes_promoted();
}
TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"V8.GCScavengerStackScanning", "survived_bytes_before",
@@ -590,10 +608,9 @@ void Scavenger::ScavengePage(MemoryChunk* page) {
AddPageToSweeperIfNecessary(page);
}
void Scavenger::Process(OneshotBarrier* barrier) {
void Scavenger::Process(JobDelegate* delegate) {
ScavengeVisitor scavenge_visitor(this);
const bool have_barrier = barrier != nullptr;
bool done;
size_t objects = 0;
do {
@@ -603,9 +620,9 @@ void Scavenger::Process(OneshotBarrier* barrier) {
copied_list_.Pop(&object_and_size)) {
scavenge_visitor.Visit(object_and_size.first);
done = false;
if (have_barrier && ((++objects % kInterruptThreshold) == 0)) {
if (delegate && ((++objects % kInterruptThreshold) == 0)) {
if (!copied_list_.IsGlobalPoolEmpty()) {
barrier->NotifyAll();
delegate->NotifyConcurrencyIncrease();
}
}
}
@@ -615,9 +632,9 @@ void Scavenger::Process(OneshotBarrier* barrier) {
HeapObject target = entry.heap_object;
IterateAndScavengePromotedObject(target, entry.map, entry.size);
done = false;
if (have_barrier && ((++objects % kInterruptThreshold) == 0)) {
if (delegate && ((++objects % kInterruptThreshold) == 0)) {
if (!promotion_list_.IsGlobalPoolEmpty()) {
barrier->NotifyAll();
delegate->NotifyConcurrencyIncrease();
}
}
}
@@ -705,6 +722,11 @@ void Scavenger::Finalize() {
}
}
void Scavenger::Flush() {
copied_list_.FlushToGlobal();
promotion_list_.FlushToGlobal();
}
void Scavenger::AddEphemeronHashTable(EphemeronHashTable table) {
ephemeron_table_list_.Push(table);
}
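
Note the coordination change running through this file: Process() now takes a JobDelegate instead of a OneshotBarrier. Where the old code used barrier->NotifyAll() to wake waiting tasks, a worker that observes a non-empty global pool calls NotifyConcurrencyIncrease(), prompting the scheduler to re-query GetMaxConcurrency() and possibly add workers. A hedged sketch of the drain loop (the WorkList/Item interface is illustrative):

#include <cstddef>

#include "include/v8-platform.h"

// Shaped like Scavenger::Process(). `delegate` may be null on the
// main-thread-only path, since Process() defaults its argument to nullptr.
template <typename WorkList, typename Item>
void DrainLoop(v8::JobDelegate* delegate, WorkList* list) {
  constexpr size_t kInterruptThreshold = 128;  // Made-up value.
  size_t objects = 0;
  Item item;
  while (list->Pop(&item)) {
    // ... visit `item`, possibly publishing new global work ...
    if (delegate && (++objects % kInterruptThreshold) == 0 &&
        !list->IsGlobalPoolEmpty()) {
      // Work became globally visible mid-drain: ask the scheduler for more
      // workers instead of waking barrier waiters as the old code did.
      delegate->NotifyConcurrencyIncrease();
    }
  }
}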

src/heap/scavenger.h

@@ -6,8 +6,10 @@
#define V8_HEAP_SCAVENGER_H_
#include "src/base/platform/condition-variable.h"
#include "src/heap/index-generator.h"
#include "src/heap/local-allocator.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/parallel-work-item.h"
#include "src/heap/slot-set.h"
#include "src/heap/worklist.h"
@@ -33,38 +35,7 @@ constexpr int kEphemeronTableListSegmentSize = 128;
using EphemeronTableList =
Worklist<EphemeronHashTable, kEphemeronTableListSegmentSize>;
class ScavengerCollector {
public:
static const int kMaxScavengerTasks = 8;
static const int kMaxWaitTimeMs = 2;
explicit ScavengerCollector(Heap* heap);
void CollectGarbage();
private:
void MergeSurvivingNewLargeObjects(
const SurvivingNewLargeObjectsMap& objects);
int NumberOfScavengeTasks();
void ProcessWeakReferences(EphemeronTableList* ephemeron_table_list);
void ClearYoungEphemerons(EphemeronTableList* ephemeron_table_list);
void ClearOldEphemerons();
void HandleSurvivingNewLargeObjects();
void SweepArrayBufferExtensions();
void IterateStackAndScavenge(RootScavengeVisitor* root_scavenge_visitor,
Scavenger** scavengers, int num_scavenge_tasks,
int main_thread_id);
Isolate* const isolate_;
Heap* const heap_;
base::Semaphore parallel_scavenge_semaphore_;
SurvivingNewLargeObjectsMap surviving_new_large_objects_;
friend class Scavenger;
};
class ScavengerCollector;
class Scavenger {
public:
@@ -88,6 +59,7 @@ class Scavenger {
inline bool Pop(struct PromotionListEntry* entry);
inline bool IsGlobalPoolEmpty();
inline bool ShouldEagerlyProcessPromotionList();
inline void FlushToGlobal();
private:
PromotionList* promotion_list_;
@@ -102,10 +74,12 @@ class Scavenger {
inline void PushLargeObject(int task_id, HeapObject object, Map map,
int size);
inline bool IsEmpty();
inline size_t GlobalPoolSize() const;
inline size_t LocalPushSegmentSize(int task_id);
inline bool Pop(int task_id, struct PromotionListEntry* entry);
inline bool IsGlobalPoolEmpty();
inline bool ShouldEagerlyProcessPromotionList(int task_id);
inline void FlushToGlobal(int task_id);
private:
static const int kRegularObjectPromotionListSegmentSize = 256;
@@ -134,10 +108,11 @@ class Scavenger {
// Processes remaining work (=objects) after single objects have been
// manually scavenged using ScavengeObject or CheckAndScavengeObject.
void Process(OneshotBarrier* barrier = nullptr);
void Process(JobDelegate* delegate = nullptr);
// Finalize the Scavenger. Needs to be called from the main thread.
void Finalize();
void Flush();
void AddEphemeronHashTable(EphemeronHashTable table);
@@ -276,6 +251,66 @@ class ScavengeVisitor final : public NewSpaceVisitor<ScavengeVisitor> {
Scavenger* const scavenger_;
};
class ScavengerCollector {
public:
static const int kMaxScavengerTasks = 8;
static const int kMainThreadId = 0;
explicit ScavengerCollector(Heap* heap);
void CollectGarbage();
private:
class JobTask : public v8::JobTask {
public:
explicit JobTask(
ScavengerCollector* outer,
std::vector<std::unique_ptr<Scavenger>>* scavengers,
std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> memory_chunks,
Scavenger::CopiedList* copied_list,
Scavenger::PromotionList* promotion_list);
void Run(JobDelegate* delegate) override;
size_t GetMaxConcurrency(size_t worker_count) const override;
private:
void ProcessItems(JobDelegate* delegate, Scavenger* scavenger);
void ConcurrentScavengePages(Scavenger* scavenger);
ScavengerCollector* outer_;
std::vector<std::unique_ptr<Scavenger>>* scavengers_;
std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> memory_chunks_;
std::atomic<size_t> remaining_memory_chunks_{0};
IndexGenerator generator_;
Scavenger::CopiedList* copied_list_;
Scavenger::PromotionList* promotion_list_;
};
void MergeSurvivingNewLargeObjects(
const SurvivingNewLargeObjectsMap& objects);
int NumberOfScavengeTasks();
void ProcessWeakReferences(EphemeronTableList* ephemeron_table_list);
void ClearYoungEphemerons(EphemeronTableList* ephemeron_table_list);
void ClearOldEphemerons();
void HandleSurvivingNewLargeObjects();
void SweepArrayBufferExtensions();
void IterateStackAndScavenge(
RootScavengeVisitor* root_scavenge_visitor,
std::vector<std::unique_ptr<Scavenger>>* scavengers, int main_thread_id);
Isolate* const isolate_;
Heap* const heap_;
SurvivingNewLargeObjectsMap surviving_new_large_objects_;
friend class Scavenger;
};
} // namespace internal
} // namespace v8
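
To make the GetMaxConcurrency() declared above concrete, here is the formula from scavenger.cc restated with made-up numbers (MaxConcurrency is a standalone paraphrase, not V8 code):

#include <algorithm>
#include <cstddef>

// min(#scavengers, max(unscavenged chunks, active workers + flushed segments))
size_t MaxConcurrency(size_t num_scavengers, size_t remaining_chunks,
                      size_t worker_count, size_t global_pool_size) {
  return std::min(num_scavengers,
                  std::max(remaining_chunks, worker_count + global_pool_size));
}

// All chunks scavenged, 3 workers active, 7 flushed segments pending:
//   MaxConcurrency(8, 0, 3, 7) == 8   -> keep the pool saturated.
// Pools drained, 2 workers still holding local segments:
//   MaxConcurrency(8, 0, 2, 0) == 2   -> counting worker_count keeps running
//                                        workers alive until they finish.
// Everything done:
//   MaxConcurrency(8, 0, 0, 0) == 0   -> the job completes and Join() returns.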