Revert "Reland "[Heap] ScavengerCollector use Jobs.""
This reverts commit92f815a80d
. Reason for revert: broke tests; see https://ci.chromium.org/p/v8/builders/ci/V8%20Linux64%20TSAN/33395? Original change's description: > Reland "[Heap] ScavengerCollector use Jobs." > > This is a reland of9e8c54f830
> Safe to reland as-is with fix in AcquireTaskId > https://chromium-review.googlesource.com/c/v8/v8/+/2401964 > > Additional changes are made in the reland: > -TRACE_GC is be split for background/foreground scope. > -New IndexGenerator is used for dynamic work assignement. > > Original change's description: > > [Heap] ScavengerCollector use Jobs. > > > > No yielding is necessary since the main thread Join()s. > > > > max concurrency is determined based on either > > remaining_memory_chunks_ or global pool size > > (copied_list_ + promotion_list_) > > > > Change-Id: Ie30fa86c44d3224b04df5d79569bce126ce7d96b > > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2354390 > > Commit-Queue: Etienne Pierre-Doray <etiennep@chromium.org> > > Reviewed-by: Ulan Degenbaev <ulan@chromium.org> > > Cr-Commit-Position: refs/heads/master@{#69746} > > Change-Id: Id9d7a5bf3b2337ae4cf1e76770f4b14ebb8ca256 > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2399041 > Reviewed-by: Ulan Degenbaev <ulan@chromium.org> > Commit-Queue: Etienne Pierre-Doray <etiennep@chromium.org> > Cr-Commit-Position: refs/heads/master@{#70135} TBR=ulan@chromium.org,etiennep@chromium.org Change-Id: I4823c642546b82a9a9c8955151cd8784e4b86bc8 No-Presubmit: true No-Tree-Checks: true No-Try: true Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2431551 Commit-Queue: Francis McCabe <fgm@chromium.org> Reviewed-by: Francis McCabe <fgm@chromium.org> Cr-Commit-Position: refs/heads/master@{#70138}
parent 21b585165f
commit 4822d3b22a

Changed files:
BUILD.gn
src/heap/parallel-work-item.h
src/heap/scavenger-inl.h
src/heap/scavenger.cc
src/heap/scavenger.h
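As context for the description above: the concurrency cap of the reverted JobTask is a pure function of outstanding work. A minimal standalone sketch of that formula (plain C++; the free-function name is made up for illustration, the parameters mirror the fields in the deleted JobTask below):

#include <algorithm>
#include <cstddef>

// Sketch of JobTask::GetMaxConcurrency() from the reverted change: request
// one worker per unit of outstanding work (unscavenged chunks, or worklist
// entries plus the local segments held by current workers), but never more
// workers than there are pre-allocated Scavenger instances.
size_t MaxConcurrency(size_t num_scavengers, size_t remaining_chunks,
                      size_t worker_count, size_t global_pool_size) {
  return std::min(num_scavengers,
                  std::max(remaining_chunks, worker_count + global_pool_size));
}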
BUILD.gn
@@ -2650,7 +2650,6 @@ v8_source_set("v8_base_without_compiler") {
     "src/heap/paged-spaces-inl.h",
     "src/heap/paged-spaces.cc",
     "src/heap/paged-spaces.h",
-    "src/heap/parallel-work-item.h",
     "src/heap/read-only-heap-inl.h",
     "src/heap/read-only-heap.cc",
     "src/heap/read-only-heap.h",
src/heap/parallel-work-item.h
@@ -1,32 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_HEAP_PARALLEL_WORK_ITEM_H_
-#define V8_HEAP_PARALLEL_WORK_ITEM_H_
-
-#include <atomic>
-
-namespace v8 {
-namespace internal {
-
-class ParallelWorkItem {
- public:
-  ParallelWorkItem() = default;
-
-  bool TryAcquire() {
-    // memory_order_relaxed is sufficient as the work item's state itself
-    // hasn't been modified since the beginning of its associated job. This
-    // is only atomically acquiring the right to work on it.
-    return reinterpret_cast<std::atomic<bool>*>(&acquire_)->exchange(
-               true, std::memory_order_relaxed) == false;
-  }
-
- private:
-  bool acquire_{false};
-};
-
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_HEAP_PARALLEL_WORK_ITEM_H_
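The TryAcquire() in the deleted header above is the whole synchronization story for a work item: whichever thread flips the flag first owns the item. A self-contained sketch of that claim protocol (std::thread instead of V8's job system; a plain atomic member replaces the reinterpret_cast trick):

#include <atomic>
#include <cstdio>
#include <thread>

// Stand-in for ParallelWorkItem: exchange() returns the previous value, so
// only the thread that flips false -> true wins. Relaxed ordering suffices
// because the item itself is not modified while the claim is being raced.
struct WorkItem {
  std::atomic<bool> acquired{false};
  bool TryAcquire() {
    return !acquired.exchange(true, std::memory_order_relaxed);
  }
};

int main() {
  WorkItem item;
  auto claim = [&](const char* who) {
    if (item.TryAcquire()) std::printf("%s got the item\n", who);
  };
  std::thread a(claim, "worker A");
  std::thread b(claim, "worker B");
  a.join();
  b.join();  // exactly one of the two lines is printed
  return 0;
}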
src/heap/scavenger-inl.h
@@ -38,10 +38,6 @@ bool Scavenger::PromotionList::View::Pop(struct PromotionListEntry* entry) {
   return promotion_list_->Pop(task_id_, entry);
 }
 
-void Scavenger::PromotionList::View::FlushToGlobal() {
-  promotion_list_->FlushToGlobal(task_id_);
-}
-
 bool Scavenger::PromotionList::View::IsGlobalPoolEmpty() {
   return promotion_list_->IsGlobalPoolEmpty();
 }
@@ -82,16 +78,6 @@ bool Scavenger::PromotionList::Pop(int task_id,
   return large_object_promotion_list_.Pop(task_id, entry);
 }
 
-void Scavenger::PromotionList::FlushToGlobal(int task_id) {
-  regular_object_promotion_list_.FlushToGlobal(task_id);
-  large_object_promotion_list_.FlushToGlobal(task_id);
-}
-
-size_t Scavenger::PromotionList::GlobalPoolSize() const {
-  return regular_object_promotion_list_.GlobalPoolSize() +
-         large_object_promotion_list_.GlobalPoolSize();
-}
-
 bool Scavenger::PromotionList::IsGlobalPoolEmpty() {
   return regular_object_promotion_list_.IsGlobalPoolEmpty() &&
          large_object_promotion_list_.IsGlobalPoolEmpty();
src/heap/scavenger.cc
@@ -25,6 +25,65 @@
 namespace v8 {
 namespace internal {
 
+class PageScavengingItem final : public ItemParallelJob::Item {
+ public:
+  explicit PageScavengingItem(MemoryChunk* chunk) : chunk_(chunk) {}
+  ~PageScavengingItem() override = default;
+
+  void Process(Scavenger* scavenger) { scavenger->ScavengePage(chunk_); }
+
+ private:
+  MemoryChunk* const chunk_;
+};
+
+class ScavengingTask final : public ItemParallelJob::Task {
+ public:
+  ScavengingTask(Heap* heap, Scavenger* scavenger, OneshotBarrier* barrier)
+      : ItemParallelJob::Task(heap->isolate()),
+        heap_(heap),
+        scavenger_(scavenger),
+        barrier_(barrier) {}
+
+  void RunInParallel(Runner runner) final {
+    if (runner == Runner::kForeground) {
+      TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
+      ProcessItems();
+    } else {
+      TRACE_BACKGROUND_GC(
+          heap_->tracer(),
+          GCTracer::BackgroundScope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL);
+      ProcessItems();
+    }
+  }
+
+ private:
+  void ProcessItems() {
+    double scavenging_time = 0.0;
+    {
+      barrier_->Start();
+      TimedScope scope(&scavenging_time);
+      PageScavengingItem* item = nullptr;
+      while ((item = GetItem<PageScavengingItem>()) != nullptr) {
+        item->Process(scavenger_);
+        item->MarkFinished();
+      }
+      do {
+        scavenger_->Process(barrier_);
+      } while (!barrier_->Wait());
+      scavenger_->Process();
+    }
+    if (FLAG_trace_parallel_scavenge) {
+      PrintIsolate(heap_->isolate(),
+                   "scavenge[%p]: time=%.2f copied=%zu promoted=%zu\n",
+                   static_cast<void*>(this), scavenging_time,
+                   scavenger_->bytes_copied(), scavenger_->bytes_promoted());
+    }
+  }
+  Heap* const heap_;
+  Scavenger* const scavenger_;
+  OneshotBarrier* const barrier_;
+};
+
 class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
  public:
   IterateAndScavengePromotedObjectsVisitor(Scavenger* scavenger,
@@ -160,80 +219,8 @@ class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
   }
 };
 
-ScavengerCollector::JobTask::JobTask(
-    ScavengerCollector* outer,
-    std::vector<std::unique_ptr<Scavenger>>* scavengers,
-    std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> memory_chunks,
-    Scavenger::CopiedList* copied_list,
-    Scavenger::PromotionList* promotion_list)
-    : outer_(outer),
-      scavengers_(scavengers),
-      memory_chunks_(std::move(memory_chunks)),
-      remaining_memory_chunks_(memory_chunks_.size()),
-      generator_(memory_chunks_.size()),
-      copied_list_(copied_list),
-      promotion_list_(promotion_list) {}
-
-void ScavengerCollector::JobTask::Run(JobDelegate* delegate) {
-  Scavenger* scavenger = (*scavengers_)[delegate->GetTaskId()].get();
-  if (delegate->IsJoiningThread()) {
-    TRACE_GC(outer_->heap_->tracer(),
-             GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
-    ProcessItems(delegate, scavenger);
-  } else {
-    TRACE_BACKGROUND_GC(
-        outer_->heap_->tracer(),
-        GCTracer::BackgroundScope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL);
-    ProcessItems(delegate, scavenger);
-  }
-}
-
-size_t ScavengerCollector::JobTask::GetMaxConcurrency(
-    size_t worker_count) const {
-  // We need to account for local segments held by worker_count in addition to
-  // GlobalPoolSize() of copied_list_ and promotion_list_.
-  return std::min<size_t>(
-      scavengers_->size(),
-      std::max<size_t>(remaining_memory_chunks_.load(std::memory_order_relaxed),
-                       worker_count + copied_list_->GlobalPoolSize() +
-                           promotion_list_->GlobalPoolSize()));
-}
-
-void ScavengerCollector::JobTask::ProcessItems(JobDelegate* delegate,
-                                               Scavenger* scavenger) {
-  double scavenging_time = 0.0;
-  {
-    TimedScope scope(&scavenging_time);
-    ConcurrentScavengePages(scavenger);
-    scavenger->Process(delegate);
-  }
-  if (FLAG_trace_parallel_scavenge) {
-    PrintIsolate(outer_->heap_->isolate(),
-                 "scavenge[%p]: time=%.2f copied=%zu promoted=%zu\n",
-                 static_cast<void*>(this), scavenging_time,
-                 scavenger->bytes_copied(), scavenger->bytes_promoted());
-  }
-}
-
-void ScavengerCollector::JobTask::ConcurrentScavengePages(
-    Scavenger* scavenger) {
-  while (remaining_memory_chunks_.load(std::memory_order_relaxed) > 0) {
-    base::Optional<size_t> index = generator_.GetNext();
-    if (!index) return;
-    for (size_t i = *index; i < memory_chunks_.size(); ++i) {
-      auto& work_item = memory_chunks_[i];
-      if (!work_item.first.TryAcquire()) break;
-      scavenger->ScavengePage(work_item.second);
-      if (remaining_memory_chunks_.fetch_sub(1, std::memory_order_relaxed) <=
-          1) {
-        return;
-      }
-    }
-  }
-}
-
 ScavengerCollector::ScavengerCollector(Heap* heap)
-    : isolate_(heap->isolate()), heap_(heap) {}
+    : isolate_(heap->isolate()), heap_(heap), parallel_scavenge_semaphore_(0) {}
 
 // Remove this crashkey after chromium:1010312 is fixed.
 class ScopedFullHeapCrashKey {
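The deleted ConcurrentScavengePages() above is the dynamic work-assignment half of the change: a worker draws a starting chunk index, then claims forward until it collides with another worker. A standalone sketch of that loop shape (StartIndexGenerator is a hypothetical stand-in, not V8's IndexGenerator; the scatter formula is arbitrary):

#include <atomic>
#include <cstddef>
#include <optional>
#include <vector>

struct WorkItem {  // same claim flag as in the earlier sketch
  std::atomic<bool> acquired{false};
  bool TryAcquire() { return !acquired.exchange(true, std::memory_order_relaxed); }
};

// Hypothetical stand-in for v8's IndexGenerator: hands each caller a
// different starting offset so workers begin scanning in different regions.
class StartIndexGenerator {
 public:
  explicit StartIndexGenerator(size_t size) : size_(size) {}
  std::optional<size_t> GetNext() {
    size_t n = next_.fetch_add(1, std::memory_order_relaxed);
    if (n >= size_) return std::nullopt;
    return (n * 7919) % size_;  // arbitrary scatter, illustration only
  }

 private:
  const size_t size_;
  std::atomic<size_t> next_{0};
};

// Shape of the deleted JobTask::ConcurrentScavengePages(): claim forward
// from a start index; a failed claim means another worker owns that run,
// so draw a new start index; exit early once nothing remains anywhere.
void ScavengeChunks(std::vector<WorkItem>* chunks,
                    StartIndexGenerator* generator,
                    std::atomic<size_t>* remaining) {
  while (remaining->load(std::memory_order_relaxed) > 0) {
    std::optional<size_t> index = generator->GetNext();
    if (!index) return;
    for (size_t i = *index; i < chunks->size(); ++i) {
      if (!(*chunks)[i].TryAcquire()) break;
      // ... scavenger->ScavengePage(chunk) would run here ...
      if (remaining->fetch_sub(1, std::memory_order_relaxed) <= 1) return;
    }
  }
}

int main() {
  std::vector<WorkItem> chunks(64);
  StartIndexGenerator generator(chunks.size());
  std::atomic<size_t> remaining{chunks.size()};
  ScavengeChunks(&chunks, &generator, &remaining);  // single-threaded drive
  return remaining.load() == 0 ? 0 : 1;
}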
@@ -259,12 +246,23 @@ void ScavengerCollector::CollectGarbage() {
   }
 
   DCHECK(surviving_new_large_objects_.empty());
-  std::vector<std::unique_ptr<Scavenger>> scavengers;
-  Worklist<MemoryChunk*, 64> empty_chunks;
+  ItemParallelJob job(isolate_->cancelable_task_manager(),
+                      &parallel_scavenge_semaphore_);
+  const int kMainThreadId = 0;
+  Scavenger* scavengers[kMaxScavengerTasks];
+  const bool is_logging = isolate_->LogObjectRelocation();
   const int num_scavenge_tasks = NumberOfScavengeTasks();
+  OneshotBarrier barrier(base::TimeDelta::FromMilliseconds(kMaxWaitTimeMs));
+  Worklist<MemoryChunk*, 64> empty_chunks;
   Scavenger::CopiedList copied_list(num_scavenge_tasks);
   Scavenger::PromotionList promotion_list(num_scavenge_tasks);
   EphemeronTableList ephemeron_table_list(num_scavenge_tasks);
+  for (int i = 0; i < num_scavenge_tasks; i++) {
+    scavengers[i] =
+        new Scavenger(this, heap_, is_logging, &empty_chunks, &copied_list,
+                      &promotion_list, &ephemeron_table_list, i);
+    job.AddTask(new ScavengingTask(heap_, scavengers[i], &barrier));
+  }
 
   {
     Sweeper* sweeper = heap_->mark_compact_collector()->sweeper();
@@ -291,20 +289,12 @@ void ScavengerCollector::CollectGarbage() {
         return !page->ContainsSlots<OLD_TO_NEW>() && !page->sweeping_slot_set();
       });
 
-  const bool is_logging = isolate_->LogObjectRelocation();
-  for (int i = 0; i < num_scavenge_tasks; ++i) {
-    scavengers.emplace_back(
-        new Scavenger(this, heap_, is_logging, &empty_chunks, &copied_list,
-                      &promotion_list, &ephemeron_table_list, i));
-  }
-
-  std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> memory_chunks;
   RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
-      heap_, [&memory_chunks](MemoryChunk* chunk) {
-        memory_chunks.emplace_back(ParallelWorkItem{}, chunk);
+      heap_, [&job](MemoryChunk* chunk) {
+        job.AddItem(new PageScavengingItem(chunk));
       });
 
-  RootScavengeVisitor root_scavenge_visitor(scavengers[kMainThreadId].get());
+  RootScavengeVisitor root_scavenge_visitor(scavengers[kMainThreadId]);
 
   {
     // Identify weak unmodified handles. Requires an unmodified graph.
@@ -329,24 +319,18 @@ void ScavengerCollector::CollectGarbage() {
     heap_->IterateRoots(&root_scavenge_visitor, options);
     isolate_->global_handles()->IterateYoungStrongAndDependentRoots(
         &root_scavenge_visitor);
-    scavengers[kMainThreadId]->Flush();
   }
   {
     // Parallel phase scavenging all copied and promoted objects.
     TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
-    V8::GetCurrentPlatform()
-        ->PostJob(v8::TaskPriority::kUserBlocking,
-                  std::make_unique<JobTask>(this, &scavengers,
-                                            std::move(memory_chunks),
-                                            &copied_list, &promotion_list))
-        ->Join();
+    job.Run();
     DCHECK(copied_list.IsEmpty());
     DCHECK(promotion_list.IsEmpty());
   }
 
   if (V8_UNLIKELY(FLAG_scavenge_separate_stack_scanning)) {
-    IterateStackAndScavenge(&root_scavenge_visitor, &scavengers,
-                            kMainThreadId);
+    IterateStackAndScavenge(&root_scavenge_visitor, scavengers,
+                            num_scavenge_tasks, kMainThreadId);
     DCHECK(copied_list.IsEmpty());
     DCHECK(promotion_list.IsEmpty());
   }
@@ -373,10 +357,10 @@ void ScavengerCollector::CollectGarbage() {
 
   DCHECK(surviving_new_large_objects_.empty());
 
-  for (auto& scavenger : scavengers) {
-    scavenger->Finalize();
+  for (int i = 0; i < num_scavenge_tasks; i++) {
+    scavengers[i]->Finalize();
+    delete scavengers[i];
   }
-  scavengers.clear();
 
   HandleSurvivingNewLargeObjects();
 }
@@ -436,24 +420,23 @@ void ScavengerCollector::CollectGarbage() {
 }
 
 void ScavengerCollector::IterateStackAndScavenge(
-    RootScavengeVisitor* root_scavenge_visitor,
-    std::vector<std::unique_ptr<Scavenger>>* scavengers, int main_thread_id) {
+    RootScavengeVisitor* root_scavenge_visitor, Scavenger** scavengers,
+    int num_scavenge_tasks, int main_thread_id) {
   // Scan the stack, scavenge the newly discovered objects, and report
   // the survival statistics before and after the stack scanning.
   // This code is not intended for production.
   TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_STACK_ROOTS);
   size_t survived_bytes_before = 0;
-  for (auto& scavenger : *scavengers) {
+  for (int i = 0; i < num_scavenge_tasks; i++) {
     survived_bytes_before +=
-        scavenger->bytes_copied() + scavenger->bytes_promoted();
+        scavengers[i]->bytes_copied() + scavengers[i]->bytes_promoted();
   }
   heap_->IterateStackRoots(root_scavenge_visitor);
-  (*scavengers)[main_thread_id]->Process();
+  scavengers[main_thread_id]->Process();
   size_t survived_bytes_after = 0;
-  for (auto& scavenger : *scavengers) {
+  for (int i = 0; i < num_scavenge_tasks; i++) {
     survived_bytes_after +=
-        scavenger->bytes_copied() + scavenger->bytes_promoted();
+        scavengers[i]->bytes_copied() + scavengers[i]->bytes_promoted();
   }
   TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
                "V8.GCScavengerStackScanning", "survived_bytes_before",
@@ -607,9 +590,10 @@ void Scavenger::ScavengePage(MemoryChunk* page) {
   AddPageToSweeperIfNecessary(page);
 }
 
-void Scavenger::Process(JobDelegate* delegate) {
+void Scavenger::Process(OneshotBarrier* barrier) {
   ScavengeVisitor scavenge_visitor(this);
 
+  const bool have_barrier = barrier != nullptr;
   bool done;
   size_t objects = 0;
   do {
@@ -619,9 +603,9 @@ void Scavenger::Process(JobDelegate* delegate) {
            copied_list_.Pop(&object_and_size)) {
       scavenge_visitor.Visit(object_and_size.first);
       done = false;
-      if (delegate && ((++objects % kInterruptThreshold) == 0)) {
+      if (have_barrier && ((++objects % kInterruptThreshold) == 0)) {
         if (!copied_list_.IsGlobalPoolEmpty()) {
-          delegate->NotifyConcurrencyIncrease();
+          barrier->NotifyAll();
         }
       }
     }
@@ -631,9 +615,9 @@ void Scavenger::Process(JobDelegate* delegate) {
       HeapObject target = entry.heap_object;
       IterateAndScavengePromotedObject(target, entry.map, entry.size);
       done = false;
-      if (delegate && ((++objects % kInterruptThreshold) == 0)) {
+      if (have_barrier && ((++objects % kInterruptThreshold) == 0)) {
        if (!promotion_list_.IsGlobalPoolEmpty()) {
-          delegate->NotifyConcurrencyIncrease();
+          barrier->NotifyAll();
        }
      }
    }
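Both hunks above swap the two sides of the same idiom: every kInterruptThreshold objects, a worker that sees a non-empty global pool signals for more parallelism, via JobDelegate::NotifyConcurrencyIncrease() in the Jobs world and OneshotBarrier::NotifyAll() in the restored code. A schematic of that drain loop (hypothetical helper names; std::function callbacks stand in for the worklist and the scheduler, and the threshold value is assumed):

#include <cstddef>
#include <functional>

constexpr size_t kInterruptThreshold = 128;  // assumed value, for illustration

// Drain local work, but every kInterruptThreshold objects check whether the
// shared pool has refilled and, if so, ask the scheduler for more workers
// (delegate->NotifyConcurrencyIncrease() / barrier->NotifyAll() above).
void DrainWithConcurrencyChecks(
    const std::function<bool()>& pop_and_process_one,
    const std::function<bool()>& global_pool_nonempty,
    const std::function<void()>& request_more_workers) {
  size_t objects = 0;
  while (pop_and_process_one()) {
    if ((++objects % kInterruptThreshold) == 0 && global_pool_nonempty()) {
      request_more_workers();
    }
  }
}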
@@ -721,11 +705,6 @@ void Scavenger::Finalize() {
   }
 }
 
-void Scavenger::Flush() {
-  copied_list_.FlushToGlobal();
-  promotion_list_.FlushToGlobal();
-}
-
 void Scavenger::AddEphemeronHashTable(EphemeronHashTable table) {
   ephemeron_table_list_.Push(table);
 }
src/heap/scavenger.h
@@ -6,10 +6,8 @@
 #define V8_HEAP_SCAVENGER_H_
 
 #include "src/base/platform/condition-variable.h"
-#include "src/heap/index-generator.h"
 #include "src/heap/local-allocator.h"
 #include "src/heap/objects-visiting.h"
-#include "src/heap/parallel-work-item.h"
 #include "src/heap/slot-set.h"
 #include "src/heap/worklist.h"
 
@@ -35,7 +33,38 @@ constexpr int kEphemeronTableListSegmentSize = 128;
 using EphemeronTableList =
     Worklist<EphemeronHashTable, kEphemeronTableListSegmentSize>;
 
-class ScavengerCollector;
+class ScavengerCollector {
+ public:
+  static const int kMaxScavengerTasks = 8;
+  static const int kMaxWaitTimeMs = 2;
+
+  explicit ScavengerCollector(Heap* heap);
+
+  void CollectGarbage();
+
+ private:
+  void MergeSurvivingNewLargeObjects(
+      const SurvivingNewLargeObjectsMap& objects);
+
+  int NumberOfScavengeTasks();
+
+  void ProcessWeakReferences(EphemeronTableList* ephemeron_table_list);
+  void ClearYoungEphemerons(EphemeronTableList* ephemeron_table_list);
+  void ClearOldEphemerons();
+  void HandleSurvivingNewLargeObjects();
+
+  void SweepArrayBufferExtensions();
+
+  void IterateStackAndScavenge(RootScavengeVisitor* root_scavenge_visitor,
+                               Scavenger** scavengers, int num_scavenge_tasks,
+                               int main_thread_id);
+  Isolate* const isolate_;
+  Heap* const heap_;
+  base::Semaphore parallel_scavenge_semaphore_;
+  SurvivingNewLargeObjectsMap surviving_new_large_objects_;
+
+  friend class Scavenger;
+};
 
 class Scavenger {
  public:
@@ -59,7 +88,6 @@ class Scavenger {
     inline bool Pop(struct PromotionListEntry* entry);
     inline bool IsGlobalPoolEmpty();
     inline bool ShouldEagerlyProcessPromotionList();
-    inline void FlushToGlobal();
 
    private:
     PromotionList* promotion_list_;
@@ -74,12 +102,10 @@ class Scavenger {
     inline void PushLargeObject(int task_id, HeapObject object, Map map,
                                 int size);
     inline bool IsEmpty();
-    inline size_t GlobalPoolSize() const;
     inline size_t LocalPushSegmentSize(int task_id);
     inline bool Pop(int task_id, struct PromotionListEntry* entry);
     inline bool IsGlobalPoolEmpty();
     inline bool ShouldEagerlyProcessPromotionList(int task_id);
-    inline void FlushToGlobal(int task_id);
 
    private:
     static const int kRegularObjectPromotionListSegmentSize = 256;
@@ -108,11 +134,10 @@ class Scavenger {
 
   // Processes remaining work (=objects) after single objects have been
   // manually scavenged using ScavengeObject or CheckAndScavengeObject.
-  void Process(JobDelegate* delegate = nullptr);
+  void Process(OneshotBarrier* barrier = nullptr);
 
   // Finalize the Scavenger. Needs to be called from the main thread.
   void Finalize();
-  void Flush();
 
   void AddEphemeronHashTable(EphemeronHashTable table);
 
@@ -251,66 +276,6 @@ class ScavengeVisitor final : public NewSpaceVisitor<ScavengeVisitor> {
   Scavenger* const scavenger_;
 };
 
-class ScavengerCollector {
- public:
-  static const int kMaxScavengerTasks = 8;
-  static const int kMainThreadId = 0;
-
-  explicit ScavengerCollector(Heap* heap);
-
-  void CollectGarbage();
-
- private:
-  class JobTask : public v8::JobTask {
-   public:
-    explicit JobTask(
-        ScavengerCollector* outer,
-        std::vector<std::unique_ptr<Scavenger>>* scavengers,
-        std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> memory_chunks,
-        Scavenger::CopiedList* copied_list,
-        Scavenger::PromotionList* promotion_list);
-
-    void Run(JobDelegate* delegate) override;
-    size_t GetMaxConcurrency(size_t worker_count) const override;
-
-   private:
-    void ProcessItems(JobDelegate* delegate, Scavenger* scavenger);
-    void ConcurrentScavengePages(Scavenger* scavenger);
-
-    ScavengerCollector* outer_;
-
-    std::vector<std::unique_ptr<Scavenger>>* scavengers_;
-    std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> memory_chunks_;
-    std::atomic<size_t> remaining_memory_chunks_{0};
-    IndexGenerator generator_;
-
-    Scavenger::CopiedList* copied_list_;
-    Scavenger::PromotionList* promotion_list_;
-  };
-
-  void MergeSurvivingNewLargeObjects(
-      const SurvivingNewLargeObjectsMap& objects);
-
-  int NumberOfScavengeTasks();
-
-  void ProcessWeakReferences(EphemeronTableList* ephemeron_table_list);
-  void ClearYoungEphemerons(EphemeronTableList* ephemeron_table_list);
-  void ClearOldEphemerons();
-  void HandleSurvivingNewLargeObjects();
-
-  void SweepArrayBufferExtensions();
-
-  void IterateStackAndScavenge(
-      RootScavengeVisitor* root_scavenge_visitor,
-      std::vector<std::unique_ptr<Scavenger>>* scavengers, int main_thread_id);
-
-  Isolate* const isolate_;
-  Heap* const heap_;
-  SurvivingNewLargeObjectsMap surviving_new_large_objects_;
-
-  friend class Scavenger;
-};
-
 }  // namespace internal
 }  // namespace v8
 