[heap] Use PageParallelJob for parallel evacuation
Move evacuation of new and old space pages to the provided framework for
parallelization.

Drive-by fix: Remove leftovers of the POPULAR_PAGE flag.

BUG=chromium:524425
LOG=N

Review URL: https://codereview.chromium.org/1782043004

Cr-Commit-Position: refs/heads/master@{#34687}
Commit: 4566531c6e
Parent: d81c3b4a78
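For context, the framework named in the message drives per-page work through a traits type: pages are registered together with per-page data, processed in parallel through ProcessPageInParallel, and finished on the main thread via FinalizePageSequentially. Below is a minimal single-threaded model of that contract, inferred from the diff that follows; ToyPageJob, Heap, and MemoryChunk here are illustrative stand-ins, not V8 code (the real PageParallelJob shards pages across background tasks via the CancelableTaskManager).

```cpp
#include <cstdio>
#include <vector>

struct Heap {};  // stand-in for v8::internal::Heap

struct MemoryChunk {  // stand-in: just enough state for the demo
  bool in_new_space;
  int live_bytes;
};

// Sequential model of the PageParallelJob contract; the real framework
// distributes the registered pages across background tasks.
template <typename JobTraits>
class ToyPageJob {
 public:
  void AddPage(MemoryChunk* chunk, typename JobTraits::PerPageData data) {
    items_.push_back({chunk, data});
  }
  int NumberOfPages() const { return static_cast<int>(items_.size()); }

  // callback(i) supplies the per-task data (an Evacuator* in the real code).
  template <typename Callback>
  void Run(int /*num_tasks*/, Callback callback) {
    typename JobTraits::PerTaskData task_data = callback(0);
    for (Item& item : items_) {
      bool ok = JobTraits::ProcessPageInParallel(nullptr, task_data,
                                                 item.chunk, item.data);
      if (JobTraits::NeedSequentialFinalization)
        JobTraits::FinalizePageSequentially(nullptr, item.chunk, ok, item.data);
    }
  }

 private:
  struct Item {
    MemoryChunk* chunk;
    typename JobTraits::PerPageData data;
  };
  std::vector<Item> items_;
};

struct CountingTraits {
  typedef int* PerPageData;  // pointer to a shared "aborted pages" counter
  typedef const char* PerTaskData;
  static const bool NeedSequentialFinalization = true;

  static bool ProcessPageInParallel(Heap*, PerTaskData, MemoryChunk* chunk,
                                    PerPageData) {
    // Fake policy: large old-space pages "fail", everything else succeeds.
    return chunk->in_new_space || chunk->live_bytes < 100;
  }
  static void FinalizePageSequentially(Heap*, MemoryChunk*, bool success,
                                       PerPageData aborted) {
    if (!success) ++*aborted;  // main-thread aggregation, no locking needed
  }
};

int main() {
  MemoryChunk pages[] = {{true, 10}, {false, 50}, {false, 500}};
  int aborted = 0;
  ToyPageJob<CountingTraits> job;
  for (MemoryChunk& p : pages) job.AddPage(&p, &aborted);
  job.Run(1, [](int) -> const char* { return "main-thread evacuator"; });
  std::printf("pages=%d aborted=%d\n", job.NumberOfPages(), aborted);  // 3, 1
  return 0;
}
```

Note how the aborted-page count flows through PerPageData and is only touched during sequential finalization; that is why the real code needs no synchronization around it.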
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -665,11 +665,6 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
   while (it.has_next()) {
     Page* p = it.next();
     if (p->NeverEvacuate()) continue;
-    if (p->IsFlagSet(Page::POPULAR_PAGE)) {
-      // This page had slots buffer overflow on previous GC, skip it.
-      p->ClearFlag(Page::POPULAR_PAGE);
-      continue;
-    }
     // Invariant: Evacuation candidates are just created when marking is
     // started. This means that sweeping has finished. Furthermore, at the end
     // of a GC all evacuation candidates are cleared and their slot buffers are
@@ -2958,12 +2953,8 @@ void MarkCompactCollector::EvacuateNewSpaceEpilogue() {
 
 class MarkCompactCollector::Evacuator : public Malloced {
  public:
-  Evacuator(MarkCompactCollector* collector,
-            const List<Page*>& evacuation_candidates,
-            const List<NewSpacePage*>& newspace_evacuation_candidates)
+  explicit Evacuator(MarkCompactCollector* collector)
       : collector_(collector),
-        evacuation_candidates_(evacuation_candidates),
-        newspace_evacuation_candidates_(newspace_evacuation_candidates),
         compaction_spaces_(collector->heap()),
         local_pretenuring_feedback_(HashMap::PointersMatch,
                                     kInitialLocalPretenuringFeedbackCapacity),
@@ -2973,11 +2964,9 @@ class MarkCompactCollector::Evacuator : public Malloced {
         old_space_visitor_(collector->heap(), &compaction_spaces_,
                            &old_to_old_slots_, &old_to_new_slots_),
         duration_(0.0),
-        bytes_compacted_(0),
-        task_id_(0) {}
+        bytes_compacted_(0) {}
 
-  // Evacuate the configured set of pages in parallel.
-  inline void EvacuatePages();
+  inline bool EvacuatePage(MemoryChunk* chunk);
 
   // Merge back locally cached info sequentially. Note that this method needs
   // to be called from the main thread.
@@ -2985,9 +2974,6 @@ class MarkCompactCollector::Evacuator : public Malloced {
 
   CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; }
 
-  uint32_t task_id() { return task_id_; }
-  void set_task_id(uint32_t id) { task_id_ = id; }
-
  private:
   static const int kInitialLocalPretenuringFeedbackCapacity = 256;
 
@@ -3002,10 +2988,6 @@ class MarkCompactCollector::Evacuator : public Malloced {
 
   MarkCompactCollector* collector_;
 
-  // Pages to process.
-  const List<Page*>& evacuation_candidates_;
-  const List<NewSpacePage*>& newspace_evacuation_candidates_;
-
   // Locally cached collector data.
   CompactionSpaceCollection compaction_spaces_;
   LocalSlotsBuffer old_to_old_slots_;
@@ -3019,60 +3001,40 @@ class MarkCompactCollector::Evacuator : public Malloced {
   // Book keeping info.
   double duration_;
   intptr_t bytes_compacted_;
-
-  // Task id, if this evacuator is executed on a background task instead of
-  // the main thread. Can be used to try to abort the task currently scheduled
-  // to executed to evacuate pages.
-  uint32_t task_id_;
 };
 
 bool MarkCompactCollector::Evacuator::EvacuateSinglePage(
     MemoryChunk* p, HeapObjectVisitor* visitor) {
-  bool success = true;
-  if (p->parallel_compaction_state().TrySetValue(
-          MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) {
-    if (p->IsEvacuationCandidate() || p->InNewSpace()) {
-      DCHECK_EQ(p->parallel_compaction_state().Value(),
-                MemoryChunk::kCompactingInProgress);
-      int saved_live_bytes = p->LiveBytes();
-      double evacuation_time;
-      {
-        AlwaysAllocateScope always_allocate(heap()->isolate());
-        TimedScope timed_scope(&evacuation_time);
-        success = collector_->VisitLiveObjects(p, visitor, kClearMarkbits);
-      }
-      if (success) {
-        ReportCompactionProgress(evacuation_time, saved_live_bytes);
-        p->parallel_compaction_state().SetValue(
-            MemoryChunk::kCompactingFinalize);
-      } else {
-        p->parallel_compaction_state().SetValue(
-            MemoryChunk::kCompactingAborted);
-      }
-    } else {
-      // There could be popular pages in the list of evacuation candidates
-      // which we do not compact.
-      p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
-    }
+  bool success = false;
+  DCHECK(p->IsEvacuationCandidate() || p->InNewSpace());
+  int saved_live_bytes = p->LiveBytes();
+  double evacuation_time;
+  {
+    AlwaysAllocateScope always_allocate(heap()->isolate());
+    TimedScope timed_scope(&evacuation_time);
+    success = collector_->VisitLiveObjects(p, visitor, kClearMarkbits);
+  }
+  if (success) {
+    ReportCompactionProgress(evacuation_time, saved_live_bytes);
   }
   return success;
 }
 
-void MarkCompactCollector::Evacuator::EvacuatePages() {
-  for (NewSpacePage* p : newspace_evacuation_candidates_) {
-    DCHECK(p->InNewSpace());
-    DCHECK_EQ(p->concurrent_sweeping_state().Value(),
+bool MarkCompactCollector::Evacuator::EvacuatePage(MemoryChunk* chunk) {
+  bool success = false;
+  if (chunk->InNewSpace()) {
+    DCHECK_EQ(chunk->concurrent_sweeping_state().Value(),
              NewSpacePage::kSweepingDone);
-    bool success = EvacuateSinglePage(p, &new_space_visitor_);
+    success = EvacuateSinglePage(chunk, &new_space_visitor_);
     DCHECK(success);
     USE(success);
+  } else {
+    DCHECK(chunk->IsEvacuationCandidate() ||
+           chunk->IsFlagSet(MemoryChunk::RESCAN_ON_EVACUATION));
+    DCHECK_EQ(chunk->concurrent_sweeping_state().Value(), Page::kSweepingDone);
+    success = EvacuateSinglePage(chunk, &old_space_visitor_);
   }
-  for (Page* p : evacuation_candidates_) {
-    DCHECK(p->IsEvacuationCandidate() ||
-           p->IsFlagSet(MemoryChunk::RESCAN_ON_EVACUATION));
-    DCHECK_EQ(p->concurrent_sweeping_state().Value(), Page::kSweepingDone);
-    EvacuateSinglePage(p, &old_space_visitor_);
-  }
+  return success;
 }
 
 void MarkCompactCollector::Evacuator::Finalize() {
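EvacuateSinglePage above brackets the object visit with a TimedScope that writes into evacuation_time. Here is a minimal RAII timer in that spirit, using std::chrono rather than V8's internal clock, so this is an assumption about the shape, not V8's actual TimedScope:

```cpp
#include <chrono>
#include <cstdio>

// Writes the elapsed milliseconds into *result when the scope ends,
// however it is exited.
class ScopedTimer {
 public:
  explicit ScopedTimer(double* result)
      : result_(result), start_(std::chrono::steady_clock::now()) {}
  ~ScopedTimer() {
    std::chrono::duration<double, std::milli> elapsed =
        std::chrono::steady_clock::now() - start_;
    *result_ = elapsed.count();
  }

 private:
  double* result_;
  std::chrono::steady_clock::time_point start_;
};

int main() {
  double evacuation_time = 0.0;
  {
    ScopedTimer timed_scope(&evacuation_time);  // times exactly this block
    for (volatile int i = 0; i < 1000000; i++) {
    }
  }
  std::printf("took %.3f ms\n", evacuation_time);
  return 0;
}
```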
@@ -3105,29 +3067,6 @@ void MarkCompactCollector::Evacuator::Finalize() {
   });
 }
 
-class MarkCompactCollector::CompactionTask : public CancelableTask {
- public:
-  explicit CompactionTask(Heap* heap, Evacuator* evacuator)
-      : CancelableTask(heap->isolate()), heap_(heap), evacuator_(evacuator) {
-    evacuator->set_task_id(id());
-  }
-
-  virtual ~CompactionTask() {}
-
- private:
-  // v8::internal::CancelableTask overrides.
-  void RunInternal() override {
-    evacuator_->EvacuatePages();
-    heap_->mark_compact_collector()
-        ->pending_compaction_tasks_semaphore_.Signal();
-  }
-
-  Heap* heap_;
-  Evacuator* evacuator_;
-
-  DISALLOW_COPY_AND_ASSIGN(CompactionTask);
-};
-
 int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages,
                                                           intptr_t live_bytes) {
   if (!FLAG_parallel_compaction) return 1;
@@ -3158,19 +3097,63 @@ int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages,
   return Min(available_cores, tasks_capped_pages);
 }
 
+class EvacuationJobTraits {
+ public:
+  typedef int* PerPageData;  // Pointer to number of aborted pages.
+  typedef MarkCompactCollector::Evacuator* PerTaskData;
+
+  static const bool NeedSequentialFinalization = true;
+
+  static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator,
+                                    MemoryChunk* chunk, PerPageData) {
+    return evacuator->EvacuatePage(chunk);
+  }
+
+  static void FinalizePageSequentially(Heap*, MemoryChunk* chunk, bool success,
+                                       PerPageData data) {
+    if (chunk->InNewSpace()) {
+      DCHECK(success);
+    } else {
+      Page* p = static_cast<Page*>(chunk);
+      if (success) {
+        DCHECK(p->IsEvacuationCandidate());
+        DCHECK(p->SweepingDone());
+        p->Unlink();
+      } else {
+        // We have partially compacted the page, i.e., some objects may have
+        // moved, others are still in place.
+        // We need to:
+        // - Leave the evacuation candidate flag for later processing of slots
+        //   buffer entries.
+        // - Leave the slots buffer there for processing of entries added by
+        //   the write barrier.
+        // - Rescan the page as slot recording in the migration buffer only
+        //   happens upon moving (which we potentially didn't do).
+        // - Leave the page in the list of pages of a space since we could not
+        //   fully evacuate it.
+        DCHECK(p->IsEvacuationCandidate());
+        p->SetFlag(Page::COMPACTION_WAS_ABORTED);
+        *data += 1;
+      }
+    }
+  }
+};
+
 void MarkCompactCollector::EvacuatePagesInParallel() {
-  int num_pages = 0;
+  PageParallelJob<EvacuationJobTraits> job(
+      heap_, heap_->isolate()->cancelable_task_manager());
+
+  int abandoned_pages = 0;
   intptr_t live_bytes = 0;
   for (Page* page : evacuation_candidates_) {
-    num_pages++;
     live_bytes += page->LiveBytes();
+    job.AddPage(page, &abandoned_pages);
   }
   for (NewSpacePage* page : newspace_evacuation_candidates_) {
-    num_pages++;
     live_bytes += page->LiveBytes();
+    job.AddPage(page, &abandoned_pages);
   }
-  DCHECK_GE(num_pages, 1);
+  DCHECK_GE(job.NumberOfPages(), 1);
 
   // Used for trace summary.
   intptr_t compaction_speed = 0;
@@ -3178,113 +3161,32 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
     compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
   }
 
-  const int num_tasks = NumberOfParallelCompactionTasks(num_pages, live_bytes);
-  // Set up compaction spaces.
-  Evacuator** evacuators = new Evacuator*[num_tasks];
-  for (int i = 0; i < num_tasks; i++) {
-    evacuators[i] = new Evacuator(this, evacuation_candidates_,
-                                  newspace_evacuation_candidates_);
+  const int wanted_num_tasks =
+      NumberOfParallelCompactionTasks(job.NumberOfPages(), live_bytes);
+  Evacuator** evacuators = new Evacuator*[wanted_num_tasks];
+  for (int i = 0; i < wanted_num_tasks; i++) {
+    evacuators[i] = new Evacuator(this);
   }
-  // Kick off parallel tasks.
-  StartParallelCompaction(evacuators, num_tasks);
-  // Wait for unfinished and not-yet-started tasks.
-  WaitUntilCompactionCompleted(&evacuators[1], num_tasks - 1);
-
-  // Finalize local evacuators by merging back all locally cached data.
-  for (int i = 0; i < num_tasks; i++) {
+  job.Run(wanted_num_tasks, [evacuators](int i) { return evacuators[i]; });
+  for (int i = 0; i < wanted_num_tasks; i++) {
     evacuators[i]->Finalize();
     delete evacuators[i];
   }
   delete[] evacuators;
 
-  // Finalize pages sequentially.
-  for (NewSpacePage* p : newspace_evacuation_candidates_) {
-    DCHECK_EQ(p->parallel_compaction_state().Value(),
-              MemoryChunk::kCompactingFinalize);
-    p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
-  }
-
-  int abandoned_pages = 0;
-  for (Page* p : evacuation_candidates_) {
-    switch (p->parallel_compaction_state().Value()) {
-      case MemoryChunk::ParallelCompactingState::kCompactingAborted:
-        // We have partially compacted the page, i.e., some objects may have
-        // moved, others are still in place.
-        // We need to:
-        // - Leave the evacuation candidate flag for later processing of
-        //   slots buffer entries.
-        // - Leave the slots buffer there for processing of entries added by
-        //   the write barrier.
-        // - Rescan the page as slot recording in the migration buffer only
-        //   happens upon moving (which we potentially didn't do).
-        // - Leave the page in the list of pages of a space since we could not
-        //   fully evacuate it.
-        // - Mark them for rescanning for store buffer entries as we otherwise
-        //   might have stale store buffer entries that become "valid" again
-        //   after reusing the memory. Note that all existing store buffer
-        //   entries of such pages are filtered before rescanning.
-        DCHECK(p->IsEvacuationCandidate());
-        p->SetFlag(Page::COMPACTION_WAS_ABORTED);
-        abandoned_pages++;
-        break;
-      case MemoryChunk::kCompactingFinalize:
-        DCHECK(p->IsEvacuationCandidate());
-        DCHECK(p->SweepingDone());
-        p->Unlink();
-        break;
-      case MemoryChunk::kCompactingDone:
-        DCHECK(p->IsFlagSet(Page::POPULAR_PAGE));
-        DCHECK(p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
-        break;
-      default:
-        // MemoryChunk::kCompactingInProgress.
-        UNREACHABLE();
-    }
-    p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
-  }
   if (FLAG_trace_fragmentation) {
     PrintIsolate(isolate(),
                  "%8.0f ms: compaction: parallel=%d pages=%d aborted=%d "
-                 "tasks=%d cores=%d live_bytes=%" V8_PTR_PREFIX
+                 "wanted_tasks=%d tasks=%d cores=%d live_bytes=%" V8_PTR_PREFIX
                  "d compaction_speed=%" V8_PTR_PREFIX "d\n",
                  isolate()->time_millis_since_init(), FLAG_parallel_compaction,
-                 num_pages, abandoned_pages, num_tasks,
-                 base::SysInfo::NumberOfProcessors(), live_bytes,
-                 compaction_speed);
+                 job.NumberOfPages(), abandoned_pages, wanted_num_tasks,
+                 job.NumberOfTasks(),
+                 V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads(),
+                 live_bytes, compaction_speed);
   }
 }
-
-void MarkCompactCollector::StartParallelCompaction(Evacuator** evacuators,
-                                                   int len) {
-  compaction_in_progress_ = true;
-  for (int i = 1; i < len; i++) {
-    CompactionTask* task = new CompactionTask(heap(), evacuators[i]);
-    V8::GetCurrentPlatform()->CallOnBackgroundThread(
-        task, v8::Platform::kShortRunningTask);
-  }
-
-  // Contribute on main thread.
-  evacuators[0]->EvacuatePages();
-}
-
-void MarkCompactCollector::WaitUntilCompactionCompleted(Evacuator** evacuators,
-                                                        int len) {
-  // Try to cancel compaction tasks that have not been run (as they might be
-  // stuck in a worker queue). Tasks that cannot be canceled, have either
-  // already completed or are still running, hence we need to wait for their
-  // semaphore signal.
-  for (int i = 0; i < len; i++) {
-    if (!heap()->isolate()->cancelable_task_manager()->TryAbort(
-            evacuators[i]->task_id())) {
-      pending_compaction_tasks_semaphore_.Wait();
-    }
-  }
-  compaction_in_progress_ = false;
-}
-
 
 class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
  public:
   virtual Object* RetainAs(Object* object) {
--- a/src/heap/mark-compact.h
+++ b/src/heap/mark-compact.h
@@ -321,6 +321,8 @@ class ThreadLocalTop;
 // Mark-Compact collector
 class MarkCompactCollector {
  public:
+  class Evacuator;
+
   enum IterationMode {
     kKeepMarking,
     kClearMarkbits,
@@ -504,11 +506,9 @@ class MarkCompactCollector {
   }
 
  private:
-  class CompactionTask;
   class EvacuateNewSpaceVisitor;
   class EvacuateOldSpaceVisitor;
   class EvacuateVisitorBase;
-  class Evacuator;
   class HeapObjectVisitor;
   class SweeperTask;
 
@@ -704,9 +704,6 @@ class MarkCompactCollector {
   // The number of parallel compaction tasks, including the main thread.
   int NumberOfParallelCompactionTasks(int pages, intptr_t live_bytes);
 
-  void StartParallelCompaction(Evacuator** evacuators, int len);
-  void WaitUntilCompactionCompleted(Evacuator** evacuators, int len);
-
   void EvacuateNewSpaceAndCandidates();
 
   void UpdatePointersAfterEvacuation();
--- a/src/heap/page-parallel-job.h
+++ b/src/heap/page-parallel-job.h
@@ -57,7 +57,10 @@ class PageParallelJob {
 
   int NumberOfPages() { return num_items_; }
 
-  // Runs the given number of tasks in parallel and processes the previosly
+  // Returns the number of tasks that were spawned when running the job.
+  int NumberOfTasks() { return num_tasks_; }
+
+  // Runs the given number of tasks in parallel and processes the previously
   // added pages. This function blocks until all tasks finish.
   // The callback takes the index of a task and returns data for that task.
   template <typename Callback>
@@ -69,11 +72,11 @@ class PageParallelJob {
         kMaxNumberOfTasks,
         static_cast<int>(
             V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()));
-    num_tasks = Max(1, Min(num_tasks, max_num_tasks));
-    int items_per_task = (num_items_ + num_tasks - 1) / num_tasks;
+    num_tasks_ = Max(1, Min(num_tasks, max_num_tasks));
+    int items_per_task = (num_items_ + num_tasks_ - 1) / num_tasks_;
     int start_index = 0;
     Task* main_task = nullptr;
-    for (int i = 0; i < num_tasks; i++, start_index += items_per_task) {
+    for (int i = 0; i < num_tasks_; i++, start_index += items_per_task) {
       if (start_index >= num_items_) {
         start_index -= num_items_;
       }
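The sharding above is ceiling division plus a wrap-around of start indices that run past the last item. A standalone illustration with arbitrarily chosen numbers (5 items, 4 tasks):

```cpp
#include <cstdio>

// Mirrors the sharding arithmetic in PageParallelJob::Run: items_per_task is
// the ceiling of num_items / num_tasks, and a start index that runs past the
// last item wraps around so every task still begins at a valid item.
int main() {
  const int num_items = 5;  // demo values, not from the diff
  const int num_tasks = 4;
  const int items_per_task = (num_items + num_tasks - 1) / num_tasks;  // == 2
  int start_index = 0;
  for (int i = 0; i < num_tasks; i++, start_index += items_per_task) {
    if (start_index >= num_items) {
      start_index -= num_items;  // wrap-around, as in the loop above
    }
    std::printf("task %d starts at item %d\n", i, start_index);
  }
  // Prints start items 0, 2, 4, and 1 (task 3 wraps from 6 to 1).
  return 0;
}
```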
@@ -91,7 +94,7 @@ class PageParallelJob {
     main_task->Run();
     delete main_task;
     // Wait for background tasks.
-    for (int i = 0; i < num_tasks; i++) {
+    for (int i = 0; i < num_tasks_; i++) {
       if (!cancelable_task_manager_->TryAbort(task_ids[i])) {
         pending_tasks_.Wait();
       }
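The loop above is a cancel-or-wait handshake: every task that actually ran signals pending_tasks_ once, so the main thread waits exactly once per task it failed to TryAbort. A self-contained sketch of the same idiom, using C++20 std::counting_semaphore and an atomic claim flag as stand-ins for V8's CancelableTaskManager:

```cpp
#include <atomic>
#include <cstdio>
#include <semaphore>  // C++20
#include <thread>
#include <vector>

int main() {
  constexpr int kTasks = 4;
  std::counting_semaphore<kTasks> pending_tasks(0);
  std::atomic<bool> claimed[kTasks] = {};

  std::vector<std::thread> workers;
  for (int i = 0; i < kTasks; i++) {
    workers.emplace_back([&, i] {
      // A task runs only if it claims its slot before the main thread does.
      if (!claimed[i].exchange(true)) {
        /* ... process this task's share of pages ... */
        pending_tasks.release();  // signal completion
      }
    });
  }

  for (int i = 0; i < kTasks; i++) {
    // "TryAbort" analogue: claiming first guarantees the worker never runs.
    bool aborted = !claimed[i].exchange(true);
    if (!aborted) pending_tasks.acquire();  // task ran or is running: wait
  }
  for (std::thread& t : workers) t.join();
  std::printf("all tasks finished or were aborted\n");
  return 0;
}
```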
@@ -172,6 +175,7 @@ class PageParallelJob {
   CancelableTaskManager* cancelable_task_manager_;
   Item* items_;
   int num_items_;
+  int num_tasks_;
   base::Semaphore pending_tasks_;
   DISALLOW_COPY_AND_ASSIGN(PageParallelJob);
 };
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -485,7 +485,6 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
   chunk->progress_bar_ = 0;
   chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
   chunk->concurrent_sweeping_state().SetValue(kSweepingDone);
-  chunk->parallel_compaction_state().SetValue(kCompactingDone);
   chunk->mutex_ = nullptr;
   chunk->available_in_free_list_ = 0;
   chunk->wasted_memory_ = 0;
--- a/src/heap/spaces.h
+++ b/src/heap/spaces.h
@@ -302,7 +302,6 @@ class MemoryChunk {
     EVACUATION_CANDIDATE,
     RESCAN_ON_EVACUATION,
     NEVER_EVACUATE,  // May contain immortal immutables.
-    POPULAR_PAGE,    // Slots buffer of this page overflowed on the previous GC.
 
     // Large objects can have a progress bar in their page header. These object
     // are scanned in increments and will be kept black while being scanned.
@@ -331,19 +330,6 @@ class MemoryChunk {
     NUM_MEMORY_CHUNK_FLAGS
   };
 
-  // |kCompactionDone|: Initial compaction state of a |MemoryChunk|.
-  // |kCompactingInProgress|: Parallel compaction is currently in progress.
-  // |kCompactingFinalize|: Parallel compaction is done but the chunk needs to
-  //   be finalized.
-  // |kCompactingAborted|: Parallel compaction has been aborted, which should
-  //   for now only happen in OOM scenarios.
-  enum ParallelCompactingState {
-    kCompactingDone,
-    kCompactingInProgress,
-    kCompactingFinalize,
-    kCompactingAborted,
-  };
-
   // |kSweepingDone|: The page state when sweeping is complete or sweeping must
   // not be performed on that page. Sweeper threads that are done with their
   // work will set this value and not touch the page anymore.
@@ -403,8 +389,7 @@ class MemoryChunk {
       kIntptrSize         // intptr_t write_barrier_counter_
       + kPointerSize      // AtomicValue high_water_mark_
       + kPointerSize      // base::Mutex* mutex_
-      + kPointerSize      // base::AtomicWord parallel_sweeping_
-      + kPointerSize      // AtomicValue parallel_compaction_
+      + kPointerSize      // base::AtomicWord concurrent_sweeping_
       + 2 * kPointerSize  // AtomicNumber free-list statistics
       + kPointerSize      // AtomicValue next_chunk_
       + kPointerSize;     // AtomicValue prev_chunk_
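The size constant above is hand-maintained, which is why dropping the parallel_compaction_ field also means deleting its kPointerSize term. A hedged sketch of how such manual accounting can be made self-checking (a hypothetical static_assert pattern, not something the V8 code here does):

```cpp
#include <cstddef>
#include <cstdint>

// Hypothetical guard: model the hand-counted header fields as a struct and
// let the compiler verify that the arithmetic stays in sync with the fields.
struct ChunkHeaderModel {
  void* mutex_;                        // base::Mutex* mutex_
  std::intptr_t concurrent_sweeping_;  // AtomicValue, one word
  std::intptr_t next_chunk_;           // AtomicValue next_chunk_
  std::intptr_t prev_chunk_;           // AtomicValue prev_chunk_
};

constexpr std::size_t kPointerSize = sizeof(void*);
constexpr std::size_t kModeledHeaderSize = 4 * kPointerSize;

static_assert(sizeof(ChunkHeaderModel) == kModeledHeaderSize,
              "update kModeledHeaderSize when adding or removing fields");
```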
@@ -471,10 +456,6 @@ class MemoryChunk {
     return concurrent_sweeping_;
   }
 
-  AtomicValue<ParallelCompactingState>& parallel_compaction_state() {
-    return parallel_compaction_;
-  }
-
   // Manage live byte count, i.e., count of bytes in black objects.
   inline void ResetLiveBytes();
   inline void IncrementLiveBytes(int by);
@@ -701,7 +682,6 @@ class MemoryChunk {
   base::Mutex* mutex_;
 
   AtomicValue<ConcurrentSweepingState> concurrent_sweeping_;
-  AtomicValue<ParallelCompactingState> parallel_compaction_;
 
   // PagedSpace free-list statistics.
   AtomicNumber<intptr_t> available_in_free_list_;