[heap] Use PageParallelJob for parallel evacuation
Move evacuation of new space and old space pages to the provided framework for parallelization.

Drive-by fix: remove leftovers of the POPULAR_PAGE flag.

BUG=chromium:524425
LOG=N
Review URL: https://codereview.chromium.org/1782043004
Cr-Commit-Position: refs/heads/master@{#34687}
parent d81c3b4a78
commit 4566531c6e
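For orientation: the commit swaps the hand-rolled CompactionTask/semaphore machinery for the generic PageParallelJob framework. Pages are registered with AddPage() together with per-page data, Run() distributes them across background tasks while the main thread contributes, and finalization runs sequentially afterwards on the main thread. The sketch below is an editor's illustration of that traits pattern, not V8 code: std::thread stands in for V8's platform task runner, an atomic counter replaces the framework's static round-robin partitioning, and FakePage, SketchJobTraits, and SketchParallelJob are invented names.

// A minimal, self-contained sketch of the traits pattern behind
// PageParallelJob (simplified; NOT V8 code).
#include <atomic>
#include <cstdio>
#include <thread>
#include <utility>
#include <vector>

struct FakePage { int live_bytes; bool evacuated; };

// Mirrors EvacuationJobTraits: per-page and per-task payloads, a parallel
// processing step, and an optional sequential finalization step.
struct SketchJobTraits {
  using PerPageData = int*;  // Pointer to number of aborted pages.
  using PerTaskData = int;   // Stand-in for MarkCompactCollector::Evacuator*.
  static const bool NeedSequentialFinalization = true;

  static bool ProcessPageInParallel(PerTaskData task, FakePage* page,
                                    PerPageData) {
    page->evacuated = (page->live_bytes >= 0);  // Pretend to evacuate.
    return page->evacuated;
  }
  static void FinalizePageSequentially(FakePage* page, bool success,
                                       PerPageData aborted) {
    if (!success) *aborted += 1;  // Count aborted pages on the main thread.
  }
};

template <typename Traits>
class SketchParallelJob {
 public:
  void AddPage(FakePage* page, typename Traits::PerPageData data) {
    items_.emplace_back(page, data);
  }
  int NumberOfPages() const { return static_cast<int>(items_.size()); }

  // Runs |num_tasks| workers over the added pages, then finalizes
  // sequentially on the calling thread, as PageParallelJob::Run does.
  template <typename Callback>
  void Run(int num_tasks, Callback per_task_data) {
    std::vector<bool> results(items_.size());
    std::atomic<size_t> next{0};  // Simplified: work is claimed via a counter.
    auto worker = [&](typename Traits::PerTaskData task) {
      for (size_t i = next++; i < items_.size(); i = next++)
        results[i] = Traits::ProcessPageInParallel(task, items_[i].first,
                                                   items_[i].second);
    };
    std::vector<std::thread> threads;
    for (int t = 1; t < num_tasks; t++)
      threads.emplace_back(worker, per_task_data(t));
    worker(per_task_data(0));  // The main thread contributes too.
    for (auto& th : threads) th.join();
    if (Traits::NeedSequentialFinalization)
      for (size_t i = 0; i < items_.size(); i++)
        Traits::FinalizePageSequentially(items_[i].first, results[i],
                                         items_[i].second);
  }

 private:
  std::vector<std::pair<FakePage*, typename Traits::PerPageData>> items_;
};

int main() {
  FakePage pages[3] = {{100, false}, {-1, false}, {50, false}};
  int aborted = 0;
  SketchParallelJob<SketchJobTraits> job;
  for (FakePage& p : pages) job.AddPage(&p, &aborted);
  job.Run(2, [](int task_id) { return task_id; });
  std::printf("pages=%d aborted=%d\n", job.NumberOfPages(), aborted);
}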
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -665,11 +665,6 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
   while (it.has_next()) {
     Page* p = it.next();
     if (p->NeverEvacuate()) continue;
-    if (p->IsFlagSet(Page::POPULAR_PAGE)) {
-      // This page had slots buffer overflow on previous GC, skip it.
-      p->ClearFlag(Page::POPULAR_PAGE);
-      continue;
-    }
     // Invariant: Evacuation candidates are just created when marking is
     // started. This means that sweeping has finished. Furthermore, at the end
     // of a GC all evacuation candidates are cleared and their slot buffers are
@@ -2958,12 +2953,8 @@ void MarkCompactCollector::EvacuateNewSpaceEpilogue() {
 
 class MarkCompactCollector::Evacuator : public Malloced {
  public:
-  Evacuator(MarkCompactCollector* collector,
-            const List<Page*>& evacuation_candidates,
-            const List<NewSpacePage*>& newspace_evacuation_candidates)
+  explicit Evacuator(MarkCompactCollector* collector)
       : collector_(collector),
-        evacuation_candidates_(evacuation_candidates),
-        newspace_evacuation_candidates_(newspace_evacuation_candidates),
         compaction_spaces_(collector->heap()),
         local_pretenuring_feedback_(HashMap::PointersMatch,
                                     kInitialLocalPretenuringFeedbackCapacity),
@@ -2973,11 +2964,9 @@ class MarkCompactCollector::Evacuator : public Malloced {
         old_space_visitor_(collector->heap(), &compaction_spaces_,
                            &old_to_old_slots_, &old_to_new_slots_),
         duration_(0.0),
-        bytes_compacted_(0),
-        task_id_(0) {}
+        bytes_compacted_(0) {}
 
-  // Evacuate the configured set of pages in parallel.
-  inline void EvacuatePages();
+  inline bool EvacuatePage(MemoryChunk* chunk);
 
   // Merge back locally cached info sequentially. Note that this method needs
   // to be called from the main thread.
@@ -2985,9 +2974,6 @@ class MarkCompactCollector::Evacuator : public Malloced {
 
   CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; }
 
-  uint32_t task_id() { return task_id_; }
-  void set_task_id(uint32_t id) { task_id_ = id; }
-
  private:
   static const int kInitialLocalPretenuringFeedbackCapacity = 256;
 
@@ -3002,10 +2988,6 @@ class MarkCompactCollector::Evacuator : public Malloced {
 
   MarkCompactCollector* collector_;
 
-  // Pages to process.
-  const List<Page*>& evacuation_candidates_;
-  const List<NewSpacePage*>& newspace_evacuation_candidates_;
-
   // Locally cached collector data.
   CompactionSpaceCollection compaction_spaces_;
   LocalSlotsBuffer old_to_old_slots_;
@@ -3019,60 +3001,40 @@ class MarkCompactCollector::Evacuator : public Malloced {
   // Book keeping info.
   double duration_;
   intptr_t bytes_compacted_;
-
-  // Task id, if this evacuator is executed on a background task instead of
-  // the main thread. Can be used to try to abort the task currently scheduled
-  // to executed to evacuate pages.
-  uint32_t task_id_;
 };
 
 bool MarkCompactCollector::Evacuator::EvacuateSinglePage(
     MemoryChunk* p, HeapObjectVisitor* visitor) {
-  bool success = true;
-  if (p->parallel_compaction_state().TrySetValue(
-          MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) {
-    if (p->IsEvacuationCandidate() || p->InNewSpace()) {
-      DCHECK_EQ(p->parallel_compaction_state().Value(),
-                MemoryChunk::kCompactingInProgress);
-      int saved_live_bytes = p->LiveBytes();
-      double evacuation_time;
-      {
-        AlwaysAllocateScope always_allocate(heap()->isolate());
-        TimedScope timed_scope(&evacuation_time);
-        success = collector_->VisitLiveObjects(p, visitor, kClearMarkbits);
-      }
-      if (success) {
-        ReportCompactionProgress(evacuation_time, saved_live_bytes);
-        p->parallel_compaction_state().SetValue(
-            MemoryChunk::kCompactingFinalize);
-      } else {
-        p->parallel_compaction_state().SetValue(
-            MemoryChunk::kCompactingAborted);
-      }
-    } else {
-      // There could be popular pages in the list of evacuation candidates
-      // which we do not compact.
-      p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
-    }
+  bool success = false;
+  DCHECK(p->IsEvacuationCandidate() || p->InNewSpace());
+  int saved_live_bytes = p->LiveBytes();
+  double evacuation_time;
+  {
+    AlwaysAllocateScope always_allocate(heap()->isolate());
+    TimedScope timed_scope(&evacuation_time);
+    success = collector_->VisitLiveObjects(p, visitor, kClearMarkbits);
+  }
+  if (success) {
+    ReportCompactionProgress(evacuation_time, saved_live_bytes);
   }
   return success;
 }
 
-void MarkCompactCollector::Evacuator::EvacuatePages() {
-  for (NewSpacePage* p : newspace_evacuation_candidates_) {
-    DCHECK(p->InNewSpace());
-    DCHECK_EQ(p->concurrent_sweeping_state().Value(),
-              NewSpacePage::kSweepingDone);
-    bool success = EvacuateSinglePage(p, &new_space_visitor_);
-    DCHECK(success);
-    USE(success);
-  }
-  for (Page* p : evacuation_candidates_) {
-    DCHECK(p->IsEvacuationCandidate() ||
-           p->IsFlagSet(MemoryChunk::RESCAN_ON_EVACUATION));
-    DCHECK_EQ(p->concurrent_sweeping_state().Value(), Page::kSweepingDone);
-    EvacuateSinglePage(p, &old_space_visitor_);
-  }
+bool MarkCompactCollector::Evacuator::EvacuatePage(MemoryChunk* chunk) {
+  bool success = false;
+  if (chunk->InNewSpace()) {
+    DCHECK_EQ(chunk->concurrent_sweeping_state().Value(),
+              NewSpacePage::kSweepingDone);
+    success = EvacuateSinglePage(chunk, &new_space_visitor_);
+    DCHECK(success);
+    USE(success);
+  } else {
+    DCHECK(chunk->IsEvacuationCandidate() ||
+           chunk->IsFlagSet(MemoryChunk::RESCAN_ON_EVACUATION));
+    DCHECK_EQ(chunk->concurrent_sweeping_state().Value(), Page::kSweepingDone);
+    success = EvacuateSinglePage(chunk, &old_space_visitor_);
+  }
+  return success;
 }
 
 void MarkCompactCollector::Evacuator::Finalize() {
@@ -3105,29 +3067,6 @@ void MarkCompactCollector::Evacuator::Finalize() {
   });
 }
 
-class MarkCompactCollector::CompactionTask : public CancelableTask {
- public:
-  explicit CompactionTask(Heap* heap, Evacuator* evacuator)
-      : CancelableTask(heap->isolate()), heap_(heap), evacuator_(evacuator) {
-    evacuator->set_task_id(id());
-  }
-
-  virtual ~CompactionTask() {}
-
- private:
-  // v8::internal::CancelableTask overrides.
-  void RunInternal() override {
-    evacuator_->EvacuatePages();
-    heap_->mark_compact_collector()
-        ->pending_compaction_tasks_semaphore_.Signal();
-  }
-
-  Heap* heap_;
-  Evacuator* evacuator_;
-
-  DISALLOW_COPY_AND_ASSIGN(CompactionTask);
-};
-
 int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages,
                                                           intptr_t live_bytes) {
   if (!FLAG_parallel_compaction) return 1;
@@ -3158,19 +3097,63 @@ int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages,
   return Min(available_cores, tasks_capped_pages);
 }
 
+class EvacuationJobTraits {
+ public:
+  typedef int* PerPageData;  // Pointer to number of aborted pages.
+  typedef MarkCompactCollector::Evacuator* PerTaskData;
+
+  static const bool NeedSequentialFinalization = true;
+
+  static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator,
+                                    MemoryChunk* chunk, PerPageData) {
+    return evacuator->EvacuatePage(chunk);
+  }
+
+  static void FinalizePageSequentially(Heap*, MemoryChunk* chunk, bool success,
+                                       PerPageData data) {
+    if (chunk->InNewSpace()) {
+      DCHECK(success);
+    } else {
+      Page* p = static_cast<Page*>(chunk);
+      if (success) {
+        DCHECK(p->IsEvacuationCandidate());
+        DCHECK(p->SweepingDone());
+        p->Unlink();
+      } else {
+        // We have partially compacted the page, i.e., some objects may have
+        // moved, others are still in place.
+        // We need to:
+        // - Leave the evacuation candidate flag for later processing of slots
+        //   buffer entries.
+        // - Leave the slots buffer there for processing of entries added by
+        //   the write barrier.
+        // - Rescan the page as slot recording in the migration buffer only
+        //   happens upon moving (which we potentially didn't do).
+        // - Leave the page in the list of pages of a space since we could not
+        //   fully evacuate it.
+        DCHECK(p->IsEvacuationCandidate());
+        p->SetFlag(Page::COMPACTION_WAS_ABORTED);
+        *data += 1;
+      }
+    }
+  }
+};
+
 void MarkCompactCollector::EvacuatePagesInParallel() {
-  int num_pages = 0;
+  PageParallelJob<EvacuationJobTraits> job(
+      heap_, heap_->isolate()->cancelable_task_manager());
+
+  int abandoned_pages = 0;
   intptr_t live_bytes = 0;
   for (Page* page : evacuation_candidates_) {
-    num_pages++;
     live_bytes += page->LiveBytes();
+    job.AddPage(page, &abandoned_pages);
   }
   for (NewSpacePage* page : newspace_evacuation_candidates_) {
-    num_pages++;
     live_bytes += page->LiveBytes();
+    job.AddPage(page, &abandoned_pages);
   }
-  DCHECK_GE(num_pages, 1);
+  DCHECK_GE(job.NumberOfPages(), 1);
 
   // Used for trace summary.
   intptr_t compaction_speed = 0;
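A condensed view of the abort bookkeeping above (an editor's sketch with invented plain types, not V8 code): each page is registered with a pointer to a shared abandoned_pages counter (the PerPageData), the parallel phase records per-page success, and the sequential phase increments the counter only for old-space pages whose evacuation failed.

#include <cstdio>
#include <vector>

int main() {
  int abandoned_pages = 0;              // PerPageData target in the diff.
  std::vector<bool> success = {true, false, true};    // Parallel-phase results.
  std::vector<bool> in_new_space = {true, false, false};
  for (size_t i = 0; i < success.size(); i++) {
    if (in_new_space[i]) continue;      // New-space evacuation must succeed.
    if (!success[i]) abandoned_pages++; // Mirrors "*data += 1;" above.
  }
  std::printf("aborted=%d\n", abandoned_pages);  // Prints: aborted=1
}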
@@ -3178,113 +3161,32 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
     compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
   }
 
-  const int num_tasks = NumberOfParallelCompactionTasks(num_pages, live_bytes);
-
   // Set up compaction spaces.
-  Evacuator** evacuators = new Evacuator*[num_tasks];
-  for (int i = 0; i < num_tasks; i++) {
-    evacuators[i] = new Evacuator(this, evacuation_candidates_,
-                                  newspace_evacuation_candidates_);
+  const int wanted_num_tasks =
+      NumberOfParallelCompactionTasks(job.NumberOfPages(), live_bytes);
+  Evacuator** evacuators = new Evacuator*[wanted_num_tasks];
+  for (int i = 0; i < wanted_num_tasks; i++) {
+    evacuators[i] = new Evacuator(this);
   }
 
-  // Kick off parallel tasks.
-  StartParallelCompaction(evacuators, num_tasks);
-  // Wait for unfinished and not-yet-started tasks.
-  WaitUntilCompactionCompleted(&evacuators[1], num_tasks - 1);
+  job.Run(wanted_num_tasks, [evacuators](int i) { return evacuators[i]; });
 
   // Finalize local evacuators by merging back all locally cached data.
-  for (int i = 0; i < num_tasks; i++) {
+  for (int i = 0; i < wanted_num_tasks; i++) {
     evacuators[i]->Finalize();
     delete evacuators[i];
   }
   delete[] evacuators;
 
-  // Finalize pages sequentially.
-  for (NewSpacePage* p : newspace_evacuation_candidates_) {
-    DCHECK_EQ(p->parallel_compaction_state().Value(),
-              MemoryChunk::kCompactingFinalize);
-    p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
-  }
-
-  int abandoned_pages = 0;
-  for (Page* p : evacuation_candidates_) {
-    switch (p->parallel_compaction_state().Value()) {
-      case MemoryChunk::ParallelCompactingState::kCompactingAborted:
-        // We have partially compacted the page, i.e., some objects may have
-        // moved, others are still in place.
-        // We need to:
-        // - Leave the evacuation candidate flag for later processing of
-        //   slots buffer entries.
-        // - Leave the slots buffer there for processing of entries added by
-        //   the write barrier.
-        // - Rescan the page as slot recording in the migration buffer only
-        //   happens upon moving (which we potentially didn't do).
-        // - Leave the page in the list of pages of a space since we could not
-        //   fully evacuate it.
-        // - Mark them for rescanning for store buffer entries as we otherwise
-        //   might have stale store buffer entries that become "valid" again
-        //   after reusing the memory. Note that all existing store buffer
-        //   entries of such pages are filtered before rescanning.
-        DCHECK(p->IsEvacuationCandidate());
-        p->SetFlag(Page::COMPACTION_WAS_ABORTED);
-        abandoned_pages++;
-        break;
-      case MemoryChunk::kCompactingFinalize:
-        DCHECK(p->IsEvacuationCandidate());
-        DCHECK(p->SweepingDone());
-        p->Unlink();
-        break;
-      case MemoryChunk::kCompactingDone:
-        DCHECK(p->IsFlagSet(Page::POPULAR_PAGE));
-        DCHECK(p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
-        break;
-      default:
-        // MemoryChunk::kCompactingInProgress.
-        UNREACHABLE();
-    }
-    p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
-  }
   if (FLAG_trace_fragmentation) {
     PrintIsolate(isolate(),
                  "%8.0f ms: compaction: parallel=%d pages=%d aborted=%d "
-                 "tasks=%d cores=%d live_bytes=%" V8_PTR_PREFIX
+                 "wanted_tasks=%d tasks=%d cores=%d live_bytes=%" V8_PTR_PREFIX
                  "d compaction_speed=%" V8_PTR_PREFIX "d\n",
                  isolate()->time_millis_since_init(), FLAG_parallel_compaction,
-                 num_pages, abandoned_pages, num_tasks,
-                 base::SysInfo::NumberOfProcessors(), live_bytes,
-                 compaction_speed);
+                 job.NumberOfPages(), abandoned_pages, wanted_num_tasks,
+                 job.NumberOfTasks(),
+                 V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads(),
+                 live_bytes, compaction_speed);
   }
 }
 
-void MarkCompactCollector::StartParallelCompaction(Evacuator** evacuators,
-                                                   int len) {
-  compaction_in_progress_ = true;
-  for (int i = 1; i < len; i++) {
-    CompactionTask* task = new CompactionTask(heap(), evacuators[i]);
-    V8::GetCurrentPlatform()->CallOnBackgroundThread(
-        task, v8::Platform::kShortRunningTask);
-  }
-
-  // Contribute on main thread.
-  evacuators[0]->EvacuatePages();
-}
-
-void MarkCompactCollector::WaitUntilCompactionCompleted(Evacuator** evacuators,
-                                                        int len) {
-  // Try to cancel compaction tasks that have not been run (as they might be
-  // stuck in a worker queue). Tasks that cannot be canceled, have either
-  // already completed or are still running, hence we need to wait for their
-  // semaphore signal.
-  for (int i = 0; i < len; i++) {
-    if (!heap()->isolate()->cancelable_task_manager()->TryAbort(
-            evacuators[i]->task_id())) {
-      pending_compaction_tasks_semaphore_.Wait();
-    }
-  }
-  compaction_in_progress_ = false;
-}
-
 
 class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
  public:
   virtual Object* RetainAs(Object* object) {
--- a/src/heap/mark-compact.h
+++ b/src/heap/mark-compact.h
@@ -321,6 +321,8 @@ class ThreadLocalTop;
 // Mark-Compact collector
 class MarkCompactCollector {
  public:
+  class Evacuator;
+
   enum IterationMode {
     kKeepMarking,
     kClearMarkbits,
@@ -504,11 +506,9 @@ class MarkCompactCollector {
   }
 
  private:
-  class CompactionTask;
   class EvacuateNewSpaceVisitor;
   class EvacuateOldSpaceVisitor;
   class EvacuateVisitorBase;
-  class Evacuator;
   class HeapObjectVisitor;
   class SweeperTask;
 
@@ -704,9 +704,6 @@ class MarkCompactCollector {
   // The number of parallel compaction tasks, including the main thread.
   int NumberOfParallelCompactionTasks(int pages, intptr_t live_bytes);
 
-  void StartParallelCompaction(Evacuator** evacuators, int len);
-  void WaitUntilCompactionCompleted(Evacuator** evacuators, int len);
-
   void EvacuateNewSpaceAndCandidates();
 
   void UpdatePointersAfterEvacuation();
--- a/src/heap/page-parallel-job.h
+++ b/src/heap/page-parallel-job.h
@@ -57,7 +57,10 @@ class PageParallelJob {
 
   int NumberOfPages() { return num_items_; }
 
-  // Runs the given number of tasks in parallel and processes the previosly
+  // Returns the number of tasks that were spawned when running the job.
+  int NumberOfTasks() { return num_tasks_; }
+
+  // Runs the given number of tasks in parallel and processes the previously
   // added pages. This function blocks until all tasks finish.
   // The callback takes the index of a task and returns data for that task.
   template <typename Callback>
@@ -69,11 +72,11 @@ class PageParallelJob {
         kMaxNumberOfTasks,
         static_cast<int>(
             V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()));
-    num_tasks = Max(1, Min(num_tasks, max_num_tasks));
-    int items_per_task = (num_items_ + num_tasks - 1) / num_tasks;
+    num_tasks_ = Max(1, Min(num_tasks, max_num_tasks));
+    int items_per_task = (num_items_ + num_tasks_ - 1) / num_tasks_;
     int start_index = 0;
     Task* main_task = nullptr;
-    for (int i = 0; i < num_tasks; i++, start_index += items_per_task) {
+    for (int i = 0; i < num_tasks_; i++, start_index += items_per_task) {
       if (start_index >= num_items_) {
         start_index -= num_items_;
       }
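The items_per_task expression above is a ceiling division, so each task receives a contiguous run of at most ceil(num_items_ / num_tasks_) pages, and a start_index that walks past num_items_ wraps back around. A tiny standalone check of that arithmetic (illustrative values only, not V8 code):

#include <cassert>

int main() {
  const int num_items = 7, num_tasks = 3;
  // Same expression as in Run(): ceil(7 / 3) == 3.
  const int items_per_task = (num_items + num_tasks - 1) / num_tasks;
  assert(items_per_task == 3);
  // Start indices handed to the three tasks: 0, 3, 6 -- each kept inside
  // [0, num_items), exactly as the "start_index -= num_items_" branch does.
  for (int i = 0, start_index = 0; i < num_tasks;
       i++, start_index += items_per_task) {
    if (start_index >= num_items) start_index -= num_items;
    assert(0 <= start_index && start_index < num_items);
  }
  return 0;
}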
@@ -91,7 +94,7 @@ class PageParallelJob {
     main_task->Run();
     delete main_task;
     // Wait for background tasks.
-    for (int i = 0; i < num_tasks; i++) {
+    for (int i = 0; i < num_tasks_; i++) {
       if (!cancelable_task_manager_->TryAbort(task_ids[i])) {
         pending_tasks_.Wait();
       }
@@ -172,6 +175,7 @@ class PageParallelJob {
   CancelableTaskManager* cancelable_task_manager_;
   Item* items_;
   int num_items_;
+  int num_tasks_;
   base::Semaphore pending_tasks_;
   DISALLOW_COPY_AND_ASSIGN(PageParallelJob);
 };
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -485,7 +485,6 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
   chunk->progress_bar_ = 0;
   chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
   chunk->concurrent_sweeping_state().SetValue(kSweepingDone);
-  chunk->parallel_compaction_state().SetValue(kCompactingDone);
   chunk->mutex_ = nullptr;
   chunk->available_in_free_list_ = 0;
   chunk->wasted_memory_ = 0;
--- a/src/heap/spaces.h
+++ b/src/heap/spaces.h
@@ -302,7 +302,6 @@ class MemoryChunk {
     EVACUATION_CANDIDATE,
     RESCAN_ON_EVACUATION,
     NEVER_EVACUATE,  // May contain immortal immutables.
-    POPULAR_PAGE,    // Slots buffer of this page overflowed on the previous GC.
 
     // Large objects can have a progress bar in their page header. These object
     // are scanned in increments and will be kept black while being scanned.
@@ -331,19 +330,6 @@ class MemoryChunk {
     NUM_MEMORY_CHUNK_FLAGS
   };
 
-  // |kCompactionDone|: Initial compaction state of a |MemoryChunk|.
-  // |kCompactingInProgress|: Parallel compaction is currently in progress.
-  // |kCompactingFinalize|: Parallel compaction is done but the chunk needs to
-  //   be finalized.
-  // |kCompactingAborted|: Parallel compaction has been aborted, which should
-  //   for now only happen in OOM scenarios.
-  enum ParallelCompactingState {
-    kCompactingDone,
-    kCompactingInProgress,
-    kCompactingFinalize,
-    kCompactingAborted,
-  };
-
   // |kSweepingDone|: The page state when sweeping is complete or sweeping must
   // not be performed on that page. Sweeper threads that are done with their
   // work will set this value and not touch the page anymore.
@@ -403,8 +389,7 @@ class MemoryChunk {
       kIntptrSize  // intptr_t write_barrier_counter_
       + kPointerSize  // AtomicValue high_water_mark_
       + kPointerSize  // base::Mutex* mutex_
-      + kPointerSize  // base::AtomicWord parallel_sweeping_
-      + kPointerSize  // AtomicValue parallel_compaction_
+      + kPointerSize  // base::AtomicWord concurrent_sweeping_
       + 2 * kPointerSize  // AtomicNumber free-list statistics
       + kPointerSize  // AtomicValue next_chunk_
       + kPointerSize;  // AtomicValue prev_chunk_
@@ -471,10 +456,6 @@ class MemoryChunk {
     return concurrent_sweeping_;
   }
 
-  AtomicValue<ParallelCompactingState>& parallel_compaction_state() {
-    return parallel_compaction_;
-  }
-
   // Manage live byte count, i.e., count of bytes in black objects.
   inline void ResetLiveBytes();
   inline void IncrementLiveBytes(int by);
@@ -701,7 +682,6 @@ class MemoryChunk {
   base::Mutex* mutex_;
 
   AtomicValue<ConcurrentSweepingState> concurrent_sweeping_;
-  AtomicValue<ParallelCompactingState> parallel_compaction_;
 
   // PagedSpace free-list statistics.
   AtomicNumber<intptr_t> available_in_free_list_;