Revert "[heap] Cleanup: Use std::atomic<T> instead of base::AtomicValue<T> in heap/*."
This reverts commit 770ace0779.
Reason for revert: breaks the "V8 Arm" and "V8 Arm - debug" builds: https://ci.chromium.org/p/v8/builders/luci.v8.ci/V8%20Arm%20-%20debug/7260
Original change's description:
> [heap] Cleanup: Use std::atomic<T> instead of base::AtomicValue<T> in heap/*.
>
> Bug: chromium:842083
> Change-Id: Idc04f9ddea326df4ac48a8c58321620660b21549
> Reviewed-on: https://chromium-review.googlesource.com/1129520
> Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> Commit-Queue: Hannes Payer <hpayer@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#54389}
TBR=ulan@chromium.org,hpayer@chromium.org,mlippautz@chromium.org
Change-Id: I108bc5386ea825c2700f9b830b85a1ca8c10bf4b
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: chromium:842083
Reviewed-on: https://chromium-review.googlesource.com/1134966
Reviewed-by: Maya Lekova <mslekova@chromium.org>
Commit-Queue: Maya Lekova <mslekova@chromium.org>
Cr-Commit-Position: refs/heads/master@{#54398}
Parent: ae044d697f
Commit: d20c14149c
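For context on what this revert swaps: every hunk below replaces a std::atomic<T> access (implicit load/store, exchange, compare_exchange_weak) with the corresponding call on V8's base::AtomicValue<T> wrapper (Value, SetValue, TrySetValue). The following sketch is illustrative only, uses standard C++ with made-up variable and enum names, and is not V8 code.

// Illustrative sketch (not V8 source): the std::atomic<T> idioms the reverted
// cleanup used, annotated with the base::AtomicValue<T> calls this revert
// restores. All names here are invented for the example.
#include <atomic>

enum class SweepState { kPending, kInProgress, kDone };

int main() {
  std::atomic<bool> preemption_request{false};
  preemption_request = true;            // restored as .SetValue(true)
  bool requested = preemption_request;  // restored as .Value()

  // One-shot conditional update; the diff's TrySetValue(old, new) plays the
  // same role. compare_exchange_weak appears only inside retry loops in the
  // hunks below because it may fail spuriously.
  std::atomic<SweepState> state{SweepState::kPending};
  SweepState expected = SweepState::kPending;
  bool claimed =
      state.compare_exchange_strong(expected, SweepState::kInProgress);

  return (requested && claimed) ? 0 : 1;
}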
@@ -627,7 +627,7 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
       marked_bytes += current_marked_bytes;
       base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes,
                                                 marked_bytes);
-      if (task_state->preemption_request) {
+      if (task_state->preemption_request.Value()) {
         TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
                      "ConcurrentMarking::Run Preempted");
         break;
@@ -703,7 +703,7 @@ void ConcurrentMarking::ScheduleTasks() {
         heap_->isolate()->PrintWithTimestamp(
             "Scheduling concurrent marking task %d\n", i);
       }
-      task_state_[i].preemption_request = false;
+      task_state_[i].preemption_request.SetValue(false);
       is_pending_[i] = true;
       ++pending_task_count_;
       auto task =
@@ -744,7 +744,7 @@ bool ConcurrentMarking::Stop(StopRequest stop_request) {
         is_pending_[i] = false;
         --pending_task_count_;
       } else if (stop_request == StopRequest::PREEMPT_TASKS) {
-        task_state_[i].preemption_request = true;
+        task_state_[i].preemption_request.SetValue(true);
       }
     }
   }
@@ -95,7 +95,7 @@ class ConcurrentMarking {
   struct TaskState {
     // The main thread sets this flag to true when it wants the concurrent
     // marker to give up the worker thread.
-    std::atomic<bool> preemption_request;
+    base::AtomicValue<bool> preemption_request;

     LiveBytesMap live_bytes;
     size_t marked_bytes = 0;
@@ -1425,7 +1425,7 @@ bool Heap::CollectGarbage(AllocationSpace space,
     if (deserialization_complete_) {
       memory_reducer_->NotifyMarkCompact(event);
     }
-    memory_pressure_level_ = MemoryPressureLevel::kNone;
+    memory_pressure_level_.SetValue(MemoryPressureLevel::kNone);
   }

   tracer()->Stop(collector);
@@ -3484,9 +3484,9 @@ void Heap::CheckMemoryPressure() {
     // The optimizing compiler may be unnecessarily holding on to memory.
     isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
   }
-  if (memory_pressure_level_ == MemoryPressureLevel::kCritical) {
+  if (memory_pressure_level_.Value() == MemoryPressureLevel::kCritical) {
     CollectGarbageOnMemoryPressure();
-  } else if (memory_pressure_level_ == MemoryPressureLevel::kModerate) {
+  } else if (memory_pressure_level_.Value() == MemoryPressureLevel::kModerate) {
     if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
       StartIncrementalMarking(kReduceMemoryFootprintMask,
                               GarbageCollectionReason::kMemoryPressure);
@@ -3538,8 +3538,8 @@ void Heap::CollectGarbageOnMemoryPressure() {

 void Heap::MemoryPressureNotification(MemoryPressureLevel level,
                                       bool is_isolate_locked) {
-  MemoryPressureLevel previous = memory_pressure_level_;
-  memory_pressure_level_ = level;
+  MemoryPressureLevel previous = memory_pressure_level_.Value();
+  memory_pressure_level_.SetValue(level);
   if ((previous != MemoryPressureLevel::kCritical &&
        level == MemoryPressureLevel::kCritical) ||
       (previous == MemoryPressureLevel::kNone &&
@@ -704,7 +704,7 @@ class Heap {
   bool ShouldOptimizeForMemoryUsage();

   bool HighMemoryPressure() {
-    return memory_pressure_level_ != MemoryPressureLevel::kNone;
+    return memory_pressure_level_.Value() != MemoryPressureLevel::kNone;
   }

   void RestoreHeapLimit(size_t heap_limit) {
@@ -2005,7 +2005,7 @@ class Heap {

   // Stores the memory pressure level that set by MemoryPressureNotification
   // and reset by a mark-compact garbage collection.
-  std::atomic<MemoryPressureLevel> memory_pressure_level_;
+  base::AtomicValue<MemoryPressureLevel> memory_pressure_level_;

   std::vector<std::pair<v8::NearHeapLimitCallback, void*> >
       near_heap_limit_callbacks_;
@@ -49,18 +49,17 @@ class V8_EXPORT_PRIVATE ItemParallelJob {
     virtual ~Item() = default;

     // Marks an item as being finished.
-    void MarkFinished() { CHECK_EQ(kProcessing, state_.exchange(kFinished)); }
+    void MarkFinished() { CHECK(state_.TrySetValue(kProcessing, kFinished)); }

    private:
-    enum ProcessingState : uintptr_t { kAvailable, kProcessing, kFinished };
+    enum ProcessingState { kAvailable, kProcessing, kFinished };

     bool TryMarkingAsProcessing() {
-      ProcessingState available = kAvailable;
-      return state_.compare_exchange_weak(available, kProcessing);
+      return state_.TrySetValue(kAvailable, kProcessing);
     }
-    bool IsFinished() { return state_ == kFinished; }
+    bool IsFinished() { return state_.Value() == kFinished; }

-    std::atomic<ProcessingState> state_{kAvailable};
+    base::AtomicValue<ProcessingState> state_{kAvailable};

     friend class ItemParallelJob;
     friend class ItemParallelJob::Task;
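The Item hunk above is the clearest before/after pair in this revert. Below is a standalone sketch (not the V8 class) of the std::atomic<ProcessingState> state machine that the revert removes, with V8's CHECK macros replaced by assert; only the members visible in the hunk are reproduced.

#include <atomic>
#include <cassert>
#include <cstdint>

// Sketch of the removed std::atomic-based state machine:
// kAvailable -> kProcessing -> kFinished.
class Item {
 public:
  // A worker claims the item; compare_exchange_weak may fail spuriously,
  // which is acceptable because callers simply retry or pick another item.
  bool TryMarkingAsProcessing() {
    ProcessingState available = kAvailable;
    return state_.compare_exchange_weak(available, kProcessing);
  }
  // Only the worker that claimed the item may mark it finished.
  void MarkFinished() {
    ProcessingState previous = state_.exchange(kFinished);
    assert(previous == kProcessing);
    (void)previous;  // keep the exchange even when asserts are compiled out
  }
  bool IsFinished() { return state_ == kFinished; }

 private:
  enum ProcessingState : uintptr_t { kAvailable, kProcessing, kFinished };
  std::atomic<ProcessingState> state_{kAvailable};
};

int main() {
  Item item;
  // The retry loop tolerates a spurious compare_exchange_weak failure.
  while (!item.TryMarkingAsProcessing()) {
  }
  item.MarkFinished();
  return item.IsFinished() ? 0 : 1;
}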
@@ -3379,7 +3379,7 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
       // that this adds unusable memory into the free list that is later on
       // (in the free list) dropped again. Since we only use the flag for
       // testing this is fine.
-      p->set_concurrent_sweeping_state(Page::kSweepingInProgress);
+      p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
       sweeper()->RawSweep(p, Sweeper::IGNORE_FREE_LIST,
                           Heap::ShouldZapGarbage()
                               ? FreeSpaceTreatmentMode::ZAP_FREE_SPACE
@@ -621,8 +621,8 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
   chunk->invalidated_slots_ = nullptr;
   chunk->skip_list_ = nullptr;
   chunk->progress_bar_ = 0;
-  chunk->high_water_mark_ = static_cast<intptr_t>(area_start - base);
-  chunk->set_concurrent_sweeping_state(kSweepingDone);
+  chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
+  chunk->concurrent_sweeping_state().SetValue(kSweepingDone);
   chunk->page_protection_change_mutex_ = new base::Mutex();
   chunk->write_unprotect_counter_ = 0;
   chunk->mutex_ = new base::Mutex();
@@ -760,7 +760,7 @@ Page* Page::ConvertNewToOld(Page* old_page) {
 size_t MemoryChunk::CommittedPhysicalMemory() {
   if (!base::OS::HasLazyCommits() || owner()->identity() == LO_SPACE)
     return size();
-  return high_water_mark_;
+  return high_water_mark_.Value();
 }

 bool MemoryChunk::IsPagedSpace() const {
@@ -2229,8 +2229,8 @@ void NewSpace::UpdateLinearAllocationArea() {
   Address new_top = to_space_.page_low();
   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
   allocation_info_.Reset(new_top, to_space_.page_high());
-  original_top_ = top();
-  original_limit_ = limit();
+  original_top_.SetValue(top());
+  original_limit_.SetValue(limit());
   StartNextInlineAllocationStep();
   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 }
@@ -368,10 +368,9 @@ class MemoryChunk {
       + kPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES  // TypedSlotSet* array
       + kPointerSize  // InvalidatedSlots* invalidated_slots_
       + kPointerSize  // SkipList* skip_list_
-      + kPointerSize  // std::atomic<intptr_t> high_water_mark_
+      + kPointerSize  // AtomicValue high_water_mark_
       + kPointerSize  // base::Mutex* mutex_
-      +
-      kPointerSize  // std::atomic<ConcurrentSweepingState> concurrent_sweeping_
+      + kPointerSize  // base::AtomicWord concurrent_sweeping_
       + kPointerSize  // base::Mutex* page_protection_change_mutex_
       + kPointerSize  // unitptr_t write_unprotect_counter_
       + kSizetSize * kNumTypes
@@ -435,10 +434,9 @@ class MemoryChunk {
     intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
     intptr_t old_mark = 0;
     do {
-      old_mark = chunk->high_water_mark_;
-    } while (
-        (new_mark > old_mark) &&
-        !chunk->high_water_mark_.compare_exchange_weak(old_mark, new_mark));
+      old_mark = chunk->high_water_mark_.Value();
+    } while ((new_mark > old_mark) &&
+             !chunk->high_water_mark_.TrySetValue(old_mark, new_mark));
   }

   Address address() const {
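The UpdateHighWaterMark hunk above is the same retry-loop pattern in both APIs: raise the mark only if the new value is larger, and retry if another thread won the race. A standalone sketch of that pattern with std::atomic follows; the free function and variable names are illustrative, not V8 API.

#include <atomic>
#include <cstdint>
#include <iostream>

// Raise an atomic watermark monotonically, retrying on contention.
void UpdateMark(std::atomic<intptr_t>* mark, intptr_t new_mark) {
  intptr_t old_mark = mark->load();
  // On failure, compare_exchange_weak reloads old_mark, so the loop re-checks
  // the condition against the freshest value -- the same shape as the removed
  // std::atomic code and the base::AtomicValue::TrySetValue loop it restores.
  while (new_mark > old_mark &&
         !mark->compare_exchange_weak(old_mark, new_mark)) {
  }
}

int main() {
  std::atomic<intptr_t> high_water_mark{100};
  UpdateMark(&high_water_mark, 250);  // raises the mark
  UpdateMark(&high_water_mark, 50);   // no-op: lower than the current mark
  std::cout << high_water_mark.load() << "\n";  // prints 250
  return 0;
}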
@@ -457,15 +455,13 @@ class MemoryChunk {
     return addr >= area_start() && addr <= area_end();
   }

-  void set_concurrent_sweeping_state(ConcurrentSweepingState state) {
-    concurrent_sweeping_ = state;
-  }
-
-  ConcurrentSweepingState concurrent_sweeping_state() const {
+  base::AtomicValue<ConcurrentSweepingState>& concurrent_sweeping_state() {
     return concurrent_sweeping_;
   }

-  bool SweepingDone() { return concurrent_sweeping_ == kSweepingDone; }
+  bool SweepingDone() {
+    return concurrent_sweeping_state().Value() == kSweepingDone;
+  }

   size_t size() const { return size_; }
   void set_size(size_t size) { size_ = size; }
@@ -526,7 +522,7 @@ class MemoryChunk {
   // Approximate amount of physical memory committed for this chunk.
   size_t CommittedPhysicalMemory();

-  Address HighWaterMark() { return address() + high_water_mark_; }
+  Address HighWaterMark() { return address() + high_water_mark_.Value(); }

   int progress_bar() {
     DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
@@ -623,9 +619,9 @@ class MemoryChunk {

   bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }

-  Space* owner() const { return owner_; }
+  Space* owner() const { return owner_.Value(); }

-  void set_owner(Space* space) { owner_ = space; }
+  void set_owner(Space* space) { owner_.SetValue(space); }

   bool IsPagedSpace() const;

@@ -660,7 +656,7 @@ class MemoryChunk {
   VirtualMemory reservation_;

   // The space owning this memory chunk.
-  std::atomic<Space*> owner_;
+  base::AtomicValue<Space*> owner_;

   Heap* heap_;

@@ -682,11 +678,11 @@ class MemoryChunk {

   // Assuming the initial allocation on a page is sequential,
   // count highest number of bytes ever allocated on the page.
-  std::atomic<intptr_t> high_water_mark_;
+  base::AtomicValue<intptr_t> high_water_mark_;

   base::Mutex* mutex_;

-  std::atomic<ConcurrentSweepingState> concurrent_sweeping_;
+  base::AtomicValue<ConcurrentSweepingState> concurrent_sweeping_;

   base::Mutex* page_protection_change_mutex_;

|
||||
// Returns an indication of whether a pointer is in a space that has
|
||||
// been allocated by this MemoryAllocator.
|
||||
V8_INLINE bool IsOutsideAllocatedSpace(Address address) {
|
||||
return address < lowest_ever_allocated_ ||
|
||||
address >= highest_ever_allocated_;
|
||||
return address < lowest_ever_allocated_.Value() ||
|
||||
address >= highest_ever_allocated_.Value();
|
||||
}
|
||||
|
||||
// Returns a MemoryChunk in which the memory region from commit_area_size to
|
||||
@ -1459,13 +1455,11 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
|
||||
// values only if they did not change in between.
|
||||
Address ptr = kNullAddress;
|
||||
do {
|
||||
ptr = lowest_ever_allocated_;
|
||||
} while ((low < ptr) &&
|
||||
!lowest_ever_allocated_.compare_exchange_weak(ptr, low));
|
||||
ptr = lowest_ever_allocated_.Value();
|
||||
} while ((low < ptr) && !lowest_ever_allocated_.TrySetValue(ptr, low));
|
||||
do {
|
||||
ptr = highest_ever_allocated_;
|
||||
} while ((high > ptr) &&
|
||||
!highest_ever_allocated_.compare_exchange_weak(ptr, high));
|
||||
ptr = highest_ever_allocated_.Value();
|
||||
} while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high));
|
||||
}
|
||||
|
||||
void RegisterExecutableMemoryChunk(MemoryChunk* chunk) {
|
||||
@ -1496,8 +1490,8 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
|
||||
// conservative, i.e. not all addresses in 'allocated' space are allocated
|
||||
// to our heap. The range is [lowest, highest[, inclusive on the low end
|
||||
// and exclusive on the high end.
|
||||
std::atomic<Address> lowest_ever_allocated_;
|
||||
std::atomic<Address> highest_ever_allocated_;
|
||||
base::AtomicValue<Address> lowest_ever_allocated_;
|
||||
base::AtomicValue<Address> highest_ever_allocated_;
|
||||
|
||||
VirtualMemory last_chunk_;
|
||||
Unmapper unmapper_;
|
||||
@ -2689,11 +2683,11 @@ class NewSpace : public SpaceWithLinearArea {
|
||||
void ResetOriginalTop() {
|
||||
DCHECK_GE(top(), original_top());
|
||||
DCHECK_LE(top(), original_limit());
|
||||
original_top_ = top();
|
||||
original_top_.SetValue(top());
|
||||
}
|
||||
|
||||
Address original_top() { return original_top_; }
|
||||
Address original_limit() { return original_limit_; }
|
||||
Address original_top() { return original_top_.Value(); }
|
||||
Address original_limit() { return original_limit_.Value(); }
|
||||
|
||||
// Return the address of the first allocatable address in the active
|
||||
// semispace. This may be the address where the first object resides.
|
||||
@ -2782,8 +2776,8 @@ class NewSpace : public SpaceWithLinearArea {
|
||||
|
||||
// The top and the limit at the time of setting the linear allocation area.
|
||||
// These values can be accessed by background tasks.
|
||||
std::atomic<Address> original_top_;
|
||||
std::atomic<Address> original_limit_;
|
||||
base::AtomicValue<Address> original_top_;
|
||||
base::AtomicValue<Address> original_limit_;
|
||||
|
||||
// The semispaces.
|
||||
SemiSpace to_space_;
|
||||
|
@@ -17,7 +17,7 @@ namespace internal {

 Sweeper::PauseOrCompleteScope::PauseOrCompleteScope(Sweeper* sweeper)
     : sweeper_(sweeper) {
-  sweeper_->stop_sweeper_tasks_ = true;
+  sweeper_->stop_sweeper_tasks_.SetValue(true);
   if (!sweeper_->sweeping_in_progress()) return;

   sweeper_->AbortAndWaitForTasks();
@@ -34,7 +34,7 @@ Sweeper::PauseOrCompleteScope::PauseOrCompleteScope(Sweeper* sweeper)
 }

 Sweeper::PauseOrCompleteScope::~PauseOrCompleteScope() {
-  sweeper_->stop_sweeper_tasks_ = false;
+  sweeper_->stop_sweeper_tasks_.SetValue(false);
   if (!sweeper_->sweeping_in_progress()) return;

   sweeper_->StartSweeperTasks();
@@ -133,7 +133,7 @@ class Sweeper::IncrementalSweeperTask final : public CancelableTask {
 };

 void Sweeper::StartSweeping() {
-  CHECK(!stop_sweeper_tasks_);
+  CHECK(!stop_sweeper_tasks_.Value());
   sweeping_in_progress_ = true;
   iterability_in_progress_ = true;
   MajorNonAtomicMarkingState* marking_state =
@@ -366,14 +366,14 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
     // The allocated_bytes() counter is precisely the total size of objects.
     DCHECK_EQ(live_bytes, p->allocated_bytes());
   }
-  p->set_concurrent_sweeping_state(Page::kSweepingDone);
+  p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
   if (free_list_mode == IGNORE_FREE_LIST) return 0;
   return static_cast<int>(FreeList::GuaranteedAllocatable(max_freed_bytes));
 }

 void Sweeper::SweepSpaceFromTask(AllocationSpace identity) {
   Page* page = nullptr;
-  while (!stop_sweeper_tasks_ &&
+  while (!stop_sweeper_tasks_.Value() &&
          ((page = GetSweepingPageSafe(identity)) != nullptr)) {
     ParallelSweepPage(page, identity);
   }
@@ -419,8 +419,9 @@ int Sweeper::ParallelSweepPage(Page* page, AllocationSpace identity) {
     // the page protection mode from rx -> rw while sweeping.
     CodePageMemoryModificationScope code_page_scope(page);

-    DCHECK_EQ(Page::kSweepingPending, page->concurrent_sweeping_state());
-    page->set_concurrent_sweeping_state(Page::kSweepingInProgress);
+    DCHECK_EQ(Page::kSweepingPending,
+              page->concurrent_sweeping_state().Value());
+    page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
     const FreeSpaceTreatmentMode free_space_mode =
         Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
     max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
@@ -466,17 +467,17 @@ void Sweeper::AddPage(AllocationSpace space, Page* page,
     // happened when the page was initially added, so it is skipped here.
     DCHECK_EQ(Sweeper::READD_TEMPORARY_REMOVED_PAGE, mode);
   }
-  DCHECK_EQ(Page::kSweepingPending, page->concurrent_sweeping_state());
+  DCHECK_EQ(Page::kSweepingPending, page->concurrent_sweeping_state().Value());
   sweeping_list_[GetSweepSpaceIndex(space)].push_back(page);
 }

 void Sweeper::PrepareToBeSweptPage(AllocationSpace space, Page* page) {
   DCHECK_GE(page->area_size(),
             static_cast<size_t>(marking_state_->live_bytes(page)));
-  DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state());
+  DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state().Value());
   page->ForAllFreeListCategories(
       [](FreeListCategory* category) { DCHECK(!category->is_linked()); });
-  page->set_concurrent_sweeping_state(Page::kSweepingPending);
+  page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
   heap_->paged_space(space)->IncreaseAllocatedBytes(
       marking_state_->live_bytes(page), page);
 }
@@ -568,10 +569,10 @@ void Sweeper::AddPageForIterability(Page* page) {
   DCHECK(iterability_in_progress_);
   DCHECK(!iterability_task_started_);
   DCHECK(IsValidIterabilitySpace(page->owner()->identity()));
-  DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state());
+  DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state().Value());

   iterability_list_.push_back(page);
-  page->set_concurrent_sweeping_state(Page::kSweepingPending);
+  page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
 }

 void Sweeper::MakeIterable(Page* page) {
@@ -188,7 +188,7 @@ class Sweeper {
   // the semaphore for maintaining a task counter on the main thread.
   std::atomic<intptr_t> num_sweeping_tasks_;
   // Used by PauseOrCompleteScope to signal early bailout to tasks.
-  std::atomic<bool> stop_sweeper_tasks_;
+  base::AtomicValue<bool> stop_sweeper_tasks_;

   // Pages that are only made iterable but have their free lists ignored.
   IterabilityList iterability_list_;