[heap] Replace ConcurrentSweepingState with a MemoryChunk local epoch counter.

Bug: v8:9093
Change-Id: I7c415fd0ea9e48f7ee189115f164825cb120695b
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1624213
Commit-Queue: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#62423}
Hannes Payer, 2019-06-27 14:49:53 +02:00 (committed by Commit Bot)
parent 12b45fdf00
commit 907f3a64b7
12 changed files with 78 additions and 65 deletions
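
For orientation before the per-file diffs: the CL drops MemoryChunk's explicit ConcurrentSweepingState enum (kSweepingDone / kSweepingPending / kSweepingInProgress) and instead has each sweepable chunk remember the mark-compact epoch at which it was last swept; SweepingDone() becomes a comparison of that per-chunk counter against the collector's epoch_, which is now bumped when sweeping starts. The snippet below is a minimal, self-contained model of that protocol, not the real V8 classes: it is single-threaded, ignores the atomics and the non-sweepable-space special case, and the Collector/Chunk names are stand-ins introduced only for illustration.

#include <cassert>
#include <cstdint>

// Toy stand-in for MarkCompactCollector: only the epoch counter.
struct Collector {
  uintptr_t epoch = 0;
  // In the CL, epoch_ is incremented when sweeping starts
  // (StartSweepSpaces), which instantly makes every sweepable
  // old-generation page "not swept" for the new cycle.
  void StartSweepSpaces() { ++epoch; }
};

// Toy stand-in for MemoryChunk/Page: only the sweeping-state bookkeeping.
struct Chunk {
  Collector* collector;
  uintptr_t mark_compact_epoch = 0;

  explicit Chunk(Collector* c) : collector(c) { InitializeSweepingState(); }

  // Fresh page, or a page promoted by the minor collector: it starts out
  // swept for the current cycle and only joins sweeping in the next one.
  void InitializeSweepingState() { mark_compact_epoch = collector->epoch; }

  // Page that still has to be swept in the current cycle, e.g. a new-space
  // page moved into the old space during full-MC evacuation.
  void MarkUnswept() { mark_compact_epoch = collector->epoch - 1; }

  // Called by the sweeper once it is finished with the page.
  void MarkSwept() { ++mark_compact_epoch; }

  // A chunk is swept iff its epoch has caught up with the collector's.
  bool SweepingDone() const { return mark_compact_epoch == collector->epoch; }
};

int main() {
  Collector mc;
  Chunk page(&mc);
  assert(page.SweepingDone());   // freshly initialized page

  mc.StartSweepSpaces();         // new GC cycle: page becomes unswept
  assert(!page.SweepingDone());

  page.MarkSwept();              // sweeper finished the page
  assert(page.SweepingDone());
  return 0;
}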


@@ -102,7 +102,8 @@ bool ArrayBufferTracker::ProcessBuffers(Page* page, ProcessingMode mode) {
LocalArrayBufferTracker* tracker = page->local_tracker();
if (tracker == nullptr) return true;
DCHECK(page->SweepingDone());
DCHECK_IMPLIES(Sweeper::IsValidSweepingSpace(page->owner()->identity()),
!page->SweepingDone());
tracker->Process([mode](JSArrayBuffer old_buffer, JSArrayBuffer* new_buffer) {
MapWord map_word = old_buffer.map_word();
if (map_word.IsForwardingAddress()) {


@@ -83,7 +83,7 @@ class ConcurrentMarkingVisitor final
ConcurrentMarking::MarkingWorklist* shared,
MemoryChunkDataMap* memory_chunk_data, WeakObjects* weak_objects,
ConcurrentMarking::EmbedderTracingWorklist* embedder_objects, int task_id,
bool embedder_tracing_enabled, unsigned mark_compact_epoch,
bool embedder_tracing_enabled, uintptr_t mark_compact_epoch,
bool is_forced_gc)
: shared_(shared, task_id),
weak_objects_(weak_objects),
@@ -673,7 +673,7 @@ class ConcurrentMarkingVisitor final
int task_id_;
SlotSnapshot slot_snapshot_;
bool embedder_tracing_enabled_;
const unsigned mark_compact_epoch_;
const uintptr_t mark_compact_epoch_;
bool is_forced_gc_;
BytecodeFlushMode bytecode_flush_mode_;
};


@@ -105,7 +105,7 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
std::atomic<bool> preemption_request;
MemoryChunkDataMap memory_chunk_data;
size_t marked_bytes = 0;
unsigned mark_compact_epoch;
uintptr_t mark_compact_epoch;
bool is_forced_gc;
char cache_line_padding[64];
};


@@ -1933,8 +1933,6 @@ void MarkCompactCollector::MarkLiveObjects() {
if (was_marked_incrementally_) {
heap()->incremental_marking()->Deactivate();
}
epoch_++;
}
void MarkCompactCollector::ClearNonLiveReferences() {
@@ -2778,7 +2776,6 @@ class Evacuator : public Malloced {
void Evacuator::EvacuatePage(MemoryChunk* chunk) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "Evacuator::EvacuatePage");
DCHECK(chunk->SweepingDone());
intptr_t saved_live_bytes = 0;
double evacuation_time = 0.0;
{
@@ -3005,6 +3002,7 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
DCHECK_EQ(heap()->old_space(), page->owner());
// The move added page->allocated_bytes to the old space, but we are
// going to sweep the page and add page->live_byte_count.
page->MarkUnswept();
heap()->old_space()->DecreaseAllocatedBytes(page->allocated_bytes(),
page);
} else {
@@ -3757,7 +3755,7 @@ void MarkCompactCollector::PostProcessEvacuationCandidates() {
aborted_pages_verified++;
} else {
DCHECK(p->IsEvacuationCandidate());
DCHECK(p->SweepingDone());
DCHECK(!p->SweepingDone());
p->owner()->memory_chunk_list().Remove(p);
}
}
@@ -3773,7 +3771,7 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() {
if (!p->IsEvacuationCandidate()) continue;
PagedSpace* space = static_cast<PagedSpace*>(p->owner());
non_atomic_marking_state()->SetLiveBytes(p, 0);
CHECK(p->SweepingDone());
CHECK(!p->SweepingDone());
space->ReleasePage(p);
}
old_space_evacuation_pages_.clear();
@@ -3789,7 +3787,7 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
// Loop needs to support deletion if live bytes == 0 for a page.
for (auto it = space->begin(); it != space->end();) {
Page* p = *(it++);
DCHECK(p->SweepingDone());
DCHECK(!p->SweepingDone());
if (p->IsEvacuationCandidate()) {
// Will be processed in Evacuate.
@@ -3824,6 +3822,11 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
void MarkCompactCollector::StartSweepSpaces() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
// We increment the epoch when sweeping starts. That will make all sweepable
// pages of the old generation not swept.
epoch_++;
#ifdef DEBUG
state_ = SWEEP_SPACES;
#endif
@@ -4855,7 +4858,11 @@ void MinorMarkCompactCollector::EvacuatePagesInParallel() {
live_bytes += live_bytes_on_page;
if (ShouldMovePage(page, live_bytes_on_page)) {
if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
// A page is promoted to the old generation. We have to give it the
// current MC epoch counter. It will take part in the sweeping phase
// of the next MC.
EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
page->InitializeSweepingState();
} else {
EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
}
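
The mark-compact.cc hunks above are the collector side of the protocol: epoch_++ moves out of MarkLiveObjects and into StartSweepSpaces; a new-space page moved into old space during full-MC evacuation is marked unswept so the sweeper picks it up in this cycle; a page promoted by the minor collector is initialized to the current epoch and only takes part in the next full MC's sweep. A hypothetical trace, continuing the toy Collector/Chunk model from the note after the commit header (a sketch only, not V8 code):

// One full mark-compact cycle in the toy model above (append to that sketch);
// the comments name the real call sites from the hunks in this file.
void OneMarkCompactCycle(Collector& mc, Chunk& old_page) {
  assert(old_page.SweepingDone());     // swept in the previous cycle

  // MarkLiveObjects(): the epoch_++ that used to sit here is gone.
  mc.StartSweepSpaces();               // epoch_++: old_page is now unswept
  assert(!old_page.SweepingDone());

  Chunk moved_page(&mc);               // EvacuatePagesInParallel(), full MC:
  moved_page.MarkUnswept();            // a page moved from new to old space
  assert(!moved_page.SweepingDone());  // still has to be swept in *this* cycle

  old_page.MarkSwept();                // Sweeper::RawSweep() finishes both pages
  moved_page.MarkSwept();
  assert(old_page.SweepingDone() && moved_page.SweepingDone());
}
// A page promoted by MinorMarkCompactCollector instead gets
// InitializeSweepingState(): it counts as swept for the current epoch and only
// takes part in the sweeping phase of the next full mark-compact.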


@@ -709,7 +709,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void VerifyMarkbitsAreClean(LargeObjectSpace* space);
#endif
unsigned epoch() const { return epoch_; }
uintptr_t epoch() const { return epoch_; }
explicit MarkCompactCollector(Heap* heap);
~MarkCompactCollector() override;
@@ -909,12 +909,17 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
MarkingState marking_state_;
NonAtomicMarkingState non_atomic_marking_state_;
// Counts the number of major mark-compact collections. The counter is
// Counts the number of major garbage collections. The counter is
// incremented right after marking. This is used for:
// - marking descriptor arrays. See NumberOfMarkedDescriptors. Only the lower
// two bits are used, so it is okay if this counter overflows and wraps
// around.
unsigned epoch_ = 0;
// - identifying if a MemoryChunk is swept. When sweeping starts the epoch
// counter is incremented. When sweeping of a MemoryChunk finishes the
// epoch counter of a MemoryChunk is incremented. A MemoryChunk is swept
// if both counters match. A MemoryChunk still requires sweeping if
// they don't match.
uintptr_t epoch_ = 0;
friend class FullEvacuator;
friend class RecordMigratedSlotVisitor;
@@ -1008,7 +1013,7 @@ class MarkingVisitor final
Heap* const heap_;
MarkCompactCollector* const collector_;
MarkingState* const marking_state_;
const unsigned mark_compact_epoch_;
const uintptr_t mark_compact_epoch_;
};
class EvacuationScope {


@@ -711,7 +711,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->invalidated_slots_ = nullptr;
chunk->progress_bar_ = 0;
chunk->high_water_mark_ = static_cast<intptr_t>(area_start - base);
chunk->set_concurrent_sweeping_state(kSweepingDone);
chunk->InitializeSweepingState();
chunk->page_protection_change_mutex_ = new base::Mutex();
chunk->write_unprotect_counter_ = 0;
chunk->mutex_ = new base::Mutex();
@@ -1023,6 +1023,24 @@ void MemoryChunk::SetYoungGenerationPageFlags(bool is_marking) {
}
}
bool MemoryChunk::SweepingDone() {
return !Sweeper::IsValidSweepingSpace(owner()->identity()) ||
heap_->mark_compact_collector()->epoch() == mark_compact_epoch_;
}
void MemoryChunk::MarkUnswept() {
DCHECK(Sweeper::IsValidSweepingSpace(owner()->identity()));
mark_compact_epoch_ = heap_->mark_compact_collector()->epoch() - 1;
}
void MemoryChunk::InitializeSweepingState() {
if (Sweeper::IsValidSweepingSpace(owner()->identity())) {
mark_compact_epoch_ = heap_->mark_compact_collector()->epoch();
} else {
mark_compact_epoch_ = 0;
}
}
void Page::ResetAllocationStatistics() {
allocated_bytes_ = area_size();
wasted_memory_ = 0;
@@ -1740,7 +1758,6 @@ Page* PagedSpace::RemovePageSafe(int size_in_bytes) {
}
size_t PagedSpace::AddPage(Page* page) {
CHECK(page->SweepingDone());
page->set_owner(this);
memory_chunk_list_.PushBack(page);
AccountCommitted(page->size());
@@ -2123,8 +2140,8 @@ void PagedSpace::VerifyLiveBytes() {
IncrementalMarking::MarkingState* marking_state =
heap()->incremental_marking()->marking_state();
for (Page* page : *this) {
CHECK(page->SweepingDone());
PagedSpaceObjectIterator it(page);
DCHECK(page->SweepingDone());
int black_size = 0;
for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
// All the interior pointers should be contained in the heap.


@@ -367,17 +367,6 @@ class MemoryChunk {
static const Flags kSkipEvacuationSlotsRecordingMask =
kEvacuationCandidateMask | kIsInYoungGenerationMask;
// |kSweepingDone|: The page state when sweeping is complete or sweeping must
// not be performed on that page. Sweeper threads that are done with their
// work will set this value and not touch the page anymore.
// |kSweepingPending|: This page is ready for parallel sweeping.
// |kSweepingInProgress|: This page is currently swept by a sweeper thread.
enum ConcurrentSweepingState {
kSweepingDone,
kSweepingPending,
kSweepingInProgress,
};
static const intptr_t kAlignment =
(static_cast<uintptr_t>(1) << kPageSizeBits);
@@ -412,8 +401,7 @@ class MemoryChunk {
+ kSystemPointerSize // InvalidatedSlots* invalidated_slots_
+ kSystemPointerSize // std::atomic<intptr_t> high_water_mark_
+ kSystemPointerSize // base::Mutex* mutex_
+ kSystemPointerSize // std::atomic<ConcurrentSweepingState>
// concurrent_sweeping_
+ kUIntptrSize // std::atomic<uintptr_t> mark_compact_epoch_
+ kSystemPointerSize // base::Mutex* page_protection_change_mutex_
+ kSystemPointerSize // unitptr_t write_unprotect_counter_
+ kSizetSize * ExternalBackingStoreType::kNumTypes
@@ -487,15 +475,10 @@ class MemoryChunk {
return addr >= area_start() && addr <= area_end();
}
void set_concurrent_sweeping_state(ConcurrentSweepingState state) {
concurrent_sweeping_ = state;
}
ConcurrentSweepingState concurrent_sweeping_state() {
return static_cast<ConcurrentSweepingState>(concurrent_sweeping_.load());
}
bool SweepingDone() { return concurrent_sweeping_ == kSweepingDone; }
V8_EXPORT_PRIVATE void InitializeSweepingState();
void MarkSwept() { mark_compact_epoch_++; }
V8_EXPORT_PRIVATE void MarkUnswept();
V8_EXPORT_PRIVATE bool SweepingDone();
size_t size() const { return size_; }
void set_size(size_t size) { size_ = size; }
@@ -793,7 +776,7 @@ class MemoryChunk {
base::Mutex* mutex_;
std::atomic<intptr_t> concurrent_sweeping_;
std::atomic<uintptr_t> mark_compact_epoch_;
base::Mutex* page_protection_change_mutex_;
@@ -1150,7 +1133,6 @@ class V8_EXPORT_PRIVATE Space : public Malloced {
// Tracks off-heap memory used by this space.
std::atomic<size_t>* external_backing_store_bytes_;
private:
static const intptr_t kIdOffset = 9 * kSystemPointerSize;
bool allocation_observers_paused_;
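
A small aside on the MemoryChunk header bookkeeping above: the size table swaps the kSystemPointerSize entry for the old std::atomic<ConcurrentSweepingState> concurrent_sweeping_ against a kUIntptrSize entry for std::atomic<uintptr_t> mark_compact_epoch_. The check below is illustrative only (it is not part of the CL) and assumes, as holds on the platforms V8 targets, that a lock-free std::atomic of a word-sized integer is itself word-sized:

#include <atomic>
#include <cstdint>

// Illustrative sanity check for the header-size accounting in spaces.h: the
// atomic epoch counter should occupy exactly one word, just like the atomic
// sweeping-state field it replaces.
static_assert(sizeof(std::atomic<uintptr_t>) == sizeof(uintptr_t),
              "mark_compact_epoch_ is expected to be word-sized");
static_assert(sizeof(std::atomic<intptr_t>) == sizeof(intptr_t),
              "the replaced concurrent_sweeping_ field was word-sized too");

int main() { return 0; }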


@@ -247,7 +247,8 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
DCHECK_NOT_NULL(space);
DCHECK(free_list_mode == IGNORE_FREE_LIST || space->identity() == OLD_SPACE ||
space->identity() == CODE_SPACE || space->identity() == MAP_SPACE);
DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
DCHECK_IMPLIES(free_list_mode == REBUILD_FREE_LIST,
!p->IsEvacuationCandidate() && !p->SweepingDone());
CodeObjectRegistry* code_object_registry = p->GetCodeObjectRegistry();
@@ -367,9 +368,13 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
// The allocated_bytes() counter is precisely the total size of objects.
DCHECK_EQ(live_bytes, p->allocated_bytes());
}
p->set_concurrent_sweeping_state(Page::kSweepingDone);
if (code_object_registry) code_object_registry->Finalize();
if (free_list_mode == IGNORE_FREE_LIST) return 0;
p->MarkSwept();
DCHECK(p->SweepingDone());
return static_cast<int>(FreeList::GuaranteedAllocatable(max_freed_bytes));
}
@@ -424,12 +429,9 @@ int Sweeper::ParallelSweepPage(Page* page, AllocationSpace identity) {
// the page protection mode from rx -> rw while sweeping.
CodePageMemoryModificationScope code_page_scope(page);
DCHECK_EQ(Page::kSweepingPending, page->concurrent_sweeping_state());
page->set_concurrent_sweeping_state(Page::kSweepingInProgress);
const FreeSpaceTreatmentMode free_space_mode =
Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
DCHECK(page->SweepingDone());
// After finishing sweeping of a page we clean up its remembered set.
TypedSlotSet* typed_slot_set = page->typed_slot_set<OLD_TO_NEW>();
@@ -472,17 +474,17 @@ void Sweeper::AddPage(AllocationSpace space, Page* page,
// happened when the page was initially added, so it is skipped here.
DCHECK_EQ(Sweeper::READD_TEMPORARY_REMOVED_PAGE, mode);
}
DCHECK_EQ(Page::kSweepingPending, page->concurrent_sweeping_state());
DCHECK(!page->SweepingDone());
sweeping_list_[GetSweepSpaceIndex(space)].push_back(page);
}
void Sweeper::PrepareToBeSweptPage(AllocationSpace space, Page* page) {
DCHECK_GE(page->area_size(),
static_cast<size_t>(marking_state_->live_bytes(page)));
DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state());
DCHECK(!page->SweepingDone());
page->ForAllFreeListCategories(
[](FreeListCategory* category) { DCHECK(!category->is_linked()); });
page->set_concurrent_sweeping_state(Page::kSweepingPending);
heap_->paged_space(space)->IncreaseAllocatedBytes(
marking_state_->live_bytes(page), page);
}
@@ -574,10 +576,8 @@ void Sweeper::AddPageForIterability(Page* page) {
DCHECK(iterability_in_progress_);
DCHECK(!iterability_task_started_);
DCHECK(IsValidIterabilitySpace(page->owner_identity()));
DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state());
iterability_list_.push_back(page);
page->set_concurrent_sweeping_state(Page::kSweepingPending);
}
void Sweeper::MakeIterable(Page* page) {
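
On the sweeper side, the explicit three-state machine (kSweepingPending → kSweepingInProgress → kSweepingDone) disappears in the hunks above: AddPage and PrepareToBeSweptPage no longer tag pages as pending, ParallelSweepPage no longer flips them to in-progress, and RawSweep simply calls MarkSwept() once it is done. Roughly, in the toy model from the first note (a hypothetical sketch, not the real Sweeper):

// Sweeper side of the toy model above: the only page-state transition left in
// Sweeper::RawSweep() is the final epoch bump, replacing the old
// set_concurrent_sweeping_state(kSweepingDone).
int RawSweepTail(Chunk* page, int max_freed_bytes) {
  // ... free dead objects and rebuild the free list ...
  page->MarkSwept();
  assert(page->SweepingDone());
  return max_freed_bytes;
}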


@@ -77,6 +77,11 @@ class Sweeper {
};
enum AddPageMode { REGULAR, READD_TEMPORARY_REMOVED_PAGE };
static bool IsValidSweepingSpace(AllocationSpace space) {
return space >= FIRST_GROWABLE_PAGED_SPACE &&
space <= LAST_GROWABLE_PAGED_SPACE;
}
Sweeper(Heap* heap, MajorNonAtomicMarkingState* marking_state);
bool sweeping_in_progress() const { return sweeping_in_progress_; }
@@ -153,11 +158,6 @@
return space == NEW_SPACE || space == RO_SPACE;
}
static bool IsValidSweepingSpace(AllocationSpace space) {
return space >= FIRST_GROWABLE_PAGED_SPACE &&
space <= LAST_GROWABLE_PAGED_SPACE;
}
static int GetSweepSpaceIndex(AllocationSpace space) {
DCHECK(IsValidSweepingSpace(space));
return space - FIRST_GROWABLE_PAGED_SPACE;


@@ -148,7 +148,7 @@ class DescriptorArray : public HeapObject {
// Atomic compare-and-swap operation on the raw_number_of_marked_descriptors.
int16_t CompareAndSwapRawNumberOfMarkedDescriptors(int16_t expected,
int16_t value);
int16_t UpdateNumberOfMarkedDescriptors(unsigned mark_compact_epoch,
int16_t UpdateNumberOfMarkedDescriptors(uintptr_t mark_compact_epoch,
int16_t number_of_marked_descriptors);
static constexpr int SizeFor(int number_of_all_descriptors) {
@@ -240,11 +240,12 @@ class NumberOfMarkedDescriptors {
static const int kMaxNumberOfMarkedDescriptors = Marked::kMax;
// Decodes the raw value of the number of marked descriptors for the
// given mark compact garbage collection epoch.
static inline int16_t decode(unsigned mark_compact_epoch, int16_t raw_value) {
static inline int16_t decode(uintptr_t mark_compact_epoch,
int16_t raw_value) {
unsigned epoch_from_value = Epoch::decode(static_cast<uint16_t>(raw_value));
int16_t marked_from_value =
Marked::decode(static_cast<uint16_t>(raw_value));
unsigned actual_epoch = mark_compact_epoch & Epoch::kMask;
uintptr_t actual_epoch = mark_compact_epoch & Epoch::kMask;
if (actual_epoch == epoch_from_value) return marked_from_value;
// If the epochs do not match, then either the raw_value is zero (freshly
// allocated descriptor array) or the epoch from value lags by 1.
@@ -257,7 +258,7 @@
// Encodes the number of marked descriptors for the given mark compact
// garbage collection epoch.
static inline int16_t encode(unsigned mark_compact_epoch, int16_t value) {
static inline int16_t encode(uintptr_t mark_compact_epoch, int16_t value) {
// TODO(ulan): avoid casting to int16_t by adding support for uint16_t
// atomics.
return static_cast<int16_t>(
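
The descriptor-array.h change above is purely a type widening (unsigned → uintptr_t) for the epoch parameter, but the decode logic it touches is the other consumer of the epoch counter and is easy to misread. A worked example follows, with a hypothetical bit layout chosen only for illustration (low two bits hold the epoch tag, the remaining bits the count; the real positions are defined by the Marked and Epoch bit fields in descriptor-array.h):

#include <cstdint>
#include <cstdio>

// Illustrative re-implementation of NumberOfMarkedDescriptors::decode with an
// assumed layout: low 2 bits store the epoch tag, the rest store the count.
constexpr uint16_t kEpochMask = 0x3;

int16_t Decode(uintptr_t mark_compact_epoch, int16_t raw_value) {
  uint16_t raw = static_cast<uint16_t>(raw_value);
  uintptr_t epoch_from_value = raw & kEpochMask;
  int16_t marked_from_value = static_cast<int16_t>(raw >> 2);
  uintptr_t actual_epoch = mark_compact_epoch & kEpochMask;
  if (actual_epoch == epoch_from_value) return marked_from_value;
  // Epoch mismatch: the stored value is either 0 (freshly allocated array) or
  // left over from the previous GC cycle, so no descriptors count as marked.
  return 0;
}

int main() {
  // Collector epoch 6 tags values with 6 & 3 == 2. A count of 5 recorded under
  // epoch tag 2 is current; the same count under tag 1 is stale and reads as 0.
  std::printf("%d\n", Decode(6, static_cast<int16_t>((5 << 2) | 2)));  // prints 5
  std::printf("%d\n", Decode(6, static_cast<int16_t>((5 << 2) | 1)));  // prints 0
  return 0;
}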


@@ -4248,7 +4248,7 @@ void DescriptorArray::Sort() {
}
int16_t DescriptorArray::UpdateNumberOfMarkedDescriptors(
unsigned mark_compact_epoch, int16_t new_marked) {
uintptr_t mark_compact_epoch, int16_t new_marked) {
STATIC_ASSERT(kMaxNumberOfDescriptors <=
NumberOfMarkedDescriptors::kMaxNumberOfMarkedDescriptors);
int16_t old_raw_marked = raw_number_of_marked_descriptors();


@@ -6278,16 +6278,16 @@ HEAP_TEST(MarkCompactEpochCounter) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Heap* heap = CcTest::heap();
unsigned epoch0 = heap->mark_compact_collector()->epoch();
uintptr_t epoch0 = heap->mark_compact_collector()->epoch();
CcTest::CollectGarbage(OLD_SPACE);
unsigned epoch1 = heap->mark_compact_collector()->epoch();
uintptr_t epoch1 = heap->mark_compact_collector()->epoch();
CHECK_EQ(epoch0 + 1, epoch1);
heap::SimulateIncrementalMarking(heap, true);
CcTest::CollectGarbage(OLD_SPACE);
unsigned epoch2 = heap->mark_compact_collector()->epoch();
uintptr_t epoch2 = heap->mark_compact_collector()->epoch();
CHECK_EQ(epoch1 + 1, epoch2);
CcTest::CollectGarbage(NEW_SPACE);
unsigned epoch3 = heap->mark_compact_collector()->epoch();
uintptr_t epoch3 = heap->mark_compact_collector()->epoch();
CHECK_EQ(epoch2, epoch3);
}