[heap] Slots are either in sweeping or old-to-new RS after Full GC

This CL ensures that recorded slots are either in the sweeping or the
old-to-new remembered set after mark-compact, depending on whether the
page was already swept or not. All pages that are swept during the
evacuation phase also have their remembered sets merged.

is_local() is renamed to is_compaction_space() and made non-virtual.
The PagedSpace now knows not only whether it is a compaction space, but
also for which collection, through the compaction_space_kind_ field.
This allows RefillFreeList to merge remembered sets immediately for the
mark-compact collection as well.

Change-Id: I7457f8393d73f3e8d6b6ebedc46ebc36af509729
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1868613
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#64458}
Commit: e79d34ee36 (parent: e8a514a092)
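The invariant this CL establishes can be pictured with a small standalone model. This is a hedged sketch, not V8 code: PageModel, RecordSlot, and the member names below are hypothetical stand-ins; only the decision rule mirrors the CL — an unswept page records slots into a separate sweeping set, a swept page records directly into OLD_TO_NEW, and merging folds the sweeping set into OLD_TO_NEW once sweeping finishes. In the actual diff below this role is played by MemoryChunk's sweeping_slot_set, RememberedSet<OLD_TO_NEW>, and Page::MergeOldToNewRememberedSets.

#include <cassert>
#include <cstdint>
#include <set>

// Illustrative stand-in for a heap page; not the real MemoryChunk/Page types.
struct PageModel {
  bool sweeping_done = false;          // has the sweeper finished this page?
  std::set<uintptr_t> old_to_new;      // models the OLD_TO_NEW remembered set
  std::set<uintptr_t> sweeping_slots;  // models the sweeping remembered set

  // Invariant after this CL: a recorded slot lands in exactly one of the two
  // sets, chosen by the page's sweeping state.
  void RecordSlot(uintptr_t slot) {
    if (sweeping_done) {
      assert(sweeping_slots.empty());  // swept pages only use OLD_TO_NEW
      old_to_new.insert(slot);
    } else {
      sweeping_slots.insert(slot);
    }
  }

  // Conceptual counterpart of Page::MergeOldToNewRememberedSets: once a page
  // is swept, its sweeping slots are folded into OLD_TO_NEW.
  void MergeOldToNewRememberedSets() {
    old_to_new.insert(sweeping_slots.begin(), sweeping_slots.end());
    sweeping_slots.clear();
    sweeping_done = true;
  }
};

int main() {
  PageModel unswept;
  PageModel swept;
  swept.MergeOldToNewRememberedSets();  // treat this page as already swept

  unswept.RecordSlot(0x1000);  // goes into the sweeping set
  swept.RecordSlot(0x2000);    // goes straight into OLD_TO_NEW

  assert(unswept.sweeping_slots.count(0x1000) == 1);
  assert(unswept.old_to_new.empty());
  assert(swept.old_to_new.count(0x2000) == 1);
  return 0;
}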
@@ -758,6 +758,13 @@ enum MinimumCapacity {
 
 enum GarbageCollector { SCAVENGER, MARK_COMPACTOR, MINOR_MARK_COMPACTOR };
 
+enum class CompactionSpaceKind {
+  kNone,
+  kScavenge,
+  kMarkCompact,
+  kMinorMarkCompact,
+};
+
 enum Executability { NOT_EXECUTABLE, EXECUTABLE };
 
 enum VisitMode {
@@ -19,10 +19,10 @@ class LocalAllocator {
   static const int kLabSize = 32 * KB;
   static const int kMaxLabObjectSize = 8 * KB;
 
-  explicit LocalAllocator(Heap* heap)
+  explicit LocalAllocator(Heap* heap, CompactionSpaceKind compaction_space_kind)
       : heap_(heap),
         new_space_(heap->new_space()),
-        compaction_spaces_(heap),
+        compaction_spaces_(heap, compaction_space_kind),
         new_space_lab_(LocalAllocationBuffer::InvalidBuffer()),
         lab_allocation_will_fail_(false) {}
 
@@ -1187,8 +1187,11 @@ class RecordMigratedSlotVisitor : public ObjectVisitor {
       DCHECK_IMPLIES(
           p->IsToPage(),
           p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) || p->IsLargePage());
-      RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(
-          MemoryChunk::FromHeapObject(host), slot);
+
+      MemoryChunk* chunk = MemoryChunk::FromHeapObject(host);
+      DCHECK(chunk->SweepingDone());
+      DCHECK_NULL(chunk->sweeping_slot_set<AccessMode::NON_ATOMIC>());
+      RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(chunk, slot);
     } else if (p->IsEvacuationCandidate()) {
       RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
           MemoryChunk::FromHeapObject(host), slot);
@@ -2748,18 +2751,18 @@ class Evacuator : public Malloced {
   }
 
   Evacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor,
-            bool always_promote_young)
+            LocalAllocator* local_allocator, bool always_promote_young)
       : heap_(heap),
-        local_allocator_(heap_),
         local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
-        new_space_visitor_(heap_, &local_allocator_, record_visitor,
+        new_space_visitor_(heap_, local_allocator, record_visitor,
                            &local_pretenuring_feedback_, always_promote_young),
         new_to_new_page_visitor_(heap_, record_visitor,
                                  &local_pretenuring_feedback_),
         new_to_old_page_visitor_(heap_, record_visitor,
                                  &local_pretenuring_feedback_),
 
-        old_space_visitor_(heap_, &local_allocator_, record_visitor),
+        old_space_visitor_(heap_, local_allocator, record_visitor),
+        local_allocator_(local_allocator),
         duration_(0.0),
         bytes_compacted_(0) {}
 
@@ -2795,8 +2798,6 @@ class Evacuator : public Malloced {
 
   Heap* heap_;
 
-  // Locally cached collector data.
-  LocalAllocator local_allocator_;
   Heap::PretenuringFeedbackMap local_pretenuring_feedback_;
 
   // Visitors for the corresponding spaces.
@@ -2807,6 +2808,9 @@ class Evacuator : public Malloced {
       new_to_old_page_visitor_;
   EvacuateOldSpaceVisitor old_space_visitor_;
 
+  // Locally cached collector data.
+  LocalAllocator* local_allocator_;
+
   // Book keeping info.
   double duration_;
   intptr_t bytes_compacted_;
@@ -2840,7 +2844,7 @@ void Evacuator::EvacuatePage(MemoryChunk* chunk) {
 }
 
 void Evacuator::Finalize() {
-  local_allocator_.Finalize();
+  local_allocator_->Finalize();
   heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
   heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
                                        new_to_old_page_visitor_.moved_bytes());
@@ -2858,9 +2862,10 @@ void Evacuator::Finalize() {
 class FullEvacuator : public Evacuator {
  public:
   explicit FullEvacuator(MarkCompactCollector* collector)
-      : Evacuator(collector->heap(), &record_visitor_,
+      : Evacuator(collector->heap(), &record_visitor_, &local_allocator_,
                   FLAG_always_promote_young_mc),
         record_visitor_(collector, &ephemeron_remembered_set_),
+        local_allocator_(heap_, CompactionSpaceKind::kMarkCompact),
         collector_(collector) {}
 
   GCTracer::BackgroundScope::ScopeId GetBackgroundTracingScope() override {
@@ -2892,6 +2897,7 @@ class FullEvacuator : public Evacuator {
   void RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) override;
   EphemeronRememberedSet ephemeron_remembered_set_;
   RecordMigratedSlotVisitor record_visitor_;
+  LocalAllocator local_allocator_;
 
   MarkCompactCollector* collector_;
 };
@@ -3085,6 +3091,14 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
 
   CreateAndExecuteEvacuationTasks<FullEvacuator>(this, &evacuation_job, nullptr,
                                                  live_bytes);
+
+  // After evacuation there might still be swept pages that weren't
+  // added to one of the compaction space but still reside in the
+  // sweeper's swept_list_. Merge remembered sets for those pages as
+  // well such that after mark-compact all pages either store slots
+  // in the sweeping or old-to-new remembered set.
+  sweeper()->MergeOldToNewRememberedSetsForSweptPages();
+
   PostProcessEvacuationCandidates();
 }
 
@@ -3363,18 +3377,16 @@ class ToSpaceUpdatingItem : public UpdatingItem {
   MarkingState* marking_state_;
 };
 
-template <typename MarkingState>
+template <typename MarkingState, GarbageCollector collector>
 class RememberedSetUpdatingItem : public UpdatingItem {
  public:
   explicit RememberedSetUpdatingItem(Heap* heap, MarkingState* marking_state,
                                      MemoryChunk* chunk,
-                                     RememberedSetUpdatingMode updating_mode,
-                                     bool always_promote_young)
+                                     RememberedSetUpdatingMode updating_mode)
       : heap_(heap),
         marking_state_(marking_state),
         chunk_(chunk),
-        updating_mode_(updating_mode),
-        always_promote_young_(always_promote_young) {}
+        updating_mode_(updating_mode) {}
   ~RememberedSetUpdatingItem() override = default;
 
   void Process() override {
@@ -3440,6 +3452,11 @@ class RememberedSetUpdatingItem : public UpdatingItem {
 
   void UpdateUntypedPointers() {
     if (chunk_->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() != nullptr) {
+      DCHECK_IMPLIES(
+          collector == MARK_COMPACTOR,
+          chunk_->SweepingDone() &&
+              chunk_->sweeping_slot_set<AccessMode::NON_ATOMIC>() == nullptr);
+
       InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(chunk_);
       int slots = RememberedSet<OLD_TO_NEW>::Iterate(
           chunk_,
@@ -3449,7 +3466,9 @@ class RememberedSetUpdatingItem : public UpdatingItem {
           },
           SlotSet::FREE_EMPTY_BUCKETS);
 
-      DCHECK_IMPLIES(always_promote_young_, slots == 0);
+      DCHECK_IMPLIES(
+          collector == MARK_COMPACTOR && FLAG_always_promote_young_mc,
+          slots == 0);
 
       if (slots == 0) {
         chunk_->ReleaseSlotSet<OLD_TO_NEW>();
@@ -3457,6 +3476,13 @@ class RememberedSetUpdatingItem : public UpdatingItem {
     }
 
     if (chunk_->sweeping_slot_set<AccessMode::NON_ATOMIC>()) {
+      DCHECK_IMPLIES(
+          collector == MARK_COMPACTOR,
+          !chunk_->SweepingDone() &&
+              (chunk_->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>()) ==
+                  nullptr);
+      DCHECK(!chunk_->IsLargePage());
+
       InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(chunk_);
       int slots = RememberedSetSweeping::Iterate(
           chunk_,
@@ -3466,7 +3492,9 @@ class RememberedSetUpdatingItem : public UpdatingItem {
           },
           SlotSet::FREE_EMPTY_BUCKETS);
 
-      DCHECK_IMPLIES(always_promote_young_, slots == 0);
+      DCHECK_IMPLIES(
+          collector == MARK_COMPACTOR && FLAG_always_promote_young_mc,
+          slots == 0);
 
       if (slots == 0) {
         chunk_->ReleaseSweepingSlotSet();
@@ -3532,7 +3560,6 @@ class RememberedSetUpdatingItem : public UpdatingItem {
   MarkingState* marking_state_;
   MemoryChunk* chunk_;
   RememberedSetUpdatingMode updating_mode_;
-  bool always_promote_young_;
 };
 
 UpdatingItem* MarkCompactCollector::CreateToSpaceUpdatingItem(
@@ -3543,9 +3570,8 @@ UpdatingItem* MarkCompactCollector::CreateToSpaceUpdatingItem(
 
 UpdatingItem* MarkCompactCollector::CreateRememberedSetUpdatingItem(
     MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
-  return new RememberedSetUpdatingItem<NonAtomicMarkingState>(
-      heap(), non_atomic_marking_state(), chunk, updating_mode,
-      FLAG_always_promote_young_mc);
+  return new RememberedSetUpdatingItem<NonAtomicMarkingState, MARK_COMPACTOR>(
+      heap(), non_atomic_marking_state(), chunk, updating_mode);
 }
 
 // Update array buffers on a page that has been evacuated by copying objects.
@@ -4270,8 +4296,9 @@ class YoungGenerationRecordMigratedSlotVisitor final
       DCHECK_IMPLIES(
           p->IsToPage(),
           p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) || p->IsLargePage());
-      RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(
-          MemoryChunk::FromHeapObject(host), slot);
+      MemoryChunk* chunk = MemoryChunk::FromHeapObject(host);
+      DCHECK(chunk->SweepingDone());
+      RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(chunk, slot);
     } else if (p->IsEvacuationCandidate() && IsLive(host)) {
       RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
           MemoryChunk::FromHeapObject(host), slot);
@@ -4586,8 +4613,9 @@ UpdatingItem* MinorMarkCompactCollector::CreateToSpaceUpdatingItem(
 
 UpdatingItem* MinorMarkCompactCollector::CreateRememberedSetUpdatingItem(
     MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
-  return new RememberedSetUpdatingItem<NonAtomicMarkingState>(
-      heap(), non_atomic_marking_state(), chunk, updating_mode, false);
+  return new RememberedSetUpdatingItem<NonAtomicMarkingState,
+                                       MINOR_MARK_COMPACTOR>(
+      heap(), non_atomic_marking_state(), chunk, updating_mode);
 }
 
 class MarkingItem;
@@ -4891,8 +4919,10 @@ namespace {
 class YoungGenerationEvacuator : public Evacuator {
  public:
   explicit YoungGenerationEvacuator(MinorMarkCompactCollector* collector)
-      : Evacuator(collector->heap(), &record_visitor_, false),
+      : Evacuator(collector->heap(), &record_visitor_, &local_allocator_,
+                  false),
         record_visitor_(collector->heap()->mark_compact_collector()),
+        local_allocator_(heap_, CompactionSpaceKind::kMinorMarkCompact),
         collector_(collector) {}
 
   GCTracer::BackgroundScope::ScopeId GetBackgroundTracingScope() override {
@@ -4907,6 +4937,7 @@ class YoungGenerationEvacuator : public Evacuator {
   void RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) override;
 
   YoungGenerationRecordMigratedSlotVisitor record_visitor_;
+  LocalAllocator local_allocator_;
   MinorMarkCompactCollector* collector_;
 };
 
@@ -402,7 +402,7 @@ Scavenger::Scavenger(ScavengerCollector* collector, Heap* heap, bool is_logging,
       local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
       copied_size_(0),
       promoted_size_(0),
-      allocator_(heap),
+      allocator_(heap, CompactionSpaceKind::kScavenge),
       is_logging_(is_logging),
       is_incremental_marking_(heap->incremental_marking()->IsMarking()),
      is_compacting_(heap->incremental_marking()->IsCompacting()) {}
@@ -480,7 +480,7 @@ AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
   AllocationResult result = AllocateRawUnaligned(size_in_bytes, origin);
 #endif
   HeapObject heap_obj;
-  if (!result.IsRetry() && result.To(&heap_obj) && !is_local()) {
+  if (!result.IsRetry() && result.To(&heap_obj) && !is_compaction_space()) {
     AllocationStep(static_cast<int>(size_in_bytes + bytes_since_last),
                    heap_obj.address(), size_in_bytes);
     StartNextInlineAllocationStep();
@@ -1639,8 +1639,11 @@ intptr_t Space::GetNextInlineAllocationStepSize() {
 }
 
 PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
-                       Executability executable, FreeList* free_list)
-    : SpaceWithLinearArea(heap, space, free_list), executable_(executable) {
+                       Executability executable, FreeList* free_list,
+                       CompactionSpaceKind compaction_space_kind)
+    : SpaceWithLinearArea(heap, space, free_list),
+      executable_(executable),
+      compaction_space_kind_(compaction_space_kind) {
   area_size_ = MemoryChunkLayout::AllocatableMemoryInMemoryChunk(space);
   accounting_stats_.Clear();
 }
@@ -1676,18 +1679,17 @@ void PagedSpace::RefillFreeList() {
         });
       }
 
-      // Also merge old-to-new remembered sets outside of collections.
-      // Do not do this during GC, because of races during scavenges.
-      // One thread might iterate remembered set, while another thread merges
-      // them.
-      if (!is_local()) {
+      // Also merge old-to-new remembered sets if not scavenging because of
+      // data races: One thread might iterate remembered set, while another
+      // thread merges them.
+      if (compaction_space_kind() != CompactionSpaceKind::kScavenge) {
        p->MergeOldToNewRememberedSets();
      }
 
      // Only during compaction pages can actually change ownership. This is
      // safe because there exists no other competing action on the page links
      // during compaction.
-      if (is_local()) {
+      if (is_compaction_space()) {
        DCHECK_NE(this, p->owner());
        PagedSpace* owner = reinterpret_cast<PagedSpace*>(p->owner());
        base::MutexGuard guard(owner->mutex());
@@ -1701,7 +1703,7 @@ void PagedSpace::RefillFreeList() {
         added += RelinkFreeListCategories(p);
       }
       added += p->wasted_memory();
-      if (is_local() && (added > kCompactionMemoryWanted)) break;
+      if (is_compaction_space() && (added > kCompactionMemoryWanted)) break;
     }
   }
 }
@@ -2047,7 +2049,7 @@ bool PagedSpace::RefillLinearAllocationAreaFromFreeList(
   // if it is big enough.
   FreeLinearAllocationArea();
 
-  if (!is_local()) {
+  if (!is_compaction_space()) {
     heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
         heap()->GCFlagsForIncrementalMarking(),
         kGCCallbackScheduleIdleGarbageCollection);
@@ -3741,7 +3743,7 @@ bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes,
   MarkCompactCollector* collector = heap()->mark_compact_collector();
   // Sweeping is still in progress.
   if (collector->sweeping_in_progress()) {
-    if (FLAG_concurrent_sweeping && !is_local() &&
+    if (FLAG_concurrent_sweeping && !is_compaction_space() &&
         !collector->sweeper()->AreSweeperTasksRunning()) {
       collector->EnsureSweepingCompleted();
     }
@@ -3759,8 +3761,9 @@ bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes,
     // final atomic pause.
     Sweeper::FreeSpaceMayContainInvalidatedSlots
         invalidated_slots_in_free_space =
-            is_local() ? Sweeper::FreeSpaceMayContainInvalidatedSlots::kYes
-                       : Sweeper::FreeSpaceMayContainInvalidatedSlots::kNo;
+            is_compaction_space()
+                ? Sweeper::FreeSpaceMayContainInvalidatedSlots::kYes
+                : Sweeper::FreeSpaceMayContainInvalidatedSlots::kNo;
 
     // If sweeping is still in progress try to sweep pages.
     int max_freed = collector->sweeper()->ParallelSweepSpace(
@@ -3774,7 +3777,7 @@ bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes,
     }
   }
 
-  if (is_local()) {
+  if (is_compaction_space()) {
     // The main thread may have acquired all swept pages. Try to steal from
     // it. This can only happen during young generation evacuation.
     PagedSpace* main_space = heap()->paged_space(identity());
@@ -2285,8 +2285,10 @@ class V8_EXPORT_PRIVATE PagedSpace
   static const size_t kCompactionMemoryWanted = 500 * KB;
 
   // Creates a space with an id.
-  PagedSpace(Heap* heap, AllocationSpace id, Executability executable,
-             FreeList* free_list);
+  PagedSpace(
+      Heap* heap, AllocationSpace id, Executability executable,
+      FreeList* free_list,
+      CompactionSpaceKind compaction_space_kind = CompactionSpaceKind::kNone);
 
   ~PagedSpace() override { TearDown(); }
 
@@ -2462,7 +2464,11 @@ class V8_EXPORT_PRIVATE PagedSpace
   // Return size of allocatable area on a page in this space.
   inline int AreaSize() { return static_cast<int>(area_size_); }
 
-  virtual bool is_local() { return false; }
+  bool is_compaction_space() {
+    return compaction_space_kind_ != CompactionSpaceKind::kNone;
+  }
+
+  CompactionSpaceKind compaction_space_kind() { return compaction_space_kind_; }
 
   // Merges {other} into the current space. Note that this modifies {other},
   // e.g., removes its bump pointer area and resets statistics.
@@ -2503,7 +2509,7 @@ class V8_EXPORT_PRIVATE PagedSpace
   void DecreaseLimit(Address new_limit);
   void UpdateInlineAllocationLimit(size_t min_size) override;
   bool SupportsInlineAllocation() override {
-    return identity() == OLD_SPACE && !is_local();
+    return identity() == OLD_SPACE && !is_compaction_space();
   }
 
  protected:
@@ -2560,6 +2566,8 @@ class V8_EXPORT_PRIVATE PagedSpace
 
   Executability executable_;
 
+  CompactionSpaceKind compaction_space_kind_;
+
   size_t area_size_;
 
   // Accounting information for this space.
@@ -3035,10 +3043,12 @@ class V8_EXPORT_PRIVATE PauseAllocationObserversScope {
 
 class V8_EXPORT_PRIVATE CompactionSpace : public PagedSpace {
  public:
-  CompactionSpace(Heap* heap, AllocationSpace id, Executability executable)
-      : PagedSpace(heap, id, executable, FreeList::CreateFreeList()) {}
-
-  bool is_local() override { return true; }
+  CompactionSpace(Heap* heap, AllocationSpace id, Executability executable,
+                  CompactionSpaceKind compaction_space_kind)
+      : PagedSpace(heap, id, executable, FreeList::CreateFreeList(),
+                   compaction_space_kind) {
+    DCHECK_NE(compaction_space_kind, CompactionSpaceKind::kNone);
+  }
 
  protected:
   // The space is temporary and not included in any snapshots.
@@ -3054,9 +3064,12 @@ class V8_EXPORT_PRIVATE CompactionSpace : public PagedSpace {
 // A collection of |CompactionSpace|s used by a single compaction task.
 class CompactionSpaceCollection : public Malloced {
  public:
-  explicit CompactionSpaceCollection(Heap* heap)
-      : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE),
-        code_space_(heap, CODE_SPACE, Executability::EXECUTABLE) {}
+  explicit CompactionSpaceCollection(Heap* heap,
+                                     CompactionSpaceKind compaction_space_kind)
+      : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE,
+                   compaction_space_kind),
+        code_space_(heap, CODE_SPACE, Executability::EXECUTABLE,
+                    compaction_space_kind) {}
 
   CompactionSpace* Get(AllocationSpace space) {
     switch (space) {
@@ -214,6 +214,15 @@ Page* Sweeper::GetSweptPageSafe(PagedSpace* space) {
   return nullptr;
 }
 
+void Sweeper::MergeOldToNewRememberedSetsForSweptPages() {
+  base::MutexGuard guard(&mutex_);
+
+  ForAllSweepingSpaces([this](AllocationSpace space) {
+    SweptList& swept_list = swept_list_[GetSweepSpaceIndex(space)];
+    for (Page* p : swept_list) p->MergeOldToNewRememberedSets();
+  });
+}
+
 void Sweeper::AbortAndWaitForTasks() {
   if (!FLAG_concurrent_sweeping) return;
 
@@ -110,6 +110,7 @@ class Sweeper {
   void AddPageForIterability(Page* page);
   void StartIterabilityTasks();
   void EnsureIterabilityCompleted();
+  void MergeOldToNewRememberedSetsForSweptPages();
 
  private:
   class IncrementalSweeperTask;
@@ -18,8 +18,8 @@ TEST_F(SpacesTest, CompactionSpaceMerge) {
   OldSpace* old_space = heap->old_space();
   EXPECT_TRUE(old_space != nullptr);
 
-  CompactionSpace* compaction_space =
-      new CompactionSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
+  CompactionSpace* compaction_space = new CompactionSpace(
+      heap, OLD_SPACE, NOT_EXECUTABLE, CompactionSpaceKind::kMarkCompact);
   EXPECT_TRUE(compaction_space != nullptr);
 
   for (Page* p : *old_space) {