[heap] Iterate promoted pages during sweeping

Promoted pages are iterated to record slots containing old-to-new and
old-to-shared references. This takes a significant amount of time during
the atomic pause.
Instead, we offload this task to the concurrent sweepers, record slots
into a local cache, and merge the cache when finalizing sweeping.

Array buffer sweeping depends on the iteration of promoted pages, so it
is blocked until that iteration is done.

See design doc at https://docs.google.com/document/d/1JzXZHguAnNAZUfS7kLeaPVXFfCYbf5bGCtyKgyiMDH4/edit?usp=sharing
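
To make the mechanism above easier to follow, here is a minimal,
self-contained C++ sketch of the two ideas: concurrent workers record
old-to-new slots for promoted pages into per-worker caches that are
merged into the pages' remembered sets when sweeping is finalized, and
array buffer sweeping blocks until promoted-page iteration has finished.
This is illustration only, not V8 code; all names below (SimplePage,
SlotCache, PromotedPageIteration, ...) are made up.

#include <condition_variable>
#include <cstddef>
#include <cstdint>
#include <mutex>
#include <thread>
#include <unordered_map>
#include <unordered_set>
#include <vector>

using Address = uintptr_t;

struct SimplePage {
  std::unordered_set<Address> old_to_new;  // the page's remembered set
};

// Per-worker cache; a worker fills only its own cache, so no locking.
using SlotCache = std::unordered_map<SimplePage*, std::vector<Address>>;

struct PromotedPageIteration {
  std::mutex mutex;
  std::condition_variable done_cv;
  size_t pages_to_iterate = 0;
  size_t pages_iterated = 0;

  void NotifyPageIterated() {
    std::lock_guard<std::mutex> guard(mutex);
    if (++pages_iterated == pages_to_iterate) done_cv.notify_all();
  }
  // Array buffer sweeping would block here until iteration is done.
  void WaitUntilDone() {
    std::unique_lock<std::mutex> lock(mutex);
    done_cv.wait(lock, [&] { return pages_iterated == pages_to_iterate; });
  }
};

// A worker "iterates" a promoted page and records the old-to-new slots
// it found into its local cache.
void IteratePromotedPage(SimplePage* page,
                         const std::vector<Address>& found_slots,
                         SlotCache* cache, PromotedPageIteration* sync) {
  std::vector<Address>& slots = (*cache)[page];
  slots.insert(slots.end(), found_slots.begin(), found_slots.end());
  sync->NotifyPageIterated();
}

// The main thread merges all caches when finalizing sweeping.
void MergeCaches(const std::vector<SlotCache>& caches) {
  for (const SlotCache& cache : caches) {
    for (const auto& entry : cache) {
      entry.first->old_to_new.insert(entry.second.begin(),
                                     entry.second.end());
    }
  }
}

int main() {
  SimplePage page_a, page_b;
  PromotedPageIteration sync;
  sync.pages_to_iterate = 2;

  std::vector<SlotCache> caches(2);
  std::thread w1(IteratePromotedPage, &page_a,
                 std::vector<Address>{0x1000, 0x1008}, &caches[0], &sync);
  std::thread w2(IteratePromotedPage, &page_b,
                 std::vector<Address>{0x2000}, &caches[1], &sync);

  sync.WaitUntilDone();  // e.g. array buffer sweeping waits here
  w1.join();
  w2.join();
  MergeCaches(caches);  // merge local caches into the remembered sets
  return 0;
}

Recording into per-worker caches avoids synchronizing on the pages'
remembered sets from inside the sweeper tasks; the single merge step in
this sketch runs on the main thread after the workers are joined, so
the target sets need no locking there.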

Bug: v8:12612
Change-Id: Icdc79a7a70c53352e3a1b3961cfe369e8563b65b
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4062041
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Auto-Submit: Omer Katz <omerkatz@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#84706}
Author: Omer Katz, 2022-12-07 14:21:09 +01:00 (committed by V8 LUCI CQ)
Commit: 1e3dd39d09, parent 3241896c87
29 changed files with 582 additions and 122 deletions

@ -1147,6 +1147,8 @@ enum class CodeFlushMode {
kStressFlushCode,
};
enum ExternalBackingStoreType { kArrayBuffer, kExternalString, kNumTypes };
bool inline IsBaselineCodeFlushingEnabled(base::EnumSet<CodeFlushMode> mode) {
return mode.contains(CodeFlushMode::kFlushBaselineCode);
}

@ -13,6 +13,7 @@
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
#include "src/diagnostics/etw-jit-metadata-win.h"
#include "src/logging/log.h"
#include "src/objects/shared-function-info.h"
#include "src/tasks/cancelable-task.h"
#include "src/tasks/task-utils.h"

@ -1372,6 +1372,8 @@ DEFINE_INT(ephemeron_fixpoint_iterations, 10,
"ephemeron algorithm")
DEFINE_BOOL(trace_concurrent_marking, false, "trace concurrent marking")
DEFINE_BOOL(concurrent_sweeping, true, "use concurrent sweeping")
DEFINE_NEG_NEG_IMPLICATION(concurrent_sweeping,
concurrent_array_buffer_sweeping)
DEFINE_BOOL(parallel_compaction, true, "use parallel compaction")
DEFINE_BOOL(parallel_pointer_update, true,
"use parallel pointer update during compaction")

@ -161,6 +161,7 @@ void ArrayBufferSweeper::RequestSweep(SweepingType type) {
? GCTracer::Scope::BACKGROUND_YOUNG_ARRAY_BUFFER_SWEEP
: GCTracer::Scope::BACKGROUND_FULL_ARRAY_BUFFER_SWEEP;
TRACE_GC_EPOCH(heap_->tracer(), scope_id, ThreadKind::kBackground);
heap_->sweeper()->WaitForPromotedPagesIteration();
base::MutexGuard guard(&sweeping_mutex_);
job_->Sweep();
job_finished_.NotifyAll();
@ -168,6 +169,7 @@ void ArrayBufferSweeper::RequestSweep(SweepingType type) {
job_->id_ = task->id();
V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
} else {
heap_->sweeper()->WaitForPromotedPagesIteration();
job_->Sweep();
Finalize();
}

@ -10,7 +10,6 @@
#include "src/base/macros.h"
#include "src/common/globals.h"
#include "src/heap/heap-verifier.h"
#include "src/logging/log.h"
#include "src/utils/allocation.h"
namespace v8 {

@ -7,6 +7,7 @@
#include "src/base/macros.h"
#include "src/common/globals.h"
#include "src/heap/allocation-result.h"
#include "src/heap/memory-chunk.h"
#include "src/objects/free-space.h"
#include "src/objects/map.h"

@ -1902,6 +1902,8 @@ void Heap::StartIncrementalMarking(int gc_flags,
}
void Heap::CompleteSweepingFull() {
EnsureSweepingCompleted(SweepingForcedFinalizationMode::kUnifiedHeap);
if (array_buffer_sweeper()->sweeping_in_progress()) {
GCTracer::Scope::ScopeId scope_id;
@ -1919,7 +1921,6 @@ void Heap::CompleteSweepingFull() {
TRACE_GC_EPOCH(tracer(), scope_id, ThreadKind::kMain);
array_buffer_sweeper()->EnsureFinished();
}
EnsureSweepingCompleted(SweepingForcedFinalizationMode::kUnifiedHeap);
DCHECK(!sweeping_in_progress());
DCHECK_IMPLIES(cpp_heap(),
@ -2194,6 +2195,8 @@ size_t Heap::PerformGarbageCollection(GarbageCollector collector,
GCTracer::MarkingType::kAtomic);
}
}
DCHECK_IMPLIES(!v8_flags.minor_mc,
!pretenuring_handler_.HasPretenuringFeedback());
if (v8_flags.minor_mc) pretenuring_handler_.ProcessPretenuringFeedback();
tracer()->StartAtomicPause();
@ -2253,6 +2256,8 @@ size_t Heap::PerformGarbageCollection(GarbageCollector collector,
Scavenge();
}
DCHECK_IMPLIES(collector == GarbageCollector::MINOR_MARK_COMPACTOR,
!pretenuring_handler_.HasPretenuringFeedback());
pretenuring_handler_.ProcessPretenuringFeedback();
UpdateSurvivalStatistics(static_cast<int>(start_young_generation_size));
@ -2400,6 +2405,15 @@ void Heap::PerformSharedGarbageCollection(Isolate* initiator,
}
void Heap::CompleteSweepingYoung() {
// If sweeping is in progress and there are no sweeper tasks running, finish
// the sweeping here, to avoid having to pause and resume during the young
// generation GC.
FinishSweepingIfOutOfWork();
if (v8_flags.minor_mc && sweeping_in_progress()) {
PauseSweepingAndEnsureYoungSweepingCompleted();
}
if (array_buffer_sweeper()->sweeping_in_progress()) {
GCTracer::Scope::ScopeId scope_id;
@ -2418,15 +2432,6 @@ void Heap::CompleteSweepingYoung() {
array_buffer_sweeper()->EnsureFinished();
}
// If sweeping is in progress and there are no sweeper tasks running, finish
// the sweeping here, to avoid having to pause and resume during the young
// generation GC.
FinishSweepingIfOutOfWork();
if (v8_flags.minor_mc && sweeping_in_progress()) {
PauseSweepingAndEnsureYoungSweepingCompleted();
}
#if defined(CPPGC_YOUNG_GENERATION)
// Always complete sweeping if young generation is enabled.
if (cpp_heap()) {
@ -6303,29 +6308,6 @@ PagedSpace* PagedSpaceIterator::Next() {
return nullptr;
}
SpaceIterator::SpaceIterator(Heap* heap)
: heap_(heap), current_space_(FIRST_MUTABLE_SPACE) {}
SpaceIterator::~SpaceIterator() = default;
bool SpaceIterator::HasNext() {
while (current_space_ <= LAST_MUTABLE_SPACE) {
Space* space = heap_->space(current_space_);
if (space) return true;
++current_space_;
}
// No more spaces left.
return false;
}
Space* SpaceIterator::Next() {
DCHECK_LE(current_space_, LAST_MUTABLE_SPACE);
Space* space = heap_->space(current_space_++);
DCHECK_NOT_NULL(space);
return space;
}
class HeapObjectsFilter {
public:
virtual ~HeapObjectsFilter() = default;

@ -148,8 +148,6 @@ enum class InvalidateRecordedSlots { kYes, kNo };
enum class ClearFreedMemoryMode { kClearFreedMemory, kDontClearFreedMemory };
enum ExternalBackingStoreType { kArrayBuffer, kExternalString, kNumTypes };
enum class RetainingPathOption { kDefault, kTrackEphemeronPath };
enum class YoungGenerationHandling {
@ -2723,19 +2721,6 @@ class V8_EXPORT_PRIVATE PagedSpaceIterator {
int counter_;
};
class V8_EXPORT_PRIVATE SpaceIterator : public Malloced {
public:
explicit SpaceIterator(Heap* heap);
virtual ~SpaceIterator();
bool HasNext();
Space* Next();
private:
Heap* heap_;
int current_space_; // from enum AllocationSpace.
};
// A HeapObjectIterator provides iteration over the entire non-read-only heap.
// It aggregates the specific iterators for the different spaces as each of
// those can iterate over only one space.

@ -5701,19 +5701,16 @@ std::pair<size_t, size_t> MinorMarkCompactCollector::ProcessMarkingWorklist(
void MinorMarkCompactCollector::CleanupPromotedPages() {
for (Page* p : promoted_pages_) {
p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
DCHECK(!p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
DCHECK(p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
non_atomic_marking_state()->ClearLiveness(p);
}
promoted_pages_.clear();
for (LargePage* p : promoted_large_pages_) {
DCHECK(p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
HeapObject object = p->GetObject();
Marking::MarkWhite(non_atomic_marking_state()->MarkBitFrom(object));
p->ProgressBar().ResetIfEnabled();
non_atomic_marking_state()->SetLiveBytes(p, 0);
}
promoted_large_pages_.clear();
}
@ -5992,7 +5989,11 @@ void MinorMarkCompactCollector::Finish() {
local_marking_worklists_.reset();
main_marking_visitor_.reset();
CleanupPromotedPages();
sweeper()->StartSweeperTasks();
SweepArrayBufferExtensions();
}
void MinorMarkCompactCollector::CollectGarbage() {
@ -6000,8 +6001,7 @@ void MinorMarkCompactCollector::CollectGarbage() {
DCHECK_NOT_NULL(heap()->new_space());
// Minor MC does not support processing the ephemeron remembered set.
DCHECK(heap()->ephemeron_remembered_set_.empty());
heap()->array_buffer_sweeper()->EnsureFinished();
DCHECK(!heap()->array_buffer_sweeper()->sweeping_in_progress());
MarkLiveObjects();
ClearNonLiveReferences();
@ -6025,10 +6025,6 @@ void MinorMarkCompactCollector::CollectGarbage() {
}
#endif // VERIFY_HEAP
CleanupPromotedPages();
SweepArrayBufferExtensions();
auto* isolate = heap()->isolate();
isolate->global_handles()->UpdateListOfYoungNodes();
isolate->traced_handles()->UpdateListOfYoungNodes();
@ -6049,9 +6045,9 @@ void MinorMarkCompactCollector::MakeIterable(
if (free_end != free_start) {
CHECK_GT(free_end, free_start);
size_t size = static_cast<size_t>(free_end - free_start);
heap()->non_atomic_marking_state()->bitmap(p)->ClearRange(
DCHECK(heap_->non_atomic_marking_state()->bitmap(p)->AllBitsClearInRange(
p->AddressToMarkbitIndex(free_start),
p->AddressToMarkbitIndex(free_end));
p->AddressToMarkbitIndex(free_end)));
if (free_space_mode == FreeSpaceTreatmentMode::kZapFreeSpace) {
ZapCode(free_start, size);
}
@ -6066,9 +6062,9 @@ void MinorMarkCompactCollector::MakeIterable(
if (free_start != p->area_end()) {
CHECK_GT(p->area_end(), free_start);
size_t size = static_cast<size_t>(p->area_end() - free_start);
heap()->non_atomic_marking_state()->bitmap(p)->ClearRange(
DCHECK(heap_->non_atomic_marking_state()->bitmap(p)->AllBitsClearInRange(
p->AddressToMarkbitIndex(free_start),
p->AddressToMarkbitIndex(p->area_end()));
p->AddressToMarkbitIndex(p->area_end())));
if (free_space_mode == FreeSpaceTreatmentMode::kZapFreeSpace) {
ZapCode(free_start, size);
}
@ -6563,7 +6559,7 @@ class YoungGenerationEvacuator : public Evacuator {
record_visitor_(heap_),
local_allocator_(
heap_, CompactionSpaceKind::kCompactionSpaceForMinorMarkCompact),
collector_(heap_->minor_mark_compact_collector()) {}
sweeper_(heap->sweeper()) {}
GCTracer::Scope::ScopeId GetBackgroundTracingScope() override {
return GCTracer::Scope::MINOR_MC_BACKGROUND_EVACUATE_COPY;
@ -6578,7 +6574,7 @@ class YoungGenerationEvacuator : public Evacuator {
YoungGenerationRecordMigratedSlotVisitor record_visitor_;
EvacuationAllocator local_allocator_;
MinorMarkCompactCollector* collector_;
Sweeper* const sweeper_;
};
bool YoungGenerationEvacuator::RawEvacuatePage(MemoryChunk* chunk,
@ -6588,23 +6584,9 @@ bool YoungGenerationEvacuator::RawEvacuatePage(MemoryChunk* chunk,
NonAtomicMarkingState* marking_state = heap_->non_atomic_marking_state();
*live_bytes = marking_state->live_bytes(chunk);
DCHECK_EQ(kPageNewToOld, ComputeEvacuationMode(chunk));
LiveObjectVisitor::VisitBlackObjectsNoFail(chunk, marking_state,
&new_to_old_page_visitor_);
sweeper_->AddPromotedPageForIteration(chunk);
new_to_old_page_visitor_.account_moved_bytes(
marking_state->live_bytes(chunk));
if (!chunk->IsLargePage()) {
if (heap()->ShouldZapGarbage()) {
collector_->MakeIterable(static_cast<Page*>(chunk),
FreeSpaceTreatmentMode::kZapFreeSpace);
} else if (heap()->incremental_marking()->IsMarking()) {
// When incremental marking is on, we need to clear the mark bits
// of the full collector. We cannot yet discard the young
// generation mark bits as they are still relevant for pointers
// updating.
collector_->MakeIterable(static_cast<Page*>(chunk),
FreeSpaceTreatmentMode::kIgnoreFreeSpace);
}
}
return true;
}

@ -6,6 +6,7 @@
#define V8_HEAP_MARKING_VISITOR_H_
#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/heap/marking-state.h"
#include "src/heap/marking-worklist.h"
#include "src/heap/objects-visiting.h"

@ -771,19 +771,20 @@ bool MemoryAllocator::SetPermissionsOnExecutableMemoryChunk(VirtualMemory* vm,
return false;
}
// static
const MemoryChunk* MemoryAllocator::LookupChunkContainingAddress(
Address addr) const {
base::MutexGuard guard(&pages_mutex_);
const NormalPagesSet& normal_pages, const LargePagesSet& large_pages,
Address addr) {
BasicMemoryChunk* chunk = BasicMemoryChunk::FromAddress(addr);
if (auto it = normal_pages_.find(static_cast<Page*>(chunk));
it != normal_pages_.end()) {
if (auto it = normal_pages.find(static_cast<Page*>(chunk));
it != normal_pages.end()) {
// The chunk is a normal page.
DCHECK_LE(chunk->address(), addr);
if (chunk->Contains(addr)) return *it;
} else if (auto it = large_pages_.upper_bound(static_cast<LargePage*>(chunk));
it != large_pages_.begin()) {
} else if (auto it = large_pages.upper_bound(static_cast<LargePage*>(chunk));
it != large_pages.begin()) {
// The chunk could be inside a large page.
DCHECK_IMPLIES(it != large_pages_.end(), addr < (*it)->address());
DCHECK_IMPLIES(it != large_pages.end(), addr < (*it)->address());
auto* large_page = *std::next(it, -1);
DCHECK_NOT_NULL(large_page);
DCHECK_LE(large_page->address(), addr);
@ -793,6 +794,14 @@ const MemoryChunk* MemoryAllocator::LookupChunkContainingAddress(
return nullptr;
}
const MemoryChunk* MemoryAllocator::LookupChunkContainingAddress(
Address addr) const {
// All threads should be either parked or in a safepoint whenever this method
// is called, thus pages cannot be allocated or freed at the same time and a
// mutex is not required here.
return LookupChunkContainingAddress(normal_pages_, large_pages_, addr);
}
void MemoryAllocator::RecordNormalPageCreated(const Page& page) {
base::MutexGuard guard(&pages_mutex_);
auto result = normal_pages_.insert(&page);

@ -7,9 +7,9 @@
#include <atomic>
#include <memory>
#include <unordered_map>
#include <set>
#include <unordered_set>
#include <vector>
#include <utility>
#include "include/v8-platform.h"
#include "src/base/bounded-page-allocator.h"
@ -28,6 +28,10 @@
namespace v8 {
namespace internal {
namespace heap {
class TestMemoryAllocatorScope;
} // namespace heap
class Heap;
class Isolate;
class ReadOnlyPage;
@ -38,6 +42,9 @@ class ReadOnlyPage;
// pages for large object space.
class MemoryAllocator {
public:
using NormalPagesSet = std::unordered_set<const Page*>;
using LargePagesSet = std::set<const LargePage*>;
// Unmapper takes care of concurrently unmapping and uncommitting memory
// chunks.
class Unmapper {
@ -265,6 +272,9 @@ class MemoryAllocator {
// Return the normal or large page that contains this address, if it is owned
// by this heap, otherwise a nullptr.
V8_EXPORT_PRIVATE static const MemoryChunk* LookupChunkContainingAddress(
const NormalPagesSet& normal_pages, const LargePagesSet& large_page,
Address addr);
V8_EXPORT_PRIVATE const MemoryChunk* LookupChunkContainingAddress(
Address addr) const;
@ -274,6 +284,13 @@ class MemoryAllocator {
void RecordLargePageCreated(const LargePage& page);
void RecordLargePageDestroyed(const LargePage& page);
std::pair<const NormalPagesSet, const LargePagesSet> SnapshotPageSets()
const {
// No need for a mutex as this is only called during GC atomic pause (which
// is in a safepoint).
return std::make_pair(normal_pages_, large_pages_);
}
private:
// Used to store all data about MemoryChunk allocation, e.g. in
// AllocateUninitializedChunk.
@ -424,8 +441,8 @@ class MemoryAllocator {
// Allocated normal and large pages are stored here, to be used during
// conservative stack scanning.
std::unordered_set<const Page*> normal_pages_;
std::set<const LargePage*> large_pages_;
NormalPagesSet normal_pages_;
LargePagesSet large_pages_;
mutable base::Mutex pages_mutex_;
V8_EXPORT_PRIVATE static size_t commit_page_size_;

@ -5,8 +5,8 @@
#ifndef V8_HEAP_MEMORY_CHUNK_LAYOUT_H_
#define V8_HEAP_MEMORY_CHUNK_LAYOUT_H_
#include "src/common/globals.h"
#include "src/heap/base/active-system-pages.h"
#include "src/heap/heap.h"
#include "src/heap/list.h"
#include "src/heap/progress-bar.h"
#include "src/heap/slot-set.h"

@ -12,7 +12,6 @@
#include "src/common/globals.h"
#include "src/heap/base/active-system-pages.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/heap.h"
#include "src/heap/invalidated-slots.h"
#include "src/heap/list.h"
#include "src/heap/marking.h"
@ -24,6 +23,7 @@ namespace internal {
class CodeObjectRegistry;
class FreeListCategory;
class Space;
// MemoryChunk represents a memory region owned by a specific space.
// It is divided into the header and the body. Chunk start is always
@ -241,6 +241,15 @@ class MemoryChunk : public BasicMemoryChunk {
static void ValidateOffsets(MemoryChunk* chunk);
#endif
template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
void set_slot_set(SlotSet* slot_set) {
if (access_mode == AccessMode::ATOMIC) {
base::AsAtomicPointer::Release_Store(&slot_set_[type], slot_set);
return;
}
slot_set_[type] = slot_set;
}
// A single slot set for small pages (of size kPageSize) or an array of slot
// set for large pages. In the latter case the number of entries in the array
// is ceil(size() / kPageSize).
@ -306,6 +315,8 @@ class MemoryChunk : public BasicMemoryChunk {
friend class MemoryAllocator;
friend class MemoryChunkValidator;
friend class PagedSpace;
template <RememberedSetType>
friend class RememberedSet;
};
} // namespace internal

@ -769,10 +769,17 @@ void PagedSpaceBase::Verify(Isolate* isolate,
}
CHECK(allocation_pointer_found_in_space);
if (identity() == OLD_SPACE && !v8_flags.concurrent_array_buffer_sweeping) {
size_t bytes = heap()->array_buffer_sweeper()->old().BytesSlow();
CHECK_EQ(bytes,
ExternalBackingStoreBytes(ExternalBackingStoreType::kArrayBuffer));
if (!v8_flags.concurrent_array_buffer_sweeping) {
if (identity() == OLD_SPACE) {
size_t bytes = heap()->array_buffer_sweeper()->old().BytesSlow();
CHECK_EQ(bytes, ExternalBackingStoreBytes(
ExternalBackingStoreType::kArrayBuffer));
} else if (identity() == NEW_SPACE) {
DCHECK(v8_flags.minor_mc);
size_t bytes = heap()->array_buffer_sweeper()->young().BytesSlow();
CHECK_EQ(bytes, ExternalBackingStoreBytes(
ExternalBackingStoreType::kArrayBuffer));
}
}
#ifdef DEBUG

@ -19,6 +19,7 @@
#include "src/heap/allocation-observer.h"
#include "src/heap/allocation-stats.h"
#include "src/heap/heap-verifier.h"
#include "src/heap/heap.h"
#include "src/heap/memory-chunk-layout.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/spaces.h"

@ -24,7 +24,7 @@ void PretenturingHandler::UpdateAllocationSite(
DCHECK_IMPLIES(chunk->IsToPage(),
v8_flags.minor_mc ||
chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION));
DCHECK_IMPLIES(!chunk->InYoungGeneration(),
DCHECK_IMPLIES(!v8_flags.minor_mc && !chunk->InYoungGeneration(),
chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION));
#endif
if (!v8_flags.allocation_site_pretenuring ||

@ -68,6 +68,12 @@ class PretenturingHandler final {
// Removes an entry from the global pretenuring storage.
void RemoveAllocationSitePretenuringFeedback(AllocationSite site);
#if DEBUG
bool HasPretenuringFeedback() const {
return !global_pretenuring_feedback_.empty();
}
#endif // DEBUG
private:
bool DeoptMaybeTenuredAllocationSites() const;

@ -11,6 +11,7 @@
#include "include/v8-platform.h"
#include "src/base/macros.h"
#include "src/common/globals.h"
#include "src/heap/allocation-result.h"
#include "src/heap/allocation-stats.h"
#include "src/heap/base-space.h"
#include "src/heap/basic-memory-chunk.h"

@ -102,6 +102,19 @@ class RememberedSet : public AllStatic {
RememberedSetOperations::Insert<access_mode>(slot_set, chunk, slot_addr);
}
// Given a page and a slot set, this function merges the slot set into the
// page's slot set. |other_slot_set| should not be used after calling this
// method.
static void MergeAndDelete(MemoryChunk* chunk, SlotSet* other_slot_set) {
static_assert(type == RememberedSetType::OLD_TO_NEW);
SlotSet* slot_set = chunk->slot_set<type, AccessMode::NON_ATOMIC>();
if (slot_set == nullptr) {
chunk->set_slot_set<RememberedSetType::OLD_TO_NEW>(other_slot_set);
return;
}
slot_set->Merge(other_slot_set, chunk->buckets());
SlotSet::Delete(other_slot_set, chunk->buckets());
}
// Given a page and a slot in that page, this function returns true if
// the remembered set contains the slot.
static bool Contains(MemoryChunk* chunk, Address slot_addr) {

@ -197,6 +197,22 @@ class SlotSet final : public ::heap::base::BasicSlotSet<kTaggedSize> {
return empty;
}
void Merge(SlotSet* other, size_t buckets) {
for (size_t bucket_index = 0; bucket_index < buckets; bucket_index++) {
Bucket* other_bucket =
other->LoadBucket<AccessMode::NON_ATOMIC>(bucket_index);
if (!other_bucket) continue;
Bucket* bucket = LoadBucket<AccessMode::NON_ATOMIC>(bucket_index);
if (bucket == nullptr) {
bucket = new Bucket;
CHECK(SwapInNewBucket<AccessMode::NON_ATOMIC>(bucket_index, bucket));
}
for (int cell_index = 0; cell_index < kCellsPerBucket; cell_index++) {
bucket->SetCellBits(cell_index, *other_bucket->cell(cell_index));
}
}
}
};
static_assert(std::is_standard_layout<SlotSet>::value);

@ -14,9 +14,7 @@
#include "src/base/sanitizer/msan.h"
#include "src/common/globals.h"
#include "src/heap/base/active-system-pages.h"
#include "src/heap/combined-heap.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/heap-controller.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/invalidated-slots-inl.h"
@ -458,5 +456,28 @@ int MemoryChunk::FreeListsLength() {
return length;
}
SpaceIterator::SpaceIterator(Heap* heap)
: heap_(heap), current_space_(FIRST_MUTABLE_SPACE) {}
SpaceIterator::~SpaceIterator() = default;
bool SpaceIterator::HasNext() {
while (current_space_ <= LAST_MUTABLE_SPACE) {
Space* space = heap_->space(current_space_);
if (space) return true;
++current_space_;
}
// No more spaces left.
return false;
}
Space* SpaceIterator::Next() {
DCHECK_LE(current_space_, LAST_MUTABLE_SPACE);
Space* space = heap_->space(current_space_++);
DCHECK_NOT_NULL(space);
return space;
}
} // namespace internal
} // namespace v8

@ -16,7 +16,6 @@
#include "src/heap/base/active-system-pages.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/free-list.h"
#include "src/heap/heap.h"
#include "src/heap/linear-allocation-area.h"
#include "src/heap/list.h"
#include "src/heap/memory-chunk-layout.h"
@ -37,9 +36,11 @@ class TestCodePageAllocatorScope;
class AllocationObserver;
class FreeList;
class Heap;
class Isolate;
class LargeObjectSpace;
class LargePage;
class ObjectIterator;
class Page;
class PagedSpaceBase;
class SemiSpace;
@ -650,6 +651,19 @@ class SpaceWithLinearArea : public Space {
AllocationOrigin::kNumberOfAllocationOrigins)] = {0};
};
class V8_EXPORT_PRIVATE SpaceIterator : public Malloced {
public:
explicit SpaceIterator(Heap* heap);
virtual ~SpaceIterator();
bool HasNext();
Space* Next();
private:
Heap* heap_;
int current_space_; // from enum AllocationSpace.
};
// Iterates over all memory chunks in the heap (across all spaces).
class MemoryChunkIterator {
public:

@ -7,6 +7,7 @@
#include <memory>
#include <vector>
#include "src/base/atomic-utils.h"
#include "src/base/logging.h"
#include "src/common/globals.h"
#include "src/execution/vm-state-inl.h"
@ -18,11 +19,16 @@
#include "src/heap/gc-tracer.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/marking-state.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/new-spaces.h"
#include "src/heap/paged-spaces.h"
#include "src/heap/pretenuring-handler-inl.h"
#include "src/heap/pretenuring-handler.h"
#include "src/heap/remembered-set.h"
#include "src/heap/slot-set.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/objects-inl.h"
namespace v8 {
@ -46,13 +52,29 @@ class Sweeper::ConcurrentSweeper final {
return false;
}
bool ConcurrentSweepForRememberedSet(JobDelegate* delegate) {
while (!delegate->ShouldYield()) {
MemoryChunk* chunk = sweeper_->GetPromotedPageForIterationSafe();
if (chunk == nullptr) return true;
sweeper_->ParallelIteratePromotedPageForRememberedSets(
chunk, &local_pretenuring_feedback_,
&snapshot_old_to_new_remembered_sets_);
}
return false;
}
PretenturingHandler::PretenuringFeedbackMap* local_pretenuring_feedback() {
return &local_pretenuring_feedback_;
}
CachedOldToNewRememberedSets* snapshot_old_to_new_remembered_sets() {
return &snapshot_old_to_new_remembered_sets_;
}
private:
Sweeper* const sweeper_;
PretenturingHandler::PretenuringFeedbackMap local_pretenuring_feedback_;
CachedOldToNewRememberedSets snapshot_old_to_new_remembered_sets_;
};
class Sweeper::SweeperJob final : public JobTask {
@ -99,6 +121,7 @@ class Sweeper::SweeperJob final : public JobTask {
tracer_, sweeper_->GetTracingScope(NEW_SPACE, is_joining_thread),
is_joining_thread ? ThreadKind::kMain : ThreadKind::kBackground);
if (!concurrent_sweeper.ConcurrentSweepSpace(NEW_SPACE, delegate)) return;
if (!concurrent_sweeper.ConcurrentSweepForRememberedSet(delegate)) return;
}
if (!SweepNonNewSpaces(concurrent_sweeper, delegate, is_joining_thread, 1,
offset == 0 ? kNumberOfSweepingSpaces : offset))
@ -205,6 +228,13 @@ void Sweeper::StartSweeping(GarbageCollector collector) {
return marking_state->live_bytes(a) > marking_state->live_bytes(b);
});
});
DCHECK(snapshot_normal_pages_set_.empty());
DCHECK(snapshot_large_pages_set_.empty());
if (v8_flags.minor_mc &&
(collector == GarbageCollector::MINOR_MARK_COMPACTOR)) {
std::tie(snapshot_normal_pages_set_, snapshot_large_pages_set_) =
heap_->memory_allocator()->SnapshotPageSets();
}
}
int Sweeper::NumberOfConcurrentSweepers() const {
@ -242,6 +272,30 @@ Page* Sweeper::GetSweptPageSafe(PagedSpaceBase* space) {
return nullptr;
}
void Sweeper::MergePretenuringFeedbackAndRememberedSets() {
DCHECK_EQ(promoted_pages_for_iteration_count_,
iterated_promoted_pages_count_);
pretenuring_handler_->MergeAllocationSitePretenuringFeedback(
local_pretenuring_feedback_);
local_pretenuring_feedback_.clear();
for (auto it : snapshot_old_to_new_remembered_sets_) {
MemoryChunk* chunk = it.first;
RememberedSet<OLD_TO_NEW>::MergeAndDelete(chunk, it.second);
}
snapshot_old_to_new_remembered_sets_.clear();
for (ConcurrentSweeper& concurrent_sweeper : concurrent_sweepers_) {
pretenuring_handler_->MergeAllocationSitePretenuringFeedback(
*concurrent_sweeper.local_pretenuring_feedback());
concurrent_sweeper.local_pretenuring_feedback()->clear();
for (auto it : *concurrent_sweeper.snapshot_old_to_new_remembered_sets()) {
MemoryChunk* chunk = it.first;
RememberedSet<OLD_TO_NEW>::MergeAndDelete(chunk, it.second);
}
concurrent_sweeper.snapshot_old_to_new_remembered_sets()->clear();
}
}
void Sweeper::EnsureCompleted() {
if (!sweeping_in_progress_) return;
@ -258,6 +312,7 @@ void Sweeper::EnsureCompleted() {
TRACE_GC_EPOCH(heap_->tracer(), GetTracingScopeForCompleteYoungSweep(),
ThreadKind::kMain);
ParallelSweepSpace(NEW_SPACE, SweepingMode::kLazyOrConcurrent, 0);
ParallelIteratePromotedPagesForRememberedSets();
if (job_handle_ && job_handle_->IsValid()) job_handle_->Join();
@ -265,19 +320,19 @@ void Sweeper::EnsureCompleted() {
CHECK(sweeping_list_[GetSweepSpaceIndex(space)].empty());
});
pretenuring_handler_->MergeAllocationSitePretenuringFeedback(
local_pretenuring_feedback_);
local_pretenuring_feedback_.clear();
for (ConcurrentSweeper& concurrent_sweeper : concurrent_sweepers_) {
pretenuring_handler_->MergeAllocationSitePretenuringFeedback(
*concurrent_sweeper.local_pretenuring_feedback());
// No need to clear the concurrent feedback map since the concurrent sweeper
// goes away.
}
MergePretenuringFeedbackAndRememberedSets();
concurrent_sweepers_.clear();
current_new_space_collector_.reset();
should_sweep_non_new_spaces_ = false;
{
base::MutexGuard guard(&promoted_pages_iteration_notification_mutex_);
base::AsAtomicPtr(&promoted_pages_for_iteration_count_)
->store(0, std::memory_order_relaxed);
iterated_promoted_pages_count_ = 0;
}
snapshot_large_pages_set_.clear();
snapshot_normal_pages_set_.clear();
sweeping_in_progress_ = false;
}
@ -285,21 +340,21 @@ void Sweeper::PauseAndEnsureNewSpaceCompleted() {
if (!sweeping_in_progress_) return;
ParallelSweepSpace(NEW_SPACE, SweepingMode::kLazyOrConcurrent, 0);
ParallelIteratePromotedPagesForRememberedSets();
if (job_handle_ && job_handle_->IsValid()) job_handle_->Cancel();
CHECK(sweeping_list_[GetSweepSpaceIndex(NEW_SPACE)].empty());
CHECK(sweeping_list_for_promoted_page_iteration_.empty());
pretenuring_handler_->MergeAllocationSitePretenuringFeedback(
local_pretenuring_feedback_);
local_pretenuring_feedback_.clear();
for (ConcurrentSweeper& concurrent_sweeper : concurrent_sweepers_) {
pretenuring_handler_->MergeAllocationSitePretenuringFeedback(
*concurrent_sweeper.local_pretenuring_feedback());
concurrent_sweeper.local_pretenuring_feedback()->clear();
}
MergePretenuringFeedbackAndRememberedSets();
current_new_space_collector_.reset();
base::AsAtomicPtr(&promoted_pages_for_iteration_count_)
->store(0, std::memory_order_relaxed);
iterated_promoted_pages_count_ = 0;
snapshot_large_pages_set_.clear();
snapshot_normal_pages_set_.clear();
}
void Sweeper::DrainSweepingWorklistForSpace(AllocationSpace space) {
@ -537,9 +592,233 @@ int Sweeper::RawSweep(
p->owner()->free_list()->GuaranteedAllocatable(max_freed_bytes));
}
namespace {
class PromotedPageRecordMigratedSlotVisitor
: public ObjectVisitorWithCageBases {
public:
PromotedPageRecordMigratedSlotVisitor(
Heap* heap,
Sweeper::CachedOldToNewRememberedSets*
snapshot_old_to_new_remembered_sets,
const MemoryAllocator::NormalPagesSet& snapshot_normal_pages_set,
const MemoryAllocator::LargePagesSet& snapshot_large_pages_set)
: ObjectVisitorWithCageBases(heap->isolate()),
heap_(heap),
snapshot_old_to_new_remembered_sets_(
snapshot_old_to_new_remembered_sets),
snapshot_normal_pages_set_(snapshot_normal_pages_set),
snapshot_large_pages_set_(snapshot_large_pages_set) {}
inline void VisitPointer(HeapObject host, ObjectSlot p) final {
DCHECK(!HasWeakHeapObjectTag(p.Relaxed_Load(cage_base())));
RecordMigratedSlot(host,
MaybeObject::FromObject(p.Relaxed_Load(cage_base())),
p.address());
}
inline void VisitMapPointer(HeapObject host) final {
VisitPointer(host, host.map_slot());
}
inline void VisitPointer(HeapObject host, MaybeObjectSlot p) final {
DCHECK(!MapWord::IsPacked(p.Relaxed_Load(cage_base()).ptr()));
RecordMigratedSlot(host, p.Relaxed_Load(cage_base()), p.address());
}
inline void VisitPointers(HeapObject host, ObjectSlot start,
ObjectSlot end) final {
while (start < end) {
VisitPointer(host, start);
++start;
}
}
inline void VisitPointers(HeapObject host, MaybeObjectSlot start,
MaybeObjectSlot end) final {
while (start < end) {
VisitPointer(host, start);
++start;
}
}
inline void VisitCodePointer(HeapObject host, CodeObjectSlot slot) final {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
// This code is similar to the implementation of VisitPointer() modulo the
// new kind of slot.
DCHECK(!HasWeakHeapObjectTag(slot.Relaxed_Load(code_cage_base())));
Object code = slot.Relaxed_Load(code_cage_base());
RecordMigratedSlot(host, MaybeObject::FromObject(code), slot.address());
}
inline void VisitEphemeron(HeapObject host, int index, ObjectSlot key,
ObjectSlot value) override {
DCHECK(host.IsEphemeronHashTable());
DCHECK(!Heap::InYoungGeneration(host));
VisitPointer(host, value);
VisitPointer(host, key);
}
void VisitCodeTarget(Code host, RelocInfo* rinfo) final { UNREACHABLE(); }
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
UNREACHABLE();
}
// Entries that are skipped for recording.
inline void VisitExternalReference(Code host, RelocInfo* rinfo) final {}
inline void VisitInternalReference(Code host, RelocInfo* rinfo) final {}
inline void VisitExternalPointer(HeapObject host, ExternalPointerSlot slot,
ExternalPointerTag tag) final {}
inline void MarkArrayBufferExtensionPromoted(HeapObject object) {
if (!object.IsJSArrayBuffer()) return;
JSArrayBuffer::cast(object).YoungMarkExtensionPromoted();
}
protected:
inline void RecordMigratedSlot(HeapObject host, MaybeObject value,
Address slot) {
DCHECK(!host.InSharedWritableHeap());
DCHECK(!Heap::InYoungGeneration(host));
DCHECK(!MemoryChunk::FromHeapObject(host)->SweepingDone());
if (value->IsStrongOrWeak()) {
const MemoryChunk* value_chunk =
MemoryAllocator::LookupChunkContainingAddress(
snapshot_normal_pages_set_, snapshot_large_pages_set_,
value.ptr());
if (!value_chunk) return;
#ifdef THREAD_SANITIZER
value_chunk->SynchronizedHeapLoad();
#endif // THREAD_SANITIZER
if (value_chunk->InYoungGeneration()) {
MemoryChunk* host_chunk = MemoryChunk::FromHeapObject(host);
if (snapshot_old_to_new_remembered_sets_->find(host_chunk) ==
snapshot_old_to_new_remembered_sets_->end()) {
snapshot_old_to_new_remembered_sets_->emplace(
host_chunk, SlotSet::Allocate(host_chunk->buckets()));
}
RememberedSetOperations::Insert<AccessMode::NON_ATOMIC>(
(*snapshot_old_to_new_remembered_sets_)[host_chunk], host_chunk,
slot);
} else if (value_chunk->InSharedHeap()) {
RememberedSet<OLD_TO_SHARED>::Insert<AccessMode::ATOMIC>(
MemoryChunk::FromHeapObject(host), slot);
}
}
}
Heap* const heap_;
Sweeper::CachedOldToNewRememberedSets* snapshot_old_to_new_remembered_sets_;
const MemoryAllocator::NormalPagesSet& snapshot_normal_pages_set_;
const MemoryAllocator::LargePagesSet& snapshot_large_pages_set_;
};
inline void HandlePromotedObject(
HeapObject object, NonAtomicMarkingState* marking_state,
PretenturingHandler* pretenuring_handler, PtrComprCageBase cage_base,
PretenturingHandler::PretenuringFeedbackMap* local_pretenuring_feedback,
PromotedPageRecordMigratedSlotVisitor* record_visitor) {
DCHECK(marking_state->IsBlack(object));
pretenuring_handler->UpdateAllocationSite(object.map(), object,
local_pretenuring_feedback);
DCHECK(!IsCodeSpaceObject(object));
object.IterateFast(cage_base, record_visitor);
if (object.IsJSArrayBuffer()) {
JSArrayBuffer::cast(object).YoungMarkExtensionPromoted();
}
}
} // namespace
void Sweeper::RawIteratePromotedPageForRememberedSets(
MemoryChunk* chunk,
PretenturingHandler::PretenuringFeedbackMap* local_pretenuring_feedback,
CachedOldToNewRememberedSets* snapshot_old_to_new_remembered_sets) {
DCHECK(chunk->owner_identity() == OLD_SPACE ||
chunk->owner_identity() == LO_SPACE);
DCHECK(!chunk->SweepingDone());
DCHECK(!chunk->IsEvacuationCandidate());
// Iterate over the page using the live objects and free the memory before
// the given live object.
PtrComprCageBase cage_base(heap_->isolate());
PromotedPageRecordMigratedSlotVisitor record_visitor(
heap_, snapshot_old_to_new_remembered_sets, snapshot_normal_pages_set_,
snapshot_large_pages_set_);
DCHECK(!heap_->incremental_marking()->IsMarking());
if (chunk->IsLargePage()) {
HandlePromotedObject(static_cast<LargePage*>(chunk)->GetObject(),
marking_state_, pretenuring_handler_, cage_base,
local_pretenuring_feedback, &record_visitor);
} else {
bool should_make_iterable = heap_->ShouldZapGarbage();
PtrComprCageBase cage_base(chunk->heap()->isolate());
Address free_start = chunk->area_start();
for (auto object_and_size :
LiveObjectRange<kBlackObjects>(chunk, marking_state_->bitmap(chunk))) {
HeapObject object = object_and_size.first;
HandlePromotedObject(object, marking_state_, pretenuring_handler_,
cage_base, local_pretenuring_feedback,
&record_visitor);
Address free_end = object.address();
if (should_make_iterable && (free_end != free_start)) {
CHECK_GT(free_end, free_start);
size_t size = static_cast<size_t>(free_end - free_start);
DCHECK(
heap_->non_atomic_marking_state()
->bitmap(chunk)
->AllBitsClearInRange(chunk->AddressToMarkbitIndex(free_start),
chunk->AddressToMarkbitIndex(free_end)));
ZapCode(free_start, size);
heap_->CreateFillerObjectAtSweeper(free_start, static_cast<int>(size));
}
Map map = object.map(cage_base, kAcquireLoad);
int size = object.SizeFromMap(map);
free_start = free_end + size;
}
if (should_make_iterable && (free_start != chunk->area_end())) {
CHECK_GT(chunk->area_end(), free_start);
size_t size = static_cast<size_t>(chunk->area_end() - free_start);
DCHECK(
heap_->non_atomic_marking_state()->bitmap(chunk)->AllBitsClearInRange(
chunk->AddressToMarkbitIndex(free_start),
chunk->AddressToMarkbitIndex(chunk->area_end())));
ZapCode(free_start, size);
heap_->CreateFillerObjectAtSweeper(free_start, static_cast<int>(size));
}
}
marking_state_->ClearLiveness(chunk);
chunk->set_concurrent_sweeping_state(Page::ConcurrentSweepingState::kDone);
}
void Sweeper::WaitForPromotedPagesIteration() {
if (!sweeping_in_progress()) return;
if (iterated_promoted_pages_count_ ==
base::AsAtomicPtr(&promoted_pages_for_iteration_count_)
->load(std::memory_order_relaxed))
return;
base::MutexGuard guard(&promoted_pages_iteration_notification_mutex_);
// Check again that iteration is not yet finished.
if (iterated_promoted_pages_count_ ==
base::AsAtomicPtr(&promoted_pages_for_iteration_count_)
->load(std::memory_order_relaxed))
return;
promoted_pages_iteration_notification_variable_.Wait(
&promoted_pages_iteration_notification_mutex_);
}
void Sweeper::NotifyPromotedPagesIterationFinished() {
DCHECK_EQ(iterated_promoted_pages_count_,
promoted_pages_for_iteration_count_);
base::MutexGuard guard(&promoted_pages_iteration_notification_mutex_);
promoted_pages_iteration_notification_variable_.NotifyAll();
}
size_t Sweeper::ConcurrentSweepingPageCount() {
base::MutexGuard guard(&mutex_);
size_t count = 0;
base::MutexGuard promoted_pages_guard(&promoted_pages_iteration_mutex_);
size_t count = sweeping_list_for_promoted_page_iteration_.size();
for (int i = 0; i < kNumberOfSweepingSpaces; i++) {
count += sweeping_list_[i].size();
}
@ -606,6 +885,34 @@ int Sweeper::ParallelSweepPage(
return max_freed;
}
void Sweeper::ParallelIteratePromotedPagesForRememberedSets() {
MemoryChunk* chunk = nullptr;
while ((chunk = GetPromotedPageForIterationSafe()) != nullptr) {
ParallelIteratePromotedPageForRememberedSets(
chunk, &local_pretenuring_feedback_,
&snapshot_old_to_new_remembered_sets_);
}
}
void Sweeper::ParallelIteratePromotedPageForRememberedSets(
MemoryChunk* chunk,
PretenturingHandler::PretenuringFeedbackMap* local_pretenuring_feedback,
CachedOldToNewRememberedSets* snapshot_old_to_new_remembered_sets) {
DCHECK_NOT_NULL(chunk);
base::MutexGuard guard(chunk->mutex());
DCHECK(!chunk->SweepingDone());
DCHECK_EQ(Page::ConcurrentSweepingState::kPending,
chunk->concurrent_sweeping_state());
chunk->set_concurrent_sweeping_state(
Page::ConcurrentSweepingState::kInProgress);
RawIteratePromotedPageForRememberedSets(chunk, local_pretenuring_feedback,
snapshot_old_to_new_remembered_sets);
DCHECK(chunk->SweepingDone());
if (++iterated_promoted_pages_count_ == promoted_pages_for_iteration_count_) {
NotifyPromotedPagesIterationFinished();
}
}
void Sweeper::EnsurePageIsSwept(Page* page) {
if (!sweeping_in_progress() || page->SweepingDone()) return;
AllocationSpace space = page->owner_identity();
@ -657,6 +964,37 @@ void Sweeper::AddNewSpacePage(Page* page) {
AddPageImpl(NEW_SPACE, page, AddPageMode::REGULAR);
}
void Sweeper::AddPromotedPageForIteration(MemoryChunk* chunk) {
DCHECK(chunk->owner_identity() == OLD_SPACE ||
chunk->owner_identity() == LO_SPACE);
base::MutexGuard guard(&promoted_pages_iteration_mutex_);
DCHECK_IMPLIES(v8_flags.concurrent_sweeping,
!job_handle_ || !job_handle_->IsValid());
DCHECK_GE(chunk->area_size(),
static_cast<size_t>(marking_state_->live_bytes(chunk)));
#if DEBUG
if (!chunk->IsLargePage()) {
static_cast<Page*>(chunk)->ForAllFreeListCategories(
[chunk](FreeListCategory* category) {
DCHECK(!category->is_linked(chunk->owner()->free_list()));
});
}
#endif // DEBUG
DCHECK_EQ(Page::ConcurrentSweepingState::kDone,
chunk->concurrent_sweeping_state());
chunk->set_concurrent_sweeping_state(Page::ConcurrentSweepingState::kPending);
if (heap_->ShouldReduceMemory()) {
// For memory reducing GCs, iterate pages immediately to avoid delaying
// array buffer sweeping.
RawIteratePromotedPageForRememberedSets(
chunk, &local_pretenuring_feedback_,
&snapshot_old_to_new_remembered_sets_);
} else {
sweeping_list_for_promoted_page_iteration_.push_back(chunk);
promoted_pages_for_iteration_count_++;
}
}
void Sweeper::AddPageImpl(AllocationSpace space, Page* page,
Sweeper::AddPageMode mode) {
base::MutexGuard guard(&mutex_);
@ -708,6 +1046,16 @@ Page* Sweeper::GetSweepingPageSafe(AllocationSpace space) {
return page;
}
MemoryChunk* Sweeper::GetPromotedPageForIterationSafe() {
base::MutexGuard guard(&promoted_pages_iteration_mutex_);
MemoryChunk* chunk = nullptr;
if (!sweeping_list_for_promoted_page_iteration_.empty()) {
chunk = sweeping_list_for_promoted_page_iteration_.back();
sweeping_list_for_promoted_page_iteration_.pop_back();
}
return chunk;
}
GCTracer::Scope::ScopeId Sweeper::GetTracingScope(AllocationSpace space,
bool is_joining_thread) {
if (space == NEW_SPACE &&

@ -6,6 +6,7 @@
#define V8_HEAP_SWEEPER_H_
#include <map>
#include <unordered_map>
#include <vector>
#include "src/base/optional.h"
@ -14,6 +15,7 @@
#include "src/common/globals.h"
#include "src/flags/flags.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/pretenuring-handler.h"
#include "src/heap/slot-set.h"
#include "src/tasks/cancelable-task.h"
@ -22,8 +24,10 @@ namespace v8 {
namespace internal {
class InvalidatedSlotsCleanup;
class MemoryChunk;
class NonAtomicMarkingState;
class Page;
class LargePage;
class PagedSpaceBase;
class Space;
@ -33,6 +37,8 @@ class Sweeper {
public:
using SweepingList = std::vector<Page*>;
using SweptList = std::vector<Page*>;
using CachedOldToNewRememberedSets =
std::unordered_map<MemoryChunk*, SlotSet*>;
// Pauses the sweeper tasks.
class V8_NODISCARD PauseScope final {
@ -78,7 +84,7 @@ class Sweeper {
enum AddPageMode { REGULAR, READD_TEMPORARY_REMOVED_PAGE };
enum class SweepingMode { kEagerDuringGC, kLazyOrConcurrent };
Sweeper(Heap* heap);
explicit Sweeper(Heap* heap);
~Sweeper();
bool sweeping_in_progress() const { return sweeping_in_progress_; }
@ -87,6 +93,7 @@ class Sweeper {
void AddPage(AllocationSpace space, Page* page, AddPageMode mode);
void AddNewSpacePage(Page* page);
void AddPromotedPageForIteration(MemoryChunk* chunk);
int ParallelSweepSpace(AllocationSpace identity, SweepingMode sweeping_mode,
int required_freed_bytes, int max_pages = 0);
@ -102,6 +109,16 @@ class Sweeper {
SweepingMode sweeping_mode, const base::MutexGuard& page_guard,
PretenturingHandler::PretenuringFeedbackMap* local_pretenuring_feedback);
void ParallelIteratePromotedPagesForRememberedSets();
void ParallelIteratePromotedPageForRememberedSets(
MemoryChunk* chunk,
PretenturingHandler::PretenuringFeedbackMap* local_pretenuring_feedback,
CachedOldToNewRememberedSets* snapshot_old_to_new_remembered_sets);
void RawIteratePromotedPageForRememberedSets(
MemoryChunk* chunk,
PretenturingHandler::PretenuringFeedbackMap* local_pretenuring_feedback,
CachedOldToNewRememberedSets* snapshot_old_to_new_remembered_sets);
// After calling this function sweeping is considered to be in progress
// and the main thread can sweep lazily, but the background sweeper tasks
// are not running yet.
@ -120,11 +137,15 @@ class Sweeper {
bool is_joining_thread);
GCTracer::Scope::ScopeId GetTracingScopeForCompleteYoungSweep();
void WaitForPromotedPagesIteration();
private:
NonAtomicMarkingState* marking_state() const { return marking_state_; }
void AddPageImpl(AllocationSpace space, Page* page, AddPageMode mode);
void MergePretenuringFeedbackAndRememberedSets();
class ConcurrentSweeper;
class SweeperJob;
@ -180,6 +201,7 @@ class Sweeper {
size_t ConcurrentSweepingPageCount();
Page* GetSweepingPageSafe(AllocationSpace space);
MemoryChunk* GetPromotedPageForIterationSafe();
bool TryRemoveSweepingPageSafe(AllocationSpace space, Page* page);
void PrepareToBeSweptPage(AllocationSpace space, Page* page);
@ -195,13 +217,17 @@ class Sweeper {
int NumberOfConcurrentSweepers() const;
void NotifyPromotedPagesIterationFinished();
Heap* const heap_;
NonAtomicMarkingState* const marking_state_;
std::unique_ptr<JobHandle> job_handle_;
base::Mutex mutex_;
base::Mutex promoted_pages_iteration_mutex_;
base::ConditionVariable cv_page_swept_;
SweptList swept_list_[kNumberOfSweepingSpaces];
SweepingList sweeping_list_[kNumberOfSweepingSpaces];
std::vector<MemoryChunk*> sweeping_list_for_promoted_page_iteration_;
std::vector<ConcurrentSweeper> concurrent_sweepers_;
// The main thread can finalize sweeping, while the background threads'
// allocation slow path checks this flag to see whether it could support
// concurrent sweeping.
@ -211,6 +237,16 @@ class Sweeper {
PretenturingHandler* const pretenuring_handler_;
PretenturingHandler::PretenuringFeedbackMap local_pretenuring_feedback_;
base::Optional<GarbageCollector> current_new_space_collector_;
CachedOldToNewRememberedSets snapshot_old_to_new_remembered_sets_;
// The following fields are used for maintaining an order between iterating
// promoted pages and sweeping array buffer extensions.
size_t promoted_pages_for_iteration_count_ = 0;
std::atomic<size_t> iterated_promoted_pages_count_{0};
base::Mutex promoted_pages_iteration_notification_mutex_;
base::ConditionVariable promoted_pages_iteration_notification_variable_;
MemoryAllocator::NormalPagesSet snapshot_normal_pages_set_;
MemoryAllocator::LargePagesSet snapshot_large_pages_set_;
};
} // namespace internal

@ -14,6 +14,7 @@
#include "src/common/assert-scope.h"
#include "src/common/globals.h"
#include "src/execution/isolate-utils.h"
#include "src/logging/log.h"
#include "src/objects/objects-inl.h"
#include "src/objects/string-inl.h"
#include "src/strings/string-stream.h"

@ -9,6 +9,7 @@
#include "src/execution/v8threads.h"
#include "src/handles/handles-inl.h"
#include "src/heap/paged-spaces-inl.h"
#include "src/logging/log.h"
#include "src/objects/oddball.h"
#include "src/roots/roots-inl.h"

@ -398,6 +398,7 @@ TEST_F(HeapTest, RememberedSet_InsertOnPromotingObjectToOld) {
// Promote 'arr' into old, its element is still in new, the old to new
// refs are inserted into the remembered sets during GC.
CollectGarbage(i::NEW_SPACE);
heap->EnsureSweepingCompleted(Heap::SweepingForcedFinalizationMode::kV8Only);
CHECK(heap->InOldSpace(*arr));
CHECK(heap->InYoungGeneration(arr->get(0)));

@ -4,7 +4,7 @@ tools/gcmole/gcmole-test.cc:30:10: warning: Possibly stale variable due to GCs.
tools/gcmole/gcmole-test.cc:28:20: note: Call might cause unexpected GC.
isolate->heap()->CollectGarbage(OLD_SPACE, GarbageCollectionReason::kTesting);
^
./src/heap/heap.h:983:21: note: GC call here.
./src/heap/heap.h:981:21: note: GC call here.
V8_EXPORT_PRIVATE bool CollectGarbage(
^
tools/gcmole/gcmole-test.cc:48:3: warning: Possible problem with evaluation order with interleaved GCs.