[heap] Sweep code pages on the background thread

We already make code pages writable & executable for concurrent
Sparkplug. We can use the same mechanism to sweep code pages on the
background thread, instead of scheduling incremental sweeping tasks on
the main thread. This lets us remove almost all special handling for
code pages in the sweeper and off-load more work from the main thread.

Bug: v8:12967
Change-Id: Idb8e9f8e2eadbec26a386f2de683a80087f671f3
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3695557
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/main@{#81139}
Author: Dominik Inführ, 2022-06-14 12:35:03 +02:00 (committed by V8 LUCI CQ)
parent 051b704a22
commit 8325f86df3
5 changed files with 20 additions and 109 deletions
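
The enabling change is visible in Sweeper::RawSweep below: when a CODE_SPACE
page is swept off the main thread, the sweeper opens a
CodePageMemoryModificationScope for the page — the same RAII mechanism
concurrent Sparkplug uses to make a code page writable and restore its
permissions afterwards. A minimal self-contained sketch of that pattern
follows; the types here (Space, Page, CodePageWriteScope, RawSweepSketch) are
illustrative stand-ins, not V8's implementation.

#include <optional>

// Stand-ins modeling the V8 types involved; not the real implementation.
enum class Space { kCode, kOld };

struct Page {
  Space space;
  void SetWritable(bool writable) {
    // The real scope flips page protection (e.g. RW(X) vs RX via mprotect).
  }
};

// Modeled on V8's CodePageMemoryModificationScope: makes the page writable
// on construction and restores execute-only permissions on destruction.
class CodePageWriteScope {
 public:
  explicit CodePageWriteScope(Page* page) : page_(page) {
    page_->SetWritable(true);
  }
  ~CodePageWriteScope() { page_->SetWritable(false); }

 private:
  Page* page_;
};

void RawSweepSketch(Page* page) {
  // Only code pages need the permission flip; other pages are swept as-is.
  std::optional<CodePageWriteScope> write_scope;
  if (page->space == Space::kCode) write_scope.emplace(page);
  // ... sweep the page: walk live objects, free dead ranges ...
}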

File: src/heap/code-object-registry.cc

@@ -20,26 +20,19 @@ void CodeObjectRegistry::RegisterNewlyAllocatedCodeObject(Address code) {
   code_object_registry_.push_back(code);
 }
 
-void CodeObjectRegistry::RegisterAlreadyExistingCodeObject(Address code) {
-  // This function is not protected by the mutex, and should only be called
-  // by the sweeper.
-  DCHECK(is_sorted_);
-  DCHECK(code_object_registry_.empty() || code_object_registry_.back() < code);
-  code_object_registry_.push_back(code);
-}
-
-void CodeObjectRegistry::Clear() {
-  // This function is not protected by the mutex, and should only be called
-  // by the sweeper.
-  code_object_registry_.clear();
-  is_sorted_ = true;
-}
-
-void CodeObjectRegistry::Finalize() {
-  // This function is not protected by the mutex, and should only be called
-  // by the sweeper.
-  DCHECK(is_sorted_);
-  code_object_registry_.shrink_to_fit();
+void CodeObjectRegistry::ReinitializeFrom(std::vector<Address>&& code_objects) {
+  base::MutexGuard guard(&code_object_registry_mutex_);
+
+#if DEBUG
+  Address last_start = kNullAddress;
+  for (Address object_start : code_objects) {
+    DCHECK_LT(last_start, object_start);
+    last_start = object_start;
+  }
+#endif  // DEBUG
+
+  code_object_registry_ = std::move(code_objects);
 }
 
 bool CodeObjectRegistry::Contains(Address object) const {
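
The removed Clear() / RegisterAlreadyExistingCodeObject() / Finalize() trio
mutated the registry in place without locking, which was only safe while the
main thread swept. For background sweeping, the sweeper instead collects the
live code objects' start addresses into a local vector and swaps it in under
the mutex in a single call. A minimal self-contained model of that protocol
(RegistryModel and its members are stand-ins, not V8's types):

#include <cassert>
#include <cstdint>
#include <mutex>
#include <utility>
#include <vector>

using Address = uintptr_t;

class RegistryModel {
 public:
  // One lock acquisition replaces the registry wholesale. The input must be
  // sorted ascending, which holds for free: the sweeper visits live objects
  // in address order while walking the page.
  void ReinitializeFrom(std::vector<Address>&& code_objects) {
    std::lock_guard<std::mutex> guard(mutex_);
#ifndef NDEBUG
    for (size_t i = 1; i < code_objects.size(); i++) {
      assert(code_objects[i - 1] < code_objects[i]);
    }
#endif
    registry_ = std::move(code_objects);
  }

 private:
  std::mutex mutex_;
  std::vector<Address> registry_;
};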

File: src/heap/code-object-registry.h

@@ -21,9 +21,7 @@ namespace internal {
 class V8_EXPORT_PRIVATE CodeObjectRegistry {
  public:
   void RegisterNewlyAllocatedCodeObject(Address code);
-  void RegisterAlreadyExistingCodeObject(Address code);
-  void Clear();
-  void Finalize();
+  void ReinitializeFrom(std::vector<Address>&& code_objects);
   bool Contains(Address code) const;
   Address GetCodeObjectStartFromInnerAddress(Address address) const;

File: src/heap/sweeper.cc

@@ -22,7 +22,6 @@ namespace internal {
 Sweeper::Sweeper(Heap* heap, MajorNonAtomicMarkingState* marking_state)
     : heap_(heap),
       marking_state_(marking_state),
-      incremental_sweeper_pending_(false),
       sweeping_in_progress_(false),
       should_reduce_memory_(false) {}
@@ -101,8 +100,6 @@ class Sweeper::SweeperJob final : public JobTask {
       const AllocationSpace space_id = static_cast<AllocationSpace>(
           FIRST_GROWABLE_PAGED_SPACE +
           ((i + offset) % kNumberOfSweepingSpaces));
-      // Do not sweep code space concurrently.
-      if (space_id == CODE_SPACE) continue;
       DCHECK(IsValidSweepingSpace(space_id));
       if (!sweeper_->ConcurrentSweepSpace(space_id, delegate)) return;
     }
@@ -111,33 +108,6 @@ class Sweeper::SweeperJob final : public JobTask {
   GCTracer* const tracer_;
 };
 
-class Sweeper::IncrementalSweeperTask final : public CancelableTask {
- public:
-  IncrementalSweeperTask(Isolate* isolate, Sweeper* sweeper)
-      : CancelableTask(isolate), isolate_(isolate), sweeper_(sweeper) {}
-
-  ~IncrementalSweeperTask() override = default;
-
-  IncrementalSweeperTask(const IncrementalSweeperTask&) = delete;
-  IncrementalSweeperTask& operator=(const IncrementalSweeperTask&) = delete;
-
- private:
-  void RunInternal() final {
-    VMState<GC> state(isolate_);
-    TRACE_EVENT_CALL_STATS_SCOPED(isolate_, "v8", "V8.Task");
-    sweeper_->incremental_sweeper_pending_ = false;
-    if (sweeper_->sweeping_in_progress()) {
-      if (!sweeper_->IncrementalSweepSpace(CODE_SPACE)) {
-        sweeper_->ScheduleIncrementalSweepingTask();
-      }
-    }
-  }
-
-  Isolate* const isolate_;
-  Sweeper* const sweeper_;
-};
-
 void Sweeper::TearDown() {
   if (job_handle_ && job_handle_->IsValid()) job_handle_->Cancel();
 }
@@ -171,7 +141,6 @@ void Sweeper::StartSweeperTasks() {
     job_handle_ = V8::GetCurrentPlatform()->PostJob(
         TaskPriority::kUserVisible,
         std::make_unique<SweeperJob>(heap_->isolate(), this));
-    ScheduleIncrementalSweepingTask();
   }
 }
@@ -318,6 +287,8 @@ int Sweeper::RawSweep(Page* p, FreeSpaceTreatmentMode free_space_treatment_mode,
   DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
 
   // Phase 1: Prepare the page for sweeping.
+  base::Optional<CodePageMemoryModificationScope> write_scope;
+  if (space->identity() == CODE_SPACE) write_scope.emplace(p);
 
   // Set the allocated_bytes_ counter to area_size and clear the wasted_memory_
   // counter. The free operations below will decrease allocated_bytes_ to actual
@@ -325,7 +296,7 @@ int Sweeper::RawSweep(Page* p, FreeSpaceTreatmentMode free_space_treatment_mode,
   p->ResetAllocationStatistics();
 
   CodeObjectRegistry* code_object_registry = p->GetCodeObjectRegistry();
-  if (code_object_registry) code_object_registry->Clear();
+  std::vector<Address> code_objects;
 
   base::Optional<ActiveSystemPages> active_system_pages_after_sweeping;
   if (should_reduce_memory_) {
@@ -375,8 +346,7 @@ int Sweeper::RawSweep(Page* p, FreeSpaceTreatmentMode free_space_treatment_mode,
   for (auto object_and_size :
        LiveObjectRange<kBlackObjects>(p, marking_state_->bitmap(p))) {
     HeapObject const object = object_and_size.first;
-    if (code_object_registry)
-      code_object_registry->RegisterAlreadyExistingCodeObject(object.address());
+    if (code_object_registry) code_objects.push_back(object.address());
     DCHECK(marking_state_->IsBlack(object));
     Address free_end = object.address();
     if (free_end != free_start) {
@@ -430,7 +400,8 @@ int Sweeper::RawSweep(Page* p, FreeSpaceTreatmentMode free_space_treatment_mode,
         *active_system_pages_after_sweeping);
   }
 
-  if (code_object_registry) code_object_registry->Finalize();
+  if (code_object_registry)
+    code_object_registry->ReinitializeFrom(std::move(code_objects));
   p->set_concurrent_sweeping_state(Page::ConcurrentSweepingState::kDone);
 
   return static_cast<int>(
@@ -457,18 +428,6 @@ bool Sweeper::ConcurrentSweepSpace(AllocationSpace identity,
   return false;
 }
 
-bool Sweeper::IncrementalSweepSpace(AllocationSpace identity) {
-  TRACE_GC_EPOCH(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL_SWEEPING,
-                 ThreadKind::kMain);
-  const double start = heap_->MonotonicallyIncreasingTimeInMs();
-  if (Page* page = GetSweepingPageSafe(identity)) {
-    ParallelSweepPage(page, identity, SweepingMode::kLazyOrConcurrent);
-  }
-  const double duration = heap_->MonotonicallyIncreasingTimeInMs() - start;
-  heap_->tracer()->AddIncrementalSweepingStep(duration);
-  return sweeping_list_[GetSweepSpaceIndex(identity)].empty();
-}
-
 int Sweeper::ParallelSweepSpace(AllocationSpace identity,
                                 SweepingMode sweeping_mode,
                                 int required_freed_bytes, int max_pages) {
@@ -560,17 +519,6 @@ bool Sweeper::TryRemoveSweepingPageSafe(AllocationSpace space, Page* page) {
   return true;
 }
 
-void Sweeper::ScheduleIncrementalSweepingTask() {
-  if (!incremental_sweeper_pending_) {
-    incremental_sweeper_pending_ = true;
-    v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap_->isolate());
-    auto taskrunner =
-        V8::GetCurrentPlatform()->GetForegroundTaskRunner(isolate);
-    taskrunner->PostTask(
-        std::make_unique<IncrementalSweeperTask>(heap_->isolate(), this));
-  }
-}
-
 void Sweeper::AddPage(AllocationSpace space, Page* page,
                       Sweeper::AddPageMode mode) {
   base::MutexGuard guard(&mutex_);
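
With IncrementalSweeperTask and ScheduleIncrementalSweepingTask gone, all
sweeping — code space included — runs through the background job posted in
StartSweeperTasks above. For context, a sketch of how such a job plugs into
the public v8::Platform job API; SweeperJobSketch and SweepOnePage are
hypothetical stand-ins for V8's internal SweeperJob, while v8::JobTask,
v8::JobDelegate, and Platform::PostJob are the real embedder-facing types.

#include <memory>
#include "v8-platform.h"  // v8::JobTask, v8::JobDelegate, v8::TaskPriority

class SweeperJobSketch : public v8::JobTask {
 public:
  void Run(v8::JobDelegate* delegate) override {
    // Sweep pages until asked to yield or no work remains.
    while (!delegate->ShouldYield()) {
      if (!SweepOnePage()) return;  // no more pages to sweep
    }
  }
  size_t GetMaxConcurrency(size_t worker_count) const override {
    return 4;  // the real sweeper bounds this by the remaining page count
  }

 private:
  bool SweepOnePage() { return false; }  // stand-in for the per-page work
};

// Usage (assuming a v8::Platform* platform is available):
//   std::unique_ptr<v8::JobHandle> handle = platform->PostJob(
//       v8::TaskPriority::kUserVisible, std::make_unique<SweeperJobSketch>());
//   ... later: handle->Cancel();  // as in Sweeper::TearDown above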

File: src/heap/sweeper.h

@@ -89,8 +89,6 @@ class Sweeper {
   void EnsurePageIsSwept(Page* page);
 
-  void ScheduleIncrementalSweepingTask();
-
   int RawSweep(Page* p, FreeSpaceTreatmentMode free_space_treatment_mode,
                SweepingMode sweeping_mode, const base::MutexGuard& page_guard);
@@ -109,7 +107,6 @@ class Sweeper {
   Page* GetSweptPageSafe(PagedSpaceBase* space);
 
  private:
-  class IncrementalSweeperTask;
   class SweeperJob;
 
   static const int kNumberOfSweepingSpaces =
@@ -164,10 +161,6 @@ class Sweeper {
   // are no more pages to sweep in the given space.
   bool ConcurrentSweepSpace(AllocationSpace identity, JobDelegate* delegate);
 
-  // Sweeps incrementally one page from the given space. Returns true if
-  // there are no more pages to sweep in the given space.
-  bool IncrementalSweepSpace(AllocationSpace identity);
-
   Page* GetSweepingPageSafe(AllocationSpace space);
   bool TryRemoveSweepingPageSafe(AllocationSpace space, Page* page);
@@ -190,7 +183,6 @@ class Sweeper {
   base::ConditionVariable cv_page_swept_;
   SweptList swept_list_[kNumberOfSweepingSpaces];
   SweepingList sweeping_list_[kNumberOfSweepingSpaces];
-  bool incremental_sweeper_pending_;
   // Main thread can finalize sweeping, while background threads allocation slow
   // path checks this flag to see whether it could support concurrent sweeping.
   std::atomic<bool> sweeping_in_progress_;

File: test/unittests/heap/code-object-registry-unittest.cc

@@ -14,7 +14,7 @@ TEST(CodeObjectRegistry, RegisterAlreadyExistingObjectsAndContains) {
   const int elements = 10;
   const int offset = 100;
   for (int i = 0; i < elements; i++) {
-    registry.RegisterAlreadyExistingCodeObject(i * offset);
+    registry.RegisterNewlyAllocatedCodeObject(i * offset);
   }
 
   for (int i = 0; i < elements; i++) {
@@ -41,7 +41,7 @@ TEST(CodeObjectRegistry, FindAlreadyExistingObjects) {
   const int offset = 100;
   const int inner = 2;
   for (int i = 1; i <= elements; i++) {
-    registry.RegisterAlreadyExistingCodeObject(i * offset);
+    registry.RegisterNewlyAllocatedCodeObject(i * offset);
   }
 
   for (int i = 1; i <= elements; i++) {
@@ -69,25 +69,5 @@ TEST(CodeObjectRegistry, FindNewlyAllocatedObjects) {
   }
 }
 
-TEST(CodeObjectRegistry, FindAlternatingObjects) {
-  CodeObjectRegistry registry;
-  const int elements = 10;
-  const int offset = 100;
-  const int inner = 2;
-  for (int i = 1; i <= elements; i++) {
-    if (i % 2 == 0) {
-      registry.RegisterAlreadyExistingCodeObject(i * offset);
-    } else {
-      registry.RegisterNewlyAllocatedCodeObject(i * offset);
-    }
-  }
-  for (int i = 1; i <= elements; i++) {
-    for (int j = 0; j < inner; j++) {
-      CHECK_EQ(registry.GetCodeObjectStartFromInnerAddress(i * offset + j),
-               i * offset);
-    }
-  }
-}
-
 }  // namespace internal
 }  // namespace v8