[Heap]: Convert MemoryAllocator::Unmapper to using Jobs

GetMaxConcurrency() uses the queue size to request enough worker threads to drain it.

Change-Id: I1f091da91928c196813630fbabf54f7e68f87ead
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2480942
Commit-Queue: Etienne Pierre-Doray <etiennep@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#70711}
Author: Etienne Pierre-doray <etiennep@chromium.org>
Date: 2020-10-22 10:29:58 -04:00 (committed by Commit Bot)
Commit: 6f73c6c816 (parent: ba2d001989)

2 changed files with 40 additions and 65 deletions
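
As the message says, the new UnmapFreeMemoryJob::GetMaxConcurrency() (first hunk
below) sizes the worker pool from the queue: one worker per kTaskPerChunk
committed chunks, rounded up, on top of the workers already running, capped at
kMaxUnmapperTasks. A standalone sketch of that arithmetic, not V8 code; the cap
value 4 is an assumption, since kMaxUnmapperTasks is declared in an unchanged
part of the header:

#include <algorithm>
#include <cstddef>
#include <iostream>

// Mirrors the heuristic in UnmapFreeMemoryJob::GetMaxConcurrency().
std::size_t MaxConcurrency(std::size_t worker_count,
                           std::size_t committed_chunks) {
  const std::size_t kTaskPerChunk = 8;      // aim for ~8 queued chunks per worker
  const std::size_t kMaxUnmapperTasks = 4;  // assumed cap on unmapper workers
  return std::min<std::size_t>(
      kMaxUnmapperTasks,
      worker_count + (committed_chunks + kTaskPerChunk - 1) / kTaskPerChunk);
}

int main() {
  std::cout << MaxConcurrency(0, 17) << "\n";  // ceil(17/8) = 3
  std::cout << MaxConcurrency(2, 64) << "\n";  // 2 + 8 = 10, capped at 4
}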

src/heap/memory-allocator.cc

@@ -154,68 +154,55 @@ void MemoryAllocator::TearDown() {
   data_page_allocator_ = nullptr;
 }
 
-class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {
+class MemoryAllocator::Unmapper::UnmapFreeMemoryJob : public JobTask {
  public:
-  explicit UnmapFreeMemoryTask(Isolate* isolate, Unmapper* unmapper)
-      : CancelableTask(isolate),
-        unmapper_(unmapper),
-        tracer_(isolate->heap()->tracer()) {}
+  explicit UnmapFreeMemoryJob(Isolate* isolate, Unmapper* unmapper)
+      : unmapper_(unmapper), tracer_(isolate->heap()->tracer()) {}
 
- private:
-  void RunInternal() override {
+  void Run(JobDelegate* delegate) override {
     TRACE_BACKGROUND_GC(tracer_,
                         GCTracer::BackgroundScope::BACKGROUND_UNMAPPER);
-    unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
-    unmapper_->active_unmapping_tasks_--;
-    unmapper_->pending_unmapping_tasks_semaphore_.Signal();
+    unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>(
+        delegate);
     if (FLAG_trace_unmapper) {
-      PrintIsolate(unmapper_->heap_->isolate(),
-                   "UnmapFreeMemoryTask Done: id=%" PRIu64 "\n", id());
+      PrintIsolate(unmapper_->heap_->isolate(), "UnmapFreeMemoryTask Done\n");
     }
   }
 
+  size_t GetMaxConcurrency(size_t worker_count) const override {
+    const size_t kTaskPerChunk = 8;
+    return std::min<size_t>(
+        kMaxUnmapperTasks,
+        worker_count +
+            (unmapper_->NumberOfCommittedChunks() + kTaskPerChunk - 1) /
+                kTaskPerChunk);
+  }
+
+ private:
   Unmapper* const unmapper_;
   GCTracer* const tracer_;
-  DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
+  DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryJob);
 };
 
 void MemoryAllocator::Unmapper::FreeQueuedChunks() {
   if (!heap_->IsTearingDown() && FLAG_concurrent_sweeping) {
-    if (!MakeRoomForNewTasks()) {
-      // kMaxUnmapperTasks are already running. Avoid creating any more.
+    if (job_handle_ && job_handle_->IsValid()) {
+      job_handle_->NotifyConcurrencyIncrease();
+    } else {
+      job_handle_ = V8::GetCurrentPlatform()->PostJob(
+          TaskPriority::kUserVisible,
+          std::make_unique<UnmapFreeMemoryJob>(heap_->isolate(), this));
       if (FLAG_trace_unmapper) {
-        PrintIsolate(heap_->isolate(),
-                     "Unmapper::FreeQueuedChunks: reached task limit (%d)\n",
-                     kMaxUnmapperTasks);
+        PrintIsolate(heap_->isolate(), "Unmapper::FreeQueuedChunks: new Job\n");
       }
-      return;
     }
-
-    auto task = std::make_unique<UnmapFreeMemoryTask>(heap_->isolate(), this);
-    if (FLAG_trace_unmapper) {
-      PrintIsolate(heap_->isolate(),
-                   "Unmapper::FreeQueuedChunks: new task id=%" PRIu64 "\n",
-                   task->id());
-    }
-    DCHECK_LT(pending_unmapping_tasks_, kMaxUnmapperTasks);
-    DCHECK_LE(active_unmapping_tasks_, pending_unmapping_tasks_);
-    DCHECK_GE(active_unmapping_tasks_, 0);
-    active_unmapping_tasks_++;
-    task_ids_[pending_unmapping_tasks_++] = task->id();
-    V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
   } else {
     PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
   }
 }
 
 void MemoryAllocator::Unmapper::CancelAndWaitForPendingTasks() {
-  for (int i = 0; i < pending_unmapping_tasks_; i++) {
-    if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
-        TryAbortResult::kTaskAborted) {
-      pending_unmapping_tasks_semaphore_.Wait();
-    }
-  }
-  pending_unmapping_tasks_ = 0;
-  active_unmapping_tasks_ = 0;
+  if (job_handle_ && job_handle_->IsValid()) job_handle_->Join();
 
   if (FLAG_trace_unmapper) {
     PrintIsolate(
@@ -234,26 +221,18 @@ void MemoryAllocator::Unmapper::EnsureUnmappingCompleted() {
   PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
 }
 
-bool MemoryAllocator::Unmapper::MakeRoomForNewTasks() {
-  DCHECK_LE(pending_unmapping_tasks_, kMaxUnmapperTasks);
-
-  if (active_unmapping_tasks_ == 0 && pending_unmapping_tasks_ > 0) {
-    // All previous unmapping tasks have been run to completion.
-    // Finalize those tasks to make room for new ones.
-    CancelAndWaitForPendingTasks();
-  }
-  return pending_unmapping_tasks_ != kMaxUnmapperTasks;
-}
-
-void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedNonRegularChunks() {
+void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedNonRegularChunks(
+    JobDelegate* delegate) {
   MemoryChunk* chunk = nullptr;
   while ((chunk = GetMemoryChunkSafe<kNonRegular>()) != nullptr) {
     allocator_->PerformFreeMemory(chunk);
+    if (delegate && delegate->ShouldYield()) return;
   }
 }
 
 template <MemoryAllocator::Unmapper::FreeMode mode>
-void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
+void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks(
+    JobDelegate* delegate) {
   MemoryChunk* chunk = nullptr;
   if (FLAG_trace_unmapper) {
     PrintIsolate(
@@ -266,6 +245,7 @@ void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
     bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
     allocator_->PerformFreeMemory(chunk);
     if (pooled) AddMemoryChunkSafe<kPooled>(chunk);
+    if (delegate && delegate->ShouldYield()) return;
   }
   if (mode == MemoryAllocator::Unmapper::FreeMode::kReleasePooled) {
     // The previous loop uncommitted any pages marked as pooled and added them
@@ -273,13 +253,14 @@ void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
     // though.
     while ((chunk = GetMemoryChunkSafe<kPooled>()) != nullptr) {
       allocator_->Free<MemoryAllocator::kAlreadyPooled>(chunk);
+      if (delegate && delegate->ShouldYield()) return;
     }
   }
   PerformFreeMemoryOnQueuedNonRegularChunks();
 }
 
 void MemoryAllocator::Unmapper::TearDown() {
-  CHECK_EQ(0, pending_unmapping_tasks_);
+  CHECK(!job_handle_ || !job_handle_->IsValid());
   PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
   for (int i = 0; i < kNumberOfChunkQueues; i++) {
     DCHECK(chunks_[i].empty());
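
Throughout these hunks, the drain loops now take an optional JobDelegate and
poll ShouldYield() after each chunk, so the scheduler can take the worker back
between chunks and simply re-run the job later; whatever is still queued gets
picked up on the next invocation. A minimal standalone sketch of that pattern,
with FakeDelegate standing in for v8::JobDelegate:

#include <atomic>
#include <cstdio>
#include <queue>

struct FakeDelegate {
  std::atomic<bool> yield_requested{false};
  bool ShouldYield() const { return yield_requested.load(); }
};

// Frees queued "chunks" one at a time; yields only between chunks, so no
// per-chunk state is lost and a later call resumes where this one stopped.
void DrainQueue(std::queue<int>* chunks, FakeDelegate* delegate) {
  while (!chunks->empty()) {
    chunks->pop();  // stand-in for allocator_->PerformFreeMemory(chunk)
    if (delegate && delegate->ShouldYield()) return;
  }
}

int main() {
  std::queue<int> chunks;
  for (int i = 0; i < 4; i++) chunks.push(i);
  FakeDelegate delegate;
  delegate.yield_requested = true;  // scheduler wants the thread back
  DrainQueue(&chunks, &delegate);   // frees one chunk, then returns
  std::printf("left after yield: %zu\n", chunks.size());   // 3
  delegate.yield_requested = false;
  DrainQueue(&chunks, &delegate);   // resumes and drains the rest
  std::printf("left after resume: %zu\n", chunks.size());  // 0
}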

src/heap/memory-allocator.h

@@ -61,14 +61,10 @@ class MemoryAllocator {
   // chunks.
   class Unmapper {
    public:
-    class UnmapFreeMemoryTask;
+    class UnmapFreeMemoryJob;
 
     Unmapper(Heap* heap, MemoryAllocator* allocator)
-        : heap_(heap),
-          allocator_(allocator),
-          pending_unmapping_tasks_semaphore_(0),
-          pending_unmapping_tasks_(0),
-          active_unmapping_tasks_(0) {
+        : heap_(heap), allocator_(allocator) {
       chunks_[kRegular].reserve(kReservedQueueingSlots);
       chunks_[kPooled].reserve(kReservedQueueingSlots);
     }
@@ -142,18 +138,16 @@ class MemoryAllocator {
     bool MakeRoomForNewTasks();
 
     template <FreeMode mode>
-    void PerformFreeMemoryOnQueuedChunks();
+    void PerformFreeMemoryOnQueuedChunks(JobDelegate* delegate = nullptr);
 
-    void PerformFreeMemoryOnQueuedNonRegularChunks();
+    void PerformFreeMemoryOnQueuedNonRegularChunks(
+        JobDelegate* delegate = nullptr);
 
     Heap* const heap_;
     MemoryAllocator* const allocator_;
 
     base::Mutex mutex_;
     std::vector<MemoryChunk*> chunks_[kNumberOfChunkQueues];
-    CancelableTaskManager::Id task_ids_[kMaxUnmapperTasks];
-    base::Semaphore pending_unmapping_tasks_semaphore_;
-    intptr_t pending_unmapping_tasks_;
-    std::atomic<intptr_t> active_unmapping_tasks_;
+    std::unique_ptr<v8::JobHandle> job_handle_;
 
     friend class MemoryAllocator;
   };
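
In the header, a single std::unique_ptr<v8::JobHandle> replaces the semaphore,
the task-id array, and both task counters: posting the job, bumping its
concurrency, and joining it subsumes all of that bookkeeping. A sketch of that
handle lifecycle against the public v8-platform.h Jobs API; PostJob,
NotifyConcurrencyIncrease, Join, and ShouldYield are the real entry points,
while DrainJob and its workload are illustrative:

#include <algorithm>
#include <atomic>
#include <memory>

#include "libplatform/libplatform.h"
#include "v8-platform.h"

class DrainJob : public v8::JobTask {
 public:
  explicit DrainJob(std::atomic<int>* pending) : pending_(pending) {}

  void Run(v8::JobDelegate* delegate) override {
    int old = pending_->load();
    // Claim one work item at a time; stop when empty or asked to yield.
    while (old > 0 && !delegate->ShouldYield()) {
      if (pending_->compare_exchange_weak(old, old - 1)) {
        // ... process one item here ...
        old = pending_->load();
      }
    }
  }

  size_t GetMaxConcurrency(size_t /*worker_count*/) const override {
    // As in the unmapper, the queue size drives the worker count.
    return static_cast<size_t>(std::max(0, pending_->load()));
  }

 private:
  std::atomic<int>* pending_;
};

int main() {
  std::unique_ptr<v8::Platform> platform = v8::platform::NewDefaultPlatform();
  std::atomic<int> pending{100};
  std::unique_ptr<v8::JobHandle> handle = platform->PostJob(
      v8::TaskPriority::kUserVisible, std::make_unique<DrainJob>(&pending));
  pending += 50;                        // more work arrives...
  handle->NotifyConcurrencyIncrease();  // ...so request more workers
  handle->Join();                       // like TearDown: wait until drained
}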