[heap] Remove delayed chunks in Unmapper
The dependency between restoring iterability and the Scavenger is explicit. Delayed chunks are thus not needed anymore.

Bug: chromium:791043
Change-Id: I9f2c95c1856f53299af2737f922a3cb4cc578aa5
Reviewed-on: https://chromium-review.googlesource.com/805816
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#49855}
This commit is contained in:
parent faad1c19d9
commit e5a1993bff
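For context, the hunks below tear out a small deferral mechanism: chunks that could not be unmapped yet (new-space chunks while a sweeper task might still hold them in its queue) were parked in delayed_regular_chunks_ and re-queued on the next unmapping cycle. The following is a minimal standalone sketch of that pattern, assembled from the deleted code — std::mutex/std::lock_guard stand in for base::Mutex/base::LockGuard, and the sweeping flag is a placeholder for the real sweeper query, so treat it as an illustration rather than the actual V8 implementation:

#include <list>
#include <mutex>
#include <vector>

struct MemoryChunk {};

class Unmapper {
 public:
  explicit Unmapper(const bool* sweeping_in_progress)
      : sweeping_in_progress_(sweeping_in_progress) {}

  // Stand-in for MemoryAllocator::CanFreeMemoryChunk(): a new-space chunk
  // must not be freed while the sweeper is still running, because a
  // sweeper task may have the chunk in its work queue.
  bool CanFreeChunk(MemoryChunk* /*chunk*/) const {
    return !*sweeping_in_progress_;
  }

  // Mirrors the deleted branch in AddMemoryChunkSafe(): chunks that cannot
  // be freed yet are parked instead of queued for unmapping.
  void AddMemoryChunkSafe(MemoryChunk* chunk) {
    std::lock_guard<std::mutex> guard(mutex_);
    if (CanFreeChunk(chunk)) {
      chunks_.push_back(chunk);
    } else {
      delayed_regular_chunks_.push_back(chunk);
    }
  }

  // Mirrors the deleted ReconsiderDelayedChunks(): each unmapping cycle
  // re-runs the eligibility check on everything that was parked.
  void ReconsiderDelayedChunks() {
    std::list<MemoryChunk*> delayed;
    {
      std::lock_guard<std::mutex> guard(mutex_);
      delayed = std::move(delayed_regular_chunks_);
    }
    for (MemoryChunk* chunk : delayed) AddMemoryChunkSafe(chunk);
  }

 private:
  std::mutex mutex_;
  std::vector<MemoryChunk*> chunks_;                // ready to unmap
  std::list<MemoryChunk*> delayed_regular_chunks_;  // retry next cycle
  const bool* sweeping_in_progress_;                // placeholder flag
};

Once the Scavenger guarantees iterability itself (see the Heap::Scavenge() hunk below), the eligibility check can never fail for a queued chunk, so the parked list and the reconsider step collapse away — which is exactly what this commit does.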
@@ -526,7 +526,6 @@ void GCTracer::PrintNVP() const {
           "semi_space_copy_rate=%.1f%% "
           "new_space_allocation_throughput=%.1f "
           "unmapper_chunks=%d "
-          "unmapper_delayed_chunks=%d "
           "context_disposal_rate=%.1f\n",
           duration, spent_in_mutator, current_.TypeName(true),
           current_.reduce_memory, current_.scopes[Scope::HEAP_PROLOGUE],
@@ -560,7 +559,6 @@ void GCTracer::PrintNVP() const {
           heap_->semi_space_copied_rate_,
           NewSpaceAllocationThroughputInBytesPerMillisecond(),
           heap_->memory_allocator()->unmapper()->NumberOfChunks(),
-          heap_->memory_allocator()->unmapper()->NumberOfDelayedChunks(),
           ContextDisposalRateInMilliseconds());
       break;
     case Event::MINOR_MARK_COMPACTOR:
@@ -707,7 +705,6 @@ void GCTracer::PrintNVP() const {
           "semi_space_copy_rate=%.1f%% "
           "new_space_allocation_throughput=%.1f "
           "unmapper_chunks=%d "
-          "unmapper_delayed_chunks=%d "
           "context_disposal_rate=%.1f "
           "compaction_speed=%.f\n",
           duration, spent_in_mutator, current_.TypeName(true),
@@ -791,7 +788,6 @@ void GCTracer::PrintNVP() const {
           heap_->semi_space_copied_rate_,
           NewSpaceAllocationThroughputInBytesPerMillisecond(),
           heap_->memory_allocator()->unmapper()->NumberOfChunks(),
-          heap_->memory_allocator()->unmapper()->NumberOfDelayedChunks(),
           ContextDisposalRateInMilliseconds(),
           CompactionSpeedInBytesPerMillisecond());
       break;
@@ -1964,11 +1964,6 @@ void Heap::Scavenge() {
   IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
       incremental_marking());
 
-  if (mark_compact_collector()->sweeper()->sweeping_in_progress() &&
-      memory_allocator_->unmapper()->NumberOfDelayedChunks() >
-          static_cast<int>(new_space_->MaximumCapacity() / Page::kPageSize)) {
-    mark_compact_collector()->EnsureSweepingCompleted();
-  }
 
   mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
 
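Note on the deleted guard above: it was a back-pressure heuristic that forced sweeping to finish before a scavenge once the number of delayed chunks exceeded the maximum new-space size measured in pages, i.e. new_space_->MaximumCapacity() / Page::kPageSize. As a worked example — assuming a 512 KB Page::kPageSize and a 16 MB maximum new space, both of which vary with build configuration and flags, so treat the numbers as illustrative — the trigger point would be 16 MB / 512 KB = 32 delayed chunks. With the explicit EnsureIterabilityCompleted() call, no chunks are ever delayed and the heuristic has nothing left to measure.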
@@ -625,9 +625,6 @@ void MarkCompactCollector::EnsureSweepingCompleted() {
     verifier.Run();
   }
 #endif
-
-  if (heap()->memory_allocator()->unmapper()->has_delayed_chunks())
-    heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
 }
 
 void MarkCompactCollector::ComputeEvacuationHeuristics(
@@ -326,7 +326,6 @@ class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {
 };
 
 void MemoryAllocator::Unmapper::FreeQueuedChunks() {
-  ReconsiderDelayedChunks();
   if (heap_->use_tasks() && FLAG_concurrent_sweeping) {
     if (concurrent_unmapping_tasks_active_ >= kMaxUnmapperTasks) {
       // kMaxUnmapperTasks are already running. Avoid creating any more.
@@ -377,23 +376,12 @@ void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
 
 void MemoryAllocator::Unmapper::TearDown() {
   CHECK_EQ(0, concurrent_unmapping_tasks_active_);
-  ReconsiderDelayedChunks();
-  CHECK(delayed_regular_chunks_.empty());
   PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
   for (int i = 0; i < kNumberOfChunkQueues; i++) {
     DCHECK(chunks_[i].empty());
   }
 }
-
-void MemoryAllocator::Unmapper::ReconsiderDelayedChunks() {
-  std::list<MemoryChunk*> delayed_chunks(std::move(delayed_regular_chunks_));
-  // Move constructed, so the permanent list should be empty.
-  DCHECK(delayed_regular_chunks_.empty());
-  for (auto it = delayed_chunks.begin(); it != delayed_chunks.end(); ++it) {
-    AddMemoryChunkSafe<kRegular>(*it);
-  }
-}
 
 int MemoryAllocator::Unmapper::NumberOfChunks() {
   base::LockGuard<base::Mutex> guard(&mutex_);
   size_t result = 0;
@@ -403,16 +391,6 @@ int MemoryAllocator::Unmapper::NumberOfChunks() {
   return static_cast<int>(result);
 }
 
-bool MemoryAllocator::CanFreeMemoryChunk(MemoryChunk* chunk) {
-  MarkCompactCollector* mc = isolate_->heap()->mark_compact_collector();
-  // We cannot free a memory chunk in new space while the sweeper is running
-  // because the memory chunk can be in the queue of a sweeper task.
-  // Chunks in old generation are unmapped if they are empty.
-  DCHECK(chunk->InNewSpace() || chunk->SweepingDone());
-  return !chunk->InNewSpace() || mc == nullptr ||
-         !mc->sweeper()->sweeping_in_progress();
-}
-
 bool MemoryAllocator::CommitMemory(Address base, size_t size,
                                    Executability executable) {
   if (!base::OS::SetPermissions(base, size,
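The comment in the deleted CanFreeMemoryChunk() explains why the delayed list existed at all: a new-space chunk could still sit in a sweeper task's queue, so unmapping it immediately would have been a use-after-free. After this change that hazard is ruled out up front — Heap::Scavenge() calls EnsureIterabilityCompleted() before chunks can reach the unmapper — so every chunk handed to AddMemoryChunkSafe() is immediately freeable and the predicate is no longer needed.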
@@ -1225,14 +1225,6 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
     void FreeQueuedChunks();
     void WaitUntilCompleted();
     void TearDown();
 
-    bool has_delayed_chunks() { return delayed_regular_chunks_.size() > 0; }
-
-    int NumberOfDelayedChunks() {
-      base::LockGuard<base::Mutex> guard(&mutex_);
-      return static_cast<int>(delayed_regular_chunks_.size());
-    }
-
     int NumberOfChunks();
 
    private:
@@ -1255,12 +1247,7 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
     template <ChunkQueueType type>
     void AddMemoryChunkSafe(MemoryChunk* chunk) {
       base::LockGuard<base::Mutex> guard(&mutex_);
-      if (type != kRegular || allocator_->CanFreeMemoryChunk(chunk)) {
-        chunks_[type].push_back(chunk);
-      } else {
-        DCHECK_EQ(type, kRegular);
-        delayed_regular_chunks_.push_back(chunk);
-      }
+      chunks_[type].push_back(chunk);
     }
 
     template <ChunkQueueType type>
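Design note: AddMemoryChunkSafe() now enqueues unconditionally, which also removes the only caller of allocator_->CanFreeMemoryChunk(); the queue type (kRegular and friends) alone determines how PerformFreeMemoryOnQueuedChunks() treats a chunk.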
@@ -1272,7 +1259,6 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
       return chunk;
     }
 
-    void ReconsiderDelayedChunks();
     template <FreeMode mode>
     void PerformFreeMemoryOnQueuedChunks();
 
@@ -1280,10 +1266,6 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
     MemoryAllocator* const allocator_;
     base::Mutex mutex_;
     std::vector<MemoryChunk*> chunks_[kNumberOfChunkQueues];
-    // Delayed chunks cannot be processed in the current unmapping cycle
-    // because of dependencies such as an active sweeper.
-    // See MemoryAllocator::CanFreeMemoryChunk.
-    std::list<MemoryChunk*> delayed_regular_chunks_;
     CancelableTaskManager::Id task_ids_[kMaxUnmapperTasks];
     base::Semaphore pending_unmapping_tasks_semaphore_;
     intptr_t concurrent_unmapping_tasks_active_;
@@ -1344,8 +1326,6 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
   template <MemoryAllocator::FreeMode mode = kFull>
   void Free(MemoryChunk* chunk);
 
-  bool CanFreeMemoryChunk(MemoryChunk* chunk);
-
   // Returns allocated spaces in bytes.
   size_t Size() { return size_.Value(); }
|
Loading…
Reference in New Issue
Block a user